aboutsummaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/ata/Kconfig37
-rw-r--r--drivers/ata/Makefile3
-rw-r--r--drivers/ata/sata_dwc.c2316
-rw-r--r--drivers/ata/sata_dwc_ncq.c2933
-rwxr-xr-xdrivers/ata/sata_dwc_pmp.c3053
-rw-r--r--drivers/char/hw_random/Kconfig12
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/trng4xx.c336
-rw-r--r--drivers/crypto/Kconfig52
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/amcc/crypto4xx_alg.c3846
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c2899
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.h365
-rw-r--r--drivers/crypto/amcc/crypto4xx_reg_def.h14
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.c114
-rw-r--r--drivers/crypto/amcc/crypto4xx_sa.h530
-rw-r--r--drivers/crypto/pka_4xx.c1333
-rw-r--r--drivers/crypto/pka_4xx_access.c201
-rw-r--r--drivers/crypto/pka_4xx_access.h86
-rw-r--r--drivers/crypto/pka_4xx_firmware.h515
-rw-r--r--drivers/dma/Kconfig40
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/apm82181-adma.c2433
-rw-r--r--drivers/dma/dmaengine.c11
-rw-r--r--drivers/dma/ppc460ex-adma.c5409
-rw-r--r--drivers/dma/ppc460ex-plbadma.c2009
-rwxr-xr-xdrivers/dma/ppc460ex_4chan_dma.c1103
-rwxr-xr-xdrivers/dma/ppc460ex_4chan_dma.h531
-rwxr-xr-xdrivers/dma/ppc460ex_4chan_sgdma.c1003
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/a3g_button.c643
-rw-r--r--drivers/leds/Makefile4
-rw-r--r--drivers/leds/led-class-3g.c281
-rw-r--r--drivers/leds/led-class.c24
-rw-r--r--drivers/leds/leds-apollo3g.c365
-rw-r--r--drivers/leds/leds-gpio.c4
-rw-r--r--drivers/leds/leds.h19
-rw-r--r--drivers/md/Kconfig13
-rw-r--r--drivers/md/Makefile4
-rw-r--r--drivers/md/md.c111
-rw-r--r--drivers/md/md.h14
-rw-r--r--drivers/md/multipath.c2
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/md/raid5.c179
-rw-r--r--drivers/md/raid5.h8
-rw-r--r--drivers/message/fusion/mptbase.c12
-rw-r--r--drivers/message/fusion/mptbase.h4
-rw-r--r--[-rwxr-xr-x]drivers/mtd/chips/cfi_util.c0
-rw-r--r--[-rwxr-xr-x]drivers/mtd/inftlcore.c0
-rw-r--r--drivers/mtd/maps/physmap_of.c9
-rw-r--r--drivers/mtd/nand/Kconfig7
-rw-r--r--drivers/mtd/nand/ndfc.c7
-rw-r--r--drivers/net/ibm_newemac/Kconfig53
-rw-r--r--drivers/net/ibm_newemac/core.c761
-rw-r--r--drivers/net/ibm_newemac/core.h22
-rw-r--r--drivers/net/ibm_newemac/emac.h21
-rw-r--r--drivers/net/ibm_newemac/mal.c487
-rw-r--r--drivers/net/ibm_newemac/mal.h108
-rw-r--r--drivers/net/ibm_newemac/phy.c188
-rw-r--r--drivers/net/ibm_newemac/rgmii.c2
-rw-r--r--drivers/net/ibm_newemac/tah.c4
-rw-r--r--drivers/net/ibm_newemac/zmii.c2
-rw-r--r--drivers/pci/pcie/Kconfig12
-rw-r--r--drivers/rtc/Kconfig7
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/ibm_rtc.c479
-rwxr-xr-xdrivers/scp/Kconfig12
-rwxr-xr-xdrivers/scp/Makefile5
-rwxr-xr-xdrivers/scp/scp-dev.c654
-rwxr-xr-xdrivers/scp/scp-dev.h196
-rwxr-xr-xdrivers/scp/spi_eeprom.c350
-rw-r--r--drivers/serial/8250.c4
-rw-r--r--drivers/test/Kconfig20
-rw-r--r--drivers/test/Makefile6
-rw-r--r--drivers/test/l2cache/Makefile6
-rw-r--r--drivers/test/l2cache/ppc4xx_l2cache_test_module.c389
-rw-r--r--drivers/usb/gadget/Kconfig103
-rw-r--r--drivers/usb/gadget/Makefile1
-rw-r--r--drivers/usb/gadget/dwc_otg/Makefile12
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_attr.c785
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_attr.h67
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_cil.c3237
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_cil.h991
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_cil_intr.c701
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_driver.c1345
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_driver.h82
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c2900
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h660
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c1746
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c696
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c1408
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_pcd.h209
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_pcd_intr.c2519
-rw-r--r--drivers/usb/gadget/dwc_otg/dwc_otg_regs.h3606
-rw-r--r--drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h304
-rw-r--r--drivers/usb/gadget/dwc_otg/ppc4xx_dma.c735
-rw-r--r--drivers/usb/gadget/dwc_otg/ppc4xx_dma.h620
-rw-r--r--drivers/usb/gadget/gadget_chips.h8
-rw-r--r--drivers/usb/gadget/u_ether.c27
-rw-r--r--drivers/usb/host/Kconfig2
103 files changed, 59242 insertions, 224 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 48bbdbe43e6..2db8a713a24 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -54,6 +54,8 @@ source "drivers/spi/Kconfig"
source "drivers/pps/Kconfig"
+source "drivers/scp/Kconfig"
+
source "drivers/gpio/Kconfig"
source "drivers/w1/Kconfig"
@@ -113,4 +115,6 @@ source "drivers/xen/Kconfig"
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
+
+source "drivers/test/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 6ee53c7a57a..1c20084726b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -53,6 +53,7 @@ obj-y += ieee1394/
obj-$(CONFIG_UIO) += uio/
obj-y += cdrom/
obj-y += auxdisplay/
+obj-$(CONFIG_SCP) += scp/
obj-$(CONFIG_PCCARD) += pcmcia/
obj-$(CONFIG_DIO) += dio/
obj-$(CONFIG_SBUS) += sbus/
@@ -111,3 +112,4 @@ obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
obj-y += ieee802154/
+obj-$(CONFIG_PPC4xx_TEST) += test/
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index f2df6e2a224..1a3fcdffa00 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -103,6 +103,43 @@ config ATA_SFF
if ATA_SFF
+config SATA_DWC
+ tristate "DesignWare Cores SATA support"
+ depends on 460EX || APM82181
+ help
+ This option enables support for the Synopsys DesignWare Cores SATA
+ controller.
+ It can be found on the AMCC 460EX.
+ If unsure, say N.
+
+config SATA_DWC_PMP
+ tristate "DesignWare Cores SATA with PMP support"
+ depends on 460EX || APM82181
+ help
+ This option enables support for the Synopsys DesignWare Cores SATA
+ controller.
+ It can be found on the AMCC 460EX.
+ If unsure, say N.
+
+config SATA_DWC_NCQ
+ tristate "DesignWare Cores SATA with PMP and NCQ support"
+ depends on 460EX || APM82181
+ help
+ This option enables support for the Synopsys DesignWare Cores SATA
+ controller.
+ It can be found on the AMCC 460EX.
+ If unsure, say N.
+
+config SATA_DWC_DEBUG
+ bool "Enable debug output"
+ depends on SATA_DWC || SATA_DWC_PMP || SATA_DWC_NCQ
+ default n
+
+config SATA_DWC_VDEBUG
+ bool "Enable verbose debug output"
+ depends on ( SATA_DWC || SATA_DWC_PMP || SATA_DWC_NCQ ) && SATA_DWC_DEBUG
+ default n
+
config SATA_SVW
tristate "ServerWorks Frodo / Apple K2 SATA support"
depends on PCI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 01e126f343b..576dd780794 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -18,6 +18,9 @@ obj-$(CONFIG_SATA_MV) += sata_mv.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
obj-$(CONFIG_SATA_FSL) += sata_fsl.o
+obj-$(CONFIG_SATA_DWC) += sata_dwc.o
+obj-$(CONFIG_SATA_DWC_PMP) += sata_dwc_pmp.o
+obj-$(CONFIG_SATA_DWC_NCQ) += sata_dwc_ncq.o
obj-$(CONFIG_PATA_ALI) += pata_ali.o
obj-$(CONFIG_PATA_AMD) += pata_amd.o
diff --git a/drivers/ata/sata_dwc.c b/drivers/ata/sata_dwc.c
new file mode 100644
index 00000000000..8e5405c71a4
--- /dev/null
+++ b/drivers/ata/sata_dwc.c
@@ -0,0 +1,2316 @@
+/*
+ * drivers/ata/sata_dwc.c
+ *
+ * Synopsys DesignWare Cores (DWC) SATA host driver
+ *
+ * Author: Mark Miesfeld <mmiesfeld@amcc.com>
+ *
+ * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
+ * Copyright 2008 DENX Software Engineering
+ *
+ * Based on versions provided by AMCC and Synopsys which are:
+ * Copyright 2006 Applied Micro Circuits Corporation
+ * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#ifdef CONFIG_SATA_DWC_DEBUG
+#define DEBUG
+#endif
+
+#ifdef CONFIG_SATA_DWC_VDEBUG
+#define VERBOSE_DEBUG
+#define DEBUG_NCQ
+#endif
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/libata.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+
+#define DRV_NAME "sata-dwc"
+#define DRV_VERSION "1.0"
+
+/* SATA DMA driver Globals */
+#if defined(CONFIG_APM82181)
+#define DMA_NUM_CHANS 2
+#else
+#define DMA_NUM_CHANS 1
+#endif
+
+#define DMA_NUM_CHAN_REGS 8
+
+/* SATA DMA Register definitions */
+#if defined(CONFIG_APM82181)
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
+#else
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
+#endif
+
+struct dmareg {
+ u32 low; /* Low bits 0-31 */
+ u32 high; /* High bits 32-63 */
+};
+
+/* DMA Per Channel registers */
+
+struct dma_chan_regs {
+ struct dmareg sar; /* Source Address */
+ struct dmareg dar; /* Destination address */
+ struct dmareg llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+ struct dmareg sstat; /* Source Status not implemented in core */
+ struct dmareg dstat; /* Destination Status not implemented in core */
+ struct dmareg sstatar; /* Source Status Address not impl in core */
+ struct dmareg dstatar; /* Destination Status Address not implemented */
+ struct dmareg cfg; /* Config */
+ struct dmareg sgr; /* Source Gather */
+ struct dmareg dsr; /* Destination Scatter */
+};
+
+/* Generic Interrupt Registers */
+struct dma_interrupt_regs {
+ struct dmareg tfr; /* Transfer Interrupt */
+ struct dmareg block; /* Block Interrupt */
+ struct dmareg srctran; /* Source Transfer Interrupt */
+ struct dmareg dsttran; /* Dest Transfer Interrupt */
+ struct dmareg error; /* Error */
+};
+
+struct ahb_dma_regs {
+ struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
+ struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
+ struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
+ struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
+ struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
+ struct dmareg statusInt; /* Interrupt combined */
+ struct dmareg rq_srcreg; /* Src Trans Req */
+ struct dmareg rq_dstreg; /* Dst Trans Req */
+ struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req */
+ struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req */
+ struct dmareg rq_lst_srcreg; /* Last Src Trans Req */
+ struct dmareg rq_lst_dstreg; /* Last Dst Trans Req */
+ struct dmareg dma_cfg; /* DMA Config */
+ struct dmareg dma_chan_en; /* DMA Channel Enable */
+ struct dmareg dma_id; /* DMA ID */
+ struct dmareg dma_test; /* DMA Test */
+ struct dmareg res1; /* reserved */
+ struct dmareg res2; /* reserved */
+
+ /* DMA Comp Params
+ * Param 6 = dma_param[0], Param 5 = dma_param[1],
+ * Param 4 = dma_param[2] ...
+ */
+ struct dmareg dma_params[6];
+};
+
+/* Data structure for linked list item */
+struct lli {
+ u32 sar; /* Source Address */
+ u32 dar; /* Destination address */
+ u32 llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+#if defined(CONFIG_APM82181)
+ u32 dstat; /* Source status is not supported */
+#else
+ struct dmareg dstat; /* Destination Status */
+#endif
+};
+
+#define SATA_DWC_DMAC_LLI_SZ (sizeof(struct lli))
+#define SATA_DWC_DMAC_LLI_NUM 256
+#define SATA_DWC_DMAC_TWIDTH_BYTES 4
+#define SATA_DWC_DMAC_LLI_TBL_SZ \
+ (SATA_DWC_DMAC_LLI_SZ * SATA_DWC_DMAC_LLI_NUM)
+#if defined(CONFIG_APM82181)
+#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
+ (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)
+#else
+#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
+ (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)
+#endif
+/* DMA Register Operation Bits */
+#define DMA_EN 0x00000001 /* Enable AHB DMA */
+#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
+#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
+ ((0x000000001 << (ch)) << 8))
+#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
+
+/* Channel Control Register */
+#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
+#define DMA_CTL_LLP_SRCEN 0x10000000 /* Blk chain enable Src */
+#define DMA_CTL_LLP_DSTEN 0x08000000 /* Blk chain enable Dst */
+/*
+ * This define is used to set block chaining disabled in the control low
+ * register. It is already in little endian format so it can be &'d directly.
+ * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
+ */
+#define DMA_CTL_LLP_DISABLE_LE32 0xffffffe7
+#define DMA_CTL_SMS(num) ((num & 0x3) << 25) /*Src Master Select*/
+#define DMA_CTL_DMS(num) ((num & 0x3) << 23) /*Dst Master Select*/
+#define DMA_CTL_TTFC(type) ((type & 0x7) << 20) /*Type&Flow cntr*/
+#define DMA_CTL_TTFC_P2M_DMAC 0x00000002 /*Per mem,DMAC cntr*/
+#define DMA_CTL_TTFC_M2P_PER 0x00000003 /*Mem per,peri cntr*/
+#define DMA_CTL_SRC_MSIZE(size) ((size & 0x7) << 14) /*Src Burst Len*/
+#define DMA_CTL_DST_MSIZE(size) ((size & 0x7) << 11) /*Dst Burst Len*/
+#define DMA_CTL_SINC_INC 0x00000000 /*Src addr incr*/
+#define DMA_CTL_SINC_DEC 0x00000200
+#define DMA_CTL_SINC_NOCHANGE 0x00000400
+#define DMA_CTL_DINC_INC 0x00000000 /*Dst addr incr*/
+#define DMA_CTL_DINC_DEC 0x00000080
+#define DMA_CTL_DINC_NOCHANGE 0x00000100
+#define DMA_CTL_SRC_TRWID(size) ((size & 0x7) << 4) /*Src Trnsfr Width*/
+#define DMA_CTL_DST_TRWID(size) ((size & 0x7) << 1) /*Dst Trnsfr Width*/
+#define DMA_CTL_INT_EN 0x00000001 /*Interrupt Enable*/
+
+/* Channel Configuration Register high bits */
+#define DMA_CFG_FCMOD_REQ 0x00000001 /*Flow cntrl req*/
+#define DMA_CFG_PROTCTL (0x00000003 << 2) /*Protection cntrl*/
+
+/* Channel Configuration Register low bits */
+#define DMA_CFG_RELD_DST 0x80000000 /*Reload Dst/Src Addr*/
+#define DMA_CFG_RELD_SRC 0x40000000
+#define DMA_CFG_HS_SELSRC 0x00000800 /*SW hndshk Src/Dst*/
+#define DMA_CFG_HS_SELDST 0x00000400
+#define DMA_CFG_FIFOEMPTY (0x00000001 << 9) /*FIFO Empty bit*/
+
+/* Assign hardware handshaking interface (x) to dst / src peripheral */
+#define DMA_CFG_HW_HS_DEST(int_num) ((int_num & 0xF) << 11)
+#define DMA_CFG_HW_HS_SRC(int_num) ((int_num & 0xF) << 7)
+
+/* Channel Linked List Pointer Register */
+#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
+#define DMA_LLP_AHBMASTER1 0 /* List Master Select */
+#define DMA_LLP_AHBMASTER2 1
+
+#define SATA_DWC_MAX_PORTS 1
+
+#define SATA_DWC_SCR_OFFSET 0x24
+#define SATA_DWC_REG_OFFSET 0x64
+
+/* DWC SATA Registers */
+struct sata_dwc_regs {
+ u32 fptagr; /* 1st party DMA tag */
+ u32 fpbor; /* 1st party DMA buffer offset */
+ u32 fptcr; /* 1st party DMA Xfr count */
+ u32 dmacr; /* DMA Control */
+ u32 dbtsr; /* DMA Burst Transac size */
+ u32 intpr; /* Interrupt Pending */
+ u32 intmr; /* Interrupt Mask */
+ u32 errmr; /* Error Mask */
+ u32 llcr; /* Link Layer Control */
+ u32 phycr; /* PHY Control */
+ u32 physr; /* PHY Status */
+ u32 rxbistpd; /* Recvd BIST pattern def register */
+ u32 rxbistpd1; /* Recvd BIST data dword1 */
+ u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
+ u32 txbistpd; /* Trans BIST pattern def register */
+ u32 txbistpd1; /* Trans BIST data dword1 */
+ u32 txbistpd2; /* Trans BIST data dword2 */
+ u32 bistcr; /* BIST Control Register */
+ u32 bistfctr; /* BIST FIS Count Register */
+ u32 bistsr; /* BIST Status Register */
+ u32 bistdecr; /* BIST Dword Error count register */
+ u32 res[15]; /* Reserved locations */
+ u32 testr; /* Test Register */
+ u32 versionr; /* Version Register */
+ u32 idr; /* ID Register */
+ u32 unimpl[192]; /* Unimplemented */
+ u32 dmadr[256]; /* FIFO Locations in DMA Mode */
+};
+
+#define SCR_SCONTROL_DET_ENABLE 0x00000001
+#define SCR_SSTATUS_DET_PRESENT 0x00000001
+#define SCR_SERROR_DIAG_X 0x04000000
+
+/* DWC SATA Register Operations */
+#define SATA_DWC_TXFIFO_DEPTH 0x01FF
+#define SATA_DWC_RXFIFO_DEPTH 0x01FF
+
+#define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004
+#define SATA_DWC_DMACR_TXCHEN (0x00000001 | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RXCHEN (0x00000002 | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN
+
+#define SATA_DWC_DBTSR_MWR(size) ((size/4) & \
+ SATA_DWC_TXFIFO_DEPTH)
+#define SATA_DWC_DBTSR_MRD(size) (((size/4) & \
+ SATA_DWC_RXFIFO_DEPTH) << 16)
+#define SATA_DWC_INTPR_DMAT 0x00000001
+#define SATA_DWC_INTPR_NEWFP 0x00000002
+#define SATA_DWC_INTPR_PMABRT 0x00000004
+#define SATA_DWC_INTPR_ERR 0x00000008
+#define SATA_DWC_INTPR_NEWBIST 0x00000010
+#define SATA_DWC_INTPR_IPF 0x10000000
+#define SATA_DWC_INTMR_DMATM 0x00000001
+#define SATA_DWC_INTMR_NEWFPM 0x00000002
+#define SATA_DWC_INTMR_PMABRTM 0x00000004
+#define SATA_DWC_INTMR_ERRM 0x00000008
+#define SATA_DWC_INTMR_NEWBISTM 0x00000010
+#define SATA_DWC_LLCR_SCRAMEN 0x00000001
+#define SATA_DWC_LLCR_DESCRAMEN 0x00000002
+#define SATA_DWC_LLCR_RPDEN 0x00000004
+
+/* This is all error bits, zero's are reserved fields. */
+#define SATA_DWC_SERROR_ERR_BITS 0x0FFF0F03
+
+#define SATA_DWC_SCR0_SPD_GET(v) ((v >> 4) & 0x0000000F)
+
+struct sata_dwc_device {
+ struct resource reg; /* Resource for register */
+ struct device *dev; /* generic device struct */
+ struct ata_probe_ent *pe; /* ptr to probe-ent */
+ struct ata_host *host;
+ u8 *reg_base;
+ struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
+ u8 *scr_base;
+ int dma_channel; /* DWC SATA DMA channel */
+ int irq_dma;
+};
+
+#define SATA_DWC_QCMD_MAX 32
+
+struct sata_dwc_device_port {
+ struct sata_dwc_device *hsdev;
+ int cmd_issued[SATA_DWC_QCMD_MAX];
+ struct lli *llit[SATA_DWC_QCMD_MAX];
+ dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
+ u32 dma_chan[SATA_DWC_QCMD_MAX];
+ int dma_pending[SATA_DWC_QCMD_MAX];
+ u32 sata_dwc_sactive_issued; /* issued queued ops */
+ u32 sata_dwc_sactive_queued; /* queued ops */
+ u32 dma_interrupt_count;
+
+};
+
+static struct sata_dwc_device* dwc_dev_list[2];
+static int dma_intr_registered = 0;
+
+/*
+ * Commonly used DWC SATA driver Macros
+ */
+#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *) \
+ (host)->private_data)
+#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *) \
+ (ap)->host->private_data)
+#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *) \
+ (ap)->private_data)
+#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *) \
+ (qc)->ap->host->private_data)
+#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *) \
+ (hsdevp)->hsdev)
+
+enum {
+ SATA_DWC_CMD_ISSUED_NOT = 0,
+ SATA_DWC_CMD_ISSUED_PENDING = 1,
+ SATA_DWC_CMD_ISSUED_EXEC = 2,
+ SATA_DWC_CMD_ISSUED_NODATA = 3,
+
+ SATA_DWC_DMA_PENDING_NONE = 0,
+ SATA_DWC_DMA_PENDING_TX = 1,
+ SATA_DWC_DMA_PENDING_RX = 2,
+};
+
+/*
+ * Globals
+ */
+static struct ahb_dma_regs *sata_dma_regs = 0;
+/*
+* Duc: Moving to device port field to make it private to each port
+*/
+//static u32 sata_dwc_sactive_issued = 0; /* issued queued ops */
+//static u32 sata_dwc_sactive_queued = 0; /* queued ops */
+//static u32 dma_interrupt_count;
+
+/*
+ * Prototypes
+ */
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 check_status);
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
+static void sata_dwc_port_stop(struct ata_port *ap);
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
+
+static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq);
+static void dma_dwc_exit(struct sata_dwc_device *hsdev);
+static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
+ struct lli *lli, dma_addr_t dma_lli,
+ void __iomem *addr);
+static void dma_dwc_xfer_start(int dma_ch);
+static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch);
+
+static const char *dir_2_txt(enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ return "bi";
+ case DMA_FROM_DEVICE:
+ return "from";
+ case DMA_TO_DEVICE:
+ return "to";
+ case DMA_NONE:
+ return "none";
+ default:
+ return "err";
+ }
+}
+
+static const char *prot_2_txt(enum ata_tf_protocols protocol)
+{
+ switch (protocol) {
+ case ATA_PROT_UNKNOWN:
+ return "unknown";
+ case ATA_PROT_NODATA:
+ return "nodata";
+ case ATA_PROT_PIO:
+ return "pio";
+ case ATA_PROT_DMA:
+ return "dma";
+ case ATA_PROT_NCQ:
+ return "ncq";
+ case ATAPI_PROT_PIO:
+ return "atapi pio";
+ case ATAPI_PROT_NODATA:
+ return "atapi nodata";
+ case ATAPI_PROT_DMA:
+ return "atapi dma";
+ default:
+ return "err";
+ }
+}
+
+inline const char *ata_cmd_2_txt(const struct ata_taskfile *tf)
+{
+ switch (tf->command) {
+ case ATA_CMD_CHK_POWER:
+ return "ATA_CMD_CHK_POWER";
+ case ATA_CMD_EDD:
+ return "ATA_CMD_EDD";
+ case ATA_CMD_FLUSH:
+ return "ATA_CMD_FLUSH";
+ case ATA_CMD_FLUSH_EXT:
+ return "ATA_CMD_FLUSH_EXT";
+ case ATA_CMD_ID_ATA:
+ return "ATA_CMD_ID_ATA";
+ case ATA_CMD_ID_ATAPI:
+ return "ATA_CMD_ID_ATAPI";
+ case ATA_CMD_FPDMA_READ:
+ return "ATA_CMD_FPDMA_READ";
+ case ATA_CMD_FPDMA_WRITE:
+ return "ATA_CMD_FPDMA_WRITE";
+ case ATA_CMD_READ:
+ return "ATA_CMD_READ";
+ case ATA_CMD_READ_EXT:
+ return "ATA_CMD_READ_EXT";
+ case ATA_CMD_WRITE:
+ return "ATA_CMD_WRITE";
+ case ATA_CMD_WRITE_EXT:
+ return "ATA_CMD_WRITE_EXT";
+ case ATA_CMD_PIO_READ:
+ return "ATA_CMD_PIO_READ";
+ case ATA_CMD_PIO_READ_EXT:
+ return "ATA_CMD_PIO_READ_EXT";
+ case ATA_CMD_PIO_WRITE:
+ return "ATA_CMD_PIO_WRITE";
+ case ATA_CMD_PIO_WRITE_EXT:
+ return "ATA_CMD_PIO_WRITE_EXT";
+ case ATA_CMD_SET_FEATURES:
+ return "ATA_CMD_SET_FEATURES";
+ case ATA_CMD_PACKET:
+ return "ATA_CMD_PACKET";
+ default:
+ return "ATA_CMD_???";
+ }
+}
+
+static void sata_dwc_tf_dump(struct device *dwc_dev, struct ata_taskfile *tf)
+{
+ dev_vdbg(dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx"
+ "device: %x\n", tf->command, prot_2_txt(tf->protocol),
+ tf->flags, tf->device);
+ dev_vdbg(dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam:"
+ "0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
+ tf->lbam, tf->lbah);
+ dev_vdbg(dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x "
+ "hob_lbam: 0x%x hob_lbah: 0x%x\n", tf->hob_feature,
+ tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
+ tf->hob_lbah);
+}
+
+/*
+ * Function: get_burst_length_encode
+ * arguments: datalength: length in bytes of data
+ * returns value to be programmed in register corresponding to data length
+ * This value is effectively the log(base 2) of the length
+ */
+static inline int get_burst_length_encode(int datalength)
+{
+ int items = datalength >> 2; /* div by 4 to get lword count */
+
+ if (items >= 64)
+ return 5;
+
+ if (items >= 32)
+ return 4;
+
+ if (items >= 16)
+ return 3;
+
+ if (items >= 8)
+ return 2;
+
+ if (items >= 4)
+ return 1;
+
+ return 0;
+}
+
+static inline void clear_chan_interrupts(int c)
+{
+ out_le32(&(sata_dma_regs->interrupt_clear.tfr.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.block.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.srctran.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.dsttran.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.error.low), DMA_CHANNEL(c));
+}
+
+/*
+ * Function: dma_request_channel
+ * arguments: None
+ * returns channel number if available else -1
+ * This function assigns the next available DMA channel from the list to the
+ * requester
+ */
+static int dma_request_channel(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ if (!(in_le32(&(sata_dma_regs->dma_chan_en.low)) & DMA_CHANNEL(hsdev->dma_channel))) {
+ #if defined(VERBOSE_DEBUG)
+ dev_notice(ap->dev, "%s Successfully requested DMA channel %d\n",
+ __func__, hsdev->dma_channel);
+ #endif
+ return (hsdev->dma_channel);
+ }
+
+ return -1;
+}
+
+
+
+/*
+ * Function: dma_dwc_interrupt
+ * arguments: irq, dev_id, pt_regs
+ * returns channel number if available else -1
+ * Interrupt Handler for DW AHB SATA DMA
+ */
+static int dma_dwc_interrupt(int irq, void *hsdev_instance)
+{
+ int chan;
+ volatile u32 tfr_reg, err_reg;
+ unsigned long flags;
+ struct sata_dwc_device *hsdev =
+ (struct sata_dwc_device *)hsdev_instance;
+ struct ata_host *host = (struct ata_host *)hsdev->host;
+ struct ata_port *ap;
+ struct sata_dwc_device_port *hsdevp;
+ u8 tag = 0;
+ unsigned int port = 0;
+ spin_lock_irqsave(&host->lock, flags);
+
+
+ ap = host->ports[port];
+ hsdevp = HSDEVP_FROM_AP(ap);
+ tag = ap->link.active_tag;
+#if defined(VERBOSE_DEBUG)
+ dev_notice(ap->dev, "%s: DMA interrupt in channel %d\n", __func__, hsdevp->hsdev->dma_channel);
+#endif
+ tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low));
+ err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));
+
+ dev_dbg(ap->dev, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
+ tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
+#if 0
+ for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
+ /* Check for end-of-transfer interrupt. */
+
+ /* Do nothing if this interrupt does not happen in hsdev DMA channel */
+ if (hsdev->dma_channel != chan) {
+ continue;
+ }
+#endif
+ chan = hsdev->dma_channel;
+
+ if (tfr_reg & DMA_CHANNEL(chan)) {
+ /*
+ * Each DMA command produces 2 interrupts. Only
+ * complete the command after both interrupts have been
+ * seen. (See sata_dwc_isr())
+ */
+ hsdevp->dma_interrupt_count++;
+
+ sata_dwc_clear_dmacr(hsdevp, tag);
+
+ if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) {
+ dev_err(ap->dev, "DMA not pending eot=0x%08x "
+ "err=0x%08x tag=0x%02x pending=%d\n",
+ tfr_reg, err_reg, tag,
+ hsdevp->dma_pending[tag]);
+ }
+
+ if ((hsdevp->dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+
+ /* Clear the interrupt */
+ out_le32(&(sata_dma_regs->interrupt_clear.tfr.low),
+ DMA_CHANNEL(chan));
+ }
+
+ /* Check for error interrupt. */
+ /* We do not expect error happens */
+ if (unlikely(err_reg & DMA_CHANNEL(chan))) {
+ /* TODO Need error handler ! */
+ dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
+ err_reg);
+
+ /* Clear the interrupt. */
+ out_le32(&(sata_dma_regs->interrupt_clear.error.low),
+ DMA_CHANNEL(chan));
+ }
+#if 0
+ }
+#endif
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dma_dwc_handler(int irq, void *hsdev_instance)
+{
+ volatile u32 tfr_reg, err_reg;
+ int chan;
+
+ tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low));
+ err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));
+
+ for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
+ /* Check for end-of-transfer interrupt. */
+
+ if (tfr_reg & DMA_CHANNEL(chan)) {
+ dma_dwc_interrupt(0, dwc_dev_list[chan]);
+ }
+
+ /* Check for error interrupt. */
+ if (err_reg & DMA_CHANNEL(chan)) {
+ dma_dwc_interrupt(0, dwc_dev_list[chan]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * Function: dma_request_interrupts
+ * arguments: hsdev
+ * returns status
+ * This function registers ISR for a particular DMA channel interrupt
+ */
+static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
+{
+ int retval = 0;
+ int chan;
+
+ for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
+ /* Unmask error interrupt */
+ out_le32(&sata_dma_regs->interrupt_mask.error.low,
+ in_le32(&sata_dma_regs->interrupt_mask.error.low) | DMA_ENABLE_CHAN(chan));
+
+ /* Unmask end-of-transfer interrupt */
+ out_le32(&sata_dma_regs->interrupt_mask.tfr.low,
+ in_le32(&sata_dma_regs->interrupt_mask.tfr.low) | DMA_ENABLE_CHAN(chan));
+#if 0
+ out_le32(&sata_dma_regs->interrupt_mask.block.low,
+ DMA_ENABLE_CHAN(chan));
+
+ out_le32(&sata_dma_regs->interrupt_mask.srctran.low,
+ DMA_ENABLE_CHAN(chan));
+
+ out_le32(&sata_dma_regs->interrupt_mask.dsttran.low,
+ DMA_ENABLE_CHAN(chan));
+#endif
+ }
+
+ /*
+ * FIXME: 2 SATA controllers share the same DMA engine so
+ * currently, they also share same DMA interrupt
+ */
+ if (!dma_intr_registered) {
+ //retval = request_irq(irq, dma_dwc_interrupt, IRQF_SHARED, "SATA DMA", hsdev);
+ retval = request_irq(irq, dma_dwc_handler, IRQF_DISABLED, "SATA DMA", NULL);
+ if (retval) {
+ dev_err(hsdev->dev, "%s: could not get IRQ %d\n", __func__, irq);
+ return -ENODEV;
+ }
+ dma_intr_registered = 1;
+ }
+
+
+ /* Mark this interrupt as requested */
+ hsdev->irq_dma = irq;
+
+ return 0;
+}
+
+/*
+ * Function: map_sg_to_lli
+ * arguments: sg: scatter/gather list(sg)
+ * num_elems: no of elements in sg list
+ * dma_lli: LLI table
+ * dest: destination address
+ * read: whether the transfer is read or write
+ * returns array of AHB DMA Linked List Items
+ * This function creates a list of LLIs for DMA Xfr and returns the number
+ * of elements in the DMA linked list.
+ *
+ * Note that the Synopsys driver has a comment proposing that better performance
+ * is possible by only enabling interrupts on the last item in the linked list.
+ * However, it seems that could be a problem if an error happened on one of the
+ * first items. The transfer would halt, but no error interrupt would occur.
+ *
+ * Currently this function sets interrupts enabled for each linked list item:
+ * DMA_CTL_INT_EN.
+ */
+static int map_sg_to_lli(struct ata_queued_cmd *qc, struct lli *lli,
+ dma_addr_t dma_lli, void __iomem *dmadr_addr)
+{
+ struct scatterlist *sg = qc->sg;
+ struct device *dwc_dev = qc->ap->dev;
+ int num_elems = qc->n_elem;
+ int dir = qc->dma_dir;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(qc->ap);
+
+ int i, idx = 0;
+ int fis_len = 0;
+ dma_addr_t next_llp;
+ int bl;
+ unsigned int dma_ts = 0;
+
+ dev_dbg(dwc_dev, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x "
+ "dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
+ (u32)dmadr_addr);
+
+ bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
+
+ for (i = 0; i < num_elems; i++, sg++) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ dev_dbg(dwc_dev, "%s: elem=%d sg_addr=0x%x sg_len=%d\n",
+ __func__, i, addr, sg_len);
+
+ while (sg_len) {
+
+ if (unlikely(idx >= SATA_DWC_DMAC_LLI_NUM)) {
+ /* The LLI table is not large enough. */
+ dev_err(dwc_dev, "LLI table overrun (idx=%d)\n",
+ idx);
+ break;
+ }
+ len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
+ SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
+
+ offset = addr & 0xffff;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ /*
+ * Make sure a LLI block is not created that will span a
+ * 8K max FIS boundary. If the block spans such a FIS
+ * boundary, there is a chance that a DMA burst will
+ * cross that boundary -- this results in an error in
+ * the host controller.
+ */
+ if (unlikely(fis_len + len > 8192)) {
+ dev_dbg(dwc_dev, "SPLITTING: fis_len=%d(0x%x) "
+ "len=%d(0x%x)\n", fis_len, fis_len,
+ len, len);
+ len = 8192 - fis_len;
+ fis_len = 0;
+ } else {
+ fis_len += len;
+ }
+ if (fis_len == 8192)
+ fis_len = 0;
+
+ /*
+ * Set DMA addresses and lower half of control register
+ * based on direction.
+ */
+#if defined(VERBOSE_DEBUG)
+ dev_notice(qc->ap->dev, "%s: sg_len = %d, len = %d\n", __func__, sg_len, len);
+#endif
+
+#if defined(CONFIG_APM82181)
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+ if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(1) | /* Source: Master 2 */
+ DMA_CTL_DMS(0) | /* Dest: Master 1 */
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(2) | /* Source: Master 3 */
+ DMA_CTL_DMS(0) | /* Dest: Master 1 */
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+ if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(2) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+ }
+#else
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(1) |
+ DMA_CTL_DMS(0) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+#endif
+ dev_dbg(dwc_dev, "%s setting ctl.high len: 0x%08x val: "
+ "0x%08x\n", __func__, len,
+ DMA_CTL_BLK_TS(len / 4));
+
+ /* Program the LLI CTL high register */
+ dma_ts = DMA_CTL_BLK_TS(len / 4);
+ lli[idx].ctl.high = cpu_to_le32(dma_ts);
+
+ /*
+ *Program the next pointer. The next pointer must be
+ * the physical address, not the virtual address.
+ */
+ next_llp = (dma_lli + ((idx + 1) * sizeof(struct lli)));
+
+ /* The last 2 bits encode the list master select. */
+#if defined(CONFIG_APM82181)
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER1);
+#else
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
+#endif
+
+ lli[idx].llp = cpu_to_le32(next_llp);
+#if defined(VERBOSE_DEBUG)
+ dev_notice(qc->ap->dev, "%s: index %d\n", __func__, idx);
+ dev_notice(qc->ap->dev, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx].ctl.high);
+ dev_notice(qc->ap->dev, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx].ctl.low);
+ dev_notice(qc->ap->dev, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx].dar);
+ dev_notice(qc->ap->dev, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx].sar);
+ dev_notice(qc->ap->dev, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx].llp);
+#endif
+
+ idx++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ /*
+ * The last next ptr has to be zero and the last control low register
+ * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
+ * and destination enable) set back to 0 (disabled.) This is what tells
+ * the core that this is the last item in the linked list.
+ */
+ if (likely(idx)) {
+ lli[idx-1].llp = 0x00000000;
+ lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
+
+ /* Flush cache to memory */
+ dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
+ DMA_BIDIRECTIONAL);
+ }
+
+#if defined(VERBOSE_DEBUG)
+ dev_notice(qc->ap->dev, "%s: Final index %d\n", __func__, idx-1);
+ dev_notice(qc->ap->dev, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx-1].ctl.high);
+ dev_notice(qc->ap->dev, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx-1].ctl.low);
+ dev_notice(qc->ap->dev, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx-1].dar);
+ dev_notice(qc->ap->dev, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx-1].sar);
+ dev_notice(qc->ap->dev, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx-1].llp);
+#endif
+
+ return idx;
+}
+
+/*
+ * dma_dwc_xfer_start - kick off a previously prepared AHB DMA transfer.
+ * @dma_ch: index of the DMA channel to enable.
+ *
+ * Sets the channel's enable bit in the global DMAC channel-enable
+ * register with a read-modify-write; the controller then begins
+ * walking the linked list programmed by dma_dwc_xfer_setup().
+ */
+static void dma_dwc_xfer_start(int dma_ch)
+{
+	u32 chan_en;
+
+	chan_en = in_le32(&(sata_dma_regs->dma_chan_en.low));
+	chan_en |= DMA_ENABLE_CHAN(dma_ch);
+	out_le32(&(sata_dma_regs->dma_chan_en.low), chan_en);
+#if defined(VERBOSE_DEBUG)
+	printk("%s: setting sata_dma_regs->dma_chan_en.low with val: 0x%08x\n",
+		__func__, in_le32(&(sata_dma_regs->dma_chan_en.low)));
+#endif
+}
+
+/* Return 1 when DMA channel @ch is currently enabled in the DMAC, else 0. */
+static int dma_dwc_channel_enabled(int ch)
+{
+	u32 chan_en = in_le32(&(sata_dma_regs->dma_chan_en.low));
+
+	return (chan_en & DMA_CHANNEL(ch)) ? 1 : 0;
+}
+
+/*
+ * dma_dwc_terminate_dma - forcibly stop an in-flight DMA on @dma_ch.
+ *
+ * Writes the channel-disable request to the DMAC channel-enable
+ * register and busy-waits until the controller reports the channel
+ * as disabled.
+ *
+ * NOTE(review): the wait loop is unbounded -- if the DMAC never
+ * deasserts the enable bit this spins forever with no timeout.
+ * Confirm that is acceptable for this hardware.
+ */
+static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch)
+{
+ int enabled = dma_dwc_channel_enabled(dma_ch);
+
+ dev_info(ap->dev, "%s terminate DMA on channel=%d enabled=%d\n",
+ __func__, dma_ch, enabled);
+
+ if (enabled) {
+ /* Request disable, preserving the other channels' enable bits. */
+ out_le32(&(sata_dma_regs->dma_chan_en.low),
+ in_le32(&(sata_dma_regs->dma_chan_en.low)) | DMA_DISABLE_CHAN(dma_ch));
+
+ /* Spin until the hardware acknowledges the disable. */
+ do {
+ enabled = dma_dwc_channel_enabled(dma_ch);
+ } while (enabled);
+ }
+}
+
+/*
+ * dma_dwc_xfer_setup - prepare an AHB DMA transfer for a queued command.
+ * @qc:      libata queued command to be transferred
+ * @lli:     CPU address of the pre-allocated LLI table for this tag
+ * @dma_lli: bus address of the same LLI table
+ * @addr:    address of the SATA core's DMA data register (FIFO)
+ *
+ * Acquires a free AHB DMA channel, converts the command's scatter/
+ * gather list into linked-list items for the DMAC, and programs the
+ * channel's CFG/LLP/CTL registers. The transfer itself is started
+ * later by dma_dwc_xfer_start().
+ *
+ * Returns the channel number on success, -EAGAIN if no channel is free.
+ *
+ * Fix over the original: the dev_dbg() format used the malformed
+ * specifier "0x%0xlx" for the (u32) bus address; it is now 0x%08x.
+ */
+static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
+			      struct lli *lli, dma_addr_t dma_lli,
+			      void __iomem *addr)
+{
+	int dma_ch;
+	int num_lli;
+
+	/* Acquire DMA channel */
+	dma_ch = dma_request_channel(qc->ap);
+	if (unlikely(dma_ch == -1)) {
+		dev_err(qc->ap->dev, "%s: dma channel unavailable\n", __func__);
+		return -EAGAIN;
+	}
+#if defined(VERBOSE_DEBUG)
+	dev_notice(qc->ap->dev, "%s: Got channel %d\n", __func__, dma_ch);
+#endif
+	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
+	/* NOTE(review): num_lli == 0 (LLI table overrun) is not treated
+	 * as an error here -- confirm map_sg_to_lli() cannot return 0
+	 * for a non-empty SG list in practice. */
+	num_lli = map_sg_to_lli(qc, lli, dma_lli, addr);
+
+	dev_dbg(qc->ap->dev, "%s sg: 0x%p, count: %d lli: %p dma_lli: 0x%08x addr:"
+		" %p lli count: %d\n", __func__, qc->sg, qc->n_elem, lli,
+		(u32)dma_lli, addr, num_lli);
+
+	/* Clear channel interrupts */
+	clear_chan_interrupts(dma_ch);
+
+	/* Program the CFG register. */
+#if defined(CONFIG_APM82181)
+	if (dma_ch == 0) {
+		/* Buffer mode enabled, FIFO_MODE = 0 */
+		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x000000d);
+		/* Channel 0 bit [7:5] */
+		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000000);
+	} else if (dma_ch == 1) {
+		/* Buffer mode enabled, FIFO_MODE = 0 */
+		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000088d);
+		/* Channel 1 - bit [7:5] */
+		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020);
+	}
+#else
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high),
+		 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0);
+#endif
+	/* Program the address of the linked list */
+#if defined(CONFIG_APM82181)
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low),
+		 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER1));
+#else
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low),
+		 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
+#endif
+
+	/*
+	 * Program the CTL register with LLP source/destination enable.
+	 * 0x18000000 corresponds to the previous (commented-out)
+	 * DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN write.
+	 */
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low), 0x18000000);
+
+#if defined(VERBOSE_DEBUG)
+	dev_notice(qc->ap->dev, "%s DMA channel %d is ready\n", __func__, dma_ch);
+	dev_notice(qc->ap->dev, "%s setting cfg.high of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high)));
+	dev_notice(qc->ap->dev, "%s setting cfg.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low)));
+	dev_notice(qc->ap->dev, "%s setting llp.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low)));
+	dev_notice(qc->ap->dev, "%s setting ctl.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low)));
+#endif
+
+	return dma_ch;
+}
+
+/*
+ * Function: dma_dwc_exit
+ * arguments: hsdev
+ * returns: none
+ * Tears down the SATA AHB DMA support: unmaps the DMAC register
+ * block and releases the DMA interrupt.
+ *
+ * NOTE(review): sata_dma_regs is not reset to NULL after iounmap(),
+ * so calling this twice would unmap a stale pointer -- verify callers
+ * invoke it only once.
+ */
+static void dma_dwc_exit(struct sata_dwc_device *hsdev)
+{
+ dev_dbg(hsdev->dev, "%s:\n", __func__);
+
+ if (sata_dma_regs)
+ iounmap(sata_dma_regs);
+
+ /* irq_dma is zero when the interrupt was never requested. */
+ if (hsdev->irq_dma)
+ free_irq(hsdev->irq_dma, hsdev);
+}
+
+/*
+ * Function: dma_dwc_init
+ * arguments: hsdev, irq
+ * returns: 0 on success, negative errno otherwise
+ * Initializes the SATA AHB DMA support: hooks up the DMA interrupt
+ * and globally enables the DMA controller. On failure all partially
+ * acquired resources are released via dma_dwc_exit().
+ */
+static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
+{
+ int err;
+
+ err = dma_request_interrupts(hsdev, irq);
+ if (err) {
+ dev_err(hsdev->dev, "%s: dma_request_interrupts returns %d\n",
+ __func__, err);
+ goto error_out;
+ }
+
+ /* Enable DMA globally in the DMAC configuration register. */
+ out_le32(&(sata_dma_regs->dma_cfg.low), DMA_EN);
+
+ dev_notice(hsdev->dev, "DMA initialized\n");
+ dev_notice(hsdev->dev, "DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low)));
+ dev_dbg(hsdev->dev, "SATA DMA registers=0x%p\n", sata_dma_regs);
+
+ return 0;
+
+error_out:
+ dma_dwc_exit(hsdev);
+
+ return err;
+}
+
+/*
+ * sata_dwc_scr_read - libata .scr_read hook.
+ * Reads SCR register @scr of @link into @val; SCR registers are 32
+ * bits wide and spaced 4 bytes apart from scr_addr.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range SCR index.
+ *
+ * Fix over the original: the debug format string printed the garbled
+ * "val=val=0x..."; it now prints a single "val=".
+ */
+static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+	if (unlikely(scr > SCR_NOTIFICATION)) {
+		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+			__func__, scr);
+		return -EINVAL;
+	}
+
+	*val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
+	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
+		__func__, link->ap->print_id, scr, *val);
+
+	return 0;
+}
+
+/*
+ * sata_dwc_scr_write - libata .scr_write hook.
+ * Writes @val to SCR register @scr of @link; SCR registers are 32
+ * bits wide and spaced 4 bytes apart from scr_addr.
+ *
+ * Returns 0 on success, -EINVAL for an out-of-range SCR index.
+ *
+ * Fix over the original: the debug format string printed the garbled
+ * "val=val=0x..."; it now prints a single "val=".
+ */
+static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+	dev_dbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
+		__func__, link->ap->print_id, scr, val);
+	if (unlikely(scr > SCR_NOTIFICATION)) {
+		dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+			__func__, scr);
+		return -EINVAL;
+	}
+	out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
+
+	return 0;
+}
+
+/* Read SCR register @scr directly from the core's SCR register block. */
+static inline u32 core_scr_read(struct ata_port *ap, unsigned int scr)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	void __iomem *reg = (void __iomem *)hsdev->scr_base + (scr * 4);
+
+	return in_le32(reg);
+}
+
+
+/* Write @val to SCR register @scr in the core's SCR register block. */
+static inline void core_scr_write(struct ata_port *ap, unsigned int scr, u32 val)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	void __iomem *reg = (void __iomem *)hsdev->scr_base + (scr * 4);
+
+	out_le32(reg, val);
+}
+
+/*
+ * Clear all latched SError bits by writing the register's current
+ * value back to it (write-1-to-clear). Offset 4 from scr_base is the
+ * SError register of the core SCR block.
+ */
+static inline void clear_serror(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	void __iomem *serror_reg = (void __iomem *)hsdev->scr_base + 4;
+
+	out_le32(serror_reg, in_le32(serror_reg));
+}
+
+/* Acknowledge every pending SATA core interrupt (write-1-to-clear). */
+static inline void clear_intpr(struct sata_dwc_device *hsdev)
+{
+	u32 pending = in_le32(&hsdev->sata_dwc_regs->intpr);
+
+	out_le32(&hsdev->sata_dwc_regs->intpr, pending);
+}
+
+/*
+ * Acknowledge one specific interrupt in the SATA core's interrupt
+ * pending register (write-1-to-clear).
+ *
+ * Fix over the original: the old body wrote back the entire pending
+ * register and ignored @bit, acknowledging *every* pending interrupt
+ * -- any interrupt latched between the read and the write was lost.
+ * Writing only the requested bit clears just that source, matching
+ * the upstream sata_dwc_460ex driver.
+ */
+static inline void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
+{
+	out_le32(&hsdev->sata_dwc_regs->intpr, bit);
+}
+
+static inline void disable_err_irq(struct sata_dwc_device *hsdev)
+{
+ out_le32(&hsdev->sata_dwc_regs->intmr,
+ in_le32(&hsdev->sata_dwc_regs->intmr) & ~SATA_DWC_INTMR_ERRM);
+ out_le32(&hsdev->sata_dwc_regs->errmr, ~SATA_DWC_SERROR_ERR_BITS);
+}
+
+static inline void enable_err_irq(struct sata_dwc_device *hsdev)
+{
+ out_le32(&hsdev->sata_dwc_regs->intmr,
+ in_le32(&hsdev->sata_dwc_regs->intmr) | SATA_DWC_INTMR_ERRM);
+ out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
+}
+
+/* Convert a queued-command tag (masked to 0-31) into its one-bit mask. */
+static inline u32 qcmd_tag_to_mask(u8 tag)
+{
+	return 1U << (tag & 0x1f);
+}
+
+/*
+ * sata_dwc_error_intr - handle an error interrupt (modeled on ahci.c).
+ *
+ * Snapshots SError, the ATA status and the DMAC error status for
+ * logging, clears the latched error state, and hands the failure to
+ * libata EH by marking the active qc (or the link's eh_info) with
+ * AC_ERR_HOST_BUS and requesting a reset, then aborts the port.
+ */
+static void sata_dwc_error_intr(struct ata_port *ap,
+ struct sata_dwc_device *hsdev, uint intpr)
+{
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ struct ata_eh_info *ehi = &ap->link.eh_info;
+ unsigned int err_mask = 0, action = 0;
+ struct ata_queued_cmd *qc;
+ u32 serror;
+ u8 status, tag;
+ volatile u32 err_reg;
+#if defined(VERBOSE_DEBUG)
+ int chan = hsdev->dma_channel;
+#endif
+ ata_ehi_clear_desc(ehi);
+
+ serror = core_scr_read(ap, SCR_ERROR);
+ /* Reading the status register also clears the device INTRQ. */
+ status = ap->ops->sff_check_status(ap);
+
+ err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));
+ /* NOTE(review): active_tag is an int; truncating to u8 assumes a
+ * valid tag is active here -- confirm for the error path. */
+ tag = ap->link.active_tag;
+
+ dev_err(ap->dev, "%s SCR_ERROR=0x%08x intpr=0x%08x status=0x%08x "
+ "dma_intp=%d pending=%d issued=%d dma_err_status=0x%08x\n",
+ __func__, serror, intpr, status, hsdevp->dma_interrupt_count,
+ hsdevp->dma_pending[tag], hsdevp->cmd_issued[tag], err_reg);
+#if defined(VERBOSE_DEBUG)
+ printk("%s reading cfg.high of channel %d with val: 0x%08x\n", __func__, chan, in_le32(&(sata_dma_regs->chan_regs[chan].cfg.high)));
+ printk("%s reading cfg.low of channel %d with val: 0x%08x\n", __func__, chan, in_le32(&(sata_dma_regs->chan_regs[chan].cfg.low)));
+ printk("%s reading llp.low of channel %d with val: 0x%08x\n", __func__, chan, in_le32(&(sata_dma_regs->chan_regs[chan].llp.low)));
+ printk("%s reading ctl.low of channel %d with val: 0x%08x\n", __func__, chan, in_le32(&(sata_dma_regs->chan_regs[chan].ctl.low)));
+ printk("%s reading sar.low of channel %d with val: 0x%08x\n", __func__, chan, in_le32(&(sata_dma_regs->chan_regs[chan].sar.low)));
+ printk("%s reading sar.high of channel %d with val: 0x%08x\n", __func__, chan, in_le32(&(sata_dma_regs->chan_regs[chan].sar.high)));
+ printk("%s reading dar.low of channel %d with val: 0x%08x\n", __func__, chan, in_le32(&(sata_dma_regs->chan_regs[chan].dar.low)));
+ printk("%s reading dar.high of channel %d with val: 0x%08x\n", __func__, chan, in_le32(&(sata_dma_regs->chan_regs[chan].dar.high)));
+#endif
+ /* Clear error register and interrupt bit */
+ clear_serror(ap);
+ clear_interrupt_bit(hsdev, SATA_DWC_INTPR_ERR);
+
+ /* This is the only error happening now. */
+ /* TODO check for exact error */
+ err_mask |= AC_ERR_HOST_BUS;
+ action |= ATA_EH_RESET;
+
+ /* Pass this on to EH */
+ ehi->serror |= serror;
+ ehi->action |= action;
+
+ /* Attach the error to the active command if there is one. */
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc)
+ qc->err_mask |= err_mask;
+ else
+ ehi->err_mask |= err_mask;
+
+ ata_port_abort(ap);
+
+ /*
+ if (irq_stat & PORT_IRQ_FREEZE)
+ ata_port_freeze(ap);
+ else
+ ata_port_abort(ap);
+ */
+}
+
+/*
+ * Function : sata_dwc_isr
+ * arguments : irq, void *dev_instance, struct pt_regs *regs
+ * Return value : irqreturn_t - status of IRQ
+ * This Interrupt handler called via port ops registered function.
+ * .irq_handler = sata_dwc_isr
+ *
+ * Handles, in order: error interrupts, NCQ "DMA SETUP FIS received"
+ * (NEWFP) interrupts, non-NCQ command completion, and finally NCQ
+ * completion (possibly for several tags at once). Runs entirely
+ * under the host lock.
+ */
+static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
+{
+ struct ata_host *host = (struct ata_host *)dev_instance;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
+ struct ata_port *ap;
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ u8 status, tag;
+ int handled, num_processed, port = 0;
+ uint intpr, sactive, sactive2, tag_mask;
+ struct sata_dwc_device_port *hsdevp;
+
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Read the interrupt register */
+ intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+
+ /* This controller exposes a single port (index 0). */
+ ap = host->ports[port];
+ hsdevp = HSDEVP_FROM_AP(ap);
+#if defined(VERBOSE_DEBUG)
+ dev_notice(ap->dev, "%s: INTERRUPT in HSDEV with DMA channel %d\n", __func__, hsdevp->hsdev->dma_channel);
+#endif
+ dev_dbg(ap->dev, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr,
+ ap->link.active_tag);
+
+ /* Check for error interrupt */
+ if (intpr & SATA_DWC_INTPR_ERR) {
+ sata_dwc_error_intr(ap, hsdev, intpr);
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ /* Check for DMA SETUP FIS (FP DMA) interrupt */
+ if (intpr & SATA_DWC_INTPR_NEWFP) {
+ clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
+
+ /* The tag of the command the device chose to service next. */
+ tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
+ dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
+ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PENDING)
+ dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
+
+ hsdevp->sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
+
+ /* NOTE(review): qc is dereferenced below without a NULL
+ * check -- assumes fptagr always names a live qc. */
+ qc = ata_qc_from_tag(ap, tag);
+ /*
+ * Start FP DMA for NCQ command. At this point the tag is the
+ * active tag. It is the tag that matches the command about to
+ * be completed.
+ */
+ qc->ap->link.active_tag = tag;
+ sata_dwc_bmdma_start_by_tag(qc, tag);
+
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ sactive = core_scr_read(ap, SCR_ACTIVE);
+ tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive;
+
+ /* If no sactive issued and tag_mask is zero then this is not NCQ */
+ if (hsdevp->sata_dwc_sactive_issued == 0 && tag_mask == 0) {
+#if 0
+ if (ap->link.active_tag == ATA_TAG_POISON)
+ tag = 0;
+ else
+#endif
+ tag = ap->link.active_tag;
+ qc = ata_qc_from_tag(ap, tag);
+
+ /* DEV interrupt w/ no active qc? */
+ if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+ dev_err(ap->dev, "%s intr with no active qc qc=%p\n",
+ __func__, qc);
+ /* Read status anyway to clear the device's INTRQ. */
+ ata_sff_check_status(ap);
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ status = ap->ops->sff_check_status(ap);
+
+ qc->ap->link.active_tag = tag;
+ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+ if (status & ATA_ERR) {
+ dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
+ sata_dwc_qc_complete(ap, qc, 1);
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
+ __func__, prot_2_txt(qc->tf.protocol));
+/*
+ * NOTE(review): the else-branch below jumps back to this label when
+ * sata_dwc_qc_complete() reports busy; if it kept failing this would
+ * loop under the host lock -- confirm completion cannot fail twice.
+ */
+drv_still_busy:
+ if (ata_is_dma(qc->tf.protocol)) {
+ int dma_flag = hsdevp->dma_pending[tag];
+ /*
+ * Each DMA transaction produces 2 interrupts. The DMAC
+ * transfer complete interrupt and the SATA controller
+ * operation done interrupt. The command should be
+ * completed only after both interrupts are seen.
+ */
+ hsdevp->dma_interrupt_count++;
+ if (unlikely(dma_flag == SATA_DWC_DMA_PENDING_NONE)) {
+ dev_err(ap->dev, "%s: DMA not pending "
+ "intpr=0x%08x status=0x%08x pend=%d\n",
+ __func__, intpr, status, dma_flag);
+ }
+
+ /* Complete only on the second (even) interrupt of the pair. */
+ if ((hsdevp->dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+ } else if (ata_is_pio(qc->tf.protocol)) {
+ /* Let the libata SFF state machine advance PIO commands. */
+ ata_sff_hsm_move(ap, qc, status, 0);
+ handled = 1;
+ goto done_irqrestore;
+ } else {
+ if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ goto drv_still_busy;
+ }
+
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ /*
+ * This is a NCQ command. At this point we need to figure out for which
+ * tags we have gotten a completion interrupt. One interrupt may serve
+ * as completion for more than one operation when commands are queued
+ * (NCQ). We need to process each completed command.
+ */
+
+process_cmd: /* process completed commands */
+ sactive = core_scr_read(ap, SCR_ACTIVE);
+ /* Tags that were issued but are no longer active have completed. */
+ tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive;
+
+ if (sactive != 0 || hsdevp->sata_dwc_sactive_issued > 1 || tag_mask > 1) {
+ dev_dbg(ap->dev, "%s NCQ: sactive=0x%08x sactive_issued=0x%08x"
+ " tag_mask=0x%08x\n", __func__, sactive,
+ hsdevp->sata_dwc_sactive_issued, tag_mask);
+ }
+
+ if (unlikely((tag_mask | hsdevp->sata_dwc_sactive_issued) != hsdevp->sata_dwc_sactive_issued)) {
+ dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x "
+ "sata_dwc_sactive_issued=0x%08x tag_mask=0x%08x\n",
+ sactive, hsdevp->sata_dwc_sactive_issued, tag_mask);
+ }
+
+ /* read just to clear ... not bad if currently still busy */
+ status = ap->ops->sff_check_status(ap);
+ dev_dbg(ap->dev, "%s ATA status register=0x%x\n", __func__, status);
+
+ /* Walk tag_mask bit by bit, completing each finished tag. */
+ tag = 0;
+ num_processed = 0;
+ while (tag_mask) {
+ num_processed++;
+ while (!(tag_mask & 0x00000001)) {
+ tag++;
+ tag_mask <<= 1;
+ }
+ tag_mask &= (~0x00000001);
+ qc = ata_qc_from_tag(ap, tag);
+
+ /* To be picked up by completion functions */
+ qc->ap->link.active_tag = tag;
+ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+ /* Let libata/scsi layers handle error */
+ if (unlikely(status & ATA_ERR)) {
+ dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n",
+ __func__, status);
+
+ sata_dwc_qc_complete(ap, qc, 1);
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ /* Process completed command */
+ dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
+ prot_2_txt(qc->tf.protocol));
+ if (ata_is_dma(qc->tf.protocol)) {
+ hsdevp->dma_interrupt_count++;
+ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)
+ dev_warn(ap->dev,
+ "%s: DMA not pending?\n", __func__);
+ /* Complete on the second interrupt of the DMA pair. */
+ if ((hsdevp->dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+ } else {
+ if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ goto still_busy;
+ }
+ continue;
+
+still_busy:
+ ap->stats.idle_irq++;
+ dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
+ ap->print_id);
+ } /* while tag_mask */
+
+ /*
+ * Check to see if any commands completed while we were processing our
+ * initial set of completed commands (reading of status clears
+ * interrupts, so we might miss a completed command interrupt if one
+ * came in while we were processing:
+ * we read status as part of processing a completed command).
+ */
+ sactive2 = core_scr_read(ap, SCR_ACTIVE);
+ if (sactive2 != sactive) {
+ dev_dbg(ap->dev, "More finished - sactive=0x%x sactive2=0x%x\n",
+ sactive, sactive2);
+ goto process_cmd;
+ }
+ handled = 1;
+
+done_irqrestore:
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * sata_dwc_clear_dmacr - clear the SATA core DMA control register
+ * after a transfer, according to the direction pending for @tag.
+ * When no direction is recorded the driver has lost track of the DMA
+ * state, so both TX and RX channels are cleared as a fallback.
+ */
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
+
+	switch (hsdevp->dma_pending[tag]) {
+	case SATA_DWC_DMA_PENDING_RX:
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+			SATA_DWC_DMACR_RX_CLEAR(
+				in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+		break;
+	case SATA_DWC_DMA_PENDING_TX:
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+			SATA_DWC_DMACR_TX_CLEAR(
+				in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+		break;
+	default:
+		/*
+		 * Should not happen: driver and hardware disagree about
+		 * the DMA state. Clear both channels anyway.
+		 */
+		dev_err(hsdev->dev, "%s DMA protocol RX and TX DMA not pending "
+			"tag=0x%02x pending=%d dmacr: 0x%08x\n",
+			__func__, tag, hsdevp->dma_pending[tag],
+			in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+			SATA_DWC_DMACR_TXRXCH_CLEAR);
+		break;
+	}
+}
+
+/*
+ * sata_dwc_dma_xfer_complete - finish the command whose tag is the
+ * port's active_tag once its DMA transfer is done.
+ * @check_status: passed through to sata_dwc_qc_complete(); when set,
+ * the ATA status and SError registers are polled/validated there.
+ *
+ * For DMA-protocol commands the pending-direction marker is cleared
+ * and active_tag is poisoned after completion.
+ *
+ * NOTE(review): qc is used without a NULL check -- assumes active_tag
+ * always names a live command when this is called from the ISR.
+ */
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
+{
+ struct ata_queued_cmd *qc;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ u8 tag = 0;
+
+ tag = ap->link.active_tag;
+ qc = ata_qc_from_tag(ap, tag);
+
+#ifdef DEBUG_NCQ
+ if (tag > 0) {
+ dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
+ "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
+ dir_2_txt(qc->dma_dir), prot_2_txt(qc->tf.protocol),
+ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ }
+#endif
+
+ if (ata_is_dma(qc->tf.protocol)) {
+ /* Both interrupts of the DMA pair should have been seen. */
+ if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) {
+ dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
+ "pending dmacr: 0x%08x\n", __func__,
+ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ }
+
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+ sata_dwc_qc_complete(ap, qc, check_status);
+ ap->link.active_tag = ATA_TAG_POISON;
+ } else {
+ sata_dwc_qc_complete(ap, qc, check_status);
+ }
+}
+
+/*
+ * sata_dwc_qc_complete - complete a queued command back to libata.
+ * @check_status: when non-zero, poll the ATA status register (up to
+ * ~10 tries) to let BSY drop and verify SError before completing.
+ *
+ * Clears the command's bit from the driver's queued/issued bitmasks
+ * and calls ata_qc_complete(). Always returns 0.
+ *
+ * NOTE(review): the ISR treats a non-zero return as "still busy" and
+ * retries, but this function never returns non-zero -- confirm that
+ * retry path is intentionally dead.
+ */
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 check_status)
+{
+ u8 status = 0;
+ int i = 0;
+ u32 mask = 0x0;
+ u8 tag = qc->tag;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ u32 serror;
+
+ dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
+
+ /* Completing while a DMA is still marked pending indicates a bug. */
+ if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX))
+ dev_err(ap->dev, "TX DMA PENDINGING\n");
+ else if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX))
+ dev_err(ap->dev, "RX DMA PENDINGING\n");
+
+ if (check_status) {
+ i = 0;
+ do {
+ /* check main status, clearing INTRQ */
+ status = ap->ops->sff_check_status(ap);
+ if (status & ATA_BUSY) {
+ dev_dbg(ap->dev, "STATUS BUSY (0x%02x) [%d]\n",
+ status, i);
+ }
+ if (++i > 10)
+ break;
+ } while (status & ATA_BUSY);
+
+ status = ap->ops->sff_check_status(ap);
+ if (unlikely(status & ATA_BUSY))
+ dev_err(ap->dev, "QC complete cmd=0x%02x STATUS BUSY "
+ "(0x%02x) [%d]\n", qc->tf.command, status, i);
+ /* Log (but do not act on) any latched SError bits. */
+ serror = core_scr_read(ap, SCR_ERROR);
+ if (unlikely(serror & SATA_DWC_SERROR_ERR_BITS))
+ dev_err(ap->dev, "****** SERROR=0x%08x ******\n",
+ serror);
+ }
+ dev_dbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u: "
+ "protocol=%d\n", qc->tf.command, status, ap->print_id,
+ qc->tf.protocol);
+
+ /* clear active bit */
+ mask = (~(qcmd_tag_to_mask(tag)));
+ hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask;
+ hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask;
+
+ /* Complete taskfile transaction (does not read SCR registers) */
+ ata_qc_complete(qc);
+
+ return 0;
+}
+
+/*
+ * sata_dwc_enable_interrupts - unmask the interrupt sources the
+ * driver handles (error, new first-party DMA FIS, PM abort, DMA
+ * transfer) and select which SError bits raise the error interrupt.
+ */
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
+{
+	const u32 int_mask = SATA_DWC_INTMR_ERRM |
+			     SATA_DWC_INTMR_NEWFPM |
+			     SATA_DWC_INTMR_PMABRTM |
+			     SATA_DWC_INTMR_DMATM;
+
+	out_le32(&hsdev->sata_dwc_regs->intmr, int_mask);
+
+	/*
+	 * Unmask the error bits that should trigger an error interrupt
+	 * by setting the error mask register.
+	 */
+	out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERROR_ERR_BITS);
+
+	dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", __func__,
+		in_le32(&hsdev->sata_dwc_regs->intmr),
+		in_le32(&hsdev->sata_dwc_regs->errmr));
+}
+
+/*
+ * sata_dwc_setup_port - map the DWC core's shadow register offsets
+ * onto the libata ata_ioports structure. Registers are 4 bytes
+ * apart; several libata names alias the same physical register
+ * (data/cmd, error/feature, command/status, altstatus/ctl).
+ */
+static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
+{
+	void *reg_base = (void *)base;
+
+	port->cmd_addr       = reg_base + 0x00;
+	port->data_addr      = reg_base + 0x00;
+	port->error_addr     = reg_base + 0x04;
+	port->feature_addr   = reg_base + 0x04;
+	port->nsect_addr     = reg_base + 0x08;
+	port->lbal_addr      = reg_base + 0x0c;
+	port->lbam_addr      = reg_base + 0x10;
+	port->lbah_addr      = reg_base + 0x14;
+	port->device_addr    = reg_base + 0x18;
+	port->command_addr   = reg_base + 0x1c;
+	port->status_addr    = reg_base + 0x1c;
+	port->altstatus_addr = reg_base + 0x20;
+	port->ctl_addr       = reg_base + 0x20;
+}
+
+/*
+ * Function : sata_dwc_port_start
+ * arguments : struct ata_port *ap
+ * Return value : 0 on success, negative errno otherwise
+ *
+ * Per-port setup: allocates the port private data and one scatter/
+ * gather LLI table per queued command for the AHB DMA engine, clears
+ * the SATA core's DMA control register, programs the burst size and
+ * reports the negotiated link speed.
+ *
+ * Fixes over the original:
+ *  - kzalloc() replaces kmalloc()+memset().
+ *  - The old error path kfree()d hsdevp and then called
+ *    sata_dwc_port_stop(); since ap->private_data was not yet set,
+ *    port_stop could free nothing and any LLI tables already
+ *    allocated were leaked. Cleanup is now done explicitly here.
+ */
+static int sata_dwc_port_start(struct ata_port *ap)
+{
+	int err = 0;
+	struct sata_dwc_device *hsdev;
+	struct sata_dwc_device_port *hsdevp = NULL;
+	struct device *pdev;
+	u32 sstatus;
+	int i;
+
+	hsdev = HSDEV_FROM_AP(ap);
+
+	dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
+
+	hsdev->host = ap->host;
+	pdev = ap->host->dev;
+	if (!pdev) {
+		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
+		err = -ENODEV;
+		goto cleanup_exit;
+	}
+
+	/* Allocate Port Struct (zeroed, so llit[] starts out NULL) */
+	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
+	if (!hsdevp) {
+		dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__);
+		err = -ENOMEM;
+		goto cleanup_exit;
+	}
+	hsdevp->hsdev = hsdev;
+
+	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
+		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
+
+	ap->prd = 0;	/* set these so libata doesn't use them */
+	ap->prd_dma = 0;
+
+	/*
+	 * DMA - Assign scatter gather LLI table. We can't use the libata
+	 * version since its PRD is IDE PCI specific.
+	 */
+	for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+		hsdevp->llit[i] = dma_alloc_coherent(pdev,
+						     SATA_DWC_DMAC_LLI_TBL_SZ,
+						     &(hsdevp->llit_dma[i]),
+						     GFP_ATOMIC);
+		if (!hsdevp->llit[i]) {
+			dev_err(ap->dev, "%s: dma_alloc_coherent failed size "
+				"0x%x\n", __func__, SATA_DWC_DMAC_LLI_TBL_SZ);
+			err = -ENOMEM;
+			goto cleanup_exit;
+		}
+	}
+
+	if (ap->port_no == 0) {
+		dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+			__func__);
+
+		out_le32(&hsdev->sata_dwc_regs->dmacr,
+			 SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+		dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__);
+		out_le32(&hsdev->sata_dwc_regs->dbtsr,
+			 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+			  SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+		dev_notice(ap->dev, "%s: setting burst size in DBTSR: 0x%08x\n",
+			   __func__, in_le32(&hsdev->sata_dwc_regs->dbtsr));
+	}
+
+	/* Clear any error bits before libata starts issuing commands */
+	clear_serror(ap);
+
+	ap->private_data = hsdevp;
+
+	/* Are we in Gen I or II */
+	sstatus = core_scr_read(ap, SCR_STATUS);
+	switch (SATA_DWC_SCR0_SPD_GET(sstatus)) {
+	case 0x0:
+		dev_info(ap->dev, "**** No neg speed (nothing attached?) \n");
+		break;
+	case 0x1:
+		dev_info(ap->dev, "**** GEN I speed rate negotiated \n");
+		break;
+	case 0x2:
+		dev_info(ap->dev, "**** GEN II speed rate negotiated \n");
+		break;
+	}
+
+	dev_dbg(ap->dev, "%s: done\n", __func__);
+	return 0;
+
+cleanup_exit:
+	/* Free only what was actually allocated before the failure. */
+	if (hsdevp) {
+		for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+			if (hsdevp->llit[i])
+				dma_free_coherent(pdev,
+						  SATA_DWC_DMAC_LLI_TBL_SZ,
+						  hsdevp->llit[i],
+						  hsdevp->llit_dma[i]);
+		}
+		kfree(hsdevp);
+	}
+	dev_dbg(ap->dev, "%s: fail\n", __func__);
+	return err;
+}
+
+/*
+ * sata_dwc_port_stop - libata .port_stop hook.
+ * Frees the per-tag LLI tables and the port private data allocated
+ * in sata_dwc_port_start(), then clears ap->private_data.
+ */
+static void sata_dwc_port_stop(struct ata_port *ap)
+{
+ int i;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+ dev_dbg(ap->dev, "%s: ap->id = %d\n", __func__, ap->print_id);
+
+ if (hsdevp && hsdev) {
+ /* deallocate LLI table */
+ for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+ dma_free_coherent(ap->host->dev,
+ SATA_DWC_DMAC_LLI_TBL_SZ,
+ hsdevp->llit[i], hsdevp->llit_dma[i]);
+ }
+
+ kfree(hsdevp);
+ }
+ ap->private_data = NULL;
+}
+
+/*
+ * Function : sata_dwc_exec_command_by_tag
+ * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
+ * Return value : None
+ * Records the issue state (@cmd_issued) for @tag under the host lock,
+ * clears SError, and writes the taskfile to the device via
+ * ata_sff_exec_command().
+ */
+static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
+ struct ata_taskfile *tf,
+ u8 tag, u32 cmd_issued)
+{
+ unsigned long flags;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+ dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
+ ata_cmd_2_txt(tf), tag);
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+ hsdevp->cmd_issued[tag] = cmd_issued;
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+
+ /*
+ * Clear SError before executing a new command.
+ *
+ * TODO if we read a PM's registers now, we will throw away the task
+ * file values loaded into the shadow registers for this command.
+ *
+ * sata_dwc_scr_write and read can not be used here. Clearing the PM
+ * managed SError register for the disk needs to be done before the
+ * task file is loaded.
+ */
+ clear_serror(ap);
+ ata_sff_exec_command(ap, tf);
+}
+
+/*
+ * Mark the command for @tag as issued-pending and write its taskfile
+ * to the device. The AHB DMA transfer itself is started later, from
+ * sata_dwc_bmdma_start_by_tag() (directly for non-NCQ, or from the
+ * NEWFP branch of the interrupt handler for NCQ).
+ */
+static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+ sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
+ SATA_DWC_CMD_ISSUED_PENDING);
+}
+
+/*
+ * libata .bmdma_setup hook. NCQ commands are tracked by their
+ * hardware tag; everything else is tracked under tag 0.
+ */
+static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	u8 tag;
+
+	if (ata_is_ncq(qc->tf.protocol)) {
+		tag = qc->tag;
+		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+			__func__, qc->ap->link.sactive, tag);
+	} else {
+		tag = 0;
+	}
+
+	sata_dwc_bmdma_setup_by_tag(qc, tag);
+}
+
+/*
+ * sata_dwc_bmdma_start_by_tag - start the AHB DMA for the command
+ * tracked under @tag, provided the command was previously issued
+ * (cmd_issued != NOT). Records the pending DMA direction, enables
+ * the matching channel (TX or RX) in the SATA core's DMA control
+ * register, then enables the AHB DMA channel prepared by
+ * dma_dwc_xfer_setup().
+ */
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+ volatile int start_dma;
+ u32 reg, dma_chan;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
+ struct ata_port *ap = qc->ap;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ int dir = qc->dma_dir;
+ dma_chan = hsdevp->dma_chan[tag];
+
+ /* Used for ata_bmdma_start(qc) -- we are not BMDMA compatible */
+
+ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
+ start_dma = 1;
+ if (dir == DMA_TO_DEVICE)
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
+ else
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
+ } else {
+ dev_err(ap->dev, "%s: Command not pending cmd_issued=%d "
+ "(tag=%d) - DMA NOT started\n", __func__,
+ hsdevp->cmd_issued[tag], tag);
+ start_dma = 0;
+ }
+
+ dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
+ "start_dma? %x\n", __func__, qc, tag, qc->tf.command,
+ dir_2_txt(qc->dma_dir), start_dma);
+ sata_dwc_tf_dump(hsdev->dev, &(qc->tf));
+
+ if (start_dma) {
+ /* An existing SError is only logged; the DMA still starts. */
+ reg = core_scr_read(ap, SCR_ERROR);
+ if (unlikely(reg & SATA_DWC_SERROR_ERR_BITS)) {
+ dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
+ __func__, reg);
+ }
+
+ /* Enable the SATA core's DMA channel for this direction. */
+ if (dir == DMA_TO_DEVICE)
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXCHEN);
+ else
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_RXCHEN);
+#if defined(VERBOSE_DEBUG)
+ dev_notice(ap->dev, "%s: setting DMACR: 0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->dmacr));
+#endif
+ /* Enable AHB DMA transfer on the specified channel */
+ dma_dwc_xfer_start(dma_chan);
+ }
+}
+
+/*
+ * libata bmdma_start hook: dispatch to the per-tag start routine,
+ * forcing tag 0 for non-NCQ commands.
+ */
+static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
+{
+	u8 tag;
+
+	if (!ata_is_ncq(qc->tf.protocol)) {
+		tag = 0;
+	} else {
+		tag = qc->tag;
+		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+			__func__, qc->ap->link.sactive, tag);
+	}
+
+	dev_dbg(qc->ap->dev, "%s\n", __func__);
+	sata_dwc_bmdma_start_by_tag(qc, tag);
+}
+
+/*
+ * Function : sata_dwc_qc_prep_by_tag
+ * arguments : ata_queued_cmd *qc, u8 tag
+ * Return value : None
+ * qc_prep for a particular queued command based on tag: builds the DMA
+ * transfer (LLI chain) and records the assigned DMA channel.
+ */
+static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+	struct ata_port *ap = qc->ap;
+	int dma_chan;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	int dir;
+
+	dir = qc->dma_dir;
+
+	/* Nothing to set up for non-DMA protocols. */
+	if ((dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+		return;
+
+	dev_dbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
+		__func__, ap->port_no, dir_2_txt(dir), qc->n_elem);
+
+	/*
+	 * Fixes vs. original:
+	 *  - dma_chan was a u32, so the error check (dma_chan < 0) could
+	 *    never trigger; it must be signed to see a negative return.
+	 *  - the error message printed an uninitialized local 'err';
+	 *    report the actual return value instead.
+	 */
+	dma_chan = dma_dwc_xfer_setup(qc, hsdevp->llit[tag],
+				hsdevp->llit_dma[tag],
+				(void *__iomem)(&hsdev->sata_dwc_regs->dmadr));
+	if (unlikely(dma_chan < 0)) {
+		dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
+			__func__, dma_chan);
+		return;
+	}
+
+	hsdevp->dma_chan[tag] = dma_chan;
+}
+
+/*
+ * libata qc_issue hook.  NCQ commands are issued by tag through this
+ * driver's own path; everything else goes through the generic SFF path.
+ *
+ * Fix: propagate the return value of ata_sff_qc_issue() -- the original
+ * always returned 0, silently discarding issue failures.
+ */
+static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+
+#ifdef DEBUG_NCQ
+	if (qc->tag > 0 || ap->link.sactive > 1) {
+		dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s"
+			" ap active_tag=0x%08x ap sactive=0x%08x\n",
+			__func__, ap->print_id, qc->tf.command,
+			ata_cmd_2_txt(&qc->tf), qc->tag,
+			prot_2_txt(qc->tf.protocol), ap->link.active_tag,
+			ap->link.sactive);
+	}
+#endif
+
+	if (ata_is_ncq(qc->tf.protocol)) {
+		/* Load the taskfile and issue under the command's own tag. */
+		ap->ops->sff_tf_load(ap, &qc->tf);
+		sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
+					SATA_DWC_CMD_ISSUED_PENDING);
+		return 0;
+	}
+
+	return ata_sff_qc_issue(qc);
+}
+
+/*
+ * Function : sata_dwc_eng_timeout
+ * arguments : ata_port *ap
+ * Return value : None
+ * error handler for DMA time out
+ * ata_eng_timeout(ap) -- this does bmdma stuff which can not be done by this
+ * driver. SEE ALSO ata_qc_timeout(ap)
+ */
+static void sata_dwc_eng_timeout(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	u8 tag;
+	uint mask = 0x0;
+	unsigned long flags;
+	u32 serror, intpr, dma_ch;
+
+	/* The timed-out command is whatever the link considers active. */
+	tag = ap->link.active_tag;
+	dma_ch = hsdevp->dma_chan[tag];
+	qc = ata_qc_from_tag(ap, tag);
+
+	dev_err(ap->dev, "%s: id=%d active_tag=%d qc=%p dma_chan=%d\n",
+		__func__, ap->print_id, tag, qc, dma_ch);
+
+	if (unlikely(!qc)) {
+		dev_err(ap->dev,
+			"%s: timeout without queued command\n", __func__);
+		return;
+	}
+
+	/* Snapshot controller interrupt-pending and SError state for triage. */
+	intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+	serror = core_scr_read(ap, SCR_ERROR);
+
+	dev_err(ap->dev, "intpr=0x%08x serror=0x%08x\n", intpr, serror);
+
+	/* If there are no error bits set, can we just pass this on to eh? */
+	if (!(serror & SATA_DWC_SERROR_ERR_BITS) &&
+	    !(intpr & SATA_DWC_INTPR_ERR)) {
+
+		/* No hardware error: tear down the stalled DMA and drop the
+		 * tag from the driver's book-keeping under the host lock. */
+		spin_lock_irqsave(&ap->host->lock, flags);
+		if (dma_dwc_channel_enabled(dma_ch))
+			dma_dwc_terminate_dma(ap, dma_ch);
+
+		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+
+		/* clear active bit */
+		mask = (~(qcmd_tag_to_mask(tag)));
+		hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask;
+		hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask;
+
+		spin_unlock_irqrestore(&ap->host->lock, flags);
+	} else {
+		/* This is wrong, what really needs to be done is a reset. */
+
+		/*
+		 * NOTE(review): this branch locks via ap->lock while the
+		 * branch above uses &ap->host->lock directly -- confirm both
+		 * name the same spinlock before changing either.
+		 */
+		spin_lock_irqsave(ap->lock, flags);
+
+		if (ata_is_dma(qc->tf.protocol)) {
+			/* disable DMAC */
+			dma_dwc_terminate_dma(ap, dma_ch);
+		}
+
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+
+	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+	if (qc->flags & ATA_QCFLAG_ACTIVE) {
+		qc->err_mask |= AC_ERR_TIMEOUT;
+		/*
+		 * test-only: The original code (AMCC: 2.6.19) called
+		 * ata_eng_timeout(ap) here. This function is not available
+		 * anymore. So what to do now?
+		 */
+	}
+}
+
+/*
+ * Function : sata_dwc_qc_prep
+ * arguments : ata_queued_cmd *qc
+ * Return value : None
+ * qc_prep for a particular queued command: for NCQ commands the tag is
+ * first marked active in the SActive SCR register, then the per-tag DMA
+ * setup is performed.
+ */
+static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
+{
+	u32 sactive;
+	u8 tag = qc->tag;
+
+	/* Nothing to prepare for non-DMA protocols. */
+	if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+		return;
+
+#ifdef DEBUG_NCQ
+	if (qc->tag > 0) {
+		dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
+			__func__, tag, qc->ap->link.active_tag);
+	}
+#endif
+
+	/*
+	 * Consistency fix: use ata_is_ncq() like the rest of this driver
+	 * (qc_issue/bmdma_setup) instead of comparing against ATA_PROT_NCQ.
+	 */
+	if (ata_is_ncq(qc->tf.protocol)) {
+		sactive = core_scr_read(qc->ap, SCR_ACTIVE);
+		sactive |= (0x00000001 << tag);
+		core_scr_write(qc->ap, SCR_ACTIVE, sactive);
+		dev_dbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
+			"sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive,
+			sactive);
+	} else {
+		tag = 0;
+	}
+
+	sata_dwc_qc_prep_by_tag(qc, tag);
+}
+
+/*
+ * test-only: Needed when no drive is connected upon driver startup.
+ * Otherwise, by using the default prereset routine, the driver crashes
+ * upon loading.
+ * Not sure if this could be handled differently.
+ */
+static int sata_dwc_prereset(struct ata_link *link, unsigned long deadline)
+{
+	struct ata_eh_context *ehc = &link->ap->link.eh_context;
+
+	if (!ata_link_online(link)) {
+		/* tell EH to bail */
+		ehc->i.action &= ~ATA_EH_RESET;
+		return 0;
+	}
+
+	return ata_sff_wait_ready(link, deadline);
+}
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_dwc_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	/*
+	 * test-only: Currently this driver doesn't handle NCQ
+	 * correctly. We enable NCQ but set the queue depth to a
+	 * max of 1. This will get fixed in a future release.
+	 */
+//	.sg_tablesize = LIBATA_MAX_PRD,
+	.can_queue = ATA_DEF_QUEUE,	/* ATA_MAX_QUEUE */
+	.dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+/*
+ * Run the generic SFF error handler with hardreset suppressed for the
+ * duration (ATA_LFLAG_NO_HRST is set around the call and cleared after).
+ * NOTE(review): presumably hardreset misbehaves on this controller --
+ * confirm before removing the flag dance.
+ */
+static void sata_dwc_error_handler(struct ata_port *ap)
+{
+	ap->link.flags |= ATA_LFLAG_NO_HRST;
+	ata_sff_error_handler(ap);
+	ap->link.flags &= ~ATA_LFLAG_NO_HRST;
+}
+
+
+/* libata port operations: SFF base with driver-specific DMA/SCR hooks. */
+static struct ata_port_operations sata_dwc_ops = {
+	.inherits = &ata_sff_port_ops,
+
+	.error_handler = sata_dwc_error_handler,
+
+	/* Command preparation/issue (NCQ-aware, per-tag DMA setup). */
+	.qc_prep = sata_dwc_qc_prep,
+	.qc_issue = sata_dwc_qc_issue,
+
+	/* SCR access through this controller's register block. */
+	.scr_read = sata_dwc_scr_read,
+	.scr_write = sata_dwc_scr_write,
+
+	.port_start = sata_dwc_port_start,
+	.port_stop = sata_dwc_port_stop,
+
+	/* Not真 BMDMA -- these drive the AHB DMA engine instead. */
+	.bmdma_setup = sata_dwc_bmdma_setup,
+	.bmdma_start = sata_dwc_bmdma_start,
+
+	.prereset = sata_dwc_prereset,
+
+	/* test-only: really needed? */
+	.eng_timeout = sata_dwc_eng_timeout,
+};
+
+static const struct ata_port_info sata_dwc_port_info[] = {
+	{
+		/*
+		 * NOTE(review): NCQ is actually ENABLED below via
+		 * ATA_FLAG_NCQ -- the previous comment here (claiming NCQ
+		 * was disabled) was stale and has been corrected.  Queue
+		 * depth is still limited via the scsi_host_template.
+		 */
+		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				ATA_FLAG_MMIO | ATA_FLAG_NCQ,
+		.pio_mask = 0x1f,	/* pio 0-4 */
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &sata_dwc_ops,
+	},
+};
+
+/*
+ * OF platform probe: map the controller and AHB DMA register windows,
+ * allocate the libata host, wire up interrupts and register with libata.
+ *
+ * Fixes vs. original:
+ *  - kzalloc() replaces kmalloc()+memset().
+ *  - the return value of ata_host_activate() is checked.
+ *  - on early failure, dma_dwc_exit() is no longer called with a NULL
+ *    hsdev pointer.
+ */
+static int sata_dwc_probe(struct of_device *ofdev,
+			const struct of_device_id *match)
+{
+	struct sata_dwc_device *hsdev = NULL;
+	u32 idr, versionr;
+	char *ver = (char *)&versionr;
+	u8 *base = NULL;
+	int err = 0;
+	int irq;
+	struct ata_host *host;
+	struct ata_port_info pi = sata_dwc_port_info[0];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+
+	const unsigned int *dma_channel;
+
+	/*
+	 * Check if device is enabled
+	 */
+	if (!of_device_is_available(ofdev->node)) {
+		printk(KERN_INFO "%s: Port disabled via device-tree\n",
+			ofdev->node->full_name);
+		return 0;
+	}
+
+	/* Allocate (zeroed) DWC SATA device */
+	hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+	if (hsdev == NULL) {
+		dev_err(&ofdev->dev, "kzalloc failed for hsdev\n");
+		err = -ENOMEM;
+		goto error_out;
+	}
+
+	/* Identify the DMA channel from the device tree (default: 0) */
+	dma_channel = of_get_property(ofdev->node, "dma-channel", NULL);
+	if (dma_channel) {
+		dev_notice(&ofdev->dev, "Getting DMA channel %d\n", *dma_channel);
+		hsdev->dma_channel = *dma_channel;
+	} else
+		hsdev->dma_channel = 0;
+
+	/* Ioremap SATA registers */
+	base = of_iomap(ofdev->node, 0);
+	if (!base) {
+		dev_err(&ofdev->dev, "ioremap failed for SATA register address\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+	hsdev->reg_base = base;
+	dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
+
+	/* Synopsys DWC SATA specific Registers */
+	hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
+
+	/* Allocate and fill host */
+	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
+	if (!host) {
+		dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
+		err = -ENOMEM;
+		goto error_out;
+	}
+
+	host->private_data = hsdev;
+
+	/* Setup port */
+	host->ports[0]->ioaddr.cmd_addr = base;
+	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
+	hsdev->scr_base = (u8 *)(base + SATA_DWC_SCR_OFFSET);
+	sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
+
+	/* Read the ID and Version Registers */
+	idr = in_le32(&hsdev->sata_dwc_regs->idr);
+	versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
+	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
+		idr, ver[0], ver[1], ver[2]);
+
+	/* Get SATA DMA interrupt number */
+	irq = irq_of_parse_and_map(ofdev->node, 1);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA DMA irq\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+
+	/* Get physical SATA DMA register base address (shared; map once) */
+	if (!sata_dma_regs) {
+		sata_dma_regs = of_iomap(ofdev->node, 1);
+		if (!sata_dma_regs) {
+			dev_err(&ofdev->dev, "ioremap failed for AHBDMA register address\n");
+			err = -ENODEV;
+			goto error_out;
+		}
+	}
+	/* Save dev for later use in dev_xxx() routines */
+	hsdev->dev = &ofdev->dev;
+
+	/* Init global dev list */
+	dwc_dev_list[hsdev->dma_channel] = hsdev;
+
+	/* Initialize AHB DMAC */
+	dma_dwc_init(hsdev, irq);
+
+	/* Enable SATA Interrupts */
+	sata_dwc_enable_interrupts(hsdev);
+
+	/* Get SATA interrupt number */
+	irq = irq_of_parse_and_map(ofdev->node, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA irq\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+
+	/*
+	 * Now, register with libATA core, this will also initiate the
+	 * device discovery process, invoking our port_start() handler &
+	 * error_handler() to execute a dummy Softreset EH session
+	 */
+	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+	if (err) {
+		dev_err(&ofdev->dev, "ata_host_activate failed\n");
+		goto error_out;
+	}
+
+	dev_set_drvdata(&ofdev->dev, host);
+
+	/* Everything is fine */
+	return 0;
+
+error_out:
+	/* Free SATA DMA resources (only if hsdev was ever allocated) */
+	if (hsdev)
+		dma_dwc_exit(hsdev);
+
+	if (base)
+		iounmap(base);
+
+	kfree(hsdev);	/* kfree(NULL) is a no-op */
+
+	return err;
+}
+
+/*
+ * OF platform remove: detach from libata and release driver resources.
+ *
+ * Fix: removed the kfree(host) -- the host was allocated with
+ * ata_host_alloc_pinfo(), which uses device-managed (devres) memory that
+ * is released automatically; freeing it here was a double free.
+ */
+static int sata_dwc_remove(struct of_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_dwc_device *hsdev = host->private_data;
+
+	ata_host_detach(host);
+
+	dev_set_drvdata(dev, NULL);
+
+	/* Free SATA DMA resources */
+	dma_dwc_exit(hsdev);
+
+	iounmap(hsdev->reg_base);
+	kfree(hsdev);
+
+	dev_dbg(&ofdev->dev, "done\n");
+
+	return 0;
+}
+
+/* Device-tree compatible strings this driver binds to. */
+static const struct of_device_id sata_dwc_match[] = {
+	{ .compatible = "amcc,sata-460ex", },
+	{ .compatible = "amcc,sata-apm82181", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sata_dwc_match);
+
+/* OF platform driver glue. */
+static struct of_platform_driver sata_dwc_driver = {
+	.name = "sata-dwc",
+	.match_table = sata_dwc_match,
+	.probe = sata_dwc_probe,
+	.remove = sata_dwc_remove,
+};
+
+/* Module entry point: register the OF platform driver. */
+static int __init sata_dwc_init(void)
+{
+	return of_register_platform_driver(&sata_dwc_driver);
+}
+
+/* Module exit point: unregister the OF platform driver. */
+static void __exit sata_dwc_exit(void)
+{
+	of_unregister_platform_driver(&sata_dwc_driver);
+}
+
+module_init(sata_dwc_init);
+module_exit(sata_dwc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
+MODULE_DESCRIPTION("DesignWare Cores SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_dwc_ncq.c b/drivers/ata/sata_dwc_ncq.c
new file mode 100644
index 00000000000..86bc1a158a6
--- /dev/null
+++ b/drivers/ata/sata_dwc_ncq.c
@@ -0,0 +1,2933 @@
+/*
+ * drivers/ata/sata_dwc.c
+ *
+ * Synopsys DesignWare Cores (DWC) SATA host driver
+ *
+ * Author: Mark Miesfeld <mmiesfeld@amcc.com>
+ *
+ * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
+ * Copyright 2008 DENX Software Engineering
+ *
+ * V2.0: Support Port Multiplier
+ * V2.1: Support NCQ
+ *
+ * Based on versions provided by AMCC and Synopsys which are:
+ * Copyright 2006 Applied Micro Circuits Corporation
+ * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/libata.h>
+#include <linux/rtc.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+
+
+#ifdef CONFIG_SATA_DWC_DEBUG
+#define DWC_DEBUG
+#define dwc_dev_dbg(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_dbg(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_dbg(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+#else
+#define dwc_dev_dbg(dev, format, arg...) \
+ ({ 0; })
+#define dwc_port_dbg(ap, format, arg...) \
+ ({ 0; })
+#define dwc_link_dbg(link, format, arg...) \
+ ({ 0; })
+#endif
+
+#ifdef CONFIG_SATA_DWC_VDEBUG
+#define DWC_VDEBUG
+#define DEBUG_NCQ
+#define dwc_dev_vdbg(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_vdbg(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_vdbg(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+#else
+#define dwc_dev_vdbg(dev, format, arg...) \
+ ({ 0; })
+#define dwc_port_vdbg(ap, format, arg...) \
+ ({ 0; })
+#define dwc_link_vdbg(link, format, arg...) \
+ ({ 0; })
+#endif
+
+#define dwc_dev_info(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_info(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_info(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+
+
+
+#define DRV_NAME "sata-dwc"
+#define DRV_VERSION "2.1"
+
+/* Port Multiplier discovery Signature */
+#define PSCR_SCONTROL_DET_ENABLE 0x00000001
+#define PSCR_SSTATUS_DET_PRESENT 0x00000001
+#define PSCR_SERROR_DIAG_X 0x04000000
+
+/* Port multiplier port entry in SCONTROL register */
+#define SCONTROL_PMP_MASK 0x000f0000
+#define PMP_TO_SCONTROL(p) ((p << 16) & 0x000f0000)
+#define SCONTROL_TO_PMP(p) (((p) & 0x000f0000) >> 16)
+
+
+/* SATA DMA driver Globals */
+#if defined(CONFIG_APM82181)
+#define DMA_NUM_CHANS 2
+#else
+#define DMA_NUM_CHANS 1
+#endif
+
+#define DMA_NUM_CHAN_REGS 8
+
+/* SATA DMA Register definitions */
+#if defined(CONFIG_APM82181)
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
+#else
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
+#endif
+
+struct dmareg {
+ u32 low; /* Low bits 0-31 */
+ u32 high; /* High bits 32-63 */
+};
+
+/* DMA Per Channel registers */
+struct dma_chan_regs {
+ struct dmareg sar; /* Source Address */
+ struct dmareg dar; /* Destination address */
+ struct dmareg llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+ struct dmareg sstat; /* Source Status not implemented in core */
+ struct dmareg dstat; /* Destination Status not implemented in core */
+ struct dmareg sstatar; /* Source Status Address not impl in core */
+ struct dmareg dstatar; /* Destination Status Address not implemented */
+ struct dmareg cfg; /* Config */
+ struct dmareg sgr; /* Source Gather */
+ struct dmareg dsr; /* Destination Scatter */
+};
+
+/* Generic Interrupt Registers */
+struct dma_interrupt_regs {
+ struct dmareg tfr; /* Transfer Interrupt */
+ struct dmareg block; /* Block Interrupt */
+ struct dmareg srctran; /* Source Transfer Interrupt */
+ struct dmareg dsttran; /* Dest Transfer Interrupt */
+ struct dmareg error; /* Error */
+};
+
+struct ahb_dma_regs {
+ struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
+ struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
+ struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
+ struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
+ struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
+ struct dmareg statusInt; /* Interrupt combined */
+ struct dmareg rq_srcreg; /* Src Trans Req */
+ struct dmareg rq_dstreg; /* Dst Trans Req */
+ struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req */
+ struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req */
+ struct dmareg rq_lst_srcreg; /* Last Src Trans Req */
+ struct dmareg rq_lst_dstreg; /* Last Dst Trans Req */
+ struct dmareg dma_cfg; /* DMA Config */
+ struct dmareg dma_chan_en; /* DMA Channel Enable */
+ struct dmareg dma_id; /* DMA ID */
+ struct dmareg dma_test; /* DMA Test */
+ struct dmareg res1; /* reserved */
+ struct dmareg res2; /* reserved */
+
+ /* DMA Comp Params
+ * Param 6 = dma_param[0], Param 5 = dma_param[1],
+ * Param 4 = dma_param[2] ...
+ */
+ struct dmareg dma_params[6];
+};
+
+/* Data structure for linked list item */
+struct lli {
+ u32 sar; /* Source Address */
+ u32 dar; /* Destination address */
+ u32 llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+#if defined(CONFIG_APM82181)
+ u32 dstat; /* Source status is not supported */
+#else
+ struct dmareg dstat; /* Destination Status */
+#endif
+};
+
+#define SATA_DWC_DMAC_LLI_SZ (sizeof(struct lli))
+#define SATA_DWC_DMAC_LLI_NUM 256
+#define SATA_DWC_DMAC_TWIDTH_BYTES 4
+#define SATA_DWC_DMAC_LLI_TBL_SZ \
+ (SATA_DWC_DMAC_LLI_SZ * SATA_DWC_DMAC_LLI_NUM)
+#if defined(CONFIG_APM82181)
+#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
+ (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)
+#else
+#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
+ (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)
+#endif
+/* DMA Register Operation Bits */
+#define DMA_EN 0x00000001 /* Enable AHB DMA */
+#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
+#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
+ ((0x000000001 << (ch)) << 8))
+#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
+
+/* Channel Control Register */
+#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
+#define DMA_CTL_LLP_SRCEN 0x10000000 /* Blk chain enable Src */
+#define DMA_CTL_LLP_DSTEN 0x08000000 /* Blk chain enable Dst */
+/*
+ * This define is used to set block chaining disabled in the control low
+ * register. It is already in little endian format so it can be &'d dirctly.
+ * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
+ */
+#define DMA_CTL_LLP_DISABLE_LE32 0xffffffe7
+#define DMA_CTL_SMS(num) ((num & 0x3) << 25) /*Src Master Select*/
+#define DMA_CTL_DMS(num) ((num & 0x3) << 23) /*Dst Master Select*/
+#define DMA_CTL_TTFC(type) ((type & 0x7) << 20) /*Type&Flow cntr*/
+#define DMA_CTL_TTFC_P2M_DMAC 0x00000002 /*Per mem,DMAC cntr*/
+#define DMA_CTL_TTFC_M2P_PER 0x00000003 /*Mem per,peri cntr*/
+#define DMA_CTL_SRC_MSIZE(size) ((size & 0x7) << 14) /*Src Burst Len*/
+#define DMA_CTL_DST_MSIZE(size) ((size & 0x7) << 11) /*Dst Burst Len*/
+#define DMA_CTL_SINC_INC 0x00000000 /*Src addr incr*/
+#define DMA_CTL_SINC_DEC 0x00000200
+#define DMA_CTL_SINC_NOCHANGE 0x00000400
+#define DMA_CTL_DINC_INC 0x00000000 /*Dst addr incr*/
+#define DMA_CTL_DINC_DEC 0x00000080
+#define DMA_CTL_DINC_NOCHANGE 0x00000100
+#define DMA_CTL_SRC_TRWID(size) ((size & 0x7) << 4) /*Src Trnsfr Width*/
+#define DMA_CTL_DST_TRWID(size) ((size & 0x7) << 1) /*Dst Trnsfr Width*/
+#define DMA_CTL_INT_EN 0x00000001 /*Interrupt Enable*/
+
+/* Channel Configuration Register high bits */
+#define DMA_CFG_FCMOD_REQ 0x00000001 /*Flow cntrl req*/
+#define DMA_CFG_PROTCTL (0x00000003 << 2) /*Protection cntrl*/
+
+/* Channel Configuration Register low bits */
+#define DMA_CFG_RELD_DST 0x80000000 /*Reload Dst/Src Addr*/
+#define DMA_CFG_RELD_SRC 0x40000000
+#define DMA_CFG_HS_SELSRC 0x00000800 /*SW hndshk Src/Dst*/
+#define DMA_CFG_HS_SELDST 0x00000400
+#define DMA_CFG_FIFOEMPTY (0x00000001 << 9) /*FIFO Empty bit*/
+
+/* Assign hardware handshaking interface (x) to dst / sre peripheral */
+#define DMA_CFG_HW_HS_DEST(int_num) ((int_num & 0xF) << 11)
+#define DMA_CFG_HW_HS_SRC(int_num) ((int_num & 0xF) << 7)
+
+/* Channel Linked List Pointer Register */
+#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
+#define DMA_LLP_AHBMASTER1 0 /* List Master Select */
+#define DMA_LLP_AHBMASTER2 1
+
+#define SATA_DWC_MAX_PORTS 1
+
+#define SATA_DWC_SCR_OFFSET 0x24
+#define SATA_DWC_REG_OFFSET 0x64
+
+/* DWC SATA Registers */
+struct sata_dwc_regs {
+ u32 fptagr; /* 1st party DMA tag */
+ u32 fpbor; /* 1st party DMA buffer offset */
+ u32 fptcr; /* 1st party DMA Xfr count */
+ u32 dmacr; /* DMA Control */
+ u32 dbtsr; /* DMA Burst Transac size */
+ u32 intpr; /* Interrupt Pending */
+ u32 intmr; /* Interrupt Mask */
+ u32 errmr; /* Error Mask */
+ u32 llcr; /* Link Layer Control */
+ u32 phycr; /* PHY Control */
+ u32 physr; /* PHY Status */
+ u32 rxbistpd; /* Recvd BIST pattern def register */
+ u32 rxbistpd1; /* Recvd BIST data dword1 */
+ u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
+ u32 txbistpd; /* Trans BIST pattern def register */
+ u32 txbistpd1; /* Trans BIST data dword1 */
+ u32 txbistpd2; /* Trans BIST data dword2 */
+ u32 bistcr; /* BIST Control Register */
+ u32 bistfctr; /* BIST FIS Count Register */
+ u32 bistsr; /* BIST Status Register */
+ u32 bistdecr; /* BIST Dword Error count register */
+ u32 res[15]; /* Reserved locations */
+ u32 testr; /* Test Register */
+ u32 versionr; /* Version Register */
+ u32 idr; /* ID Register */
+ u32 unimpl[192]; /* Unimplemented */
+ u32 dmadr[256]; /* FIFO Locations in DMA Mode */
+};
+
+#define SCR_SCONTROL_DET_ENABLE 0x00000001
+#define SCR_SSTATUS_DET_PRESENT 0x00000001
+#define SCR_SERROR_DIAG_X 0x04000000
+
+/* DWC SATA Register Operations */
+#define SATA_DWC_TXFIFO_DEPTH 0x01FF
+#define SATA_DWC_RXFIFO_DEPTH 0x01FF
+
+#define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004
+#define SATA_DWC_DMACR_TXCHEN (0x00000001 | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RXCHEN (0x00000002 | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN
+
+#define SATA_DWC_DBTSR_MWR(size) ((size/4) & \
+ SATA_DWC_TXFIFO_DEPTH)
+#define SATA_DWC_DBTSR_MRD(size) (((size/4) & \
+ SATA_DWC_RXFIFO_DEPTH) << 16)
+
+// SATA DWC Interrupts
+#define SATA_DWC_INTPR_DMAT 0x00000001
+#define SATA_DWC_INTPR_NEWFP 0x00000002
+#define SATA_DWC_INTPR_PMABRT 0x00000004
+#define SATA_DWC_INTPR_ERR 0x00000008
+#define SATA_DWC_INTPR_NEWBIST 0x00000010
+#define SATA_DWC_INTPR_IPF 0x80000000
+// Interrupt masks
+#define SATA_DWC_INTMR_DMATM 0x00000001
+#define SATA_DWC_INTMR_NEWFPM 0x00000002
+#define SATA_DWC_INTMR_PMABRTM 0x00000004
+#define SATA_DWC_INTMR_ERRM 0x00000008
+#define SATA_DWC_INTMR_NEWBISTM 0x00000010
+#define SATA_DWC_INTMR_PRIMERRM 0x00000020
+#define SATA_DWC_INTPR_CMDGOOD 0x00000080
+#define SATA_DWC_INTPR_CMDABORT 0x00000040
+
+#define SATA_DWC_LLCR_SCRAMEN 0x00000001
+#define SATA_DWC_LLCR_DESCRAMEN 0x00000002
+#define SATA_DWC_LLCR_RPDEN 0x00000004
+
+// Defines for SError register
+#define SATA_DWC_SERR_ERRI 0x00000001 // Recovered data integrity error
+#define SATA_DWC_SERR_ERRM 0x00000002 // Recovered communication error
+#define SATA_DWC_SERR_ERRT 0x00000100 // Non-recovered transient data integrity error
+#define SATA_DWC_SERR_ERRC 0x00000200 // Non-recovered persistent communication or data integrity error
+#define SATA_DWC_SERR_ERRP 0x00000400 // Protocol error
+#define SATA_DWC_SERR_ERRE 0x00000800 // Internal host adapter error
+#define SATA_DWC_SERR_DIAGN 0x00010000 // PHYRdy change
+#define SATA_DWC_SERR_DIAGI 0x00020000 // PHY internal error
+#define SATA_DWC_SERR_DIAGW 0x00040000 // Phy COMWAKE signal is detected
+#define SATA_DWC_SERR_DIAGB 0x00080000 // 10b to 8b decoder err
+#define SATA_DWC_SERR_DIAGT 0x00100000 // Disparity error
+#define SATA_DWC_SERR_DIAGC 0x00200000 // CRC error
+#define SATA_DWC_SERR_DIAGH 0x00400000 // Handshake error
+#define SATA_DWC_SERR_DIAGL 0x00800000 // Link sequence (illegal transition) error
+#define SATA_DWC_SERR_DIAGS 0x01000000 // Transport state transition error
+#define SATA_DWC_SERR_DIAGF 0x02000000 // Unrecognized FIS type
+#define SATA_DWC_SERR_DIAGX 0x04000000 // Exchanged error - Set when PHY COMINIT signal is detected.
+#define SATA_DWC_SERR_DIAGA 0x08000000 // Port Selector Presence detected
+
+/* This is all error bits, zero's are reserved fields. */
+#define SATA_DWC_SERR_ERR_BITS 0x0FFF0F03
+
+#define SATA_DWC_SCR0_SPD_GET(v) ((v >> 4) & 0x0000000F)
+
+/* Per-controller state for one DWC SATA instance. */
+struct sata_dwc_device {
+	struct resource reg;		/* Resource for register */
+	struct device *dev;		/* generic device struct */
+	struct ata_probe_ent *pe;	/* ptr to probe-ent */
+	struct ata_host *host;		/* libata host this controller backs */
+	u8 *reg_base;			/* ioremapped controller registers */
+	struct sata_dwc_regs *sata_dwc_regs;	/* DW Synopsys SATA specific */
+	u8 *scr_base;			/* base of the SCR register window */
+	int dma_channel;		/* DWC SATA DMA channel */
+	int irq_dma;			/* AHB DMA interrupt number */
+	struct an_timer;		-- removed; see below
+};
+
+#define SATA_DWC_QCMD_MAX 32
+
+/* Per-port state; all per-command arrays are indexed by NCQ tag. */
+struct sata_dwc_device_port {
+	struct sata_dwc_device *hsdev;		/* owning controller */
+	int cmd_issued[SATA_DWC_QCMD_MAX];	// QC issued
+	struct lli *llit[SATA_DWC_QCMD_MAX];	/* LLI chain per tag (virtual) */
+	dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];	/* LLI chain per tag (bus addr) */
+	u32 dma_chan[SATA_DWC_QCMD_MAX];	// Consider to be removed
+	int dma_pending[SATA_DWC_QCMD_MAX];	// DMA command needs to be processed
+	int num_lli[SATA_DWC_QCMD_MAX];		/* LLI count per tag */
+	u32 dma_complete;	// tasks completes DMA transfer
+	u32 sactive_issued;	/* issued queued ops */
+	u32 sactive_queued;	/* queued ops */
+	int no_dma_pending;	// Number of pending DMA
+	int dma_pending_isr_count;	// Number of interrupt count
+	int max_tag;	// maximum tag, used for debug NCQ only
+};
+
+static struct sata_dwc_device* dwc_dev_list[2]; // Device list.
+static int dma_intr_registered = 0;
+
+
+/*
+ * Commonly used DWC SATA driver Macros
+ */
+#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *) \
+ (host)->private_data)
+#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *) \
+ (ap)->host->private_data)
+#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *) \
+ (ap)->private_data)
+#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *) \
+ (qc)->ap->host->private_data)
+#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *) \
+ (hsdevp)->hsdev)
+
+enum {
+	/* Per-tag command issue states (cmd_issued[]) */
+	SATA_DWC_CMD_ISSUED_NOT = 0,
+	SATA_DWC_CMD_ISSUED_PENDING = 1,
+	SATA_DWC_CMD_ISSUED_EXEC = 2,
+	SATA_DWC_CMD_ISSUED_NODATA = 3,
+
+	/* Per-tag DMA states (dma_pending[]) */
+	SATA_DWC_DMA_PENDING_NONE = 0,
+	SATA_DWC_DMA_PENDING_TX = 1,
+	SATA_DWC_DMA_PENDING_RX = 2,
+	SATA_DWC_DMA_DONE = 3,
+};
+
+/*
+ * Globals
+ */
+static struct ahb_dma_regs *sata_dma_regs = 0;
+
+/*
+ * Prototypes
+ */
+static void sata_dwc_start_dma_transfer(struct ata_queued_cmd *qc);
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 check_status);
+static void sata_dwc_port_stop(struct ata_port *ap);
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
+
+static int dma_dwc_init(struct sata_dwc_device *hsdev);
+static void dma_dwc_exit(struct sata_dwc_device *hsdev);
+static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch);
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev);
+static void sata_dwc_init_port ( struct ata_port *ap );
+u8 sata_dwc_check_status(struct ata_port *ap);
+static inline u32 sata_dwc_core_scr_read ( struct ata_port *ap, unsigned int scr);
+
+
+
+
+
+
+/*
+ * Map a DMA data direction onto a short printable label.
+ */
+static const char *dir_2_txt(enum dma_data_direction dir)
+{
+	if (dir == DMA_BIDIRECTIONAL)
+		return "bi";
+	if (dir == DMA_FROM_DEVICE)
+		return "from";
+	if (dir == DMA_TO_DEVICE)
+		return "to";
+	if (dir == DMA_NONE)
+		return "none";
+	return "err";
+}
+
+
+/*
+ * Map an ATA taskfile protocol onto a short printable label.
+ */
+static const char *prot_2_txt(enum ata_tf_protocols protocol)
+{
+	if (protocol == ATA_PROT_UNKNOWN)
+		return "unknown";
+	if (protocol == ATA_PROT_NODATA)
+		return "nodata";
+	if (protocol == ATA_PROT_PIO)
+		return "pio";
+	if (protocol == ATA_PROT_DMA)
+		return "dma";
+	if (protocol == ATA_PROT_NCQ)
+		return "ncq";
+	if (protocol == ATAPI_PROT_PIO)
+		return "atapi pio";
+	if (protocol == ATAPI_PROT_NODATA)
+		return "atapi nodata";
+	if (protocol == ATAPI_PROT_DMA)
+		return "atapi dma";
+	return "err";
+}
+
+
+/*
+ * Convert SERROR register bits to text.
+ *
+ * Rewritten table-driven: the original repeated the same test/printk
+ * pair eighteen times.  Output is identical for the same input.
+ */
+static void print_serror_2_txt(u32 serror)
+{
+	static const struct {
+		u32 bit;
+		const char *name;
+	} serr_bits[] = {
+		{ SATA_DWC_SERR_ERRI,  "ERRI"  },
+		{ SATA_DWC_SERR_ERRM,  "ERRM"  },
+		{ SATA_DWC_SERR_ERRT,  "ERRT"  },
+		{ SATA_DWC_SERR_ERRC,  "ERRC"  },
+		{ SATA_DWC_SERR_ERRP,  "ERRP"  },
+		{ SATA_DWC_SERR_ERRE,  "ERRE"  },
+		{ SATA_DWC_SERR_DIAGN, "DIAGN" },
+		{ SATA_DWC_SERR_DIAGI, "DIAGI" },
+		{ SATA_DWC_SERR_DIAGW, "DIAGW" },
+		{ SATA_DWC_SERR_DIAGB, "DIAGB" },
+		{ SATA_DWC_SERR_DIAGT, "DIAGT" },
+		{ SATA_DWC_SERR_DIAGC, "DIAGC" },
+		{ SATA_DWC_SERR_DIAGH, "DIAGH" },
+		{ SATA_DWC_SERR_DIAGL, "DIAGL" },
+		{ SATA_DWC_SERR_DIAGS, "DIAGS" },
+		{ SATA_DWC_SERR_DIAGF, "DIAGF" },
+		{ SATA_DWC_SERR_DIAGX, "DIAGX" },
+		{ SATA_DWC_SERR_DIAGA, "DIAGA" },
+	};
+	int i;
+
+	if (!serror)
+		return;
+
+	printk("Detect errors (0x%08x):", serror);
+	for (i = 0; i < ARRAY_SIZE(serr_bits); i++) {
+		if (serror & serr_bits[i].bit)
+			printk(" %s", serr_bits[i].name);
+	}
+	printk("\n");
+}
+
+
+
+/*
+ * Convert SATA command opcode to text (for debug/trace output only).
+ * NOTE(review): declared 'inline' without 'static' -- confirm no other
+ * translation unit relies on this external linkage before changing it.
+ */
+inline const char *ata_cmd_2_txt(const struct ata_taskfile *tf)
+{
+	switch (tf->command) {
+	case ATA_CMD_CHK_POWER:
+		return "ATA_CMD_CHK_POWER";
+	case ATA_CMD_EDD:
+		return "ATA_CMD_EDD";
+	case ATA_CMD_FLUSH:
+		return "ATA_CMD_FLUSH";
+	case ATA_CMD_FLUSH_EXT:
+		return "ATA_CMD_FLUSH_EXT";
+	case ATA_CMD_ID_ATA:
+		return "ATA_CMD_ID_ATA";
+	case ATA_CMD_ID_ATAPI:
+		return "ATA_CMD_ID_ATAPI";
+	case ATA_CMD_FPDMA_READ:
+		return "ATA_CMD_FPDMA_READ";
+	case ATA_CMD_FPDMA_WRITE:
+		return "ATA_CMD_FPDMA_WRITE";
+	case ATA_CMD_READ:
+		return "ATA_CMD_READ";
+	case ATA_CMD_READ_EXT:
+		return "ATA_CMD_READ_EXT";
+	case ATA_CMD_READ_NATIVE_MAX_EXT :
+		return "ATA_CMD_READ_NATIVE_MAX_EXT";
+	case ATA_CMD_VERIFY_EXT :
+		return "ATA_CMD_VERIFY_EXT";
+	case ATA_CMD_WRITE:
+		return "ATA_CMD_WRITE";
+	case ATA_CMD_WRITE_EXT:
+		return "ATA_CMD_WRITE_EXT";
+	case ATA_CMD_PIO_READ:
+		return "ATA_CMD_PIO_READ";
+	case ATA_CMD_PIO_READ_EXT:
+		return "ATA_CMD_PIO_READ_EXT";
+	case ATA_CMD_PIO_WRITE:
+		return "ATA_CMD_PIO_WRITE";
+	case ATA_CMD_PIO_WRITE_EXT:
+		return "ATA_CMD_PIO_WRITE_EXT";
+	case ATA_CMD_SET_FEATURES:
+		return "ATA_CMD_SET_FEATURES";
+	case ATA_CMD_PACKET:
+		return "ATA_CMD_PACKET";
+	case ATA_CMD_PMP_READ:
+		return "ATA_CMD_PMP_READ";
+	case ATA_CMD_PMP_WRITE:
+		return "ATA_CMD_PMP_WRITE";
+	default:
+		return "ATA_CMD_???";
+	}
+}
+
+
+
+
+/*
+ * Dump content of the taskfile
+ */
+static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ ata_port_printk(ap, KERN_INFO, "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx"
+ "device: %x\n", tf->command, prot_2_txt(tf->protocol),
+ tf->flags, tf->device);
+ printk("\tfeature: 0x%02x nsect: 0x%x lbal: 0x%x lbam:"
+ "0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
+ tf->lbam, tf->lbah);
+ printk("\thob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x "
+ "hob_lbam: 0x%x hob_lbah: 0x%x\n", tf->hob_feature,
+ tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
+ tf->hob_lbah);
+}
+
+
/*
 * Print out current setting of the DMA configuration by reading
 * DMA registers.
 *
 * Debug aid: reads the live per-channel registers of the shared AHB DMA
 * controller (configuration, control, linked-list pointer, source and
 * destination addresses, scatter register) and dumps them to the log.
 * Reads only -- does not modify any channel state.
 */
static void print_dma_registers( int dma_chan ) {
	printk("Content of DMA registers in channel %d:\n", dma_chan);
	printk("\t- cfg.low : 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.low)));
	printk("\t- cfg.high: 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.high)));
	printk("\t- ctl.low : 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].ctl.low)));
	printk("\t- ctl.high: 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].ctl.high)));
	printk("\t- llp.low : 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].llp.low)));
	printk("\t- sar.low : 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.low)));
	printk("\t- sar.high: 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.high)));
	printk("\t- dar.low : 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.low)));
	printk("\t- dar.high: 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.high)));
	printk("\t- sgr.low : 0x%08x\n", in_le32(&(sata_dma_regs->chan_regs[dma_chan].sgr.low)));
}
+
+
+/*
+ * Print out DMA information set up in LLI
+ */
+static void print_dma_configuration ( struct lli *lli, int idx ) {
+ printk("SATA DWC Port DMA configuration\n");
+ printk("index %d\n", idx);
+ printk(" - lli[%d].ctl.high: 0x%08x\n", idx, lli[idx].ctl.high);
+ printk(" - lli[%d].ctl.low : 0x%08x\n", idx, lli[idx].ctl.low);
+ printk(" - lli[%d].lli.dar : 0x%08x\n", idx, lli[idx].dar);
+ printk(" - lli[%d].lli.sar : 0x%08x\n", idx, lli[idx].sar);
+ printk(" - lli[%d].next_llp: 0x%08x\n", idx, lli[idx].llp);
+}
+
+
/*
 * Function: get_burst_length_encode
 * arguments: datalength: length in bytes of data
 * returns value to be programmed in register corresponding to data length
 * This value is effectively the log(base 2) of the length in 32-bit words,
 * saturated to the range 0..5 (encodings for burst sizes 1..64 words).
 */
static inline int get_burst_length_encode(int datalength)
{
	int words = datalength >> 2;	/* div by 4 to get lword count */
	int encode = 0;
	int threshold = 4;

	/* Each encoding step doubles the word-count threshold: 4,8,16,32,64 */
	while (encode < 5 && words >= threshold) {
		encode++;
		threshold <<= 1;
	}

	return encode;
}
+
+
/*
 * Clear Interrupts on a DMA channel
 *
 * Acknowledges every interrupt class for channel 'c' (transfer-complete,
 * block-complete, source transaction, destination transaction and error)
 * by writing the channel's bit to each write-to-clear register.
 */
static inline void clear_chan_interrupts(int c)
{
	out_le32(&(sata_dma_regs->interrupt_clear.tfr.low), DMA_CHANNEL(c));
	out_le32(&(sata_dma_regs->interrupt_clear.block.low), DMA_CHANNEL(c));
	out_le32(&(sata_dma_regs->interrupt_clear.srctran.low), DMA_CHANNEL(c));
	out_le32(&(sata_dma_regs->interrupt_clear.dsttran.low), DMA_CHANNEL(c));
	out_le32(&(sata_dma_regs->interrupt_clear.error.low), DMA_CHANNEL(c));
}
+
+
+/*
+ * Function: dma_request_channel
+ * arguments: None
+ * returns channel number if available else -1
+ * This function assigns the next available DMA channel from the list to the
+ * requester
+ */
+static int dma_request_channel(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ if (!(in_le32(&(sata_dma_regs->dma_chan_en.low)) & DMA_CHANNEL(hsdev->dma_channel))) {
+ dwc_port_vdbg(ap, "%s Successfully requested DMA channel %d\n",
+ __func__, hsdev->dma_channel);
+ return (hsdev->dma_channel);
+ }
+
+ return -1;
+}
+
+
+static inline u32 qcmd_tag_to_mask(u8 tag)
+{
+ return 0x00000001 << (tag & 0x1f);
+}
+
+
+
+
+/*
+ * Function: dma_dwc_interrupt
+ * arguments: irq, dev_id, pt_regs
+ * returns channel number if available else -1
+ * Interrupt Handler for DW AHB SATA DMA
+ */
+static int dma_dwc_interrupt(int irq, void *hsdev_instance)
+{
+ volatile u32 tfr_reg, err_reg;
+ unsigned long flags;
+ struct sata_dwc_device *hsdev =
+ (struct sata_dwc_device *)hsdev_instance;
+ struct ata_host *host = (struct ata_host *)hsdev->host;
+ struct ata_port *ap;
+ struct sata_dwc_device_port *hsdevp;
+ u8 tag = 0;
+ int chan;
+ unsigned int port = 0;
+ struct ata_queued_cmd *qc;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ ap = host->ports[port];
+ hsdevp = HSDEVP_FROM_AP(ap);
+ if ( ap->link.active_tag == ATA_TAG_POISON )
+ tag = 0;
+ else
+ tag = ap->link.active_tag;
+
+ dwc_port_dbg(ap, "%s: DMA interrupt in channel %d, tag=%d\n", __func__, hsdev->dma_channel, tag);
+
+ tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low));
+ err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));
+
+ dwc_port_vdbg(ap, "tfr_reg=0x%08x err_reg=0x%08x pending=%s\n",
+ tfr_reg, err_reg, (hsdevp->dma_pending[tag]==0)? "DMA_PENDING_NONE" : (hsdevp->dma_pending[tag]==1? "DMA_PENDING_TX" : "DMA_PENDING_RX"));
+ chan = hsdev->dma_channel;
+
+ /*
+ * Interrupt to indicate DMA transfer completion
+ * to the destination peripheral
+ */
+ if (tfr_reg & DMA_CHANNEL(chan)) {
+ sata_dwc_clear_dmacr(hsdevp, tag);
+ hsdevp->no_dma_pending--;
+
+ // It should be SATA_DWC_DMA_PENDING_TX or SATA_DWC_DMA_PENDING_RX
+ // Otherwise, this is out of sync.
+ if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) {
+ dev_err(ap->dev, "DMA not pending tfr=0x%08x "
+ "err=0x%08x tag=0x%02x pending=%d\n",
+ tfr_reg, err_reg, tag,
+ hsdevp->dma_pending[tag]);
+ }
+
+ // Update DMA pending for the completion tag.
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_DONE;
+
+ /* Clear the interrupt */
+ out_le32(&(sata_dma_regs->interrupt_clear.tfr.low),
+ DMA_CHANNEL(chan));
+ }
+
+ /*
+ * Process Error Interrupt.
+ * When this occurs, the DMA transfer is cancelled and the
+ * channel is disabled.
+ */
+ if (unlikely(err_reg & DMA_CHANNEL(chan))) {
+ dev_err(ap->dev, "error interrupt err_reg=0x%08x\n", err_reg);
+ spin_lock_irqsave(ap->lock, flags);
+
+ /* disable DMAC */
+ dma_dwc_terminate_dma(ap, chan);
+
+ // Set QC flag to Fail state
+ qc = ata_qc_from_tag(ap, tag);
+ if ( qc )
+ qc->err_mask |= AC_ERR_ATA_BUS;
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* Clear the interrupt. */
+ out_le32(&(sata_dma_regs->interrupt_clear.error.low),
+ DMA_CHANNEL(chan));
+
+ // Update DMA pending for the completion tag (ignore current QC)
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_DONE;
+ }
+ hsdevp->dma_complete |= qcmd_tag_to_mask(tag);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_HANDLED;
+}
+
+
+
/*
 * Shared top-level DMA interrupt dispatcher.  Reads the raw
 * transfer-complete and error status words once, then forwards every
 * channel whose bit is set to dma_dwc_interrupt() for the device that
 * owns it (looked up in dwc_dev_list[]).
 */
static irqreturn_t dma_dwc_handler(int irq, void *hsdev_instance)
{
	volatile u32 tfr_reg, err_reg;
	int chan;

	tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low));
	err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));

	for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
		/* Check for end-of-transfer interrupt. */
		if (tfr_reg & DMA_CHANNEL(chan)) {
			dma_dwc_interrupt(0, dwc_dev_list[chan]);
		}

		/* Check for error interrupt. */
		/* NOTE(review): when both the tfr and error bits are set for
		 * the same channel, dma_dwc_interrupt() is invoked twice; the
		 * second call re-reads status after the first cleared it --
		 * confirm the double dispatch is intentional/harmless. */
		if (err_reg & DMA_CHANNEL(chan)) {
			dma_dwc_interrupt(0, dwc_dev_list[chan]);
		}
	}

	return IRQ_HANDLED;
}
+
/*
 * Register the shared DMA interrupt handler exactly once (guarded by
 * dma_intr_registered).  Returns 0 on success or -ENODEV when the IRQ
 * could not be obtained.
 */
static int dma_register_interrupt (struct sata_dwc_device *hsdev)
{
	int retval = 0;
	int irq = hsdev->irq_dma;
	/*
	 * FIXME: 2 SATA controllers share the same DMA engine so
	 * currently, they also share same DMA interrupt
	 */
	if (!dma_intr_registered) {
		printk("%s register irq (%d)\n", __func__, irq);
		//retval = request_irq(irq, dma_dwc_handler, IRQF_SHARED, "SATA DMA", hsdev);
		/* NOTE(review): registered here with dev_id == NULL, but
		 * dma_dwc_exit() calls free_irq(irq, hsdev).  free_irq matches
		 * on dev_id, so the two must agree -- verify before release. */
		retval = request_irq(irq, dma_dwc_handler, IRQF_DISABLED, "SATA DMA", NULL);
		if (retval) {
			dev_err(hsdev->dev, "%s: could not get IRQ %d\n", __func__, irq);
			return -ENODEV;
		}
		dma_intr_registered = 1;
	}
	return retval;
}
+
+/*
+ * Function: dma_request_interrupts
+ * arguments: hsdev
+ * returns status
+ * This function registers ISR for a particular DMA channel interrupt
+ */
+static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
+{
+ int retval = 0;
+ int dma_chan = hsdev->dma_channel;
+
+ /* Unmask error interrupt */
+ out_le32(&sata_dma_regs->interrupt_mask.error.low,
+ in_le32(&sata_dma_regs->interrupt_mask.error.low) | DMA_ENABLE_CHAN(dma_chan));
+
+ /* Unmask end-of-transfer interrupt */
+ out_le32(&sata_dma_regs->interrupt_mask.tfr.low,
+ in_le32(&sata_dma_regs->interrupt_mask.tfr.low) | DMA_ENABLE_CHAN(dma_chan));
+
+#ifdef DWC_VDEBUG
+ dwc_dev_info(hsdev->dev, "%s Current value of:\n", __func__);
+ printk(" - interrupt_mask.error=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.error.low));
+ printk(" - interrupt_mask.tfr=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.tfr.low));
+#endif
+ return retval;
+}
+
+
+
+/*
+ * Function: map_sg_to_lli
+ * arguments: sg: scatter/gather list(sg)
+ * num_elems: no of elements in sg list
+ * dma_lli: LLI table
+ * dest: destination address
+ * read: whether the transfer is read or write
+ * returns array of AHB DMA Linked List Items
+ * This function creates a list of LLIs for DMA Xfr and returns the number
+ * of elements in the DMA linked list.
+ *
+ * Note that the Synopsis driver has a comment proposing that better performance
+ * is possible by only enabling interrupts on the last item in the linked list.
+ * However, it seems that could be a problem if an error happened on one of the
+ * first items. The transfer would halt, but no error interrupt would occur.
+ *
+ * Currently this function sets interrupts enabled for each linked list item:
+ * DMA_CTL_INT_EN.
+ */
+static int map_sg_to_lli(struct ata_queued_cmd *qc, struct lli *lli,
+ dma_addr_t dma_lli, void __iomem *dmadr_addr)
+{
+ struct scatterlist *sg = qc->sg;
+ struct device *dwc_dev = qc->ap->dev;
+ int num_elems = qc->n_elem;
+ int dir = qc->dma_dir;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(qc->ap);
+
+ int i, idx = 0;
+ int fis_len = 0;
+ dma_addr_t next_llp;
+ int bl;
+ unsigned int dma_ts = 0;
+
+ dwc_port_dbg(qc->ap, "%s\n", __func__);
+#ifdef DWC_VDEBUG
+ printk("sg=%p nelem=%d lli=%p dma_lli=0x%08x "
+ "dmadr=0x%08x\n", sg, num_elems, lli, (u32)dma_lli,
+ (u32)dmadr_addr);
+#endif
+
+ bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
+
+ for (i = 0; i < num_elems; i++, sg++) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+#ifdef DWC_VDEBUG
+ printk("elem=%d sg_addr=0x%x sg_len=%d\n",
+ i, addr, sg_len);
+#endif
+ while (sg_len) {
+
+ if (unlikely(idx >= SATA_DWC_DMAC_LLI_NUM)) {
+ /* The LLI table is not large enough. */
+ dev_err(dwc_dev, "LLI table overrun (idx=%d)\n",
+ idx);
+ break;
+ }
+ len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
+ SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
+
+ offset = addr & 0xffff;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ /*
+ * Make sure a LLI block is not created that will span a
+ * 8K max FIS boundary. If the block spans such a FIS
+ * boundary, there is a chance that a DMA burst will
+ * cross that boundary -- this results in an error in
+ * the host controller.
+ */
+ if (unlikely(fis_len + len > 8192)) {
+#ifdef DWC_VDEBUG
+ printk("SPLITTING: fis_len=%d(0x%x) "
+ "len=%d(0x%x)\n", fis_len, fis_len, len, len);
+#endif
+ len = 8192 - fis_len;
+ fis_len = 0;
+ } else {
+ fis_len += len;
+ }
+ if (fis_len == 8192)
+ fis_len = 0;
+
+ /*
+ * Set DMA addresses and lower half of control register
+ * based on direction.
+ */
+#ifdef DWC_VDEBUG
+ printk("sg_len = %d, len = %d\n", sg_len, len);
+#endif
+
+#if defined(CONFIG_APM82181)
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+ if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(1) | /* Source: Master 2 */
+ DMA_CTL_DMS(0) | /* Dest: Master 1 */
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(2) | /* Source: Master 3 */
+ DMA_CTL_DMS(0) | /* Dest: Master 1 */
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+ if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(2) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+ }
+#else
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(1) |
+ DMA_CTL_DMS(0) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+#endif
+ dwc_port_vdbg(qc->ap, "%s setting ctl.high len: 0x%08x val: "
+ "0x%08x\n", __func__, len,
+ DMA_CTL_BLK_TS(len / 4));
+
+ /* Program the LLI CTL high register */
+ dma_ts = DMA_CTL_BLK_TS(len / 4);
+ lli[idx].ctl.high = cpu_to_le32(dma_ts);
+
+ /*
+ * Program the next pointer. The next pointer must be
+ * the physical address, not the virtual address.
+ */
+ next_llp = (dma_lli + ((idx + 1) * sizeof(struct lli)));
+
+ /* The last 2 bits encode the list master select. */
+#if defined(CONFIG_APM82181)
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER1);
+#else
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
+#endif
+
+ lli[idx].llp = cpu_to_le32(next_llp);
+
+#ifdef CONFIG_SATA_DWC_VDEBUG
+ print_dma_configuration(lli, idx);
+#endif
+ idx++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ /*
+ * The last next ptr has to be zero and the last control low register
+ * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
+ * and destination enable) set back to 0 (disabled.) This is what tells
+ * the core that this is the last item in the linked list.
+ */
+ if (likely(idx)) {
+ lli[idx-1].llp = 0x00000000;
+ lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
+
+ /* Flush cache to memory */
+ dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
+ DMA_BIDIRECTIONAL);
+ }
+
+#ifdef DWC_VDEBUG
+ printk("%s: Final index %d. Setting:\n", __func__, idx-1);
+ printk("- ctl.high = 0x%08x\n", lli[idx-1].ctl.high);
+ printk("- ctl.low = 0x%08x\n", lli[idx-1].ctl.low);
+ printk("- lli.dar = 0x%08x\n", lli[idx-1].dar);
+ printk("- lli.sar = 0x%08x\n", lli[idx-1].sar);
+ printk("- next_llp = 0x%08x\n", lli[idx-1].llp);
+#endif
+ return idx;
+}
+
+
+/*
+ * Check if the selected DMA channel is currently enabled.
+ */
+static int dma_dwc_channel_enabled(int ch)
+{
+ u32 dma_chan;
+
+ // Read the DMA channel register
+ dma_chan = in_le32(&(sata_dma_regs->dma_chan_en.low));
+ if (dma_chan & DMA_CHANNEL(ch))
+ return 1;
+
+ return 0;
+}
+
+
/*
 * Terminate the current DMA transaction abnormally if it is currently enabled
 * If it is currently disable, do nothing.
 *
 * Follows the controller's documented shutdown sequence: suspend the
 * channel, wait for its FIFO to drain, then clear the channel-enable
 * bit and wait for the hardware to confirm the channel is off.
 *
 * NOTE(review): both polling loops are unbounded -- if the hardware
 * never reports FIFO-empty/disabled this spins forever; consider a
 * timeout.
 */
static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch)
{
	int enabled = dma_dwc_channel_enabled(dma_ch);
	u32 cfg0_l, chan_en;

	if (enabled) {
		dev_info(ap->dev, "%s: Terminate DMA on channel=%d, chan_en=0x%08x \n",
			__func__, dma_ch, in_le32(&(sata_dma_regs->dma_chan_en.low)));

		// Disable a channel Prior to Transfer Completion
		// 1. Set the AHBDMA0_CFG0_L[CH_SUSP] bit to tell the SATA DMA to halt all transfers
		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low)) | 0x100);

		// 2. Poll the AHBDMA0_CFG0_L[FIFO_EMPTY] bit until it indicates that the channel FIFO is empty
		do {
			cfg0_l = in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low));
			dwc_port_dbg(ap, "Polling the AHBDMA0_CFG0_L register (cfg.low=0x%08x)\n", cfg0_l);
			ndelay(100);
		} while ((cfg0_l & 0x200) == 0);

		// 3. Clear the channel enable register
		chan_en = in_le32(&(sata_dma_regs->dma_chan_en.low));
		chan_en |= DMA_ENABLE_CHAN(dma_ch);	// Set the write enable bit
		chan_en &= ~DMA_CHANNEL(dma_ch);	// Clear the channel enable
		out_le32(&(sata_dma_regs->dma_chan_en.low), chan_en);

		// Wait for the channel is disabled
		do {
			enabled = dma_dwc_channel_enabled(dma_ch);
			dwc_port_info(ap, "In the while loop of %s, enabled=%d\n",__func__, enabled);
			ndelay(100);
		} while (enabled);
	}
}
+
+
/*
 * Configure DMA channel registers ready for data transfer
 *
 * Programs the channel CFG registers, points the hardware at the LLI
 * table (physical address 'dma_lli') and arms linked-list source/dest
 * fetching in CTL.  All pending channel interrupts are acknowledged
 * first so a stale status cannot fire on enable.
 */
static void configure_dma_channel(int dma_ch, dma_addr_t dma_lli) {
	/* Clear channel interrupts */
	clear_chan_interrupts(dma_ch);

	/* Program the CFG register. */
#if defined(CONFIG_APM82181)
	/* Magic per-channel CFG values -- presumably hardware-handshake and
	 * request-line selection from the APM82181 manual; TODO confirm
	 * against the SoC documentation. */
	if (dma_ch == 0) {
		/* Buffer mode enabled, FIFO_MODE=0 */
		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x000000d);
		/* Channel 0 bit[7:5] */
		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020);
	} else if (dma_ch == 1) {
		/* Buffer mode enabled, FIFO_MODE=0 */
		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000088d);
		/* Channel 1 bit[7:5] */
		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020);
	}
#else
	out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high),
		DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
	out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0);
#endif

	/* Program the address of the linked list */
#if defined(CONFIG_APM82181)
	out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low),
		DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER1));
#else
	out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low),
		DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
#endif

	/* Program the CTL register with src enable / dst enable */
	out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low),
		DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
}
+
+
+/*
+ * Function: dma_dwc_exit
+ * arguments: None
+ * returns status
+ * This function exits the SATA DMA driver
+ */
+static void dma_dwc_exit(struct sata_dwc_device *hsdev)
+{
+ dwc_dev_vdbg(hsdev->dev, "%s:\n", __func__);
+ if (sata_dma_regs)
+ iounmap(sata_dma_regs);
+
+ if (hsdev->irq_dma)
+ free_irq(hsdev->irq_dma, hsdev);
+}
+
+
+/*
+ * Function: dma_dwc_init
+ * arguments: hsdev
+ * returns status
+ * This function initializes the SATA DMA driver
+ */
+static int dma_dwc_init(struct sata_dwc_device *hsdev)
+{
+ int err;
+ int irq = hsdev->irq_dma;
+
+ err = dma_request_interrupts(hsdev, irq);
+ if (err) {
+ dev_err(hsdev->dev, "%s: dma_request_interrupts returns %d\n",
+ __func__, err);
+ goto error_out;
+ }
+
+ /* Enabe DMA */
+ out_le32(&(sata_dma_regs->dma_cfg.low), DMA_EN);
+
+ dev_notice(hsdev->dev, "DMA initialized\n");
+ dev_notice(hsdev->dev, "DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low)));
+ dwc_dev_vdbg(hsdev->dev, "SATA DMA registers=0x%p\n", sata_dma_regs);
+
+ return 0;
+
+error_out:
+ dma_dwc_exit(hsdev);
+
+ return err;
+}
+
+
+static void sata_dwc_dev_config(struct ata_device *adev)
+{
+ /*
+ * Does not support NCQ over a port multiplier
+ * (no FIS-based switching).
+ */
+ if (adev->flags & ATA_DFLAG_NCQ) {
+ if (sata_pmp_attached(adev->link->ap)) {
+ adev->flags &= ~ATA_DFLAG_NCQ;
+ ata_dev_printk(adev, KERN_INFO,
+ "NCQ disabled for command-based switching\n");
+ }
+ }
+
+ /*
+ * Since the sata_pmp_error_handler function in libata-pmp
+ * make FLAG_AN disabled in the first time SATA port is configured.
+ * Asynchronous notification is not configured.
+ * This will enable the AN feature manually.
+ */
+ adev->flags |= ATA_DFLAG_AN;
+}
+
+
+
+static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+ if (unlikely(scr > SCR_NOTIFICATION)) {
+ dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+ __func__, scr);
+ return -EINVAL;
+ }
+
+ *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
+ dwc_dev_vdbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
+ __func__, link->ap->print_id, scr, *val);
+
+ return 0;
+}
+
+
+static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+ dwc_dev_vdbg(link->ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
+ __func__, link->ap->print_id, scr, val);
+ if (unlikely(scr > SCR_NOTIFICATION)) {
+ dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+ __func__, scr);
+ return -EINVAL;
+ }
+ out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
+
+ return 0;
+}
+
+
+static inline u32 sata_dwc_core_scr_read ( struct ata_port *ap, unsigned int scr)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ return in_le32((void __iomem *)hsdev->scr_base + (scr * 4));
+}
+
+
+static inline void sata_dwc_core_scr_write ( struct ata_port *ap, unsigned int scr, u32 val)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ out_le32((void __iomem *)hsdev->scr_base + (scr * 4), val);
+}
+
+
+/*
+ * Clear content of the SERROR register
+ */
+static inline void clear_serror(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ out_le32( (void __iomem *)hsdev->scr_base + 4,
+ in_le32((void __iomem *)hsdev->scr_base + 4));
+}
+
+
+static inline void clear_intpr(struct sata_dwc_device *hsdev)
+{
+ out_le32(&hsdev->sata_dwc_regs->intpr,
+ in_le32(&hsdev->sata_dwc_regs->intpr));
+}
+
+
+static inline void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
+{
+ out_le32(&hsdev->sata_dwc_regs->intpr, bit);
+ // in_le32(&hsdev->sata_dwc_regs->intpr));
+}
+
+
+static inline void enable_err_irq(struct sata_dwc_device *hsdev)
+{
+ out_le32(&hsdev->sata_dwc_regs->intmr,
+ in_le32(&hsdev->sata_dwc_regs->intmr) | SATA_DWC_INTMR_ERRM);
+ out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS);
+}
+
+
+/*
+ * Timer to monitor SCR_NOTIFICATION registers on the
+ * SATA port. This is enabled only when the SATA PMP
+ * card is plugged into the SATA port.
+ */
+static void sata_dwc_an_chk(unsigned long arg)
+{
+ struct ata_port *ap = (void *)arg;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ unsigned long flags;
+ int rc = 0x0;
+ u32 sntf = 0x0;
+
+ if ( !sata_pmp_attached(ap) ) {
+ ata_port_printk(ap, KERN_INFO, "Maximum qc->tag in 10 seconds: %d\n", hsdevp->max_tag);
+ hsdevp->max_tag = 0;
+ hsdev->an_timer.expires = jiffies + msecs_to_jiffies(10000);
+ add_timer(&hsdev->an_timer);
+ return;
+ }
+ spin_lock_irqsave(ap->lock, flags);
+ rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+
+ // If some changes on the SCR4, call asynchronous notification
+ if ( (rc == 0) & (sntf != 0)) {
+ dwc_port_dbg(ap, "Call assynchronous notification sntf=0x%08x\n", sntf);
+ sata_async_notification(ap);
+ hsdev->an_timer.expires = jiffies + msecs_to_jiffies(8000);
+ } else {
+ hsdev->an_timer.expires = jiffies + msecs_to_jiffies(3000);
+ }
+ add_timer(&hsdev->an_timer);
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+
+/*
+ * sata_dwc_pmp_select - Set the PMP field in SControl to the specified port number.
+ *
+ * @port: The value (port number) to set the PMP field to.
+ *
+ * @return: The old value of the PMP field.
+ */
+static u32 sata_dwc_pmp_select(struct ata_port *ap, u32 port)
+{
+ u32 scontrol, old_port;
+ if (sata_pmp_supported(ap)) {
+ scontrol = sata_dwc_core_scr_read(ap, SCR_CONTROL);
+ old_port = SCONTROL_TO_PMP(scontrol);
+
+ // Select new PMP port
+ if ( port != old_port ) {
+ scontrol &= ~SCONTROL_PMP_MASK;
+ sata_dwc_core_scr_write(ap, SCR_CONTROL, scontrol | PMP_TO_SCONTROL(port));
+ dwc_port_dbg(ap, "%s: old port=%d new port=%d\n", __func__, old_port, port);
+ }
+ return old_port;
+ }
+ else
+ return port;
+}
+
+
+/*
+ * Get the current PMP port
+ */
+static inline u32 current_pmp(struct ata_port *ap)
+{
+ return SCONTROL_TO_PMP(sata_dwc_core_scr_read(ap, SCR_CONTROL));
+}
+
+
+/*
+ * Process when a PMP card is attached in the SATA port.
+ * Since our SATA port support command base switching only,
+ * NCQ will not be available.
+ * We disable the NCQ feature in SATA port.
+ */
+static void sata_dwc_pmp_attach ( struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ dev_info(ap->dev, "Attach SATA port multiplier with %d ports\n", ap->nr_pmp_links);
+ // Disable NCQ
+ ap->flags &= ~ATA_FLAG_NCQ;
+
+ // Initialize timer for checking AN
+ init_timer(&hsdev->an_timer);
+ hsdev->an_timer.expires = jiffies + msecs_to_jiffies(20000);
+ hsdev->an_timer.function = sata_dwc_an_chk;
+ hsdev->an_timer.data = (unsigned long)(ap);
+ add_timer(&hsdev->an_timer);
+}
+
+
+/*
+ * Process when PMP card is removed from the SATA port.
+ * Re-enable NCQ for using by the SATA drive in the future
+ */
+static void sata_dwc_pmp_detach ( struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ dev_info(ap->dev, "Detach SATA port\n");
+ // Re-enable the NCQ
+ ap->flags |= ATA_FLAG_NCQ;
+
+ sata_dwc_pmp_select(ap, 0);
+
+ // Delete timer since PMP card is detached
+ del_timer(&hsdev->an_timer);
+}
+
+
+
+// Check the link to be ready
+int sata_dwc_check_ready ( struct ata_link *link ) {
+ u8 status;
+ struct ata_port *ap = link->ap;
+ status = ioread8(ap->ioaddr.status_addr);
+ return ata_check_ready(status);
+}
+
+
/*
 * Do soft reset on the current SATA link.
 *
 * Selects the link's PMP port, pulses ATA_SRST in the device control
 * register (the timing of the three writes is the standard SRST
 * sequence -- do not reorder), waits for readiness, then classifies
 * the attached device from the signature taskfile.  SError is cleared
 * before returning.  Returns the ata_wait_after_reset() result.
 */
static int sata_dwc_softreset(struct ata_link *link, unsigned int *classes,
		unsigned long deadline)
{
	int rc;
	struct ata_port *ap = link->ap;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	struct ata_taskfile tf;

	sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));

	/* Issue bus reset */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	ap->last_ctl = ap->ctl;

	/* Always check readiness of the master device */
	rc = ata_wait_after_reset(link, deadline, sata_dwc_check_ready);

	// Classify the ata_port
	*classes = ATA_DEV_NONE;
	/* Verify if SStatus indicates device presence */
	if (ata_link_online(link)) {
		memset(&tf, 0, sizeof(tf));
		ata_sff_tf_read(ap, &tf);
		*classes = ata_dev_classify(&tf);
	}

	if ( *classes == ATA_DEV_PMP)
		dwc_link_dbg(link, "-->found PMP device by sig\n");

	clear_serror(link->ap);

	return rc;
}
+
+/*
+ * Set default parameters for SATA Drivers.
+ * This should be called each time hard reset is executed.
+ */
+static void sata_dwc_default_params ( struct sata_dwc_device_port *hsdevp ) {
+ int i;
+
+ hsdevp->sactive_issued = 0;
+ hsdevp->sactive_queued = 0;
+ hsdevp->dma_complete = 0;
+ hsdevp->no_dma_pending = 0;
+ for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+ hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
+ hsdevp->dma_pending[i] = SATA_DWC_DMA_PENDING_NONE;
+ }
+
+}
+
+
+/*
+ * sata_dwc_hardreset - Do hardreset the SATA controller
+ */
+static int sata_dwc_hardreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ int rc;
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(link->ap);
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
+ bool online;
+
+ dwc_link_dbg(link, "%s\n", __func__);
+ sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));
+
+ // Terminate DMA channel if it is currently enabled
+ dma_dwc_terminate_dma(link->ap, hsdev->dma_channel);
+
+ // Call standard hard reset
+ rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+
+ // Reconfigure the port after hard reset
+ if ( ata_link_online(link) )
+ sata_dwc_init_port(link->ap);
+ link->active_tag = ATA_TAG_POISON;
+
+ // Reset parameters
+ sata_dwc_default_params(hsdevp);
+ printk("DMA Interrupt Mask = 0x%08x", in_le32(&sata_dma_regs->interrupt_mask.tfr.low));
+
+ return online ? -EAGAIN : rc;
+}
+
+
+/*
+ * Do hard reset on each PMP link
+ */
+static int sata_dwc_pmp_hardreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ int rc = 0;
+ sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));
+ rc = sata_std_hardreset(link, classes, deadline);
+ return rc;
+}
+
+
+/* See ahci.c */
+/*
+ * Process error when the SATAn_INTPR's ERR bit is set
+ * The processing is based on SCR_ERROR register content
+ */
+static void sata_dwc_error_intr(struct ata_port *ap,
+ struct sata_dwc_device *hsdev, uint intpr)
+{
+ struct ata_eh_info *ehi;
+ struct ata_link *link;
+ struct ata_queued_cmd *active_qc = NULL;
+ u32 serror;
+ bool freeze = false, abort = false;
+ int pmp, ret;
+ unsigned int err_mask = 0, action = 0;
+#if defined(DWC_VDEBUG)
+ int dma_chan = hsdev->dma_channel;
+#endif
+
+ link = &ap->link;
+ ehi = &link->eh_info;
+
+ /* Record irq stat */
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "irq_stat 0x%08x", intpr);
+
+ // Record SERROR
+ serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+ dwc_port_dbg(ap, "%s serror = 0x%08x\n", __func__, serror);
+
+ // Clear SERROR and interrupt bit
+ clear_serror(ap);
+ clear_intpr(hsdev);
+
+ // Print content of SERROR in case of error detected
+ print_serror_2_txt(serror);
+
+#if defined(DWC_VDEBUG)
+ print_dma_registers(dma_chan);
+#endif
+
+ // Process hotplug for SATA port
+ if ( serror & (SATA_DWC_SERR_DIAGX | SATA_DWC_SERR_DIAGW)) {
+ dwc_port_info(ap, "Detect hot plug signal\n");
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, serror & SATA_DWC_SERR_DIAGN ? "PHY RDY changed" : "device exchanged");
+ freeze = true;
+ }
+
+ // Process PHY internal error / Link sequence (illegal transition) error
+ if ( serror & (SATA_DWC_SERR_DIAGI | SATA_DWC_SERR_DIAGL)) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ freeze = true;
+ }
+
+ // Process Internal host adapter error
+ if ( serror & SATA_DWC_SERR_ERRE ) {
+ dev_err(ap->dev, "Detect Internal host adapter error\n");
+ // --> need to review
+ ehi->err_mask |= AC_ERR_HOST_BUS;
+ ehi->action |= ATA_EH_RESET;
+ freeze = true;
+ }
+
+ // Process Protocol Error
+ if ( serror & SATA_DWC_SERR_ERRP ) {
+ dev_err(ap->dev, "Detect Protocol error\n");
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ freeze = true;
+ }
+
+ // Process non-recovered persistent communication error
+ if ( serror & SATA_DWC_SERR_ERRC ) {
+ dev_err(ap->dev, "Detect non-recovered persistent communication error\n");
+ // --> TODO: review processing error
+ ehi->err_mask |= AC_ERR_ATA_BUS;
+ ehi->action |= ATA_EH_SOFTRESET;
+ //ehi->flags |= ATA_EHI_NO_AUTOPSY;
+ //freeze = true;
+ }
+
+ // Non-recovered transient data integrity error
+ if ( serror & SATA_DWC_SERR_ERRT ) {
+ dev_err(ap->dev, "Detect non-recovered transient data integrity error\n");
+ ehi->err_mask |= AC_ERR_ATA_BUS;
+ //ehi->err_mask |= AC_ERR_DEV;
+ ehi->action |= ATA_EH_SOFTRESET;
+ //ehi->flags |= ATA_EHI_NO_AUTOPSY;
+ }
+
+ // Since below errors have been recovered by hardware
+ // they don't need any error processing.
+ if ( serror & SATA_DWC_SERR_ERRM ) {
+ dev_warn(ap->dev, "Detect recovered communication error");
+ }
+ if ( serror & SATA_DWC_SERR_ERRI ) {
+ dev_warn(ap->dev, "Detect recovered data integrity error");
+ }
+
+ // If any error occur, process the qc
+ if (serror & (SATA_DWC_SERR_ERRT | SATA_DWC_SERR_ERRC)) {
+ abort = true;
+ /* find out the offending link and qc */
+ if (sata_pmp_attached(ap)) {
+ pmp = current_pmp(ap);
+ // If we are working on the PMP port
+ if ( pmp < ap->nr_pmp_links ) {
+ link = &ap->pmp_link[pmp];
+ ehi = &link->eh_info;
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ err_mask |= AC_ERR_DEV;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
+ } else {
+ err_mask |= AC_ERR_HSM;
+ action |= ATA_EH_RESET;
+ freeze = true;
+ }
+ }
+ // Work on SATA port
+ else {
+ freeze = true;
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ }
+
+ if ( active_qc) {
+ active_qc->err_mask |= err_mask;
+ } else {
+ ehi->err_mask = err_mask;
+ }
+ }
+
+ if ( freeze | abort ) {
+ // Terminate DMA channel if it is currenly in use
+ if ( dma_request_channel(ap) != -1 ) {
+ dwc_port_dbg(ap, "Terminate DMA channel %d for handling error\n", hsdev->dma_channel);
+ dma_dwc_terminate_dma(ap, hsdev->dma_channel);
+ }
+ }
+
+ if (freeze) {
+ ret = ata_port_freeze(ap);
+ ata_port_printk(ap, KERN_INFO, "Freeze port with %d QCs aborted\n", ret);
+ }
+ else if (abort) {
+ if (active_qc) {
+ ret = ata_link_abort(active_qc->dev->link);
+ ata_link_printk(link, KERN_INFO, "Abort %d QCs\n", ret);
+ } else {
+ ret = ata_port_abort(ap);
+ ata_port_printk(ap, KERN_INFO, "Abort %d QCs on the SATA port\n", ret);
+ }
+ }
+}
+
+
+/*
+ * sata_dwc_isr - main SATA interrupt handler
+ * @irq: interrupt number
+ * @dev_instance: the struct ata_host passed to ata_host_activate()
+ *
+ * Runs entirely under the host lock and dispatches three cases:
+ *  1. error interrupts (SATA_DWC_INTPR_ERR) -> sata_dwc_error_intr();
+ *  2. NEWFP interrupts (step 5 of the First Party DMA protocol for NCQ):
+ *     read the tag from FPTAGR and start the DMA transfer for that QC;
+ *  3. command-completion interrupts, for both non-NCQ commands (single
+ *     active tag) and NCQ commands (all tags flagged in dma_complete).
+ *
+ * Registered via ata_host_activate() in sata_dwc_probe().
+ * Returns IRQ_RETVAL(handled).
+ */
+static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
+{
+	struct ata_host *host = (struct ata_host *)dev_instance;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
+	struct ata_port *ap;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	u8 status = 0, tag;
+	int handled;
+	int port = 0;
+	u32 intpr, sactive, tag_mask, mask;
+	struct sata_dwc_device_port *hsdevp;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	/* Read the interrupt register */
+	intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+
+	/* This controller exposes a single port (port 0) per host */
+	ap = host->ports[port];
+	hsdevp = HSDEVP_FROM_AP(ap);
+
+	dwc_port_dbg(ap,"%s - interrupt intpr=0x%08x, qc_allocated=0x%08x, sactive_issued=0x%08x, dma_complete=0x%08x\n",__func__,intpr, ap->qc_allocated, hsdevp->sactive_issued, hsdevp->dma_complete);
+
+	/* Check for error interrupt */
+	if (intpr & SATA_DWC_INTPR_ERR) {
+		sata_dwc_error_intr(ap, hsdev, intpr);
+		handled = 1;
+		goto done_irqrestore;
+	}
+
+	/*
+	 * For NCQ commands, an interrupt with NEWFP bit set will be issued.
+	 * With non-NCQ command, this interrupt will never occur.
+	 * This is step 5 of the First Party DMA transfer
+	 */
+	if (intpr & SATA_DWC_INTPR_NEWFP) {
+		// Clear Interrupt
+		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
+		/* Spurious NEWFP: no command is allocated at all */
+		if ( ap->qc_allocated == 0x0 ) {
+			handled = 1;
+			goto done_irqrestore;
+		}
+
+		// Read the FPTAGR register for the NCQ tag
+		tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
+		// Setting this prevents more QCs to be queued
+		ap->link.active_tag = tag;
+		dwc_port_dbg(ap, "%s: NEWFP interrupt (intpr=0x%08x), fptagr=%d, fpbor=0x%08x, ap->link.active_tag=0x%08x\n", __func__, intpr, tag, in_le32(&hsdev->sata_dwc_regs->fpbor), ap->link.active_tag);
+
+		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PENDING)
+			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
+
+		// Get the QC from the tag
+		qc = ata_qc_from_tag(ap, tag);
+		if ( !qc) {
+			/* Tag from hardware has no matching QC; drop it from the issued mask */
+			dev_warn(ap->dev, "No QC available for tag %d (intpr=0x%08x, qc_allocated=0x%08x, qc_active=0x%08x)\n", tag, intpr, ap->qc_allocated, ap->qc_active);
+			hsdevp->sactive_issued &= ~qcmd_tag_to_mask(tag);
+			handled = 1;
+			goto done_irqrestore;
+		}
+
+		// Update sactive_issued to indicate a new command issued
+		hsdevp->sactive_issued |= qcmd_tag_to_mask(tag);
+
+		/*
+		 * Start FP DMA for NCQ command. At this point the tag is the
+		 * active tag. It is the tag that matches the command about to
+		 * be completed.
+		 */
+		sata_dwc_start_dma_transfer(qc);
+
+		handled = 1;
+		goto done_irqrestore;
+	}
+
+	handled = 1;
+	/*
+	 * The second interrupt is signalled to indicate the command complete.
+	 */
+	// Complete PIO and internal command queues.
+	sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE);
+	/* Tags we issued that the device no longer reports active = completed */
+	tag_mask = (hsdevp->sactive_issued | sactive) ^ sactive;
+	// PIO and DMA interrupt
+	if ((hsdevp->sactive_issued == 0) && (tag_mask == 0)) {
+		dwc_port_dbg(ap, "Process non-NCQ commands, ap->qc_active=0x%08x, sactive=0x%08x\n",ap->qc_active, sactive);
+		if (ap->link.active_tag == ATA_TAG_POISON)
+			tag = 0;
+		else
+			tag = ap->link.active_tag;
+		// Get qc from tag
+		qc = ata_qc_from_tag(ap, tag);
+		// Call check status to clear BUSY bit
+		status = ap->ops->sff_check_status(ap);
+		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+		if ( hsdevp->dma_pending[tag] == SATA_DWC_DMA_DONE)
+			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+		if ( qc ) {
+			/* PIO completion goes through the SFF state machine;
+			 * DMA completion is finished here directly. */
+			if (ata_is_pio(qc->tf.protocol))
+				ata_sff_hsm_move(ap, qc, status, 0);
+			else {
+				sata_dwc_qc_complete(ap, qc, 0);
+				mask = ~qcmd_tag_to_mask(tag);
+				hsdevp->dma_complete = hsdevp->dma_complete & mask;
+			}
+			handled = 1;
+			goto done_irqrestore;
+		}/* else if ( ap->qc_allocated != 0x0 )
+			dev_warn(ap->dev, "%s: No QC available (tag=%d, qc_allocated=0x%08x)\n",__func__,tag, ap->qc_allocated); */
+	}
+
+	// Process NCQ QC completes
+	else
+	{
+		/*
+		 * Each NEWFP command must follow by a DMA interrupt. We clear interrupt only
+		 * when no DMA transfer pending.
+		 * Since we don't clear interrupt bit, this interrupt will signal again.
+		 */
+		if ( hsdevp->no_dma_pending == 0 ) {
+			status = ap->ops->sff_check_status(ap);
+			hsdevp->dma_pending_isr_count = 0;
+		} else {
+			/* DMA still outstanding: count re-entries; after 10 of
+			 * them read status anyway (diagnostic escape hatch). */
+			hsdevp->dma_pending_isr_count++;
+			if( hsdevp->dma_pending_isr_count > 10 ) {
+				status = ap->ops->sff_check_status(ap);	// For test only
+				if ( ap->link.active_tag == ATA_TAG_POISON )
+					tag = 0;
+				else
+					tag = ap->link.active_tag;
+				hsdevp->dma_pending_isr_count=0;
+#ifdef DWC_VDEBUG
+				printk("%s count exceed 10 times (hsdevp->no_dma_pending=%d, num_lli=%d, qc_allocated=0x%08x)\n", __func__, hsdevp->no_dma_pending, hsdevp->num_lli[tag], ap->qc_allocated);
+				print_dma_registers(hsdev->dma_channel);
+				print_dma_configuration(hsdevp->llit[tag], 0);
+#endif
+			}
+			ndelay(100);
+			//goto done_irqrestore;
+		}
+
+		dwc_port_dbg(ap, "Process NCQ commands, ap->qc_active=0x%08x, dma_complete=0x%08x, status=0x%02x\n",ap->qc_active, hsdevp->dma_complete, status);
+		/* Complete every tag whose DMA has finished */
+		for(tag=0; tag<32; tag++) {
+			if ( hsdevp->dma_complete & qcmd_tag_to_mask(tag) ) {
+				qc = ata_qc_from_tag(ap, tag);
+				mask = ~qcmd_tag_to_mask(tag);
+				hsdevp->dma_complete = hsdevp->dma_complete & mask;
+				hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+				hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+				if ( ! qc ) {
+					ata_port_printk(ap, KERN_INFO, "Tag %d is set but not available\n", tag);
+					continue;
+				}
+				sata_dwc_qc_complete(ap, qc, 0);
+			}
+		}
+		handled = 1;
+		// Assign active_tag to ATA_TAG_POISON so that the qc_defer not defer new QC.
+		if ( hsdevp->no_dma_pending == 0 ) {
+			ap->link.active_tag = ATA_TAG_POISON;
+		}
+	}
+
+done_irqrestore:
+	spin_unlock_irqrestore(&host->lock, flags);
+	return IRQ_RETVAL(handled);
+}
+
+
+/*
+ * Reset the SATA DMA control register (DMACR) after an AHB DMA transfer
+ * for @tag has completed, clearing whichever direction was pending.
+ */
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
+	u32 dmacr = in_le32(&(hsdev->sata_dwc_regs->dmacr));
+
+	switch (hsdevp->dma_pending[tag]) {
+	case SATA_DWC_DMA_PENDING_RX:
+		/* A receive was pending: drop the RX channel enable bit */
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+			 SATA_DWC_DMACR_RX_CLEAR(dmacr));
+		break;
+
+	case SATA_DWC_DMA_PENDING_TX:
+		/* A transmit was pending: drop the TX channel enable bit */
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+			 SATA_DWC_DMACR_TX_CLEAR(dmacr));
+		break;
+
+	default:
+		/*
+		 * Driver out of sync with hardware: neither direction was
+		 * marked pending. Report it and clear both channels anyway
+		 * (SATA_DWC_DMACR_TXRXCH_CLEAR keeps TXMOD set).
+		 */
+		dev_err(hsdev->dev, "%s DMA protocol RX and TX DMA not pending "
+			"tag=0x%02x pending=%d dmacr: 0x%08x\n",
+			__func__, tag, hsdevp->dma_pending[tag], dmacr);
+		printk("dma_complete=0x%08x, No.DMA Pending=0x%08x\n", hsdevp->dma_complete, hsdevp->no_dma_pending);
+
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+			 SATA_DWC_DMACR_TXRXCH_CLEAR);
+		break;
+	}
+}
+
+
+
+/*
+ * sata_dwc_qc_complete - finish a queued command and hand it back to libata
+ * @ap: port the command ran on
+ * @qc: the completed queued command
+ * @check_status: when non-zero, poll the status register (clearing INTRQ)
+ *                until BUSY drops, waiting up to ~200 ms
+ *
+ * Clears the per-tag bookkeeping (sactive_queued/sactive_issued masks and
+ * the LLI table entries) before calling ata_qc_complete(). Always returns 0.
+ */
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+				u32 check_status)
+{
+	u8 status = 0;
+	int i = 0;
+	u8 tag = qc->tag;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct lli *llit;
+
+	dwc_port_dbg(ap, "%s checkstatus? %s - qc->tag=%d\n", __func__, check_status?"yes":"no", tag);
+
+	/* A completion with DMA still marked pending indicates the driver
+	 * lost a DMA-done event for this tag */
+	if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX))
+		dev_err(ap->dev, "%s: TX DMA PENDING - tag=%d\n\n", __func__, tag);
+	else if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX))
+		dev_err(ap->dev, "%s: RX DMA PENDING - tag=%d\n\n", __func__, tag);
+
+	if (check_status) {
+		i = 0;
+		/* check main status, clearing INTRQ */
+		status = ap->ops->sff_check_status(ap);
+		/* Busy-wait (1 ms steps, max 200) for the device to go idle */
+		while ( status & ATA_BUSY ) {
+			if (++i > 200)
+				break;
+			mdelay(1);
+			status = ap->ops->sff_check_status(ap);
+		}
+
+		if (unlikely(status & ATA_BUSY))
+			dev_err(ap->dev, "QC complete cmd=0x%02x STATUS BUSY "
+				"(0x%02x) [%d]\n", qc->tf.command, status, i);
+	}
+	dwc_port_vdbg(ap, "QC complete cmd=0x%02x status=0x%02x ata%u: "
+			"protocol=%d\n", qc->tf.command, status, ap->print_id,
+			 qc->tf.protocol);
+
+	/* clear active bit */
+	hsdevp->sactive_queued = hsdevp->sactive_queued & (~(qcmd_tag_to_mask(tag)));
+	hsdevp->sactive_issued = hsdevp->sactive_issued & (~(qcmd_tag_to_mask(tag)));
+	// Clear lli structure
+	llit = hsdevp->llit[tag];
+	for(i=0; i < hsdevp->num_lli[tag];i++)
+		llit[i].llp = 0;
+	hsdevp->num_lli[tag] = 0;
+
+	dwc_port_vdbg(ap, "%s - sactive_queued=0x%08x, sactive_issued=0x%08x\n",__func__, hsdevp->sactive_queued, hsdevp->sactive_issued);
+	dwc_port_vdbg(ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+
+	/* Complete taskfile transaction (does not read SCR registers) */
+	ata_qc_complete(qc);
+	return 0;
+}
+
+
+/*
+ * Port-level IRQ acknowledge hook: clear the interrupt and error flags
+ * latched in the AHB DMA status registers for this port's DMA channel.
+ */
+void sata_dwc_irq_clear (struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dwc_port_dbg(ap,"%s\n",__func__);
+
+	/* Acknowledge any pending interrupts on our DMA channel */
+	clear_chan_interrupts(hsdev->dma_channel);
+}
+
+/*
+ * Re-enable interrupts on the port: clear nIEN in the device control
+ * register, wait for the device to go idle, acknowledge anything latched
+ * while interrupts were off, and unmask the controller error interrupts.
+ * Returns the status value observed by ata_wait_idle().
+ */
+u8 sata_dwc_irq_on(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	u8 status;
+
+	dwc_port_dbg(ap,"%s\n",__func__);
+
+	/* Allow the device to raise interrupts again */
+	ap->ctl &= ~ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+	if (ioaddr->ctl_addr)
+		iowrite8(ap->ctl, ioaddr->ctl_addr);
+
+	status = ata_wait_idle(ap);
+
+	/* Drop stale interrupt state, then unmask error interrupts */
+	ap->ops->sff_irq_clear(ap);
+	enable_err_irq(hsdev);
+
+	return status;
+}
+
+
+/*
+ * Unmask the controller interrupts in IMR (error, NEWFP, PM abort, DMA
+ * terminate) and the SError conditions that should raise an error
+ * interrupt in ERRMR.
+ */
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
+{
+	const u32 intmr = SATA_DWC_INTMR_ERRM |
+			  SATA_DWC_INTMR_NEWFPM |
+			  SATA_DWC_INTMR_PMABRTM |
+			  SATA_DWC_INTMR_DMATM;
+
+	/* Enable the interrupt sources handled by the ISR */
+	out_le32(&hsdev->sata_dwc_regs->intmr, intmr);
+
+	/* Unmask the error bits that should trigger an error interrupt */
+	out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS);
+
+	dwc_dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", __func__,
+		 in_le32(&hsdev->sata_dwc_regs->intmr),
+		 in_le32(&hsdev->sata_dwc_regs->errmr));
+}
+
+
+/*
+ * Bring the SATA port into a known state after a hardreset: reset the
+ * DMA control register and program the default burst sizes (only done
+ * for port 0), then re-enable the controller interrupts.
+ */
+static void sata_dwc_init_port ( struct ata_port *ap ) {
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	if (ap->port_no == 0) {
+		dwc_port_dbg(ap, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+				__func__);
+
+		/* Disable both DMA directions (TXMOD is left set) */
+		out_le32(&hsdev->sata_dwc_regs->dmacr,
+			 SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+		/* Default read/write burst sizes */
+		dwc_dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__);
+		out_le32(&hsdev->sata_dwc_regs->dbtsr,
+			 SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+			 SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT));
+	}
+
+	sata_dwc_enable_interrupts(hsdev);
+}
+
+
+/*
+ * Map the libata taskfile register pointers onto the DWC register block
+ * starting at @base. Several taskfile registers share one address
+ * (data/cmd, error/feature, command/status, altstatus/ctl).
+ */
+static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
+{
+	void *reg = (void *)base;
+
+	port->cmd_addr		= reg + 0x00;
+	port->data_addr		= reg + 0x00;
+
+	port->error_addr	= reg + 0x04;
+	port->feature_addr	= reg + 0x04;
+
+	port->nsect_addr	= reg + 0x08;
+
+	port->lbal_addr		= reg + 0x0c;
+	port->lbam_addr		= reg + 0x10;
+	port->lbah_addr		= reg + 0x14;
+
+	port->device_addr	= reg + 0x18;
+	port->command_addr	= reg + 0x1c;
+	port->status_addr	= reg + 0x1c;
+
+	port->altstatus_addr	= reg + 0x20;
+	port->ctl_addr		= reg + 0x20;
+}
+
+
+/*
+ * Function : sata_dwc_port_start
+ * arguments : struct ata_port *ap
+ * Return value : 0 on success, negative errno otherwise
+ *
+ * Allocates the per-port private data and the scatter/gather LLI tables
+ * used by the AHB DMA engine (libata's PRD tables are PCI-IDE specific
+ * and cannot be used here), resets the DMA registers on port 0, and
+ * reports the negotiated link speed.
+ *
+ * Fix vs. original: on a partial LLI allocation failure the old cleanup
+ * called sata_dwc_port_stop() before ap->private_data was ever set, so
+ * the already-allocated dma_alloc_coherent() buffers leaked. The cleanup
+ * now frees exactly what was allocated.
+ */
+static int sata_dwc_port_start(struct ata_port *ap)
+{
+	int err = 0;
+	struct sata_dwc_device *hsdev;
+	struct sata_dwc_device_port *hsdevp = NULL;
+	struct device *pdev;
+	u32 sstatus;
+	int i;
+
+	hsdev = HSDEV_FROM_AP(ap);
+
+	dwc_dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
+
+	hsdev->host = ap->host;
+	pdev = ap->host->dev;
+	if (!pdev) {
+		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
+		err = -ENODEV;
+		goto cleanup_exit;
+	}
+
+	/* Allocate the zero-initialized port private struct */
+	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
+	if (!hsdevp) {
+		dev_err(ap->dev, "%s: kmalloc failed for hsdevp\n", __func__);
+		err = -ENOMEM;
+		goto cleanup_exit;
+	}
+	hsdevp->hsdev = hsdev;
+
+	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
+		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
+
+	ap->prd = 0;	/* set these so libata doesn't use them */
+	ap->prd_dma = 0;
+
+	/*
+	 * DMA - Assign scatter gather LLI table. We can't use the libata
+	 * version since it's PRD is IDE PCI specific.
+	 */
+	for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+		hsdevp->llit[i] = dma_alloc_coherent(pdev,
+						     SATA_DWC_DMAC_LLI_TBL_SZ,
+						     &(hsdevp->llit_dma[i]),
+						     GFP_ATOMIC);
+		if (!hsdevp->llit[i]) {
+			dev_err(ap->dev, "%s: dma_alloc_coherent failed size "
+				"0x%x\n", __func__, SATA_DWC_DMAC_LLI_TBL_SZ);
+			err = -ENOMEM;
+			goto cleanup_exit;
+		}
+	}
+
+	if (ap->port_no == 0) {
+		dwc_dev_vdbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+			__func__);
+
+		/* Disable both DMA directions, program default burst sizes */
+		out_le32(&hsdev->sata_dwc_regs->dmacr,
+			 SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+		dwc_dev_vdbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__);
+		out_le32(&hsdev->sata_dwc_regs->dbtsr,
+			 (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+			  SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+		ata_port_printk(ap, KERN_INFO, "%s: setting burst size in DBTSR: 0x%08x\n",
+			__func__, in_le32(&hsdev->sata_dwc_regs->dbtsr));
+	}
+
+	/* Clear any error bits before libata starts issuing commands */
+	clear_serror(ap);
+
+	ap->private_data = hsdevp;
+
+	/* Are we in Gen I or II */
+	sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS);
+	switch (SATA_DWC_SCR0_SPD_GET(sstatus)) {
+	case 0x0:
+		dev_info(ap->dev, "**** No neg speed (nothing attached?) \n");
+		break;
+	case 0x1:
+		dev_info(ap->dev, "**** GEN I speed rate negotiated \n");
+		break;
+	case 0x2:
+		dev_info(ap->dev, "**** GEN II speed rate negotiated \n");
+		break;
+	}
+	/* NOTE: the AN check timer (sata_dwc_an_chk) is intentionally
+	 * disabled here. */
+
+cleanup_exit:
+	if (err) {
+		if (hsdevp) {
+			/* Free any LLI tables that were already allocated */
+			for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+				if (hsdevp->llit[i])
+					dma_free_coherent(pdev,
+						SATA_DWC_DMAC_LLI_TBL_SZ,
+						hsdevp->llit[i],
+						hsdevp->llit_dma[i]);
+			}
+			kfree(hsdevp);
+		}
+		ap->private_data = NULL;
+		dwc_dev_vdbg(ap->dev, "%s: fail\n", __func__);
+	} else {
+		dwc_dev_vdbg(ap->dev, "%s: done\n", __func__);
+	}
+
+	return err;
+}
+
+/*
+ * Tear down a SATA port: release the per-tag DMA LLI tables and the
+ * port private data allocated in sata_dwc_port_start().
+ */
+static void sata_dwc_port_stop(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	int i;
+
+	dwc_port_dbg(ap, "%s\n", __func__);
+
+	if (hsdevp && hsdev) {
+		/* Return every LLI table to the DMA pool */
+		for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
+			dma_free_coherent(ap->host->dev,
+					  SATA_DWC_DMAC_LLI_TBL_SZ,
+					  hsdevp->llit[i], hsdevp->llit_dma[i]);
+		kfree(hsdevp);
+	}
+	ap->private_data = NULL;
+}
+
+
+/*
+ * The SATA DWC core is master-only, so the classic device-select cycle
+ * is unnecessary; a short delay stands in for it.
+ */
+void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
+{
+	ndelay(100);
+}
+
+
+/*
+ * Function : sata_dwc_exec_command_by_tag
+ * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
+ * Return value : None
+ *
+ * Records the per-tag issue state and queued mask, clears SError, and
+ * writes the command to the device via the SFF exec_command hook. The
+ * whole sequence runs under the host lock so the bookkeeping and the
+ * command write cannot be interleaved with the ISR.
+ */
+static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
+					 struct ata_taskfile *tf,
+					 u8 tag, u32 cmd_issued)
+{
+	unsigned long flags;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+	dwc_port_dbg(ap, "%s cmd(0x%02x): %s tag=%d, ap->link->tag=0x%08x\n", __func__, tf->command,
+		ata_cmd_2_txt(tf), tag, ap->link.active_tag);
+
+	spin_lock_irqsave(&ap->host->lock, flags);
+	hsdevp->cmd_issued[tag] = cmd_issued;
+	hsdevp->sactive_queued |= qcmd_tag_to_mask(tag);
+	/* NOTE: the lock is deliberately held across clear_serror() and
+	 * sff_exec_command() below (early unlock kept disabled). */
+	//spin_unlock_irqrestore(&ap->host->lock, flags);
+
+	/*
+	 * Clear SError before executing a new command.
+	 *
+	 * TODO if we read a PM's registers now, we will throw away the task
+	 * file values loaded into the shadow registers for this command.
+	 *
+	 * sata_dwc_scr_write and read can not be used here. Clearing the PM
+	 * managed SError register for the disk needs to be done before the
+	 * task file is loaded.
+	 */
+	clear_serror(ap);
+	ap->ops->sff_exec_command(ap, tf);
+	spin_unlock_irqrestore(&ap->host->lock, flags);
+}
+
+
+/*
+ * BMDMA setup hook (non-NCQ DMA and PIO only): mark the command pending
+ * under its tag (tag 0 for non-DMA protocols) and write it to the device.
+ */
+static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	u8 tag = ata_is_dma(qc->tf.protocol) ? qc->tag : 0;
+
+	dwc_port_dbg(qc->ap,"%s\n", __func__);
+	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag, SATA_DWC_CMD_ISSUED_PENDING);
+}
+
+
+
+/*
+ * Configure DMA registers and then start DMA transfer
+ */
+static void sata_dwc_start_dma_transfer(struct ata_queued_cmd *qc)
+{
+ volatile int start_dma;
+ u32 reg, dma_chan;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
+ struct ata_port *ap = qc->ap;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ int dir = qc->dma_dir;
+ int idx = 0;
+ int tag = qc->tag;
+
+ // Identify the QC tag
+ dwc_port_dbg(qc->ap, "%s: ap->link.sactive=0x%08x tag=%d\n",
+ __func__, qc->ap->link.sactive, tag);
+
+ // Check DMA channel for available
+ if ( dma_dwc_channel_enabled(hsdev->dma_channel) == 1 )
+ printk("%s: DMA channel is currently in use\n",__func__);
+
+ // Request DMA channel. Try to request 10 times maximum
+ while ( idx < 10 ) {
+ dma_chan = dma_request_channel(qc->ap);
+ if ( dma_chan >= 0 )
+ break;
+ udelay(10);
+ idx++;
+ }
+
+ // In case DMA channel is not available, mask the NCQ to be error
+ if ( dma_chan < 0 ) {
+ dev_err(qc->ap->dev, "%s: dma channel unavailable\n", __func__);
+ // Offending this QC
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ return;
+ }
+
+ // Configure DMA channel ready for DMA transfer
+ configure_dma_channel(dma_chan,hsdevp->llit_dma[tag]);
+
+ /* Used for ata_bmdma_start(qc) -- we are not BMDMA compatible */
+ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
+ start_dma = 1;
+ if (dir == DMA_TO_DEVICE)
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
+ else
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
+ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_EXEC;
+ } else {
+ dwc_port_info(ap, "ERROR(%s): Command not pending cmd_issued=%d "
+ "(tag=%d) - DMA NOT started\n", __func__,
+ hsdevp->cmd_issued[tag], tag);
+ start_dma = 0;
+ }
+
+ dwc_port_dbg(ap, "%s tag: %x cmd: 0x%02x dma_dir: %s, "
+ "start_dma? %d\n", __func__, tag, qc->tf.command,
+ dir_2_txt(qc->dma_dir), start_dma);
+#ifdef DWC_VDEBUG
+ sata_dwc_tf_dump(ap, &(qc->tf));
+#endif
+
+ // Start DMA transfer
+ if (start_dma) {
+ reg = sata_dwc_core_scr_read(ap, SCR_ERROR);
+ if (unlikely(reg & SATA_DWC_SERR_ERR_BITS)) {
+ dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
+ __func__, reg);
+ }
+
+ // Set DMA control registers
+ if (dir == DMA_TO_DEVICE)
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXCHEN);
+ else
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_RXCHEN);
+
+ dwc_dev_dbg(ap->dev, "%s: setting DMACR: 0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->dmacr));
+
+ /* Step 6: Enable the DMA channel to start transfer */
+ out_le32(&(sata_dma_regs->dma_chan_en.low),
+ in_le32(&(sata_dma_regs->dma_chan_en.low)) | DMA_ENABLE_CHAN(dma_chan));
+
+ // Delay to wait for data transfer complete
+ dwc_port_vdbg(ap, "DMA CFG = 0x%08x (dma_ch=%d)\n", in_le32(&(sata_dma_regs->dma_cfg.low)), dma_chan);
+ dwc_port_dbg(ap, "%s: setting sata_dma_regs->dma_chan_en.low with val: 0x%08x\n",
+ __func__, in_le32(&(sata_dma_regs->dma_chan_en.low)));
+ hsdevp->no_dma_pending++;
+ }
+}
+
+
+/*
+ * Function : sata_dwc_qc_prep
+ * arguments : ata_queued_cmd *qc
+ * Return value : None
+ *
+ * Prepares a queued command for the AHB DMA engine by converting its
+ * scatter/gather list into an LLI table for the command's tag. PIO and
+ * non-data commands are left untouched. If the link has disappeared
+ * (SStatus reads 0), the command is dropped and hotplug EH is triggered.
+ */
+static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
+{
+	u32 sstatus;
+	u8 tag;
+	int num_lli;
+	struct ata_port *ap = qc->ap;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct ata_eh_info *ehi;
+
+	dwc_port_dbg(qc->ap, "%s: qc->tag=%d\n", __func__, qc->tag);
+	/* Track the highest tag seen (diagnostic bookkeeping) */
+	if ( (qc->tag < 32) && (qc->tag > hsdevp->max_tag))
+		hsdevp->max_tag = qc->tag;
+
+	/*
+	 * Fix the problem when PMP card is unplugged from the SATA port.
+	 * QC is still issued but no device present. Ignore the current QC.
+	 * and pass error to error handler
+	 */
+	sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS);
+	if ( sstatus == 0x0) {
+		ata_port_printk(ap, KERN_INFO, "Detect connection lost while commands are executing --> ignore current command\n");
+		ehi = &ap->link.eh_info;
+		ata_ehi_hotplugged(ehi);
+		ap->link.eh_context.i.action |= ATA_EH_RESET;
+		return;
+	}
+
+
+	// Do nothing if not DMA or NCQ
+	if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+		return;
+
+	// Set the tag
+	if (ata_is_ncq(qc->tf.protocol) ) {
+		/* NCQ uses the command's own tag; active_tag should still be
+		 * poisoned at prep time */
+		if ( qc->ap->link.active_tag != ATA_TAG_POISON)
+			printk("Some process change ap->link.active_tag to %d\n", qc->ap->link.active_tag);
+		tag = qc->tag;
+	}
+	else
+		tag = 0;
+
+#ifdef DEBUG_NCQ
+	if (qc->tag > 0) {
+		dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
+			__func__, tag, qc->ap->link.active_tag);
+	}
+#endif
+
+	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
+	num_lli = map_sg_to_lli(qc, hsdevp->llit[tag], hsdevp->llit_dma[tag], (void *__iomem)(&hsdev->sata_dwc_regs->dmadr));
+	hsdevp->num_lli[tag] = num_lli;
+	dwc_port_vdbg(qc->ap, "%s sg: 0x%p, count: %d lli: %p dma_lli: 0x%0xlx addr:"
+		" %p lli count: %d\n", __func__, qc->sg, qc->n_elem, hsdevp->llit[tag],
+		(u32)hsdevp->llit_dma[tag], (void *__iomem)(&hsdev->sata_dwc_regs->dmadr), num_lli);
+}
+
+/*
+ * Issue a queued command to the device.
+ *
+ * For NCQ: load the taskfile, write the command (marking it pending for
+ * its tag), set the tag's bit in SActive, and — if the device is still
+ * BUSY afterwards — pin link.active_tag so qc_defer holds back further
+ * QCs until the NEWFP interrupt arrives. Non-NCQ commands go through
+ * ata_sff_qc_issue().
+ *
+ * Fix vs. original: the return value of ata_sff_qc_issue() was stored
+ * in ret but then discarded (`return 0`), silently swallowing issue
+ * failures for non-NCQ commands. It is now propagated to the caller.
+ */
+static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int ret = 0;
+	u32 sactive;
+	u8 status;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+	dwc_port_dbg(ap, "%s, qc->tag=%d, prot=%s, cmd=%s, hsdevp->sactive_issued=0x%08x\n",__func__, qc->tag, prot_2_txt(qc->tf.protocol), ata_cmd_2_txt(&qc->tf), hsdevp->sactive_issued);
+
+#ifdef DEBUG_NCQ
+	if ((qc->tag > 0) && (qc->tag < 31) ) {
+		dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s"
+			" ap active_tag=0x%08x ap sactive=0x%08x\n",
+			__func__, ap->print_id, qc->tf.command,
+			ata_cmd_2_txt(&qc->tf), qc->tag,
+			prot_2_txt(qc->tf.protocol), ap->link.active_tag,
+			ap->link.sactive);
+	}
+#endif
+	// Set PMP field in the SCONTROL register
+	if ( sata_pmp_attached(ap) )
+		sata_dwc_pmp_select(ap, qc->dev->link->pmp);
+
+	// Process NCQ
+	if (ata_is_ncq(qc->tf.protocol)) {
+		/*
+		 * If the device is in BUSY state, ignore the current QC.
+		 */
+		status = ap->ops->sff_check_status(ap);
+		if ( status & ATA_BUSY ) {
+			sactive = sata_dwc_core_scr_read(qc->ap, SCR_ACTIVE);
+			ata_port_printk(ap, KERN_INFO, "Ignore current QC because of device BUSY (tag=%d, sactive=0x%08x)\n", qc->tag, sactive);
+			return AC_ERR_SYSTEM;
+		}
+		// FPDMA Step 1.
+		// Load command from taskfile to device
+		ap->ops->sff_tf_load(ap, &qc->tf);
+		// Write command to the COMMAND register
+		sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
+					     SATA_DWC_CMD_ISSUED_PENDING);
+
+		// Write the QC tag to the SACTIVE register
+		sactive = sata_dwc_core_scr_read(qc->ap, SCR_ACTIVE);
+		sactive |= (0x00000001 << qc->tag);
+		sata_dwc_core_scr_write(qc->ap, SCR_ACTIVE, sactive);
+
+		/*
+		 * FPDMA Step 2.
+		 * Check to see if device clears BUSY bit.
+		 * If not, set the link.active_tag to the value different than
+		 * ATA_TAG_POISON so that the qc_defer will defer additional QCs
+		 * (no more QC is queued)
+		 */
+		if ( ap->link.active_tag != ATA_TAG_POISON)
+			dev_warn(ap->dev, "Some process change ap->link.active_tag to %d\n", ap->link.active_tag);
+		status = ap->ops->sff_check_status(ap);
+		if ( status & ATA_BUSY )
+			ap->link.active_tag = qc->tag;
+	} else {
+		// Without this line, PMP may fail to execute the ATA_CMD_READ_NATIVE_MAX_EXT command
+		ap->link.active_tag = qc->tag;
+		// Call SFF qc_issue to process non-NCQ commands
+		ret = ata_sff_qc_issue(qc);
+	}
+
+	return ret;
+}
+
+
+/*
+ * Post-internal-command hook: freeze the port when an EH-issued
+ * internal command has failed so the error handler can recover it.
+ */
+static void sata_dwc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	if (!(qc->flags & ATA_QCFLAG_FAILED))
+		return;
+
+	ata_eh_freeze_port(qc->ap);
+}
+
+
+/*
+ * Port error handler: dump verbose queue state when compiled with
+ * DWC_VDEBUG, then delegate recovery to the PMP-aware libata handler
+ * (which also covers directly attached devices).
+ */
+static void sata_dwc_error_handler(struct ata_port *ap)
+{
+#ifdef DWC_VDEBUG
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+#endif
+	dwc_port_dbg(ap, "%s\n", __func__);
+#ifdef DWC_VDEBUG
+	dwc_port_vdbg(ap, "%s - sactive_queued=0x%08x, sactive_issued=0x%08x, no_dma_pending=%d\n",__func__, hsdevp->sactive_queued, hsdevp->sactive_issued, hsdevp->no_dma_pending);
+	dwc_port_vdbg(ap, "qc_active=0x%08x, qc_allocated=0x%08x\n", ap->qc_active, ap->qc_allocated);
+#endif
+	sata_pmp_error_handler(ap);
+}
+
+/*
+ * sata_dwc_check_status - read the ATA Status register (CDR7)
+ * @ap: port to query
+ *
+ * Returns the raw status byte.
+ */
+u8 sata_dwc_check_status(struct ata_port *ap)
+{
+	u8 status = ioread8(ap->ioaddr.status_addr);
+
+	return status;
+}
+
+
+/*
+ * Freeze the port: acknowledge any pending controller interrupt and
+ * SError bits, then mask every interrupt source in INTMR.
+ */
+void sata_dwc_freeze(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dwc_port_dbg(ap, "call %s ...\n",__func__);
+
+	clear_intpr(hsdev);
+	clear_serror(ap);
+
+	/* Mask all controller interrupts */
+	out_le32(&hsdev->sata_dwc_regs->intmr, 0x0);
+}
+
+/*
+ * Thaw the port: acknowledge anything latched while frozen, then
+ * re-enable the controller interrupt masks.
+ */
+void sata_dwc_thaw(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dwc_port_dbg(ap, "call %s ...\n",__func__);
+
+	/* Drop stale interrupt state before unmasking */
+	clear_intpr(hsdev);
+	sata_dwc_enable_interrupts(hsdev);
+}
+
+
+
+/*
+ * scsi mid-layer and libata interface structures
+ *
+ * NCQ-capable SCSI host template; queue depth is the full libata
+ * ATA_MAX_QUEUE rather than the default.
+ */
+static struct scsi_host_template sata_dwc_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	//.sg_tablesize = LIBATA_MAX_PRD,
+	.can_queue = ATA_MAX_QUEUE,	//ATA_DEF_QUEUE,
+	.dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+
+/*
+ * libata port operations: inherits the PMP defaults, overrides reset,
+ * queued-command, SCR and port lifecycle hooks with DWC-specific
+ * implementations, and reuses generic SFF hooks where the controller
+ * behaves like a standard taskfile interface.
+ */
+static struct ata_port_operations sata_dwc_ops = {
+	.inherits		= &sata_pmp_port_ops,
+	.dev_config		= sata_dwc_dev_config,
+
+	.error_handler		= sata_dwc_error_handler,
+	.softreset		= sata_dwc_softreset,
+	.hardreset		= sata_dwc_hardreset,
+	.pmp_softreset		= sata_dwc_softreset,
+	.pmp_hardreset		= sata_dwc_pmp_hardreset,
+
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
+	.qc_prep		= sata_dwc_qc_prep,
+	.qc_issue		= sata_dwc_qc_issue,
+	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
+
+	.scr_read		= sata_dwc_scr_read,
+	.scr_write		= sata_dwc_scr_write,
+
+	.port_start		= sata_dwc_port_start,
+	.port_stop		= sata_dwc_port_stop,
+
+	.bmdma_setup		= sata_dwc_bmdma_setup,
+	.bmdma_start		= sata_dwc_start_dma_transfer,
+	// Reuse some SFF functions
+	.sff_check_status	= sata_dwc_check_status,
+	.sff_tf_read		= ata_sff_tf_read,
+	.sff_data_xfer		= ata_sff_data_xfer,
+	.sff_tf_load		= ata_sff_tf_load,
+	.sff_dev_select		= sata_dwc_dev_select,
+	.sff_exec_command	= ata_sff_exec_command,
+
+	.sff_irq_on		= sata_dwc_irq_on,
+/*	.sff_irq_clear		= sata_dwc_irq_clear,
+	.freeze			= sata_dwc_freeze,
+	.thaw			= sata_dwc_thaw,
+	.sff_irq_on		= ata_sff_irq_on,
+	*/
+	.sff_irq_clear		= ata_sff_irq_clear,
+	.freeze			= ata_sff_freeze,
+	.thaw			= ata_sff_thaw,
+	.pmp_attach		= sata_dwc_pmp_attach,
+	.pmp_detach		= sata_dwc_pmp_detach,
+	.post_internal_cmd	= sata_dwc_post_internal_cmd,
+};
+
+/*
+ * Port capabilities: MMIO SATA with NCQ, port-multiplier and async
+ * notification support; PIO modes 0-4 and up to UDMA/133.
+ */
+static const struct ata_port_info sata_dwc_port_info[] = {
+	{
+		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+			 ATA_FLAG_MMIO | ATA_FLAG_NCQ |
+			 ATA_FLAG_PMP | ATA_FLAG_AN,
+		.pio_mask = 0x1f,	/* pio 0-4 */
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &sata_dwc_ops,
+	},
+};
+
+
+
+/*
+ * Probe one SATA DWC node from the device tree: map the SATA and AHB
+ * DMA register blocks, allocate the ata_host, wire up the DMA channel
+ * and interrupts, and register with libata (which triggers device
+ * discovery).
+ *
+ * Fixes vs. original:
+ *  - the error path no longer calls dma_dwc_exit() when hsdev was never
+ *    allocated (NULL dereference risk on allocation failure);
+ *  - the return value of ata_host_activate() is checked;
+ *  - kzalloc replaces kmalloc+memset; redundant NULL guard before
+ *    kfree() removed.
+ */
+static int sata_dwc_probe(struct of_device *ofdev,
+			const struct of_device_id *match)
+{
+	struct sata_dwc_device *hsdev = NULL;
+	u32 idr, versionr;
+	char *ver = (char *)&versionr;
+	u8 *base = NULL;
+	int err = 0;
+	int irq;
+	struct ata_host *host;
+	struct ata_port_info pi = sata_dwc_port_info[0];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+
+	const unsigned int *dma_channel;
+	/*
+	 * Check if device is enabled
+	 */
+	if (!of_device_is_available(ofdev->node)) {
+		printk(KERN_INFO "%s: Port disabled via device-tree\n",
+			ofdev->node->full_name);
+		return 0;
+	}
+
+	/* Allocate DWC SATA device */
+	hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+	if (hsdev == NULL) {
+		dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
+		err = -ENOMEM;
+		goto error_out;
+	}
+
+	// Identify SATA DMA channel used for the current SATA device
+	dma_channel = of_get_property(ofdev->node, "dma-channel", NULL);
+	if ( dma_channel ) {
+		dev_notice(&ofdev->dev, "Getting DMA channel %d\n", *dma_channel);
+		hsdev->dma_channel = *dma_channel;
+	} else
+		hsdev->dma_channel = 0;
+
+	/* Ioremap SATA registers */
+	base = of_iomap(ofdev->node, 0);
+	if (!base) {
+		dev_err(&ofdev->dev, "ioremap failed for SATA register address\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+	hsdev->reg_base = base;
+	dwc_dev_vdbg(&ofdev->dev, "ioremap done for SATA register address\n");
+
+	/* Synopsys DWC SATA specific Registers */
+	hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
+
+	/* Allocate and fill host (devres-managed, freed by libata core) */
+	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
+	if (!host) {
+		dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
+		err = -ENOMEM;
+		goto error_out;
+	}
+
+	host->private_data = hsdev;
+
+	/* Setup port */
+	host->ports[0]->ioaddr.cmd_addr = base;
+	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
+	hsdev->scr_base = (u8 *)(base + SATA_DWC_SCR_OFFSET);
+	sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
+
+	/* Read the ID and Version Registers */
+	idr = in_le32(&hsdev->sata_dwc_regs->idr);
+	versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
+	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
+		idr, ver[0], ver[1], ver[2]);
+
+	/* Get SATA DMA interrupt number */
+	irq = irq_of_parse_and_map(ofdev->node, 1);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA DMA irq\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+
+	/* Get physical SATA DMA register base address (shared, map once) */
+	if (!sata_dma_regs) {
+		sata_dma_regs = of_iomap(ofdev->node, 1);
+		if (!sata_dma_regs) {
+			dev_err(&ofdev->dev, "ioremap failed for AHBDMA register address\n");
+			err = -ENODEV;
+			goto error_out;
+		}
+	}
+	/* Save dev for later use in dev_xxx() routines */
+	hsdev->dev = &ofdev->dev;
+
+	/* Init global dev list */
+	dwc_dev_list[hsdev->dma_channel] = hsdev;
+
+	/* Initialize AHB DMAC */
+	hsdev->irq_dma = irq;
+	dma_dwc_init(hsdev);
+	dma_register_interrupt(hsdev);
+
+	/* Enable SATA Interrupts */
+	sata_dwc_enable_interrupts(hsdev);
+
+	/* Get SATA interrupt number */
+	irq = irq_of_parse_and_map(ofdev->node, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA irq\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+
+	/*
+	 * Now, register with libATA core, this will also initiate the
+	 * device discovery process, invoking our port_start() handler &
+	 * error_handler() to execute a dummy Softreset EH session
+	 */
+	err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+	if (err)
+		goto error_out;
+
+	dev_set_drvdata(&ofdev->dev, host);
+
+	/* Everything is fine */
+	return 0;
+
+error_out:
+	/* Free SATA DMA resources (only if hsdev was ever allocated) */
+	if (hsdev)
+		dma_dwc_exit(hsdev);
+
+	if (base)
+		iounmap(base);
+
+	kfree(hsdev);
+
+	return err;
+}
+
+
+/*
+ * Detach the host from libata and release the driver's own resources.
+ *
+ * Fix vs. original: the struct ata_host comes from
+ * ata_host_alloc_pinfo() and is devres-managed — it is released by the
+ * driver core when the device goes away. The old explicit kfree(host)
+ * was an invalid free and has been removed.
+ */
+static int sata_dwc_remove(struct of_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_dwc_device *hsdev = host->private_data;
+
+	ata_host_detach(host);
+
+	dev_set_drvdata(dev, NULL);
+
+	/* Free SATA DMA resources */
+	dma_dwc_exit(hsdev);
+
+	iounmap(hsdev->reg_base);
+	kfree(hsdev);
+
+	dwc_dev_vdbg(&ofdev->dev, "done\n");
+
+	return 0;
+}
+
+/* Device-tree compatible strings this driver binds to */
+static const struct of_device_id sata_dwc_match[] = {
+	{ .compatible = "amcc,sata-460ex", },
+	{ .compatible = "amcc,sata-apm82181", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sata_dwc_match);
+
+/* OF platform driver glue: routes matched DT nodes to probe/remove */
+static struct of_platform_driver sata_dwc_driver = {
+	.name = "sata-dwc",
+	.match_table = sata_dwc_match,
+	.probe = sata_dwc_probe,
+	.remove = sata_dwc_remove,
+};
+
+
+/* Register the OF platform driver on module load */
+static int __init sata_dwc_init(void)
+{
+	return of_register_platform_driver(&sata_dwc_driver);
+}
+
+
+/* Unregister the OF platform driver on module unload */
+static void __exit sata_dwc_exit(void)
+{
+	of_unregister_platform_driver(&sata_dwc_driver);
+}
+
+module_init(sata_dwc_init);
+module_exit(sata_dwc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
+MODULE_DESCRIPTION("DesignWare Cores SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_dwc_pmp.c b/drivers/ata/sata_dwc_pmp.c
new file mode 100755
index 00000000000..c1b0cff6ae6
--- /dev/null
+++ b/drivers/ata/sata_dwc_pmp.c
@@ -0,0 +1,3053 @@
+/*
+ * drivers/ata/sata_dwc.c
+ *
+ * Synopsys DesignWare Cores (DWC) SATA host driver
+ *
+ * Author: Mark Miesfeld <mmiesfeld@amcc.com>
+ *
+ * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
+ * Copyright 2008 DENX Software Engineering
+ *
+ * Based on versions provided by AMCC and Synopsys which are:
+ * Copyright 2006 Applied Micro Circuits Corporation
+ * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/libata.h>
+#include <linux/rtc.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+
+
+#ifdef CONFIG_SATA_DWC_DEBUG
+#define dwc_dev_dbg(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_dbg(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_dbg(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+#else
+#define dwc_dev_dbg(dev, format, arg...) \
+ ({ 0; })
+#define dwc_port_dbg(ap, format, arg...) \
+ ({ 0; })
+#define dwc_link_dbg(link, format, arg...) \
+ ({ 0; })
+#endif
+
+#ifdef CONFIG_SATA_DWC_VDEBUG
+#define DEBUG_NCQ
+#define dwc_dev_vdbg(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_vdbg(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_vdbg(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+#else
+#define dwc_dev_vdbg(dev, format, arg...) \
+ ({ 0; })
+#define dwc_port_vdbg(ap, format, arg...) \
+ ({ 0; })
+#define dwc_link_vdbg(link, format, arg...) \
+ ({ 0; })
+#endif
+
+#define dwc_dev_info(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_info(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_info(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+
+
+
+#define DRV_NAME "sata-dwc"
+#define DRV_VERSION "2.0"
+
+/* Port Multiplier discovery Signature */
+#define PSCR_SCONTROL_DET_ENABLE 0x00000001
+#define PSCR_SSTATUS_DET_PRESENT 0x00000001
+#define PSCR_SERROR_DIAG_X 0x04000000
+
+/* Port multiplier port entry in SCONTROL register */
+#define SCONTROL_PMP_MASK 0x000f0000
+#define PMP_TO_SCONTROL(p) ((p << 16) & 0x000f0000)
+#define SCONTROL_TO_PMP(p) (((p) & 0x000f0000) >> 16)
+
+
+/* SATA DMA driver Globals */
+#if defined(CONFIG_APM82181)
+#define DMA_NUM_CHANS 2
+#else
+#define DMA_NUM_CHANS 1
+#endif
+
+#define DMA_NUM_CHAN_REGS 8
+
+/* SATA DMA Register definitions */
+#if defined(CONFIG_APM82181)
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
+#else
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
+#endif
+
+#if defined(CONFIG_APM82181)
+ extern void signal_hdd_led(int, int);
+#endif
+struct dmareg {
+ u32 low; /* Low bits 0-31 */
+ u32 high; /* High bits 32-63 */
+};
+
+/* DMA Per Channel registers */
+
+struct dma_chan_regs {
+ struct dmareg sar; /* Source Address */
+ struct dmareg dar; /* Destination address */
+ struct dmareg llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+ struct dmareg sstat; /* Source Status not implemented in core */
+ struct dmareg dstat; /* Destination Status not implemented in core */
+ struct dmareg sstatar; /* Source Status Address not impl in core */
+ struct dmareg dstatar; /* Destination Status Address not implemented */
+ struct dmareg cfg; /* Config */
+ struct dmareg sgr; /* Source Gather */
+ struct dmareg dsr; /* Destination Scatter */
+};
+
+/* Generic Interrupt Registers */
+struct dma_interrupt_regs {
+ struct dmareg tfr; /* Transfer Interrupt */
+ struct dmareg block; /* Block Interrupt */
+ struct dmareg srctran; /* Source Transfer Interrupt */
+ struct dmareg dsttran; /* Dest Transfer Interrupt */
+ struct dmareg error; /* Error */
+};
+
+struct ahb_dma_regs {
+ struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
+ struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
+ struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
+ struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
+ struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
+ struct dmareg statusInt; /* Interrupt combined */
+ struct dmareg rq_srcreg; /* Src Trans Req */
+ struct dmareg rq_dstreg; /* Dst Trans Req */
+ struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req */
+ struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req */
+ struct dmareg rq_lst_srcreg; /* Last Src Trans Req */
+ struct dmareg rq_lst_dstreg; /* Last Dst Trans Req */
+ struct dmareg dma_cfg; /* DMA Config */
+ struct dmareg dma_chan_en; /* DMA Channel Enable */
+ struct dmareg dma_id; /* DMA ID */
+ struct dmareg dma_test; /* DMA Test */
+ struct dmareg res1; /* reserved */
+ struct dmareg res2; /* reserved */
+
+ /* DMA Comp Params
+ * Param 6 = dma_param[0], Param 5 = dma_param[1],
+ * Param 4 = dma_param[2] ...
+ */
+ struct dmareg dma_params[6];
+};
+
+/* Data structure for linked list item */
+struct lli {
+ u32 sar; /* Source Address */
+ u32 dar; /* Destination address */
+ u32 llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+#if defined(CONFIG_APM82181)
+ u32 dstat; /* Source status is not supported */
+#else
+ struct dmareg dstat; /* Destination Status */
+#endif
+};
+
+#define SATA_DWC_DMAC_LLI_SZ (sizeof(struct lli))
+#define SATA_DWC_DMAC_LLI_NUM 256
+#define SATA_DWC_DMAC_TWIDTH_BYTES 4
+#define SATA_DWC_DMAC_LLI_TBL_SZ \
+ (SATA_DWC_DMAC_LLI_SZ * SATA_DWC_DMAC_LLI_NUM)
+#if defined(CONFIG_APM82181)
+#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
+ (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)
+#else
+#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
+ (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)
+#endif
+/* DMA Register Operation Bits */
+#define DMA_EN 0x00000001 /* Enable AHB DMA */
+#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
+#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
+ ((0x000000001 << (ch)) << 8))
+#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
+
+/* Channel Control Register */
+#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
+#define DMA_CTL_LLP_SRCEN 0x10000000 /* Blk chain enable Src */
+#define DMA_CTL_LLP_DSTEN 0x08000000 /* Blk chain enable Dst */
+/*
+ * This define is used to set block chaining disabled in the control low
+ * register. It is already in little endian format so it can be &'d directly.
+ * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
+ */
+#define DMA_CTL_LLP_DISABLE_LE32 0xffffffe7
+#define DMA_CTL_SMS(num) ((num & 0x3) << 25) /*Src Master Select*/
+#define DMA_CTL_DMS(num) ((num & 0x3) << 23) /*Dst Master Select*/
+#define DMA_CTL_TTFC(type) ((type & 0x7) << 20) /*Type&Flow cntr*/
+#define DMA_CTL_TTFC_P2M_DMAC 0x00000002 /*Per mem,DMAC cntr*/
+#define DMA_CTL_TTFC_M2P_PER 0x00000003 /*Mem per,peri cntr*/
+#define DMA_CTL_SRC_MSIZE(size) ((size & 0x7) << 14) /*Src Burst Len*/
+#define DMA_CTL_DST_MSIZE(size) ((size & 0x7) << 11) /*Dst Burst Len*/
+#define DMA_CTL_SINC_INC 0x00000000 /*Src addr incr*/
+#define DMA_CTL_SINC_DEC 0x00000200
+#define DMA_CTL_SINC_NOCHANGE 0x00000400
+#define DMA_CTL_DINC_INC 0x00000000 /*Dst addr incr*/
+#define DMA_CTL_DINC_DEC 0x00000080
+#define DMA_CTL_DINC_NOCHANGE 0x00000100
+#define DMA_CTL_SRC_TRWID(size) ((size & 0x7) << 4) /*Src Trnsfr Width*/
+#define DMA_CTL_DST_TRWID(size) ((size & 0x7) << 1) /*Dst Trnsfr Width*/
+#define DMA_CTL_INT_EN 0x00000001 /*Interrupt Enable*/
+
+/* Channel Configuration Register high bits */
+#define DMA_CFG_FCMOD_REQ 0x00000001 /*Flow cntrl req*/
+#define DMA_CFG_PROTCTL (0x00000003 << 2) /*Protection cntrl*/
+
+/* Channel Configuration Register low bits */
+#define DMA_CFG_RELD_DST 0x80000000 /*Reload Dst/Src Addr*/
+#define DMA_CFG_RELD_SRC 0x40000000
+#define DMA_CFG_HS_SELSRC 0x00000800 /*SW hndshk Src/Dst*/
+#define DMA_CFG_HS_SELDST 0x00000400
+#define DMA_CFG_FIFOEMPTY (0x00000001 << 9) /*FIFO Empty bit*/
+
+/* Assign hardware handshaking interface (x) to dst / src peripheral */
+#define DMA_CFG_HW_HS_DEST(int_num) ((int_num & 0xF) << 11)
+#define DMA_CFG_HW_HS_SRC(int_num) ((int_num & 0xF) << 7)
+
+/* Channel Linked List Pointer Register */
+#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
+#define DMA_LLP_AHBMASTER1 0 /* List Master Select */
+#define DMA_LLP_AHBMASTER2 1
+
+#define SATA_DWC_MAX_PORTS 1
+
+#define SATA_DWC_SCR_OFFSET 0x24
+#define SATA_DWC_REG_OFFSET 0x64
+
+/* DWC SATA Registers */
+struct sata_dwc_regs {
+ u32 fptagr; /* 1st party DMA tag */
+ u32 fpbor; /* 1st party DMA buffer offset */
+ u32 fptcr; /* 1st party DMA Xfr count */
+ u32 dmacr; /* DMA Control */
+ u32 dbtsr; /* DMA Burst Transac size */
+ u32 intpr; /* Interrupt Pending */
+ u32 intmr; /* Interrupt Mask */
+ u32 errmr; /* Error Mask */
+ u32 llcr; /* Link Layer Control */
+ u32 phycr; /* PHY Control */
+ u32 physr; /* PHY Status */
+ u32 rxbistpd; /* Recvd BIST pattern def register */
+ u32 rxbistpd1; /* Recvd BIST data dword1 */
+ u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
+ u32 txbistpd; /* Trans BIST pattern def register */
+ u32 txbistpd1; /* Trans BIST data dword1 */
+ u32 txbistpd2; /* Trans BIST data dword2 */
+ u32 bistcr; /* BIST Control Register */
+ u32 bistfctr; /* BIST FIS Count Register */
+ u32 bistsr; /* BIST Status Register */
+ u32 bistdecr; /* BIST Dword Error count register */
+ u32 res[15]; /* Reserved locations */
+ u32 testr; /* Test Register */
+ u32 versionr; /* Version Register */
+ u32 idr; /* ID Register */
+ u32 unimpl[192]; /* Unimplemented */
+ u32 dmadr[256]; /* FIFO Locations in DMA Mode */
+};
+
+#define SCR_SCONTROL_DET_ENABLE 0x00000001
+#define SCR_SSTATUS_DET_PRESENT 0x00000001
+#define SCR_SERROR_DIAG_X 0x04000000
+
+/* DWC SATA Register Operations */
+#define SATA_DWC_TXFIFO_DEPTH 0x01FF
+#define SATA_DWC_RXFIFO_DEPTH 0x01FF
+
+#define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004
+#define SATA_DWC_DMACR_TXCHEN (0x00000001 | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RXCHEN (0x00000002 | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN
+
+#define SATA_DWC_DBTSR_MWR(size) ((size/4) & \
+ SATA_DWC_TXFIFO_DEPTH)
+#define SATA_DWC_DBTSR_MRD(size) (((size/4) & \
+ SATA_DWC_RXFIFO_DEPTH) << 16)
+
+// SATA DWC Interrupts
+#define SATA_DWC_INTPR_DMAT 0x00000001
+#define SATA_DWC_INTPR_NEWFP 0x00000002
+#define SATA_DWC_INTPR_PMABRT 0x00000004
+#define SATA_DWC_INTPR_ERR 0x00000008
+#define SATA_DWC_INTPR_NEWBIST 0x00000010
+#define SATA_DWC_INTPR_IPF 0x80000000
+// Interrupt masks
+#define SATA_DWC_INTMR_DMATM 0x00000001
+#define SATA_DWC_INTMR_NEWFPM 0x00000002
+#define SATA_DWC_INTMR_PMABRTM 0x00000004
+#define SATA_DWC_INTMR_ERRM 0x00000008
+#define SATA_DWC_INTMR_NEWBISTM 0x00000010
+#define SATA_DWC_INTMR_PRIMERRM 0x00000020
+#define SATA_DWC_INTPR_CMDGOOD 0x00000080
+#define SATA_DWC_INTPR_CMDABORT 0x00000040
+
+#define SATA_DWC_LLCR_SCRAMEN 0x00000001
+#define SATA_DWC_LLCR_DESCRAMEN 0x00000002
+#define SATA_DWC_LLCR_RPDEN 0x00000004
+
+// Defines for SError register
+#define SATA_DWC_SERR_ERRI 0x00000001 // Recovered data integrity error
+#define SATA_DWC_SERR_ERRM 0x00000002 // Recovered communication error
+#define SATA_DWC_SERR_ERRT 0x00000100 // Non-recovered transient data integrity error
+#define SATA_DWC_SERR_ERRC 0x00000200 // Non-recovered persistent communication or data integrity error
+#define SATA_DWC_SERR_ERRP 0x00000400 // Protocol error
+#define SATA_DWC_SERR_ERRE 0x00000800 // Internal host adapter error
+#define SATA_DWC_SERR_DIAGN 0x00010000 // PHYRdy change
+#define SATA_DWC_SERR_DIAGI 0x00020000 // PHY internal error
+#define SATA_DWC_SERR_DIAGW 0x00040000 // Phy COMWAKE signal is detected
+#define SATA_DWC_SERR_DIAGB 0x00080000 // 10b to 8b decoder err
+#define SATA_DWC_SERR_DIAGT 0x00100000 // Disparity error
+#define SATA_DWC_SERR_DIAGC 0x00200000 // CRC error
+#define SATA_DWC_SERR_DIAGH 0x00400000 // Handshake error
+#define SATA_DWC_SERR_DIAGL 0x00800000 // Link sequence (illegal transition) error
+#define SATA_DWC_SERR_DIAGS 0x01000000 // Transport state transition error
+#define SATA_DWC_SERR_DIAGF 0x02000000 // Unrecognized FIS type
+#define SATA_DWC_SERR_DIAGX 0x04000000 // Exchanged error - Set when PHY COMINIT signal is detected.
+#define SATA_DWC_SERR_DIAGA 0x08000000 // Port Selector Presence detected
+
+/* These are all the error bits; zeros are reserved fields. */
+#define SATA_DWC_SERR_ERR_BITS 0x0FFF0F03
+
+#define SATA_DWC_SCR0_SPD_GET(v) ((v >> 4) & 0x0000000F)
+
+struct sata_dwc_device {
+ struct resource reg; /* Resource for register */
+ struct device *dev; /* generic device struct */
+ struct ata_probe_ent *pe; /* ptr to probe-ent */
+ struct ata_host *host;
+ u8 *reg_base;
+ struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
+ u8 *scr_base;
+ int dma_channel; /* DWC SATA DMA channel */
+ int irq_dma;
+ struct timer_list an_timer;
+};
+
+#define SATA_DWC_QCMD_MAX 32
+
+struct sata_dwc_device_port {
+ struct sata_dwc_device *hsdev;
+ int cmd_issued[SATA_DWC_QCMD_MAX];
+ struct lli *llit[SATA_DWC_QCMD_MAX];
+ dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
+ u32 dma_chan[SATA_DWC_QCMD_MAX];
+ int dma_pending[SATA_DWC_QCMD_MAX];
+ u32 sata_dwc_sactive_issued; /* issued queued ops */
+ u32 sata_dwc_sactive_queued; /* queued ops */
+ u32 dma_interrupt_count;
+
+};
+
+static struct sata_dwc_device* dwc_dev_list[2];
+static int dma_intr_registered = 0;
+/*
+ * Commonly used DWC SATA driver Macros
+ */
+#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *) \
+ (host)->private_data)
+#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *) \
+ (ap)->host->private_data)
+#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *) \
+ (ap)->private_data)
+#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *) \
+ (qc)->ap->host->private_data)
+#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *) \
+ (hsdevp)->hsdev)
+
+enum {
+ SATA_DWC_CMD_ISSUED_NOT = 0,
+ SATA_DWC_CMD_ISSUED_PENDING = 1,
+ SATA_DWC_CMD_ISSUED_EXEC = 2,
+ SATA_DWC_CMD_ISSUED_NODATA = 3,
+
+ SATA_DWC_DMA_PENDING_NONE = 0,
+ SATA_DWC_DMA_PENDING_TX = 1,
+ SATA_DWC_DMA_PENDING_RX = 2,
+};
+
+/*
+ * Globals
+ */
+static struct ahb_dma_regs *sata_dma_regs = 0;
+
+/*
+ * Prototypes
+ */
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 check_status);
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
+static void sata_dwc_port_stop(struct ata_port *ap);
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
+
+static int dma_dwc_init(struct sata_dwc_device *hsdev);
+static void dma_dwc_exit(struct sata_dwc_device *hsdev);
+static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
+ struct lli *lli, dma_addr_t dma_lli,
+ void __iomem *addr);
+static void dma_dwc_xfer_start(int dma_ch);
+static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch);
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev);
+static void sata_dwc_init_port ( struct ata_port *ap );
+u8 sata_dwc_check_status(struct ata_port *ap);
+
+
+
+
+static const char *dir_2_txt(enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ return "bi";
+ case DMA_FROM_DEVICE:
+ return "from";
+ case DMA_TO_DEVICE:
+ return "to";
+ case DMA_NONE:
+ return "none";
+ default:
+ return "err";
+ }
+}
+
+static const char *prot_2_txt(enum ata_tf_protocols protocol)
+{
+ switch (protocol) {
+ case ATA_PROT_UNKNOWN:
+ return "unknown";
+ case ATA_PROT_NODATA:
+ return "nodata";
+ case ATA_PROT_PIO:
+ return "pio";
+ case ATA_PROT_DMA:
+ return "dma";
+ case ATA_PROT_NCQ:
+ return "ncq";
+ case ATAPI_PROT_PIO:
+ return "atapi pio";
+ case ATAPI_PROT_NODATA:
+ return "atapi nodata";
+ case ATAPI_PROT_DMA:
+ return "atapi dma";
+ default:
+ return "err";
+ }
+}
+
+inline const char *ata_cmd_2_txt(const struct ata_taskfile *tf)
+{
+ switch (tf->command) {
+ case ATA_CMD_CHK_POWER:
+ return "ATA_CMD_CHK_POWER";
+ case ATA_CMD_EDD:
+ return "ATA_CMD_EDD";
+ case ATA_CMD_FLUSH:
+ return "ATA_CMD_FLUSH";
+ case ATA_CMD_FLUSH_EXT:
+ return "ATA_CMD_FLUSH_EXT";
+ case ATA_CMD_ID_ATA:
+ return "ATA_CMD_ID_ATA";
+ case ATA_CMD_ID_ATAPI:
+ return "ATA_CMD_ID_ATAPI";
+ case ATA_CMD_FPDMA_READ:
+ return "ATA_CMD_FPDMA_READ";
+ case ATA_CMD_FPDMA_WRITE:
+ return "ATA_CMD_FPDMA_WRITE";
+ case ATA_CMD_READ:
+ return "ATA_CMD_READ";
+ case ATA_CMD_READ_EXT:
+ return "ATA_CMD_READ_EXT";
+ case ATA_CMD_READ_NATIVE_MAX_EXT :
+ return "ATA_CMD_READ_NATIVE_MAX_EXT";
+ case ATA_CMD_VERIFY_EXT :
+ return "ATA_CMD_VERIFY_EXT";
+ case ATA_CMD_WRITE:
+ return "ATA_CMD_WRITE";
+ case ATA_CMD_WRITE_EXT:
+ return "ATA_CMD_WRITE_EXT";
+ case ATA_CMD_PIO_READ:
+ return "ATA_CMD_PIO_READ";
+ case ATA_CMD_PIO_READ_EXT:
+ return "ATA_CMD_PIO_READ_EXT";
+ case ATA_CMD_PIO_WRITE:
+ return "ATA_CMD_PIO_WRITE";
+ case ATA_CMD_PIO_WRITE_EXT:
+ return "ATA_CMD_PIO_WRITE_EXT";
+ case ATA_CMD_SET_FEATURES:
+ return "ATA_CMD_SET_FEATURES";
+ case ATA_CMD_PACKET:
+ return "ATA_CMD_PACKET";
+ case ATA_CMD_PMP_READ:
+ return "ATA_CMD_PMP_READ";
+ case ATA_CMD_PMP_WRITE:
+ return "ATA_CMD_PMP_WRITE";
+ default:
+ return "ATA_CMD_???";
+ }
+}
+
+/*
+ * Dump content of the taskfile
+ */
+static void sata_dwc_tf_dump(struct device *dwc_dev, struct ata_taskfile *tf)
+{
+ dwc_dev_vdbg(dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx"
+ "device: %x\n", tf->command, prot_2_txt(tf->protocol),
+ tf->flags, tf->device);
+ dwc_dev_vdbg(dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam:"
+ "0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
+ tf->lbam, tf->lbah);
+ dwc_dev_vdbg(dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x "
+ "hob_lbam: 0x%x hob_lbah: 0x%x\n", tf->hob_feature,
+ tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
+ tf->hob_lbah);
+}
+
+/*
+ * Function: get_burst_length_encode
+ * arguments: datalength: length in bytes of data
+ * returns value to be programmed in register corresponding to data length
+ * This value is effectively the log(base 2) of the length
+ */
+static inline int get_burst_length_encode(int datalength)
+{
+ int items = datalength >> 2; /* div by 4 to get lword count */
+
+ if (items >= 64)
+ return 5;
+
+ if (items >= 32)
+ return 4;
+
+ if (items >= 16)
+ return 3;
+
+ if (items >= 8)
+ return 2;
+
+ if (items >= 4)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Clear Interrupts on a DMA channel
+ */
+static inline void clear_chan_interrupts(int c)
+{
+ out_le32(&(sata_dma_regs->interrupt_clear.tfr.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.block.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.srctran.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.dsttran.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.error.low), DMA_CHANNEL(c));
+}
+
+/*
+ * Function: dma_request_channel
+ * arguments: None
+ * returns channel number if available else -1
+ * This function checks whether the port's assigned DMA channel is currently
+ * free and, if so, hands it to the requester
+ */
+static int dma_request_channel(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ if (!(in_le32(&(sata_dma_regs->dma_chan_en.low)) & DMA_CHANNEL(hsdev->dma_channel))) {
+ dwc_port_vdbg(ap, "%s Successfully requested DMA channel %d\n",
+ __func__, hsdev->dma_channel);
+ return (hsdev->dma_channel);
+ }
+
+ return -1;
+}
+
+
+
+/*
+ * Function: dma_dwc_interrupt
+ * arguments: irq, dev_id, pt_regs
+ * returns IRQ_HANDLED after servicing end-of-transfer and error interrupts
+ * Interrupt Handler for DW AHB SATA DMA
+ */
+static int dma_dwc_interrupt(int irq, void *hsdev_instance)
+{
+ volatile u32 tfr_reg, err_reg;
+ unsigned long flags;
+ struct sata_dwc_device *hsdev =
+ (struct sata_dwc_device *)hsdev_instance;
+ struct ata_host *host = (struct ata_host *)hsdev->host;
+ struct ata_port *ap;
+ struct sata_dwc_device_port *hsdevp;
+ u8 tag = 0;
+ int chan;
+ unsigned int port = 0;
+ spin_lock_irqsave(&host->lock, flags);
+
+ ap = host->ports[port];
+ hsdevp = HSDEVP_FROM_AP(ap);
+ tag = ap->link.active_tag;
+
+ dwc_port_vdbg(ap, "%s: DMA interrupt in channel %d\n", __func__, hsdev->dma_channel);
+
+ tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low));
+ err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));
+
+ dwc_port_vdbg(ap, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
+ tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
+ chan = hsdev->dma_channel;
+
+ if (tfr_reg & DMA_CHANNEL(chan)) {
+ /*
+ * Each DMA command produces 2 interrupts. Only
+ * complete the command after both interrupts have been
+ * seen. (See sata_dwc_isr())
+ */
+ hsdevp->dma_interrupt_count++;
+ sata_dwc_clear_dmacr(hsdevp, tag);
+
+ if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) {
+ dev_err(ap->dev, "DMA not pending eot=0x%08x "
+ "err=0x%08x tag=0x%02x pending=%d\n",
+ tfr_reg, err_reg, tag,
+ hsdevp->dma_pending[tag]);
+ }
+
+ // Do the remaining work after the DMA transfer completes
+ if ((hsdevp->dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+
+ /* Clear the interrupt */
+ out_le32(&(sata_dma_regs->interrupt_clear.tfr.low),
+ DMA_CHANNEL(chan));
+ }
+
+ /* Process error interrupt. */
+ // We do not expect errors to happen here
+ if (unlikely(err_reg & DMA_CHANNEL(chan))) {
+ /* TODO Need error handler ! */
+ dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
+ err_reg);
+
+ spin_lock_irqsave(ap->lock, flags);
+ //if (ata_is_dma(qc->tf.protocol)) {
+ /* disable DMAC */
+ dma_dwc_terminate_dma(ap, chan);
+ //}
+ spin_unlock_irqrestore(ap->lock, flags);
+
+ /* Clear the interrupt. */
+ out_le32(&(sata_dma_regs->interrupt_clear.error.low),
+ DMA_CHANNEL(chan));
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dma_dwc_handler(int irq, void *hsdev_instance)
+{
+ volatile u32 tfr_reg, err_reg;
+ int chan;
+
+ tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low));
+ err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));
+
+ for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
+ /* Check for end-of-transfer interrupt. */
+
+ if (tfr_reg & DMA_CHANNEL(chan)) {
+ dma_dwc_interrupt(0, dwc_dev_list[chan]);
+ }
+ else
+
+ /* Check for error interrupt. */
+ if (err_reg & DMA_CHANNEL(chan)) {
+ dma_dwc_interrupt(0, dwc_dev_list[chan]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dma_register_interrupt (struct sata_dwc_device *hsdev)
+{
+ int retval = 0;
+ int irq = hsdev->irq_dma;
+ /*
+ * FIXME: 2 SATA controllers share the same DMA engine so
+ * currently, they also share same DMA interrupt
+ */
+ if (!dma_intr_registered) {
+ printk("%s register irq (%d)\n", __func__, irq);
+ retval = request_irq(irq, dma_dwc_handler, IRQF_SHARED, "SATA DMA", hsdev);
+ //retval = request_irq(irq, dma_dwc_handler, IRQF_DISABLED, "SATA DMA", NULL);
+ if (retval) {
+ dev_err(hsdev->dev, "%s: could not get IRQ %d\n", __func__, irq);
+ return -ENODEV;
+ }
+ //dma_intr_registered = 1;
+ }
+ return retval;
+}
+
+/*
+ * Function: dma_request_interrupts
+ * arguments: hsdev
+ * returns status
+ * This function registers ISR for a particular DMA channel interrupt
+ */
+static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
+{
+ int retval = 0;
+ int dma_chan = hsdev->dma_channel;
+
+ /* Unmask error interrupt */
+ out_le32(&sata_dma_regs->interrupt_mask.error.low,
+ in_le32(&sata_dma_regs->interrupt_mask.error.low) | DMA_ENABLE_CHAN(dma_chan));
+
+ /* Unmask end-of-transfer interrupt */
+ out_le32(&sata_dma_regs->interrupt_mask.tfr.low,
+ in_le32(&sata_dma_regs->interrupt_mask.tfr.low) | DMA_ENABLE_CHAN(dma_chan));
+
+ dwc_dev_vdbg(hsdev->dev, "Current value of interrupt_mask.error=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.error.low));
+ dwc_dev_vdbg(hsdev->dev, "Current value of interrupt_mask.tfr=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.tfr.low));
+#if 0
+ out_le32(&sata_dma_regs->interrupt_mask.block.low,
+ DMA_ENABLE_CHAN(dma_chan));
+
+ out_le32(&sata_dma_regs->interrupt_mask.srctran.low,
+ DMA_ENABLE_CHAN(dma_chan));
+
+ out_le32(&sata_dma_regs->interrupt_mask.dsttran.low,
+ DMA_ENABLE_CHAN(dma_chan));
+#endif
+ return retval;
+}
+
+/*
+ * Function: map_sg_to_lli
+ * arguments: sg: scatter/gather list(sg)
+ * num_elems: no of elements in sg list
+ * dma_lli: LLI table
+ * dest: destination address
+ * read: whether the transfer is read or write
+ * returns array of AHB DMA Linked List Items
+ * This function creates a list of LLIs for DMA Xfr and returns the number
+ * of elements in the DMA linked list.
+ *
+ * Note that the Synopsys driver has a comment proposing that better performance
+ * is possible by only enabling interrupts on the last item in the linked list.
+ * However, it seems that could be a problem if an error happened on one of the
+ * first items. The transfer would halt, but no error interrupt would occur.
+ *
+ * Currently this function sets interrupts enabled for each linked list item:
+ * DMA_CTL_INT_EN.
+ */
+static int map_sg_to_lli(struct ata_queued_cmd *qc, struct lli *lli,
+ dma_addr_t dma_lli, void __iomem *dmadr_addr)
+{
+ struct scatterlist *sg = qc->sg;
+ struct device *dwc_dev = qc->ap->dev;
+ int num_elems = qc->n_elem;
+ int dir = qc->dma_dir;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(qc->ap);
+
+ int i, idx = 0;
+ int fis_len = 0;
+ dma_addr_t next_llp;
+ int bl;
+ unsigned int dma_ts = 0;
+
+ dwc_port_vdbg(qc->ap, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x "
+ "dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
+ (u32)dmadr_addr);
+
+ bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
+
+ for (i = 0; i < num_elems; i++, sg++) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ dwc_port_vdbg(qc->ap, "%s: elem=%d sg_addr=0x%x sg_len=%d\n",
+ __func__, i, addr, sg_len);
+
+ while (sg_len) {
+
+ if (unlikely(idx >= SATA_DWC_DMAC_LLI_NUM)) {
+ /* The LLI table is not large enough. */
+ dev_err(dwc_dev, "LLI table overrun (idx=%d)\n",
+ idx);
+ break;
+ }
+ len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
+ SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
+
+ offset = addr & 0xffff;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ /*
+ * Make sure a LLI block is not created that will span a
+ * 8K max FIS boundary. If the block spans such a FIS
+ * boundary, there is a chance that a DMA burst will
+ * cross that boundary -- this results in an error in
+ * the host controller.
+ */
+ if (unlikely(fis_len + len > 8192)) {
+ dwc_port_vdbg(qc->ap, "SPLITTING: fis_len=%d(0x%x) "
+ "len=%d(0x%x)\n", fis_len, fis_len,
+ len, len);
+ len = 8192 - fis_len;
+ fis_len = 0;
+ } else {
+ fis_len += len;
+ }
+ if (fis_len == 8192)
+ fis_len = 0;
+
+ /*
+ * Set DMA addresses and lower half of control register
+ * based on direction.
+ */
+ dwc_port_vdbg(qc->ap, "%s: sg_len = %d, len = %d\n", __func__, sg_len, len);
+
+#if defined(CONFIG_APM82181)
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+ if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(1) | /* Source: Master 2 */
+ DMA_CTL_DMS(0) | /* Dest: Master 1 */
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(2) | /* Source: Master 3 */
+ DMA_CTL_DMS(0) | /* Dest: Master 1 */
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+ if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(2) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+ }
+#else
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(1) |
+ DMA_CTL_DMS(0) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+#endif
+ dwc_port_vdbg(qc->ap, "%s setting ctl.high len: 0x%08x val: "
+ "0x%08x\n", __func__, len,
+ DMA_CTL_BLK_TS(len / 4));
+
+ /* Program the LLI CTL high register */
+ dma_ts = DMA_CTL_BLK_TS(len / 4);
+ lli[idx].ctl.high = cpu_to_le32(dma_ts);
+
+ /*
+ * Program the next pointer. The next pointer must be
+ * the physical address, not the virtual address.
+ */
+ next_llp = (dma_lli + ((idx + 1) * sizeof(struct lli)));
+
+ /* The last 2 bits encode the list master select. */
+#if defined(CONFIG_APM82181)
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER1);
+#else
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
+#endif
+
+ lli[idx].llp = cpu_to_le32(next_llp);
+
+ dwc_port_vdbg(qc->ap, "%s: index %d\n", __func__, idx);
+ dwc_port_vdbg(qc->ap, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx].ctl.high);
+ dwc_port_vdbg(qc->ap, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx].ctl.low);
+ dwc_port_vdbg(qc->ap, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx].dar);
+ dwc_port_vdbg(qc->ap, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx].sar);
+ dwc_port_vdbg(qc->ap, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx].llp);
+
+ idx++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ /*
+ * The last next ptr has to be zero and the last control low register
+ * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
+ * and destination enable) set back to 0 (disabled.) This is what tells
+ * the core that this is the last item in the linked list.
+ */
+ if (likely(idx)) {
+ lli[idx-1].llp = 0x00000000;
+ lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
+
+ /* Flush cache to memory */
+ dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
+ DMA_BIDIRECTIONAL);
+ }
+
+ dwc_port_vdbg(qc->ap, "%s: Final index %d\n", __func__, idx-1);
+ dwc_port_vdbg(qc->ap, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx-1].ctl.high);
+ dwc_port_vdbg(qc->ap, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx-1].ctl.low);
+ dwc_port_vdbg(qc->ap, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx-1].dar);
+ dwc_port_vdbg(qc->ap, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx-1].sar);
+ dwc_port_vdbg(qc->ap, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx-1].llp);
+
+ return idx;
+}
+
+/*
+ * Function: dma_dwc_xfer_start
+ * arguments: Channel number
+ * Return : None
+ * Enables the DMA channel by setting its bit in the global channel-enable
+ * register (read-modify-write, other channels' bits preserved).
+ */
+static void dma_dwc_xfer_start(int dma_ch)
+{
+	/* Enable the DMA channel */
+	out_le32(&(sata_dma_regs->dma_chan_en.low),
+		 in_le32(&(sata_dma_regs->dma_chan_en.low)) |
+		 DMA_ENABLE_CHAN(dma_ch));
+
+#if defined(CONFIG_SATA_DWC_VDEBUG)
+	printk("DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low)));
+	printk("%s: setting sata_dma_regs->dma_chan_en.low with val: 0x%08x\n",
+		__func__, in_le32(&(sata_dma_regs->dma_chan_en.low)));
+#endif
+
+
+#if defined(CONFIG_APM82181)
+	/* Blink the HDD activity LED while the transfer is in flight */
+	signal_hdd_led(1 /*blink=yes*/, 2 /* _3G_LED_GREEN */);
+#endif
+
+}
+
+/*
+ * Query whether DMA channel @ch is currently enabled.
+ *
+ * Reads the global channel-enable register and tests the channel's bit.
+ * Returns 1 when enabled, 0 otherwise.
+ */
+static int dma_dwc_channel_enabled(int ch)
+{
+	u32 enabled_mask = in_le32(&(sata_dma_regs->dma_chan_en.low));
+
+	return (enabled_mask & DMA_CHANNEL(ch)) ? 1 : 0;
+}
+
+/*
+ * Terminate the current DMA transaction on channel @dma_ch.
+ *
+ * Requests channel disable and polls until the controller clears the
+ * enable bit.  The previous implementation slept 10ms even after the
+ * channel had already been observed disabled, and could spin forever if
+ * the hardware never cleared the bit; the wait is now check-first and
+ * bounded, with an error report on timeout.
+ */
+static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch)
+{
+	int enabled = dma_dwc_channel_enabled(dma_ch);
+	int retries = 100;	/* up to ~1s; the channel should stop well before */
+
+	dev_info(ap->dev, "%s terminate DMA on channel=%d enabled=%d\n",
+		 __func__, dma_ch, enabled);
+
+	if (enabled) {
+		// Request disable of the selected channel
+		out_le32(&(sata_dma_regs->dma_chan_en.low),
+			in_le32(&(sata_dma_regs->dma_chan_en.low)) | DMA_DISABLE_CHAN(dma_ch));
+
+		// Wait until the controller reports the channel disabled
+		while (dma_dwc_channel_enabled(dma_ch) && --retries)
+			msleep(10);
+
+		if (!retries)
+			dev_err(ap->dev, "%s: channel %d failed to disable\n",
+				__func__, dma_ch);
+	}
+}
+
+
+/*
+ * Setup data and DMA configuration ready for DMA transfer.
+ *
+ * Acquires an AHB DMA channel, converts the qc's scatter/gather list into
+ * a linked-list-item (LLI) chain, then programs the channel's CFG, LLP and
+ * CTL registers.  Returns the channel number on success or -EAGAIN when no
+ * channel is available.  The caller is responsible for actually starting
+ * the transfer (dma_dwc_xfer_start).
+ */
+static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
+			      struct lli *lli, dma_addr_t dma_lli,
+			      void __iomem *addr)
+{
+	int dma_ch;
+	int num_lli;
+
+	/* Acquire DMA channel */
+	dma_ch = dma_request_channel(qc->ap);
+	if (unlikely(dma_ch == -1)) {
+		dev_err(qc->ap->dev, "%s: dma channel unavailable\n", __func__);
+		return -EAGAIN;
+	}
+	dwc_port_vdbg(qc->ap, "%s: Got channel %d\n", __func__, dma_ch);
+
+	/* Convert SG list to linked list of items (LLIs) for AHB DMA */
+	num_lli = map_sg_to_lli(qc, lli, dma_lli, addr);
+
+	/* Fixed: conversion for dma_lli was the malformed "0x%0xlx" */
+	dwc_port_vdbg(qc->ap, "%s sg: 0x%p, count: %d lli: %p dma_lli: 0x%08x addr:"
+		" %p lli count: %d\n", __func__, qc->sg, qc->n_elem, lli,
+		(u32)dma_lli, addr, num_lli);
+
+	/* Clear channel interrupts */
+	clear_chan_interrupts(dma_ch);
+
+	/* Program the CFG register. */
+#if defined(CONFIG_APM82181)
+	if (dma_ch == 0) {
+		/*
+		 * Buffer mode enabled, FIFO_MODE=0.
+		 * NOTE(review): 0x000000d has only 7 hex digits; confirm
+		 * against the databook whether 0x0000000d was intended.
+		 */
+		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x000000d);
+		/* Channel 0 bit[7:5] */
+		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020);
+	} else if (dma_ch == 1) {
+		/* Buffer mode enabled, FIFO_MODE=0 */
+		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000088d);
+		/* Channel 1 bit[7:5] */
+		out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020);
+	}
+#else
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high),
+		 DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0);
+#endif
+
+	/* Program the address of the linked list */
+#if defined(CONFIG_APM82181)
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low),
+		 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER1));
+#else
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low),
+		 DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
+#endif
+
+	/*
+	 * Program the CTL register with src enable / dst enable.
+	 * 0x18000000 == DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN.
+	 */
+	out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low), 0x18000000);
+
+	dwc_port_vdbg(qc->ap, "%s DMA channel %d is ready\n", __func__, dma_ch);
+	dwc_port_vdbg(qc->ap, "%s setting cfg.high of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high)));
+	dwc_port_vdbg(qc->ap, "%s setting cfg.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low)));
+	dwc_port_vdbg(qc->ap, "%s setting llp.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low)));
+	dwc_port_vdbg(qc->ap, "%s setting ctl.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low)));
+
+	return dma_ch;
+}
+
+/*
+ * Function: dma_dwc_exit
+ * arguments: None
+ * returns status
+ * This function exits the SATA DMA driver.
+ *
+ * The IRQ is released BEFORE the register space is unmapped: with the
+ * original order (iounmap first) a late DMA interrupt could run the
+ * handler against the already-unmapped sata_dma_regs.
+ */
+static void dma_dwc_exit(struct sata_dwc_device *hsdev)
+{
+	dwc_dev_vdbg(hsdev->dev, "%s:\n", __func__);
+
+	if (hsdev->irq_dma)
+		free_irq(hsdev->irq_dma, hsdev);
+
+	if (sata_dma_regs)
+		iounmap(sata_dma_regs);
+}
+
+/*
+ * Function: dma_dwc_init
+ * arguments: hsdev
+ * returns status
+ * This function initializes the SATA DMA driver: hooks up the DMA
+ * interrupt and globally enables the DMA controller.
+ */
+static int dma_dwc_init(struct sata_dwc_device *hsdev)
+{
+	int err;
+	int irq = hsdev->irq_dma;
+
+	err = dma_request_interrupts(hsdev, irq);
+	if (err) {
+		dev_err(hsdev->dev, "%s: dma_request_interrupts returns %d\n",
+			__func__, err);
+		/*
+		 * NOTE(review): dma_dwc_exit() frees hsdev->irq_dma; confirm
+		 * dma_request_interrupts() does not leave irq_dma set on
+		 * failure, else this path frees an IRQ never requested.
+		 */
+		goto error_out;
+	}
+
+	/* Enable DMA */
+	out_le32(&(sata_dma_regs->dma_cfg.low), DMA_EN);
+
+	dev_notice(hsdev->dev, "DMA initialized\n");
+	dev_notice(hsdev->dev, "DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low)));
+	dwc_dev_vdbg(hsdev->dev, "SATA DMA registers=0x%p\n", sata_dma_regs);
+
+	return 0;
+
+error_out:
+	dma_dwc_exit(hsdev);
+
+	return err;
+}
+
+
+static void sata_dwc_dev_config(struct ata_device *adev)
+{
+	/*
+	 * NCQ is force-disabled for every device: enabling it makes the
+	 * kernel crash on hot plug after the first hot unplug (TODO: root
+	 * cause still under investigation).  Additionally this controller
+	 * does not support NCQ over a port multiplier (no FIS-based
+	 * switching), hence the extra clear + message for the PMP case.
+	 */
+	if (adev->flags & ATA_DFLAG_NCQ) {
+		/*
+		 * TODO: debug why enabling NCQ makes the linux crashed
+		 * in hot plug after the first hot unplug action.
+		 * --> need to investigate more
+		 */
+		adev->flags &= ~ATA_DFLAG_NCQ;
+		if (sata_pmp_attached(adev->link->ap)) {
+			/* redundant clear (already done above); kept for clarity */
+			adev->flags &= ~ATA_DFLAG_NCQ;
+			ata_dev_printk(adev, KERN_INFO,
+				"NCQ disabled for command-based switching\n");
+		}
+	}
+
+	/*
+	 * Since the sata_pmp_error_handler function in libata-pmp
+	 * makes FLAG_AN disabled the first time the SATA port is configured,
+	 * asynchronous notification is not configured.
+	 * This will enable the AN feature manually.
+	 */
+	adev->flags |= ATA_DFLAG_AN;
+}
+
+
+/*
+ * Read SCR register @scr of @link into *@val.
+ * Rejects offsets beyond SCR_NOTIFICATION with -EINVAL; returns 0 on success.
+ */
+static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+	struct ata_port *ap = link->ap;
+
+	if (unlikely(scr > SCR_NOTIFICATION)) {
+		dev_err(ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+			__func__, scr);
+		return -EINVAL;
+	}
+
+	*val = in_le32((void *)ap->ioaddr.scr_addr + (scr * 4));
+	dwc_dev_vdbg(ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
+		__func__, ap->print_id, scr, *val);
+
+	return 0;
+}
+
+/*
+ * Write @val to SCR register @scr of @link.
+ * Rejects offsets beyond SCR_NOTIFICATION with -EINVAL; returns 0 on success.
+ */
+static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+	struct ata_port *ap = link->ap;
+
+	dwc_dev_vdbg(ap->dev, "%s: id=%d reg=%d val=val=0x%08x\n",
+		__func__, ap->print_id, scr, val);
+
+	if (unlikely(scr > SCR_NOTIFICATION)) {
+		dev_err(ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+			__func__, scr);
+		return -EINVAL;
+	}
+
+	out_le32((void *)ap->ioaddr.scr_addr + (scr * 4), val);
+
+	return 0;
+}
+
+/* Read SCR register @scr via the core's scr_base mapping (no bounds check). */
+static inline u32 sata_dwc_core_scr_read ( struct ata_port *ap, unsigned int scr)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	return in_le32((void __iomem *)hsdev->scr_base + (scr * 4));
+}
+
+
+/* Write @val to SCR register @scr via the core's scr_base mapping (no bounds check). */
+static inline void sata_dwc_core_scr_write ( struct ata_port *ap, unsigned int scr, u32 val)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	out_le32((void __iomem *)hsdev->scr_base + (scr * 4), val);
+}
+
+/* Clear SError (scr_base + 4) by writing back its current value (W1C bits). */
+static inline void clear_serror(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	out_le32( (void __iomem *)hsdev->scr_base + 4,
+		in_le32((void __iomem *)hsdev->scr_base + 4));
+}
+
+/* Ack all pending controller interrupts by writing INTPR back to itself. */
+static inline void clear_intpr(struct sata_dwc_device *hsdev)
+{
+	out_le32(&hsdev->sata_dwc_regs->intpr,
+		 in_le32(&hsdev->sata_dwc_regs->intpr));
+}
+
+/* Ack a single interrupt by writing just @bit to INTPR (write-1-to-clear). */
+static inline void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
+{
+	out_le32(&hsdev->sata_dwc_regs->intpr, bit);
+	//	 in_le32(&hsdev->sata_dwc_regs->intpr));
+}
+
+
+/* Unmask the error interrupt in INTMR and select which SError bits raise it. */
+static inline void enable_err_irq(struct sata_dwc_device *hsdev)
+{
+	out_le32(&hsdev->sata_dwc_regs->intmr,
+		 in_le32(&hsdev->sata_dwc_regs->intmr) | SATA_DWC_INTMR_ERRM);
+	out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS);
+}
+
+/*
+ * Build the one-hot 32-bit mask for queued-command tag @tag (tag taken
+ * modulo 32).  Uses an unsigned constant: shifting signed 1 left by 31
+ * (tag 31) is signed-overflow undefined behavior.
+ */
+static inline u32 qcmd_tag_to_mask(u8 tag)
+{
+	return 0x00000001u << (tag & 0x1f);
+}
+
+
+/*
+ * Timer to monitor SCR_NOTIFICATION registers on the SATA port.
+ *
+ * Re-arms itself: 8s after a notification was seen and forwarded via
+ * sata_async_notification(), 3s otherwise.  Runs with ap->lock held.
+ */
+static void sata_dwc_an_chk(unsigned long arg)
+{
+	struct ata_port *ap = (void *)arg;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	unsigned long flags;
+	int rc = 0x0;
+	u32 sntf = 0x0;
+
+	spin_lock_irqsave(ap->lock, flags);
+	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+
+	/*
+	 * If the read succeeded and SNotification is non-zero, report AN.
+	 * (was bitwise '&'; logical '&&' is the intended short-circuit test)
+	 */
+	if ((rc == 0) && (sntf != 0)) {
+		dwc_port_dbg(ap, "Call assynchronous notification sntf=0x%08x\n", sntf);
+		sata_async_notification(ap);
+		hsdev->an_timer.expires = jiffies + msecs_to_jiffies(8000);
+	} else {
+		hsdev->an_timer.expires = jiffies + msecs_to_jiffies(3000);
+	}
+	add_timer(&hsdev->an_timer);
+	spin_unlock_irqrestore(ap->lock, flags);
+}
+
+
+/*
+ * sata_dwc_pmp_select - Set the PMP field in SControl to the given port.
+ *
+ * @port: The value (port number) to set the PMP field to.
+ *
+ * @return: The previous PMP field value; when no PMP is supported the
+ *          requested @port is returned unchanged and nothing is written.
+ */
+static u32 sata_dwc_pmp_select(struct ata_port *ap, u32 port)
+{
+	u32 scontrol, old_port;
+
+	if (!sata_pmp_supported(ap))
+		return port;
+
+	scontrol = sata_dwc_core_scr_read(ap, SCR_CONTROL);
+	old_port = SCONTROL_TO_PMP(scontrol);
+
+	/* Only touch SControl when the PMP field actually changes */
+	if (port != old_port) {
+		scontrol &= ~SCONTROL_PMP_MASK;
+		sata_dwc_core_scr_write(ap, SCR_CONTROL,
+					scontrol | PMP_TO_SCONTROL(port));
+		dwc_port_dbg(ap, "%s: old port=%d new port=%d\n", __func__, old_port, port);
+	}
+
+	return old_port;
+}
+
+/*
+ * Get the current PMP port (the PMP field of SControl).
+ */
+static inline u32 current_pmp(struct ata_port *ap)
+{
+	return SCONTROL_TO_PMP(sata_dwc_core_scr_read(ap, SCR_CONTROL));
+}
+
+
+/*
+ * Process when a PMP card is attached in the SATA port.
+ * Since our SATA port supports command-based switching only,
+ * NCQ will not be available; disable it on the port.
+ * Also arms the AN-polling timer (sata_dwc_an_chk), first firing 20s out.
+ */
+static void sata_dwc_pmp_attach ( struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dev_info(ap->dev, "Attach SATA port multiplier with %d ports\n", ap->nr_pmp_links);
+	// Disable NCQ
+	ap->flags &= ~ATA_FLAG_NCQ;
+
+	// Initialize timer for checking AN (old-style timer API: init_timer
+	// plus manual function/data assignment)
+	init_timer(&hsdev->an_timer);
+	hsdev->an_timer.expires = jiffies + msecs_to_jiffies(20000);
+	hsdev->an_timer.function = sata_dwc_an_chk;
+	hsdev->an_timer.data = (unsigned long)(ap);
+	add_timer(&hsdev->an_timer);
+}
+
+/*
+ * Process when PMP card is removed from the SATA port:
+ * reset the PMP field in SControl back to port 0 and stop the AN timer.
+ * (Re-enabling NCQ is intentionally left commented out until the NCQ
+ * hot-plug crash is fixed.)
+ */
+static void sata_dwc_pmp_detach ( struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dev_info(ap->dev, "Detach SATA port\n");
+	// Re-enable NCQ
+	// TODO: remove the below comment out when NCQ problem fixed
+	//ap->flags |= ATA_FLAG_NCQ;
+
+	sata_dwc_pmp_select(ap, 0);
+
+	// Delete timer since PMP card is detached.
+	// NOTE(review): del_timer() does not wait for a concurrently running
+	// sata_dwc_an_chk() to finish; del_timer_sync() would be safer, but
+	// verify the locking context (an_chk takes ap->lock) before changing.
+	del_timer(&hsdev->an_timer);
+}
+
+
+
+/* Report whether the link is ready, based on the taskfile Status register. */
+int sata_dwc_check_ready ( struct ata_link *link ) {
+	struct ata_port *ap = link->ap;
+
+	return ata_check_ready(ioread8(ap->ioaddr.status_addr));
+}
+
+
+/*
+ * Do soft reset on the current SATA link.
+ *
+ * Selects the link's PMP port, pulses ATA_SRST on the control register,
+ * waits for readiness, then classifies the attached device from the
+ * taskfile signature.  Returns the ata_wait_after_reset() result.
+ */
+static int sata_dwc_softreset(struct ata_link *link, unsigned int *classes,
+				unsigned long deadline)
+{
+	int rc;
+	struct ata_port *ap = link->ap;
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	struct ata_taskfile tf;
+
+	sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));
+
+	/* Issue bus reset: SRST pulse with 20us settle on each edge */
+	iowrite8(ap->ctl, ioaddr->ctl_addr);
+	udelay(20);	/* FIXME: flush */
+	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+	udelay(20);	/* FIXME: flush */
+	iowrite8(ap->ctl, ioaddr->ctl_addr);
+	ap->last_ctl = ap->ctl;
+
+	/* Always check readiness of the master device */
+	rc = ata_wait_after_reset(link, deadline, sata_dwc_check_ready);
+
+	// Classify the ata_port
+	*classes = ATA_DEV_NONE;
+	/* Verify if SStatus indicates device presence */
+	if (ata_link_online(link)) {
+		memset(&tf, 0, sizeof(tf));
+		ata_sff_tf_read(ap, &tf);
+		*classes = ata_dev_classify(&tf);
+	}
+
+	if ( *classes == ATA_DEV_PMP)
+		dwc_link_dbg(link, "-->found PMP device by sig\n");
+
+	/* Reset left SError bits set; clear them before returning */
+	clear_serror(link->ap);
+
+	return rc;
+}
+
+
+
+
+/*
+ * sata_dwc_hardreset - Do hardreset the SATA controller.
+ *
+ * Standard sata_link_hardreset() followed by port re-initialization when
+ * the link came back online.  Returns -EAGAIN when online so libata
+ * follows up with a softreset to classify the device.
+ */
+static int sata_dwc_hardreset(struct ata_link *link, unsigned int *classes,
+				unsigned long deadline)
+{
+	int rc;
+	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+	bool online;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
+
+	dwc_link_dbg(link, "%s\n", __func__);
+	sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));
+	dwc_port_vdbg(link->ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+
+	// Call standard hard reset
+	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+
+	/*
+	 * Reconfigure the port after hard reset.  Use the 'online' result
+	 * already returned by sata_link_hardreset() instead of re-reading
+	 * SStatus via ata_link_online() (redundant and could disagree with
+	 * the value used for the return below).
+	 */
+	if (online)
+		sata_dwc_init_port(link->ap);
+
+	return online ? -EAGAIN : rc;
+}
+
+/*
+ * Do hard reset on one PMP link: select its PMP port, then run the
+ * standard SATA hardreset.
+ */
+static int sata_dwc_pmp_hardreset(struct ata_link *link, unsigned int *classes,
+				unsigned long deadline)
+{
+	sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));
+
+	return sata_std_hardreset(link, classes, deadline);
+}
+
+/* See ahci.c */
+/*
+ * Process error when the SATAn_INTPR's ERR bit is set.
+ * The processing is based on the SCR_ERROR register content: each SError
+ * bit maps to an EH err_mask/action; hotplug and fatal errors freeze the
+ * port, data errors abort the offending link's commands.
+ *
+ * Fixes vs. original: the ata_ehi_push_desc() on the PMP path referenced
+ * the undefined identifier 'irq_stat' (the parameter is 'intpr'), and the
+ * final freeze/abort test used bitwise '|' on bools instead of '||'.
+ */
+static void sata_dwc_error_intr(struct ata_port *ap,
+				struct sata_dwc_device *hsdev, uint intpr)
+{
+	struct ata_eh_info *ehi;
+	struct ata_link *link;
+	struct ata_queued_cmd *active_qc = NULL;
+	u32 serror;
+	bool freeze = false, abort = false;
+	int pmp, ret;
+	unsigned int err_mask = 0, action = 0;
+#if defined(CONFIG_SATA_DWC_VDEBUG)
+	int dma_chan = hsdev->dma_channel;
+#endif
+
+	link = &ap->link;
+	ehi = &link->eh_info;
+
+	/* Record irq stat */
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", intpr);
+
+	// Record SERROR
+	serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+	dwc_port_dbg(ap, "%s serror = 0x%08x\n", __func__, serror);
+
+	// Clear SERROR and interrupt bit
+	clear_serror(ap);
+	clear_intpr(hsdev);
+
+	// Print out for test only
+	if ( serror ) {
+		dwc_port_info(ap, "Detect errors:");
+		if ( serror & SATA_DWC_SERR_ERRI )
+			printk(" ERRI");
+		if ( serror & SATA_DWC_SERR_ERRM )
+			printk(" ERRM");
+		if ( serror & SATA_DWC_SERR_ERRT )
+			printk(" ERRT");
+		if ( serror & SATA_DWC_SERR_ERRC )
+			printk(" ERRC");
+		if ( serror & SATA_DWC_SERR_ERRP )
+			printk(" ERRP");
+		if ( serror & SATA_DWC_SERR_ERRE )
+			printk(" ERRE");
+		if ( serror & SATA_DWC_SERR_DIAGN )
+			printk(" DIAGN");
+		if ( serror & SATA_DWC_SERR_DIAGI )
+			printk(" DIAGI");
+		if ( serror & SATA_DWC_SERR_DIAGW )
+			printk(" DIAGW");
+		if ( serror & SATA_DWC_SERR_DIAGB )
+			printk(" DIAGB");
+		if ( serror & SATA_DWC_SERR_DIAGT )
+			printk(" DIAGT");
+		if ( serror & SATA_DWC_SERR_DIAGC )
+			printk(" DIAGC");
+		if ( serror & SATA_DWC_SERR_DIAGH )
+			printk(" DIAGH");
+		if ( serror & SATA_DWC_SERR_DIAGL )
+			printk(" DIAGL");
+		if ( serror & SATA_DWC_SERR_DIAGS )
+			printk(" DIAGS");
+		if ( serror & SATA_DWC_SERR_DIAGF )
+			printk(" DIAGF");
+		if ( serror & SATA_DWC_SERR_DIAGX )
+			printk(" DIAGX");
+		if ( serror & SATA_DWC_SERR_DIAGA )
+			printk(" DIAGA");
+		printk("\n");
+	}
+
+#if defined(CONFIG_SATA_DWC_VDEBUG)
+	printk("%s reading cfg.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.high)));
+	printk("%s reading cfg.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.low)));
+	printk("%s reading llp.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].llp.low)));
+	printk("%s reading ctl.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].ctl.low)));
+	printk("%s reading sar.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.low)));
+	printk("%s reading sar.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.high)));
+	printk("%s reading dar.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.low)));
+	printk("%s reading dar.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.high)));
+#endif
+
+	// Process hotplug for SATA port
+	if ( serror & (SATA_DWC_SERR_DIAGX | SATA_DWC_SERR_DIAGW)) {
+		dwc_port_info(ap, "Detect hot plug signal\n");
+		ata_ehi_hotplugged(ehi);
+		ata_ehi_push_desc(ehi, serror & SATA_DWC_SERR_DIAGN ? "PHY RDY changed" : "device exchanged");
+		freeze = true;
+	}
+
+	// Process PHY internal error / Link sequence (illegal transition) error
+	if ( serror & (SATA_DWC_SERR_DIAGI | SATA_DWC_SERR_DIAGL)) {
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_RESET;
+		freeze = true;
+	}
+
+	// Process Internal host adapter error
+	if ( serror & SATA_DWC_SERR_ERRE ) {
+		dev_err(ap->dev, "Detect Internal host adapter error\n");
+		// --> need to review
+		ehi->err_mask |= AC_ERR_HOST_BUS;
+		ehi->action |= ATA_EH_RESET;
+		freeze = true;
+	}
+
+	// Process Protocol Error
+	if ( serror & SATA_DWC_SERR_ERRP ) {
+		dev_err(ap->dev, "Detect Protocol error\n");
+		ehi->err_mask |= AC_ERR_HSM;
+		ehi->action |= ATA_EH_RESET;
+		freeze = true;
+	}
+
+	// Process non-recovered persistent communication error
+	if ( serror & SATA_DWC_SERR_ERRC ) {
+		dev_err(ap->dev, "Detect non-recovered persistent communication error\n");
+		// --> TODO: review processing error
+		ehi->err_mask |= AC_ERR_ATA_BUS;
+		ehi->action |= ATA_EH_SOFTRESET;
+		//ehi->flags |= ATA_EHI_NO_AUTOPSY;
+		//freeze = true;
+	}
+
+	// Non-recovered transient data integrity error
+	if ( serror & SATA_DWC_SERR_ERRT ) {
+		dev_err(ap->dev, "Detect non-recovered transient data integrity error\n");
+		ehi->err_mask |= AC_ERR_ATA_BUS;
+		//ehi->err_mask |= AC_ERR_DEV;
+		ehi->action |= ATA_EH_SOFTRESET;
+		//ehi->flags |= ATA_EHI_NO_AUTOPSY;
+	}
+
+	// Since below errors have been recovered by hardware
+	// they don't need any error processing.
+	if ( serror & SATA_DWC_SERR_ERRM ) {
+		dev_warn(ap->dev, "Detect recovered communication error");
+	}
+	if ( serror & SATA_DWC_SERR_ERRI ) {
+		dev_warn(ap->dev, "Detect recovered data integrity error");
+	}
+
+	// If any error occur, process the qc
+	if (serror & (SATA_DWC_SERR_ERRT | SATA_DWC_SERR_ERRC)) {
+		//if (serror & 0x03f60f0) {
+		abort = true;
+		/* find out the offending link and qc */
+		if (sata_pmp_attached(ap)) {
+			pmp = current_pmp(ap);
+			// If we are working on the PMP port
+			if ( pmp < ap->nr_pmp_links ) {
+				link = &ap->pmp_link[pmp];
+				ehi = &link->eh_info;
+				active_qc = ata_qc_from_tag(ap, link->active_tag);
+				err_mask |= AC_ERR_DEV;
+				ata_ehi_clear_desc(ehi);
+				/* fixed: was undefined 'irq_stat' */
+				ata_ehi_push_desc(ehi, "irq_stat 0x%08x", intpr);
+			} else {
+				err_mask |= AC_ERR_HSM;
+				action |= ATA_EH_RESET;
+				freeze = true;
+			}
+
+		}
+		// Work on SATA port
+		else {
+			freeze = true;
+			active_qc = ata_qc_from_tag(ap, link->active_tag);
+		}
+
+		if ( active_qc) {
+			active_qc->err_mask |= err_mask;
+		} else {
+			ehi->err_mask = err_mask;
+		}
+	}
+
+	if ( freeze || abort ) {
+		//sata_dwc_qc_complete(ap, active_qc, 1);
+		// Terminate DMA channel if it is currenly in use
+		if ( dma_request_channel(ap) != -1 ) {
+			dwc_port_dbg(ap, "Terminate DMA channel %d for handling error\n", hsdev->dma_channel);
+			dma_dwc_terminate_dma(ap, hsdev->dma_channel);
+		}
+	}
+
+	if (freeze) {
+		ret = ata_port_freeze(ap);
+		ata_port_printk(ap, KERN_INFO, "Freeze port with %d QCs aborted\n", ret);
+	}
+	else if (abort) {
+		if (active_qc) {
+			ret = ata_link_abort(active_qc->dev->link);
+			ata_link_printk(link, KERN_INFO, "Abort %d QCs\n", ret);
+		} else {
+			ret = ata_port_abort(ap);
+			ata_port_printk(ap, KERN_INFO, "Abort %d QCs on the SATA port\n", ret);
+		}
+	}
+}
+
+
+/*
+ * Function : sata_dwc_isr
+ * arguments : irq, void *dev_instance, struct pt_regs *regs
+ * Return value : irqreturn_t - status of IRQ
+ * This Interrupt handler called via port ops registered function.
+ * .irq_handler = sata_dwc_isr
+ *
+ * Handles, in order: controller error (SATA_DWC_INTPR_ERR), first-party
+ * DMA setup FIS for NCQ (SATA_DWC_INTPR_NEWFP), non-NCQ command
+ * completion, then NCQ completion (possibly several tags per interrupt).
+ */
+static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
+{
+	struct ata_host *host = (struct ata_host *)dev_instance;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
+	struct ata_port *ap;
+	struct ata_queued_cmd *qc;
+	unsigned long flags;
+	u8 status, tag;
+	int handled, num_processed, port = 0;
+	u32 intpr, sactive, sactive2, tag_mask;
+	struct sata_dwc_device_port *hsdevp;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	/* Read the interrupt register */
+	intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+
+	/* Single-port controller: always operate on port 0 */
+	ap = host->ports[port];
+	hsdevp = HSDEVP_FROM_AP(ap);
+
+	dwc_port_dbg(ap,"%s\n",__func__);
+	if ( intpr != 0x80000080)
+		dwc_port_dbg(ap, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, ap->link.active_tag);
+	//dwc_port_dbg(ap, "%s: INTMR=0x%08x, ERRMR=0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->intmr), in_le32(&hsdev->sata_dwc_regs->errmr));
+
+	/* Check for error interrupt */
+	if (intpr & SATA_DWC_INTPR_ERR) {
+		sata_dwc_error_intr(ap, hsdev, intpr);
+		handled = 1;
+		/*
+		 * NOTE(review): this signal_hdd_led() call is NOT guarded by
+		 * CONFIG_APM82181 unlike every other call site — confirm it
+		 * builds on non-APM82181 configurations.
+		 */
+		signal_hdd_led(0 /*off blink*/, 1 /*red color*/);
+		goto done_irqrestore;
+	}
+
+	/* Check for DMA SETUP FIS (FP DMA) interrupt */
+	if (intpr & SATA_DWC_INTPR_NEWFP) {
+		dwc_port_dbg(ap, "%s: NEWFP INTERRUPT in HSDEV with DMA channel %d\n", __func__, hsdev->dma_channel);
+		clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
+
+		/* FPTAGR holds the tag of the command the device selected */
+		tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
+		dwc_dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
+		if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PENDING)
+			dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
+
+		hsdevp->sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
+
+		qc = ata_qc_from_tag(ap, tag);
+		/*
+		 * Start FP DMA for NCQ command. At this point the tag is the
+		 * active tag. It is the tag that matches the command about to
+		 * be completed.
+		 */
+		qc->ap->link.active_tag = tag;
+		sata_dwc_bmdma_start_by_tag(qc, tag);
+		qc->ap->hsm_task_state = HSM_ST_LAST;
+
+		handled = 1;
+		goto done_irqrestore;
+	}
+
+	sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE);
+	/* tags we issued that the device has since completed */
+	tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive;
+
+	/* If no sactive issued and tag_mask is zero then this is not NCQ */
+	if (hsdevp->sata_dwc_sactive_issued == 0 && tag_mask == 0) {
+		if (ap->link.active_tag == ATA_TAG_POISON)
+			tag = 0;
+		else
+			tag = ap->link.active_tag;
+		qc = ata_qc_from_tag(ap, tag);
+
+		/* DEV interrupt w/ no active qc? */
+		if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+			dev_err(ap->dev, "%s intr with no active qc qc=%p\n",
+				__func__, qc);
+			/* read Status anyway to ack/clear the interrupt */
+			ap->ops->sff_check_status(ap);
+			handled = 1;
+			goto done_irqrestore;
+		}
+
+		status = ap->ops->sff_check_status(ap);
+
+		qc->ap->link.active_tag = tag;
+		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+		if (status & ATA_ERR) {
+			dwc_dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
+			sata_dwc_qc_complete(ap, qc, 1);
+			handled = 1;
+			goto done_irqrestore;
+		}
+
+		dwc_dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
+			__func__, prot_2_txt(qc->tf.protocol));
+/* retry target when qc completion below reports still-busy */
+drv_still_busy:
+		if (ata_is_dma(qc->tf.protocol)) {
+			int dma_flag = hsdevp->dma_pending[tag];
+			/*
+			 * Each DMA transaction produces 2 interrupts. The DMAC
+			 * transfer complete interrupt and the SATA controller
+			 * operation done interrupt. The command should be
+			 * completed only after both interrupts are seen.
+			 */
+			hsdevp->dma_interrupt_count++;
+			if (unlikely(dma_flag == SATA_DWC_DMA_PENDING_NONE)) {
+				dev_err(ap->dev, "%s: DMA not pending "
+					"intpr=0x%08x status=0x%08x pend=%d\n",
+					__func__, intpr, status, dma_flag);
+			}
+
+			/* even count => both halves of the pair were seen */
+			if ((hsdevp->dma_interrupt_count % 2) == 0)
+				sata_dwc_dma_xfer_complete(ap, 1);
+		} else if (ata_is_pio(qc->tf.protocol)) {
+			ata_sff_hsm_move(ap, qc, status, 0);
+			handled = 1;
+			goto done_irqrestore;
+		} else {
+			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+				goto drv_still_busy;
+		}
+
+		handled = 1;
+		goto done_irqrestore;
+	}
+
+	/*
+	 * This is a NCQ command. At this point we need to figure out for which
+	 * tags we have gotten a completion interrupt. One interrupt may serve
+	 * as completion for more than one operation when commands are queued
+	 * (NCQ). We need to process each completed command.
+	 */
+
+process_cmd: /* process completed commands */
+	sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE);
+	tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive;
+
+	if (sactive != 0 || hsdevp->sata_dwc_sactive_issued > 1 || tag_mask > 1) {
+		dwc_dev_dbg(ap->dev, "%s NCQ: sactive=0x%08x sactive_issued=0x%08x"
+			" tag_mask=0x%08x\n", __func__, sactive,
+			hsdevp->sata_dwc_sactive_issued, tag_mask);
+	}
+
+	/* sanity: every completed tag must have been issued by us */
+	if (unlikely((tag_mask | hsdevp->sata_dwc_sactive_issued) != hsdevp->sata_dwc_sactive_issued)) {
+		dev_warn(ap->dev, "Bad tag mask?  sactive=0x%08x "
+			"sata_dwc_sactive_issued=0x%08x  tag_mask=0x%08x\n",
+			sactive, hsdevp->sata_dwc_sactive_issued, tag_mask);
+	}
+
+	/* read just to clear ... not bad if currently still busy */
+	status = ap->ops->sff_check_status(ap);
+	dwc_dev_dbg(ap->dev, "%s ATA status register=0x%x, tag_mask=0x%x\n", __func__, status, tag_mask);
+
+	/* walk tag_mask lowest-set-bit first, completing each tag */
+	tag = 0;
+	num_processed = 0;
+	while (tag_mask) {
+		num_processed++;
+		while (!(tag_mask & 0x00000001)) {
+			tag++;
+			tag_mask <<= 1;
+		}
+		tag_mask &= (~0x00000001);
+		qc = ata_qc_from_tag(ap, tag);
+
+		/* To be picked up by completion functions */
+		qc->ap->link.active_tag = tag;
+		hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+		/* Let libata/scsi layers handle error */
+		if (unlikely(status & ATA_ERR)) {
+			dwc_dev_vdbg(ap->dev, "%s ATA_ERR (0x%x)\n",
+				__func__, status);
+
+			sata_dwc_qc_complete(ap, qc, 1);
+			handled = 1;
+			goto done_irqrestore;
+		}
+
+		/* Process completed command */
+		dwc_dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
+			prot_2_txt(qc->tf.protocol));
+		if (ata_is_dma(qc->tf.protocol)) {
+			hsdevp->dma_interrupt_count++;
+			if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)
+				dev_warn(ap->dev,
+					"%s: DMA not pending?\n", __func__);
+			if ((hsdevp->dma_interrupt_count % 2) == 0)
+				sata_dwc_dma_xfer_complete(ap, 1);
+		} else {
+			if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+				goto still_busy;
+		}
+		continue;
+
+still_busy:
+		ap->stats.idle_irq++;
+		dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
+			ap->print_id);
+	} /* while tag_mask */
+
+	/*
+	 * Check to see if any commands completed while we were processing our
+	 * initial set of completed commands (reading of status clears
+	 * interrupts, so we might miss a completed command interrupt if one
+	 * came in while we were processing:
+	 * we read status as part of processing a completed command).
+	 */
+	sactive2 = sata_dwc_core_scr_read(ap, SCR_ACTIVE);
+	if (sactive2 != sactive) {
+		dwc_dev_dbg(ap->dev, "More finished - sactive=0x%x sactive2=0x%x\n",
+			sactive, sactive2);
+		goto process_cmd;
+	}
+	handled = 1;
+
+done_irqrestore:
+	spin_unlock_irqrestore(&host->lock, flags);
+#if defined(CONFIG_APM82181)
+	signal_hdd_led(0 /*off blink*/, -1 /* no color */);
+#endif
+	return IRQ_RETVAL(handled);
+}
+
+
+/*
+ * Clear DMA Control Register after completing transferring data
+ * using AHB DMA: clears the RX or TX channel-enable bit according to
+ * which direction was pending for @tag, or both when the driver state
+ * is out of sync.
+ */
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
+
+	if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
+		// Clear receive channel enable bit
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+			 SATA_DWC_DMACR_RX_CLEAR(
+				 in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+	} else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
+		// Clear transmit channel enable bit
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+			 SATA_DWC_DMACR_TX_CLEAR(
+				 in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+	} else {
+		/*
+		 * This should not happen, it indicates the driver is out of
+		 * sync. If it does happen, clear dmacr anyway.
+		 */
+		dev_err(hsdev->dev, "%s DMA protocol RX and TX DMA not pending "
+			"tag=0x%02x pending=%d dmacr: 0x%08x\n",
+			__func__, tag, hsdevp->dma_pending[tag],
+			in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+
+		// Clear all transmit and receive bit, but TXMOD bit is set to 1
+		out_le32(&(hsdev->sata_dwc_regs->dmacr),
+				SATA_DWC_DMACR_TXRXCH_CLEAR);
+	}
+}
+
+/*
+ * Finish the qc associated with the port's active tag after its DMA
+ * transfer completed; clears the per-tag DMA-pending state for DMA
+ * protocols before handing off to sata_dwc_qc_complete().
+ */
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
+{
+	struct ata_queued_cmd *qc;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	u8 tag = 0;
+
+	/*
+	 * NOTE(review): active_tag is an int; assigning it to a u8 truncates
+	 * (e.g. ATA_TAG_POISON).  Callers appear to only reach here with a
+	 * valid tag set — confirm.
+	 */
+	tag = ap->link.active_tag;
+	qc = ata_qc_from_tag(ap, tag);
+
+#ifdef DEBUG_NCQ
+	if (tag > 0) {
+		dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
+			 "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
+			 dir_2_txt(qc->dma_dir), prot_2_txt(qc->tf.protocol),
+			 in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+	}
+#endif
+
+	if (ata_is_dma(qc->tf.protocol)) {
+		// DMA out of sync error
+		if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) {
+			dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
+				"pending dmacr: 0x%08x\n", __func__,
+				in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+		}
+
+		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+		sata_dwc_qc_complete(ap, qc, check_status);
+		ap->link.active_tag = ATA_TAG_POISON;
+	} else {
+		sata_dwc_qc_complete(ap, qc, check_status);
+	}
+}
+
+/*
+ * Complete a queued command: optionally poll Status until not-BUSY
+ * (bounded at ~10 reads), check SError and schedule a reset plus DMA
+ * termination on error, clear the tag from the driver's queued/issued
+ * masks, then hand the qc back to libata.  Always returns 0.
+ */
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+				u32 check_status)
+{
+	u8 status = 0;
+	int i = 0;
+	u32 mask = 0x0;
+	u8 tag = qc->tag;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	u32 serror;
+	int dma_ch;
+
+	dwc_dev_vdbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
+
+	/* Completing a qc whose DMA is still pending indicates a driver bug */
+	if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX))
+		dev_err(ap->dev, "TX DMA PENDINGING\n");
+	else if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX))
+		dev_err(ap->dev, "RX DMA PENDINGING\n");
+
+	if (check_status) {
+		i = 0;
+		do {
+			/* check main status, clearing INTRQ */
+			status = ap->ops->sff_check_status(ap);
+			if (status & ATA_BUSY) {
+				dwc_dev_vdbg(ap->dev, "STATUS BUSY (0x%02x) [%d]\n",
+					status, i);
+			}
+			if (++i > 10)
+				break;
+		} while (status & ATA_BUSY);
+
+		status = ap->ops->sff_check_status(ap);
+		if (unlikely(status & ATA_BUSY))
+			dev_err(ap->dev, "QC complete cmd=0x%02x STATUS BUSY "
+				"(0x%02x) [%d]\n", qc->tf.command, status, i);
+
+
+		// Check error ==> need to process error here
+		serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+		if (unlikely(serror & SATA_DWC_SERR_ERR_BITS))
+		{
+			dev_err(ap->dev, "****** SERROR=0x%08x ******\n", serror);
+			ap->link.eh_context.i.action |= ATA_EH_RESET;
+			/* kill the in-flight DMA; non-DMA protocols fall back
+			 * to the tag-0 channel */
+			if (ata_is_dma(qc->tf.protocol)) {
+				dma_ch = hsdevp->dma_chan[tag];
+				dma_dwc_terminate_dma(ap, dma_ch);
+			} else {
+				dma_ch = hsdevp->dma_chan[0];
+				dma_dwc_terminate_dma(ap, dma_ch);
+			}
+		}
+	}
+	dwc_dev_vdbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u: "
+		"protocol=%d\n", qc->tf.command, status, ap->print_id,
+		qc->tf.protocol);
+
+	/* clear active bit */
+	mask = (~(qcmd_tag_to_mask(tag)));
+	hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask;
+	hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask;
+	dwc_port_vdbg(ap, "%s - sata_dwc_sactive_queued=0x%08x, sata_dwc_sactive_issued=0x%08x\n",__func__, hsdevp->sata_dwc_sactive_queued, hsdevp->sata_dwc_sactive_issued);
+	dwc_port_vdbg(ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+
+	/* Complete taskfile transaction (does not read SCR registers) */
+	ata_qc_complete(qc);
+
+	return 0;
+}
+
+/*
+ * Clear interrupt and error flags in DMA status register.
+ * Called via ap->ops->sff_irq_clear; only the AHB DMA channel interrupts
+ * are cleared here — the SATA-core intmr/errmr writes below were disabled.
+ */
+void sata_dwc_irq_clear (struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	dwc_port_dbg(ap,"%s\n",__func__);
+
+	// Clear DMA interrupts
+	clear_chan_interrupts(hsdev->dma_channel);
+	//sata_dma_regs
+	//out_le32(&hsdev->sata_dwc_regs->intmr,
+	//	in_le32(&hsdev->sata_dwc_regs->intmr) & ~SATA_DWC_INTMR_ERRM);
+	//out_le32(&hsdev->sata_dwc_regs->errmr, 0x0);
+	//sata_dwc_check_status(ap);
+}
+
+/*
+ * Turn on IRQ
+ *
+ * Re-enables device interrupts (clears ATA_NIEN in the control register),
+ * waits for the device to go idle, clears any latched interrupt and then
+ * unmasks the controller's error interrupts.  Returns the last Status
+ * register value from ata_wait_idle().
+ */
+u8 sata_dwc_irq_on(struct ata_port *ap)
+{
+	struct ata_ioports *ioaddr = &ap->ioaddr;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	u8 tmp;
+
+	dwc_port_dbg(ap,"%s\n",__func__);
+	ap->ctl &= ~ATA_NIEN;
+	ap->last_ctl = ap->ctl;
+
+	if (ioaddr->ctl_addr)
+		iowrite8(ap->ctl, ioaddr->ctl_addr);
+	tmp = ata_wait_idle(ap);
+
+	ap->ops->sff_irq_clear(ap);
+	enable_err_irq(hsdev);
+
+	return tmp;
+}
+
+
+/*
+ * This function enables the interrupts in IMR and unmasks them in ERRMR
+ *
+ * Enables error, new-FIS, PM-abort and DMA-terminate interrupts in the
+ * SATA core's interrupt mask register, then unmasks the error bits that
+ * should raise an error interrupt.
+ */
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
+{
+	// Enable interrupts
+	out_le32(&hsdev->sata_dwc_regs->intmr,
+		SATA_DWC_INTMR_ERRM |
+		SATA_DWC_INTMR_NEWFPM |
+		SATA_DWC_INTMR_PMABRTM |
+		SATA_DWC_INTMR_DMATM);
+
+	/*
+	 * Unmask the error bits that should trigger an error interrupt by
+	 * setting the error mask register.
+	 */
+	out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS);
+
+	dwc_dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", __func__,
+		in_le32(&hsdev->sata_dwc_regs->intmr),
+		in_le32(&hsdev->sata_dwc_regs->errmr));
+}
+
+/*
+ * Configure DMA and interrupts on SATA port. This should be called after
+ * hardreset is executed on the SATA port.
+ *
+ * For port 0 only, clears the TX/RX DMA channel enables and programs the
+ * default AHB burst sizes; then (for every port) re-enables the SATA core
+ * interrupts.
+ */
+static void sata_dwc_init_port ( struct ata_port *ap ) {
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	// Configure DMA
+	if (ap->port_no == 0) {
+		dwc_dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+			__func__);
+
+		// Clear all transmit/receive bits
+		out_le32(&hsdev->sata_dwc_regs->dmacr,
+			SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+		dwc_dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__);
+		out_le32(&hsdev->sata_dwc_regs->dbtsr,
+			(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+	}
+
+	// Enable interrupts
+	sata_dwc_enable_interrupts(hsdev);
+}
+
+
+/*
+ * Setup SATA ioport with corresponding register addresses
+ *
+ * Fills in the taskfile shadow-register addresses at fixed 32-bit-spaced
+ * offsets from @base.  Read/write pairs (data/cmd, error/feature,
+ * command/status, altstatus/ctl) share the same offset.
+ */
+static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
+{
+	port->cmd_addr = (void *)base + 0x00;
+	port->data_addr = (void *)base + 0x00;
+
+	port->error_addr = (void *)base + 0x04;
+	port->feature_addr = (void *)base + 0x04;
+
+	port->nsect_addr = (void *)base + 0x08;
+
+	port->lbal_addr = (void *)base + 0x0c;
+	port->lbam_addr = (void *)base + 0x10;
+	port->lbah_addr = (void *)base + 0x14;
+
+	port->device_addr = (void *)base + 0x18;
+	port->command_addr = (void *)base + 0x1c;
+	port->status_addr = (void *)base + 0x1c;
+
+	port->altstatus_addr = (void *)base + 0x20;
+	port->ctl_addr = (void *)base + 0x20;
+}
+
+
+/*
+ * Function : sata_dwc_port_start
+ * arguments : struct ata_port *ap
+ * Return value : returns 0 if success, error code otherwise
+ * This function allocates the per-port structure and the scatter-gather
+ * LLI tables used by the AHB DMA engine (one table per queued command).
+ *
+ * On failure, any LLI tables allocated so far are freed here; the original
+ * code leaked them and called sata_dwc_port_stop() before ap->private_data
+ * was ever set, so port_stop operated on a stale/NULL pointer.
+ */
+static int sata_dwc_port_start(struct ata_port *ap)
+{
+	int err = 0;
+	struct sata_dwc_device *hsdev;
+	struct sata_dwc_device_port *hsdevp = NULL;
+	struct device *pdev;
+	u32 sstatus;
+	int i;
+
+	hsdev = HSDEV_FROM_AP(ap);
+
+	dwc_dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
+
+	hsdev->host = ap->host;
+	pdev = ap->host->dev;
+	if (!pdev) {
+		dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
+		err = -ENODEV;
+		goto cleanup_exit;
+	}
+
+	/* Allocate Port Struct (zeroed, so llit[] starts out NULL) */
+	hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
+	if (!hsdevp) {
+		dev_err(ap->dev, "%s: kzalloc failed for hsdevp\n", __func__);
+		err = -ENOMEM;
+		goto cleanup_exit;
+	}
+	hsdevp->hsdev = hsdev;
+
+	for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
+		hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
+
+	ap->prd = 0;	/* set these so libata doesn't use them */
+	ap->prd_dma = 0;
+
+	/*
+	 * DMA - Assign scatter gather LLI table. We can't use the libata
+	 * version since it's PRD is IDE PCI specific.
+	 */
+	for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+		hsdevp->llit[i] = dma_alloc_coherent(pdev,
+						     SATA_DWC_DMAC_LLI_TBL_SZ,
+						     &(hsdevp->llit_dma[i]),
+						     GFP_ATOMIC);
+		if (!hsdevp->llit[i]) {
+			dev_err(ap->dev, "%s: dma_alloc_coherent failed size "
+				"0x%x\n", __func__, SATA_DWC_DMAC_LLI_TBL_SZ);
+			err = -ENOMEM;
+			goto cleanup_exit;
+		}
+	}
+
+	if (ap->port_no == 0) {
+		dwc_dev_vdbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+			__func__);
+
+		out_le32(&hsdev->sata_dwc_regs->dmacr,
+			SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+		dwc_dev_vdbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__);
+		out_le32(&hsdev->sata_dwc_regs->dbtsr,
+			(SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+			SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+		ata_port_printk(ap, KERN_INFO, "%s: setting burst size in DBTSR: 0x%08x\n",
+				__func__, in_le32(&hsdev->sata_dwc_regs->dbtsr));
+	}
+
+	/* Clear any error bits before libata starts issuing commands */
+	clear_serror(ap);
+
+	ap->private_data = hsdevp;
+
+	/* Are we in Gen I or II */
+	sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS);
+	switch (SATA_DWC_SCR0_SPD_GET(sstatus)) {
+	case 0x0:
+		dev_info(ap->dev, "**** No neg speed (nothing attached?) \n");
+		break;
+	case 0x1:
+		dev_info(ap->dev, "**** GEN I speed rate negotiated \n");
+		break;
+	case 0x2:
+		dev_info(ap->dev, "**** GEN II speed rate negotiated \n");
+		break;
+	}
+
+cleanup_exit:
+	if (err) {
+		if (hsdevp) {
+			/* Free any LLI tables allocated before the failure */
+			for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+				if (hsdevp->llit[i])
+					dma_free_coherent(pdev,
+						SATA_DWC_DMAC_LLI_TBL_SZ,
+						hsdevp->llit[i],
+						hsdevp->llit_dma[i]);
+			}
+			kfree(hsdevp);
+		}
+		dwc_dev_vdbg(ap->dev, "%s: fail\n", __func__);
+	} else {
+		dwc_dev_vdbg(ap->dev, "%s: done\n", __func__);
+	}
+
+	return err;
+}
+
+
+/*
+ * sata_dwc_port_stop - release per-port resources allocated by port_start.
+ * Frees the per-command LLI tables and the port structure, then clears
+ * ap->private_data.
+ */
+static void sata_dwc_port_stop(struct ata_port *ap)
+{
+	int i;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+	dwc_port_dbg(ap, "%s: stop port\n", __func__);
+
+	if (hsdevp && hsdev) {
+		/* deallocate LLI table */
+		for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+			dma_free_coherent(ap->host->dev,
+					SATA_DWC_DMAC_LLI_TBL_SZ,
+					hsdevp->llit[i], hsdevp->llit_dma[i]);
+		}
+
+		kfree(hsdevp);
+	}
+	ap->private_data = NULL;
+}
+
+/*
+ * Since the SATA DWC is master only. The dev select operation will
+ * be removed.
+ * Kept as a stub (with a short settle delay) so it can be plugged into
+ * ops->sff_dev_select.
+ */
+void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
+{
+	// Do nothing
+	ndelay(100);
+}
+
+/*
+ * Function : sata_dwc_exec_command_by_tag
+ * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
+ * Return value : None
+ * This function keeps track of individual command tag ids and calls
+ * ata_exec_command in libata
+ * The cmd_issued state is recorded under the host lock; SError is cleared
+ * just before the taskfile command is written.
+ */
+static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
+					struct ata_taskfile *tf,
+					u8 tag, u32 cmd_issued)
+{
+	unsigned long flags;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+	dwc_dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d, ap->link->tag=0x%08x\n", __func__, tf->command,
+		ata_cmd_2_txt(tf), tag, ap->link.active_tag);
+
+	spin_lock_irqsave(&ap->host->lock, flags);
+	hsdevp->cmd_issued[tag] = cmd_issued;
+	spin_unlock_irqrestore(&ap->host->lock, flags);
+
+	/*
+	 * Clear SError before executing a new command.
+	 *
+	 * TODO if we read a PM's registers now, we will throw away the task
+	 * file values loaded into the shadow registers for this command.
+	 *
+	 * sata_dwc_scr_write and read can not be used here. Clearing the PM
+	 * managed SError register for the disk needs to be done before the
+	 * task file is loaded.
+	 */
+	clear_serror(ap);
+	ata_sff_exec_command(ap, tf);
+}
+
+/* Issue the taskfile for @tag and mark the command as issue-pending. */
+static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+	sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
+				SATA_DWC_CMD_ISSUED_PENDING);
+}
+
+/*
+ * BMDMA setup entry point: NCQ commands keep their own tag, everything
+ * else is forced onto tag 0 before handing off to the per-tag setup.
+ */
+static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
+{
+	u8 tag = qc->tag;
+
+	dwc_port_dbg(qc->ap, "%s\n", __func__);
+	if (!ata_is_ncq(qc->tf.protocol))
+		tag = 0;
+	else
+		dwc_dev_vdbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+			__func__, qc->ap->link.sactive, tag);
+
+	sata_dwc_bmdma_setup_by_tag(qc, tag);
+}
+
+/*
+ * sata_dwc_bmdma_start_by_tag - kick off the AHB DMA transfer for @tag.
+ * Records the pending DMA direction, programs the TX or RX channel enable
+ * in DMACR and starts the previously-prepared DMA channel.  If the command
+ * was never marked issued, the DMA is not started.
+ *
+ * Note: 'start_dma' was declared volatile in the original code; it is a
+ * plain local never shared with an interrupt context, so the qualifier
+ * was meaningless and has been dropped.
+ */
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+	int start_dma;
+	u32 reg, dma_chan;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
+	struct ata_port *ap = qc->ap;
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	int dir = qc->dma_dir;
+	dma_chan = hsdevp->dma_chan[tag];
+
+	/* Used for ata_bmdma_start(qc) -- we are not BMDMA compatible */
+
+	if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
+		start_dma = 1;
+		if (dir == DMA_TO_DEVICE)
+			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
+		else
+			hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
+	} else {
+		dev_err(ap->dev, "%s: Command not pending cmd_issued=%d "
+			"(tag=%d) - DMA NOT started\n", __func__,
+			hsdevp->cmd_issued[tag], tag);
+		start_dma = 0;
+	}
+
+	dwc_dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
+		"start_dma? %x\n", __func__, qc, tag, qc->tf.command,
+		dir_2_txt(qc->dma_dir), start_dma);
+	sata_dwc_tf_dump(hsdev->dev, &(qc->tf));
+
+	// Start DMA transfer
+	if (start_dma) {
+		/* Log (but do not abort on) pre-existing SError bits */
+		reg = sata_dwc_core_scr_read(ap, SCR_ERROR);
+		if (unlikely(reg & SATA_DWC_SERR_ERR_BITS)) {
+			dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
+				__func__, reg);
+			//sata_async_notification(ap);
+			//return;
+		}
+
+		// Set DMA control registers
+		if (dir == DMA_TO_DEVICE)
+			out_le32(&hsdev->sata_dwc_regs->dmacr,
+				SATA_DWC_DMACR_TXCHEN);
+		else
+			out_le32(&hsdev->sata_dwc_regs->dmacr,
+				SATA_DWC_DMACR_RXCHEN);
+
+		dwc_dev_vdbg(ap->dev, "%s: setting DMACR: 0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->dmacr));
+		/* Enable AHB DMA transfer on the specified channel */
+		dma_dwc_xfer_start(dma_chan);
+	}
+}
+
+
+/*
+ * BMDMA start entry point: NCQ commands keep their own tag, all others
+ * use tag 0; delegates to sata_dwc_bmdma_start_by_tag().
+ */
+static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
+{
+	u8 tag = qc->tag;
+
+	if (ata_is_ncq(qc->tf.protocol)) {
+		dwc_dev_vdbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+			__func__, qc->ap->link.sactive, tag);
+	} else {
+		tag = 0;
+	}
+
+	dwc_port_dbg(qc->ap, "%s, tag=0x%08x\n", __func__, tag);
+	sata_dwc_bmdma_start_by_tag(qc, tag);
+}
+
+/*
+ * Function : sata_dwc_qc_prep_by_tag
+ * arguments : ata_queued_cmd *qc, u8 tag
+ * Return value : None
+ * qc_prep for a particular queued command based on tag: builds the LLI
+ * table for the transfer and records the DMA channel assigned to it.
+ *
+ * Fixes over the original: dma_chan was a u32, so the `< 0` error check
+ * could never fire, and the error printout used the never-initialized
+ * local `err` instead of the returned error code.
+ */
+static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+	struct ata_port *ap = qc->ap;
+	int dma_chan;	/* signed: dma_dwc_xfer_setup() returns <0 on error */
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	int dir;
+
+	// DMA direction
+	dir = qc->dma_dir;
+
+	/* Nothing to prepare for non-DMA protocols */
+	if ((dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+		return;
+
+	dwc_dev_vdbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
+		__func__, ap->port_no, dir_2_txt(dir), qc->n_elem);
+
+	// Setup DMA for transfer
+	dma_chan = dma_dwc_xfer_setup(qc, hsdevp->llit[tag],
+				hsdevp->llit_dma[tag],
+				(void *__iomem)(&hsdev->sata_dwc_regs->dmadr));
+
+	if (unlikely(dma_chan < 0)) {
+		dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
+			__func__, dma_chan);
+		return;
+	}
+
+	hsdevp->dma_chan[tag] = dma_chan;
+}
+
+
+
+/**
+ * sata_dwc_exec_command - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA command, with proper synchronization with interrupt
+ * handler / other threads.  Reads altstatus once to flush the write,
+ * then waits the mandated 400ns before the caller polls status.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void sata_dwc_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+	iowrite8(tf->command, ap->ioaddr.command_addr);
+	/* If we have an mmio device with no ctl and no altstatus
+	 * method this will fail. No such devices are known to exist.
+	 */
+	if (ap->ioaddr.altstatus_addr)
+		ioread8(ap->ioaddr.altstatus_addr);
+	ndelay(400);
+}
+
+/**
+ * sata_dwc_tf_to_host - issue ATA taskfile to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA taskfile register set to ATA host controller,
+ * with proper synchronization with interrupt handler and
+ * other threads.  Loads the shadow registers, then writes the command.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static inline void sata_dwc_tf_to_host(struct ata_port *ap,
+				const struct ata_taskfile *tf)
+{
+	dwc_port_dbg(ap,"%s\n",__func__);
+	ap->ops->sff_tf_load(ap, tf);
+	sata_dwc_exec_command(ap, tf);
+}
+
+
+/*
+ * Process command queue issue
+ *
+ * Issue path for both NCQ and non-NCQ commands.  Bails out (reporting a
+ * hotplug event to EH) if SStatus reads 0 — i.e. the device/PMP vanished
+ * while commands were in flight.  Selects the PMP port when a port
+ * multiplier is attached, then issues NCQ commands directly and hands
+ * everything else to ata_sff_qc_issue().
+ */
+static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	int ret = 0;
+	struct ata_eh_info *ehi;
+	u32 scontrol, sstatus;
+	/* NOTE(review): scontrol is read here but never used below —
+	 * looks like leftover debug code; confirm before removing. */
+	scontrol = sata_dwc_core_scr_read(ap, SCR_CONTROL);
+
+	ehi = &ap->link.eh_info;
+	/*
+	 * Fix the problem when PMP card is unplugged from the SATA port.
+	 * QC is still issued but no device present. Ignore the current QC.
+	 * and pass error to error handler
+	 */
+	sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS);
+	if ( sstatus == 0x0) {
+		ata_port_printk(ap, KERN_INFO, "Detect connection lost while commands are executing --> ignore current command\n");
+		ata_ehi_hotplugged(ehi);
+		ap->link.eh_context.i.action |= ATA_EH_RESET;
+		return ret;
+	}
+
+	// Set PMP field in the SCONTROL register
+	if ( sata_pmp_attached(ap) )
+		sata_dwc_pmp_select(ap, qc->dev->link->pmp);
+
+#ifdef DEBUG_NCQ
+	if (qc->tag > 0 || ap->link.sactive > 1) {
+		dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s"
+			" ap active_tag=0x%08x ap sactive=0x%08x\n",
+			__func__, ap->print_id, qc->tf.command,
+			ata_cmd_2_txt(&qc->tf), qc->tag,
+			prot_2_txt(qc->tf.protocol), ap->link.active_tag,
+			ap->link.sactive);
+	}
+#endif
+
+	// Process NCQ
+	if (ata_is_ncq(qc->tf.protocol)) {
+		dwc_link_dbg(qc->dev->link, "%s --> process NCQ , ap->link.active_tag=0x%08x, active_tag=0%08x\n", __func__, ap->link.active_tag, qc->tag);
+		ap->link.active_tag = qc->tag;
+		ap->ops->sff_tf_load(ap, &qc->tf);
+		sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
+					SATA_DWC_CMD_ISSUED_PENDING);
+	} else {
+		dwc_link_dbg(qc->dev->link, "%s --> non NCQ process, ap->link.active_tag=%d, active_tag=0%08x\n", __func__, ap->link.active_tag, qc->tag);
+		// Sync ata_port with qc->tag
+		ap->link.active_tag = qc->tag;
+		ret = ata_sff_qc_issue(qc);
+	}
+
+	return ret;
+}
+
+/* NOTE(review): dead code — compiled out since the 2.6.19 port because
+ * ata_eng_timeout() no longer exists; kept for reference only. */
+#if 0
+/*
+ * Function : sata_dwc_eng_timeout
+ * arguments : ata_port *ap
+ * Return value : None
+ * error handler for DMA time out
+ * ata_eng_timeout(ap) -- this does bmdma stuff which can not be done by this
+ * driver. SEE ALSO ata_qc_timeout(ap)
+ */
+static void sata_dwc_eng_timeout(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+	struct ata_queued_cmd *qc;
+	u8 tag;
+	uint mask = 0x0;
+	unsigned long flags;
+	u32 serror, intpr, dma_ch;
+
+	tag = ap->link.active_tag;
+	dma_ch = hsdevp->dma_chan[tag];
+	qc = ata_qc_from_tag(ap, tag);
+
+	dev_err(ap->dev, "%s: id=%d active_tag=%d qc=%p dma_chan=%d\n",
+		__func__, ap->print_id, tag, qc, dma_ch);
+
+	intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+	serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+
+	dev_err(ap->dev, "intpr=0x%08x serror=0x%08x\n", intpr, serror);
+
+	/* If there are no error bits set, can we just pass this on to eh? */
+	if (!(serror & SATA_DWC_SERR_ERR_BITS) &&
+	    !(intpr & SATA_DWC_INTPR_ERR)) {
+
+		spin_lock_irqsave(&ap->host->lock, flags);
+		if (dma_dwc_channel_enabled(dma_ch))
+			dma_dwc_terminate_dma(ap, dma_ch);
+
+		hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+
+		/* clear active bit */
+		mask = (~(qcmd_tag_to_mask(tag)));
+		hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask;
+		hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask;
+
+		spin_unlock_irqrestore(&ap->host->lock, flags);
+	} else {
+		/* This is wrong, what really needs to be done is a reset. */
+
+		spin_lock_irqsave(ap->lock, flags);
+
+		if (ata_is_dma(qc->tf.protocol)) {
+			/* disable DMAC */
+			dma_dwc_terminate_dma(ap, dma_ch);
+		}
+
+		spin_unlock_irqrestore(ap->lock, flags);
+	}
+	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+	if (qc->flags & ATA_QCFLAG_ACTIVE) {
+		qc->err_mask |= AC_ERR_TIMEOUT;
+		/*
+		 * test-only: The original code (AMCC: 2.6.19) called
+		 * ata_eng_timeout(ap) here. This function is not available
+		 * anymore. So what to do now?
+		 */
+	}
+}
+#endif
+/*
+ * Function : sata_dwc_qc_prep
+ * arguments : ata_queued_cmd *qc
+ * Return value : None
+ * qc_prep for a particular queued command.  For NCQ commands the tag's
+ * bit is also set in SActive before the per-tag prep; non-NCQ commands
+ * always prep on tag 0.
+ */
+static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
+{
+	u32 sactive;
+	u8 tag = qc->tag;
+
+	/* Nothing to prepare for non-DMA protocols */
+	if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+		return;
+
+#ifdef DEBUG_NCQ
+	if (qc->tag > 0) {
+		dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
+			__func__, tag, qc->ap->link.active_tag);
+	}
+#endif
+
+	if (qc->tf.protocol == ATA_PROT_NCQ) {
+		sactive = sata_dwc_core_scr_read(qc->ap, SCR_ACTIVE);
+		sactive |= (0x00000001 << tag);
+		sata_dwc_core_scr_write(qc->ap, SCR_ACTIVE, sactive);
+		dwc_dev_vdbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
+			"sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive,
+			sactive);
+	} else {
+		tag = 0;
+	}
+
+	sata_dwc_qc_prep_by_tag(qc, tag);
+}
+
+
+
+/* Freeze the port after a failed internal command so EH takes over. */
+static void sata_dwc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+	if (qc->flags & ATA_QCFLAG_FAILED)
+		ata_eh_freeze_port(qc->ap);
+}
+
+/*
+ * sata_dwc_error_handler - libata error-handler entry point.
+ * Dumps SError and the interrupt/error mask registers for debugging,
+ * then delegates the actual recovery to sata_pmp_error_handler().
+ */
+static void sata_dwc_error_handler(struct ata_port *ap)
+{
+	u32 serror;
+	u32 intmr, errmr;
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+	serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+	intmr = in_le32(&hsdev->sata_dwc_regs->intmr);
+	errmr = in_le32(&hsdev->sata_dwc_regs->errmr);
+
+	//sata_dwc_dma_xfer_complete(ap,1);
+	dwc_port_dbg(ap, "%s: SERROR=0x%08x, INTMR=0x%08x, ERRMR=0x%08x\n", __func__, serror, intmr, errmr);
+
+	dwc_port_vdbg(ap, "%s - sata_dwc_sactive_queued=0x%08x, sata_dwc_sactive_issued=0x%08x\n",__func__, hsdevp->sata_dwc_sactive_queued, hsdevp->sata_dwc_sactive_issued);
+	dwc_port_vdbg(ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+	dwc_port_vdbg(ap, "qc_active=0x%08x, qc_allocated=0x%08x, active_tag=%d\n", ap->qc_active, ap->qc_allocated, ap->link.active_tag);
+
+	sata_pmp_error_handler(ap);
+}
+
+/*
+ * sata_dwc_check_status - Get value of the Status Register
+ * @ap: Port to check
+ *
+ * Output content of the status register (CDR7).  Reading it also clears
+ * the device's pending INTRQ, per the ATA taskfile convention.
+ */
+u8 sata_dwc_check_status(struct ata_port *ap)
+{
+	return ioread8(ap->ioaddr.status_addr);
+}
+
+
+/*
+ * Freeze the port by clear interrupt
+ * @ap: Port to freeze
+ *
+ * Clears pending controller interrupts and SError, then masks all SATA
+ * core interrupts so nothing fires while the port is frozen.
+ */
+void sata_dwc_freeze(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+	dwc_port_dbg(ap, "call %s ...\n",__func__);
+	// turn IRQ off
+	clear_intpr(hsdev);
+	clear_serror(ap);
+	out_le32(&hsdev->sata_dwc_regs->intmr, 0x0);
+}
+
+/*
+ * Thaw the port by turning IRQ on
+ *
+ * Clears any interrupt latched while frozen, then re-enables the SATA
+ * core interrupt masks.
+ */
+void sata_dwc_thaw(struct ata_port *ap)
+{
+	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+	dwc_port_dbg(ap, "call %s ...\n",__func__);
+	// Clear IRQ
+	clear_intpr(hsdev);
+	// Turn IRQ back on
+	sata_dwc_enable_interrupts(hsdev);
+}
+
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_dwc_sht = {
+	ATA_NCQ_SHT(DRV_NAME),
+	/*
+	 * test-only: Currently this driver doesn't handle NCQ
+	 * correctly. We enable NCQ but set the queue depth to a
+	 * max of 1. This will get fixed in in a future release.
+	 */
+//	.sg_tablesize		= LIBATA_MAX_PRD,
+	.can_queue		= ATA_DEF_QUEUE,	/* ATA_MAX_QUEUE */
+	.dma_boundary		= ATA_DMA_BOUNDARY,
+};
+
+
+/*
+ * libata port operations: PMP-capable base ops with DWC-specific reset,
+ * queueing, SCR access and DMA hooks.  Several SFF helpers are reused
+ * directly; the commented-out entries are the driver's own variants that
+ * were disabled in favour of the generic SFF versions.
+ */
+static struct ata_port_operations sata_dwc_ops = {
+	.inherits		= &sata_pmp_port_ops,
+	.dev_config		= sata_dwc_dev_config,
+
+	.error_handler		= sata_dwc_error_handler,
+	.softreset		= sata_dwc_softreset,
+	.hardreset		= sata_dwc_hardreset,
+	.pmp_softreset		= sata_dwc_softreset,
+	.pmp_hardreset		= sata_dwc_pmp_hardreset,
+
+	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
+	.qc_prep		= sata_dwc_qc_prep,
+	.qc_issue		= sata_dwc_qc_issue,
+	.qc_fill_rtf		= ata_sff_qc_fill_rtf,
+
+	.scr_read		= sata_dwc_scr_read,
+	.scr_write		= sata_dwc_scr_write,
+
+	.port_start		= sata_dwc_port_start,
+	.port_stop		= sata_dwc_port_stop,
+
+	.bmdma_setup		= sata_dwc_bmdma_setup,
+	.bmdma_start		= sata_dwc_bmdma_start,
+	// Reuse some SFF functions
+	.sff_check_status	= sata_dwc_check_status,
+	.sff_tf_read		= ata_sff_tf_read,
+	.sff_data_xfer		= ata_sff_data_xfer,
+	.sff_tf_load		= ata_sff_tf_load,
+	.sff_dev_select		= sata_dwc_dev_select,
+	.sff_exec_command	= sata_dwc_exec_command,
+
+	.sff_irq_on		= sata_dwc_irq_on,
+/*	.sff_irq_clear		= sata_dwc_irq_clear,
+	.freeze			= sata_dwc_freeze,
+	.thaw			= sata_dwc_thaw,
+	.sff_irq_on		= ata_sff_irq_on,
+ */
+	.sff_irq_clear		= ata_sff_irq_clear,
+	.freeze			= ata_sff_freeze,
+	.thaw			= ata_sff_thaw,
+	.pmp_attach		= sata_dwc_pmp_attach,
+	.pmp_detach		= sata_dwc_pmp_detach,
+	.post_internal_cmd	= sata_dwc_post_internal_cmd,
+
+	/* test-only: really needed? */
+	//.eng_timeout		= sata_dwc_eng_timeout,
+};
+
+/* Single port-info entry: SATA with MMIO taskfile access, NCQ, port
+ * multiplier and asynchronous notification enabled; PIO 0-4, UDMA6. */
+static const struct ata_port_info sata_dwc_port_info[] = {
+	{
+		/*
+		 * test-only: Currently this driver doesn't handle NCQ
+		 * correctly. So we disable NCQ here for now. To enable
+		 * it ATA_FLAG_NCQ needs to be added to the flags below.
+		 */
+		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
+				  ATA_FLAG_MMIO | ATA_FLAG_NCQ |
+				  ATA_FLAG_PMP | ATA_FLAG_AN,
+		.pio_mask	= 0x1f,	/* pio 0-4 */
+		.udma_mask	= ATA_UDMA6,
+		.port_ops	= &sata_dwc_ops,
+	},
+};
+
+/*
+ * sata_dwc_probe - of_platform probe: map registers, set up DMA and
+ * register the host with libata.
+ *
+ * Error handling is split into two labels so that dma_dwc_exit() is only
+ * called once DMA has actually been initialized; the original code called
+ * it unconditionally, including when hsdev was still NULL.
+ */
+static int sata_dwc_probe(struct of_device *ofdev,
+			const struct of_device_id *match)
+{
+	struct sata_dwc_device *hsdev;
+	u32 idr, versionr;
+	char *ver = (char *)&versionr;
+	u8 *base = NULL;
+	int err = 0;
+	int irq;
+	struct ata_host *host;
+	struct ata_port_info pi = sata_dwc_port_info[0];
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+
+	const unsigned int *dma_channel;
+	/*
+	 * Check if device is enabled
+	 */
+	if (!of_device_is_available(ofdev->node)) {
+		printk(KERN_INFO "%s: Port disabled via device-tree\n",
+			ofdev->node->full_name);
+		return 0;
+	}
+
+	/* Allocate DWC SATA device (zeroed) */
+	hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+	if (hsdev == NULL) {
+		dev_err(&ofdev->dev, "kmalloc failed for hsdev\n");
+		err = -ENOMEM;
+		goto error_out;
+	}
+
+
+	// Identify SATA DMA channel used for the current SATA device
+	dma_channel = of_get_property(ofdev->node, "dma-channel", NULL);
+	if ( dma_channel ) {
+		dev_notice(&ofdev->dev, "Getting DMA channel %d\n", *dma_channel);
+		hsdev->dma_channel = *dma_channel;
+	} else
+		hsdev->dma_channel = 0;
+
+	/* Ioremap SATA registers */
+	base = of_iomap(ofdev->node, 0);
+	if (!base) {
+		dev_err(&ofdev->dev, "ioremap failed for SATA register address\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+	hsdev->reg_base = base;
+	dwc_dev_vdbg(&ofdev->dev, "ioremap done for SATA register address\n");
+
+	/* Synopsys DWC SATA specific Registers */
+	hsdev->sata_dwc_regs = (void *__iomem)(base + SATA_DWC_REG_OFFSET);
+
+	/* Allocate and fill host */
+	host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
+	if (!host) {
+		dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
+		err = -ENOMEM;
+		goto error_out;
+	}
+
+	host->private_data = hsdev;
+
+	/* Setup port */
+	host->ports[0]->ioaddr.cmd_addr = base;
+	host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
+	hsdev->scr_base = (u8 *)(base + SATA_DWC_SCR_OFFSET);
+	sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
+
+	/* Read the ID and Version Registers */
+	idr = in_le32(&hsdev->sata_dwc_regs->idr);
+	versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
+	dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
+		idr, ver[0], ver[1], ver[2]);
+
+	/* Get SATA DMA interrupt number */
+	irq = irq_of_parse_and_map(ofdev->node, 1);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA DMA irq\n");
+		err = -ENODEV;
+		goto error_out;
+	}
+
+	/* Get physical SATA DMA register base address */
+	if (!sata_dma_regs) {
+		sata_dma_regs = of_iomap(ofdev->node, 1);
+		if (!sata_dma_regs) {
+			dev_err(&ofdev->dev, "ioremap failed for AHBDMA register address\n");
+			err = -ENODEV;
+			goto error_out;
+		}
+	}
+	/* Save dev for later use in dev_xxx() routines */
+	hsdev->dev = &ofdev->dev;
+
+	/* Init global dev list */
+	dwc_dev_list[hsdev->dma_channel] = hsdev;
+
+	/* Initialize AHB DMAC */
+	hsdev->irq_dma = irq;
+	dma_dwc_init(hsdev);
+	dma_register_interrupt(hsdev);
+
+
+	/* Enable SATA Interrupts */
+	sata_dwc_enable_interrupts(hsdev);
+
+	/* Get SATA interrupt number */
+	irq = irq_of_parse_and_map(ofdev->node, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "no SATA irq\n");
+		err = -ENODEV;
+		goto error_out_dma;
+	}
+
+	/*
+	 * Now, register with libATA core, this will also initiate the
+	 * device discovery process, invoking our port_start() handler &
+	 * error_handler() to execute a dummy Softreset EH session
+	 */
+	ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+
+	dev_set_drvdata(&ofdev->dev, host);
+
+	/* Everything is fine */
+	return 0;
+
+error_out_dma:
+	/* Only tear down DMA once dma_dwc_init() has actually run */
+	dma_dwc_exit(hsdev);
+
+error_out:
+	if (base)
+		iounmap(base);
+
+	kfree(hsdev);	/* kfree(NULL) is a no-op */
+
+	return err;
+}
+
+/*
+ * sata_dwc_remove - of_platform remove: detach from libata and release
+ * driver-owned resources.
+ *
+ * Note: the ata_host comes from ata_host_alloc_pinfo(), which allocates
+ * it as a devres-managed object freed by the driver core after detach —
+ * the original kfree(host) here was a double free and has been removed.
+ */
+static int sata_dwc_remove(struct of_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct sata_dwc_device *hsdev = host->private_data;
+
+	ata_host_detach(host);
+
+	dev_set_drvdata(dev, NULL);
+
+	/* Free SATA DMA resources */
+	dma_dwc_exit(hsdev);
+
+	iounmap(hsdev->reg_base);
+	kfree(hsdev);
+
+	dwc_dev_vdbg(&ofdev->dev, "done\n");
+
+	return 0;
+}
+
+/* Device-tree compatibles this driver binds to (460EX and APM82181). */
+static const struct of_device_id sata_dwc_match[] = {
+	{ .compatible = "amcc,sata-460ex", },
+	{ .compatible = "amcc,sata-apm82181", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sata_dwc_match);
+
+/* of_platform driver glue */
+static struct of_platform_driver sata_dwc_driver = {
+	.name = "sata-dwc",
+	.match_table = sata_dwc_match,
+	.probe = sata_dwc_probe,
+	.remove = sata_dwc_remove,
+};
+
+/* Module init: register the of_platform driver. */
+static int __init sata_dwc_init(void)
+{
+	return of_register_platform_driver(&sata_dwc_driver);
+}
+
+/* Module exit: unregister the of_platform driver. */
+static void __exit sata_dwc_exit(void)
+{
+	of_unregister_platform_driver(&sata_dwc_driver);
+}
+
+module_init(sata_dwc_init);
+module_exit(sata_dwc_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
+MODULE_DESCRIPTION("DesignWare Cores SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 87060266ef9..9cfbb282e0c 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -186,3 +186,15 @@ config HW_RANDOM_MXC_RNGA
module will be called mxc-rnga.
If unsure, say Y.
+config HW_RANDOM_TRNG4xx
+ tristate "AMCC 4xx TRNG True Random Number Generator support"
+ depends on HW_RANDOM && PPC
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on AMCC 4XX.
+
+ To compile this driver as a module, choose M here: the
+ module will be called trng4xx.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5eeb1303f0d..252902d42c8 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
+obj-$(CONFIG_HW_RANDOM_TRNG4xx) += trng4xx.o
diff --git a/drivers/char/hw_random/trng4xx.c b/drivers/char/hw_random/trng4xx.c
new file mode 100644
index 00000000000..ece45e74aa1
--- /dev/null
+++ b/drivers/char/hw_random/trng4xx.c
@@ -0,0 +1,336 @@
+/**
+ *
+ * Copyright (c) 2008 Loc Ho <spulijala@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Detail Description:
+ * This file AMCC TRNG offload Linux device driver
+ *
+ * @file trng4xx.c
+ *
+ *
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <crypto/trng4xx.h>
+
+#define TRNG4XX_VER_STR "0.1"
+#define TRNG4XX_HDR "TRNG4XX: "
+
+/* #define TRNG4XX_DEBUG */
+
+/*
+ * Debug logging helper: expands to nothing unless TRNG4XX_DEBUG is
+ * defined.  The do { } while (0) wrapper -- with no trailing semicolon
+ * -- makes the macro safe as a single statement in if/else bodies.
+ */
+#if !defined(TRNG4XX_DEBUG)
+# define TRNG4XX_LOG(fmt, ...)
+#else
+# define TRNG4XX_LOG(fmt, ...) \
+ do { \
+ printk(KERN_INFO TRNG4XX_HDR fmt "\n", ##__VA_ARGS__); \
+ } while (0)
+#endif
+
+struct trng4xx_dev {
+ struct resource res;
+ u32 irq;
+ volatile char __iomem *csr;
+ struct semaphore access_prot;
+ u32 datum;
+};
+
+struct hal_config {
+ struct of_device *ofdev;
+};
+
+static struct trng4xx_dev trng4xx_dev;
+
+static irqreturn_t trng4xx_irq_handler(int irq, void *id);
+static void trng4xx_chk_overflow(void);
+
+/*
+ * Map the TRNG CSR block and look up its interrupt from the device tree
+ * node in @cfg.  On success trng4xx_dev.csr and trng4xx_dev.irq are
+ * populated; on failure the CSR mapping (if any) is released again.
+ *
+ * Returns 0, -ENODEV (no "reg" resource), -ENOMEM (ioremap failed) or
+ * -EINVAL (no usable interrupt).
+ */
+int trng4xx_config_set(struct hal_config *cfg)
+{
+ struct device_node *rng_np = cfg->ofdev->node;
+ int rc = 0;
+
+ rc = of_address_to_resource(rng_np, 0, &trng4xx_dev.res);
+ if (rc)
+ return -ENODEV;
+
+ trng4xx_dev.csr = ioremap(trng4xx_dev.res.start,
+ trng4xx_dev.res.end -
+ trng4xx_dev.res.start + 1);
+ if (trng4xx_dev.csr == NULL) {
+ printk(KERN_ERR "unable to ioremap 0x%02X_%08X size %d\n",
+ (u32) (trng4xx_dev.res.start >> 32),
+ (u32) trng4xx_dev.res.start,
+ (u32) (trng4xx_dev.res.end -
+ trng4xx_dev.res.start + 1));
+ return -ENOMEM;
+ }
+
+ TRNG4XX_LOG("TRNG1 0x%02X_%08X size %d\n",
+ (u32) (trng4xx_dev.res.start >> 32),
+ (u32) trng4xx_dev.res.start,
+ (u32) (trng4xx_dev.res.end -
+ trng4xx_dev.res.start + 1));
+
+ /* NO_IRQ means the device tree carries no usable interrupt */
+ trng4xx_dev.irq = of_irq_to_resource(rng_np, 0, NULL);
+
+ if (trng4xx_dev.irq == NO_IRQ) {
+ /* Un-map CSR */
+ iounmap(trng4xx_dev.csr);
+ trng4xx_dev.csr = NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Release the CSR mapping set up by trng4xx_config_set().  The cached
+ * pointer is cleared so that a second call (e.g. probe error path
+ * followed by remove) cannot iounmap() the same mapping twice.
+ */
+int trng4xx_pka_config_clear(void)
+{
+ if (trng4xx_dev.csr) {
+ iounmap(trng4xx_dev.csr);
+ trng4xx_dev.csr = NULL;
+ }
+ return 0;
+}
+
+/* Read the 32-bit big-endian TRNG register at byte offset @reg_addr. */
+inline int trng4xx_hw_read32(u32 reg_addr, u32 *data_val)
+{
+ *data_val = in_be32((unsigned __iomem *)
+ (trng4xx_dev.csr + reg_addr));
+ return 0;
+}
+
+/* Write the 32-bit big-endian TRNG register at byte offset @reg_addr. */
+inline int trng4xx_hw_write32(u32 reg_addr, u32 data_val)
+{
+ out_be32((unsigned __iomem *) (trng4xx_dev.csr + reg_addr),
+ data_val);
+ return 0;
+}
+
+/* Claim the TRNG alarm interrupt; returns 0 or the request_irq() error. */
+int trng4xx_hw_init(void)
+{
+ int rc;
+
+ rc = request_irq(trng4xx_dev.irq, trng4xx_irq_handler,
+ 0, "TRNG", NULL);
+ if (rc != 0) {
+ printk(KERN_ERR "failed to register interrupt IRQ %d\n",
+ trng4xx_dev.irq);
+ return rc;
+ }
+ return 0;
+}
+
+/* Release the TRNG alarm interrupt claimed by trng4xx_hw_init(). */
+int trng4xx_hw_deinit(void)
+{
+ free_irq(trng4xx_dev.irq, NULL);
+ return 0;
+}
+
+/* Alarm interrupt: the alarm counter overflowed -- re-tune the rings. */
+static irqreturn_t trng4xx_irq_handler(int irq, void *id)
+{
+ /* TRNG Alarm Counter overflow */
+ trng4xx_chk_overflow();
+ return IRQ_HANDLED;
+}
+
+/**
+ * TRNG Functions
+ *
+ */
+/**
+ * trng4xx_chk_overflow - service a TRNG alarm-counter overflow
+ *
+ * Reads the alarm counter; once more than 128 alarm events have
+ * accumulated, bumps the ring-1 oscillator delay select (ring-2 is kept
+ * as its 3-bit complement) and clears the alarm counter.  Any register
+ * access failure simply aborts the re-tune.
+ */
+static void trng4xx_chk_overflow(void)
+{
+ /* TRNG Alarm Counter overflow */
+ int rc;
+ u32 val;
+ struct trng4xx_cfg {
+ u32 ring1_delay_sel :3;
+ u32 ring2_delay_sel :3;
+ u32 reset_cnt :6;
+ } __attribute__((packed));
+
+ /* The alarm counter must be *read* here -- writing an
+ * uninitialized value would corrupt the register. */
+ rc = trng4xx_hw_read32(TRNG4XX_ALARMCNT_ADDR, &val);
+ if (rc != 0)
+ return;
+
+ if (val > 128) {
+ struct trng4xx_cfg *trng4xx_cfg;
+
+ /* Alarm count is half, reset it */
+ rc = trng4xx_hw_read32(TRNG4XX_CFG_ADDR, &val);
+ if (rc != 0)
+ return;
+ trng4xx_cfg = (struct trng4xx_cfg *) &val;
+ ++trng4xx_cfg->ring1_delay_sel;
+ trng4xx_cfg->ring2_delay_sel =
+ (~trng4xx_cfg->ring1_delay_sel) & 0x07;
+
+ rc = trng4xx_hw_write32(TRNG4XX_CFG_ADDR, val);
+ if (rc != 0)
+ return;
+ rc = trng4xx_hw_write32(TRNG4XX_ALARMCNT_ADDR, 0x00000000);
+ if (rc != 0)
+ return;
+ }
+}
+
+#define MAX_TRY 3
+/**
+ * trng4xx_random - fetch one 32-bit random word from the TRNG
+ * @rand_val: output word
+ *
+ * Re-reads the status register up to MAX_TRY extra times while the
+ * engine is busy; returns -EINPROGRESS if it never becomes ready.
+ * Serialized against the hwrng callbacks by trng4xx_dev.access_prot.
+ */
+int trng4xx_random(u32 *rand_val)
+{
+ u32 val = 0;
+ int rc;
+ u16 try_cnt = 0;
+
+ down(&trng4xx_dev.access_prot);
+ do {
+ rc = trng4xx_hw_read32(TRNG4XX_STATUS_ADDR, &val);
+ if (rc != 0)
+ goto err;
+ } while ((val & TRNG4XX_STATUS_BUSY) && ++try_cnt <= MAX_TRY);
+ if (val & TRNG4XX_STATUS_BUSY) {
+ rc = -EINPROGRESS;
+ goto err;
+ }
+ rc = trng4xx_hw_read32(TRNG4XX_OUTPUT_ADDR, rand_val);
+
+err:
+ up(&trng4xx_dev.access_prot);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(trng4xx_random);
+EXPORT_SYMBOL_GPL(trng4xx_random);
+
+/*
+ * hwrng .data_present callback: poll the engine (up to 20 x 10us when
+ * @wait is set) until it goes idle, then latch one sample into
+ * dev->datum for the subsequent ->data_read() call.
+ * Returns 1 when a sample is available, 0 otherwise.
+ */
+static int trng4xx_data_present(struct hwrng *rng, int wait)
+{
+ struct trng4xx_dev *dev = (struct trng4xx_dev *) rng->priv;
+ int i;
+ u32 val;
+
+ down(&trng4xx_dev.access_prot);
+ for (i = 0; i < 20; i++) {
+ udelay(10);
+ trng4xx_hw_read32(TRNG4XX_STATUS_ADDR, &val);
+ if (!(val & TRNG4XX_STATUS_BUSY)) {
+ trng4xx_hw_read32(TRNG4XX_OUTPUT_ADDR, &dev->datum);
+ break;
+ }
+ if (!wait)
+ break;
+ }
+ up(&trng4xx_dev.access_prot);
+ return (val & TRNG4XX_STATUS_BUSY) ? 0 : 1;
+}
+
+/*
+ * hwrng .data_read callback: hand back the 32-bit sample captured by
+ * the preceding ->data_present() call.  Always returns 4 (bytes).
+ */
+static int trng4xx_data_read(struct hwrng *rng, u32 *data)
+{
+ struct trng4xx_dev *dev = (struct trng4xx_dev *) rng->priv;
+
+ *data = dev->datum;
+ return 4;
+}
+
+/* hwrng .init callback: claim the TRNG alarm interrupt. */
+static int trng4xx_init(struct hwrng *rng)
+{
+ return trng4xx_hw_init();
+}
+
+/* hwrng .cleanup callback: release the TRNG alarm interrupt. */
+static void trng4xx_cleanup(struct hwrng *rng)
+{
+ trng4xx_hw_deinit();
+}
+
+static struct hwrng trng4xx_func = {
+ .name = "ppc4xx-trng",
+ .init = trng4xx_init,
+ .cleanup = trng4xx_cleanup,
+ .data_present = trng4xx_data_present,
+ .data_read = trng4xx_data_read,
+ .priv = (unsigned long) &trng4xx_dev,
+};
+
+/**
+ * Setup Driver with platform registration
+ *
+ */
+/*
+ * OF probe: map the CSR block and IRQ from the device tree, then
+ * register the device with the hwrng core.  The CSR mapping is torn
+ * down again if registration fails.
+ */
+static int __devinit trng4xx_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct hal_config hw_cfg;
+ int rc;
+
+ hw_cfg.ofdev = ofdev;
+ rc = trng4xx_config_set(&hw_cfg);
+ if (rc != 0)
+ return rc;
+
+ TRNG4XX_LOG("AMCC 4xx TRNG v%s @0x%02X_%08X size %d IRQ %d\n",
+ TRNG4XX_VER_STR,
+ (u32) (trng4xx_dev.res.start >> 32),
+ (u32) trng4xx_dev.res.start,
+ (u32) (trng4xx_dev.res.end - trng4xx_dev.res.start + 1),
+ trng4xx_dev.irq);
+
+ /* binary semaphore guarding CSR access from the hwrng callbacks */
+ init_MUTEX(&trng4xx_dev.access_prot);
+
+ rc = hwrng_register(&trng4xx_func);
+ if (rc) {
+ printk(KERN_ERR
+ "AMCC 4xx TRNG registering failed error %d\n", rc);
+ goto err;
+ }
+
+ return rc;
+
+err:
+ trng4xx_pka_config_clear();
+ return rc;
+}
+
+/* OF remove: unregister from the hwrng core and unmap the CSR block. */
+static int __devexit trng4xx_remove(struct of_device *dev)
+{
+ hwrng_unregister(&trng4xx_func);
+ trng4xx_pka_config_clear();
+ return 0;
+}
+
+static struct of_device_id trng4xx_match[] = {
+ { .compatible = "ppc4xx-trng", },
+ { .compatible = "amcc,ppc4xx-trng", },
+ { },
+};
+
+static struct of_platform_driver trng4xx_driver = {
+ .name = "ppc4xx-trng",
+ .match_table = trng4xx_match,
+ .probe = trng4xx_probe,
+ .remove = trng4xx_remove,
+};
+
+/* Module entry: register the OF platform driver. */
+static int __init mod_init(void)
+{
+ return of_register_platform_driver(&trng4xx_driver);
+}
+
+/* Module exit: unregister the OF platform driver. */
+static void __exit mod_exit(void)
+{
+ of_unregister_platform_driver(&trng4xx_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("AMCC 4xx True Random Number Generator");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index b08403d7d1c..d0921a72d59 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -213,6 +213,15 @@ config CRYPTO_DEV_IXP4XX
help
Driver for the IXP4xx NPE crypto engine.
+config CRYPTO_DEV_PKA4xx
+ tristate "Support for the AMCC 4xx PKA"
+ depends on PPC
+ help
+ Select this option if you want to have support for the AMCC 4xx PKA.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pka4xx
+
config CRYPTO_DEV_PPC4XX
tristate "Driver AMCC PPC4xx crypto accelerator"
depends on PPC && 4xx
@@ -222,4 +231,47 @@ config CRYPTO_DEV_PPC4XX
help
This option allows you to have support for AMCC crypto acceleration.
+config SEC_HW_POLL
+ bool "Turn on HW Polling instead of Doing Force Descriptor Write"
+ depends on CRYPTO_DEV_PPC4XX
+ default n
+
+config SEC_HW_RING_POLL_FREQ
+ int "Basic HW polling frequency"
+ depends on CRYPTO_DEV_PPC4XX && SEC_HW_POLL
+ default "2"
+ help
+ HW polling frequency at which the Packet Engine reads a segment
+ of the external PDR.
+
+config SEC_HW_POLL_RETRY_FREQ
+ int "HW Polling Retry interval"
+ depends on CRYPTO_DEV_PPC4XX && SEC_HW_POLL
+ default "1"
+ help
+ HW polling retry interval: how long the Packet Engine waits
+ between re-reads of an invalid descriptor entry.
+
+config SEC_PD_OCM
+ bool "Security Packet Descriptors on OCM"
+ depends on CRYPTO_DEV_PPC4XX && 460EX
+ default n
+ help
+ This enables the Security Packet Descriptors to be allocated
+ on the OCM.
+
+config SEC_SA_OCM
+ bool "Security SA on OCM"
+ depends on CRYPTO_DEV_PPC4XX && 460EX
+ default n
+ help
+ This option enables the SA and State Record to be allocated
+ on the OCM.
+
+config CRYPTO_DEV_ASYNC_SAMPLE
+ tristate "Async crypto and hash sample driver using software algorithms"
+ select CRYPTO_HASH
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ help
+ This is a sample asynchronous crypto and hash device driver over synchronous
+ software crypto and hash algorithms.
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 6ffcb3f7f94..b9d434e28e4 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -5,4 +5,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_MV_CESA) += mv_cesa.o
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
+pka4xx-objs := pka_4xx_access.o pka_4xx.o
+obj-$(CONFIG_CRYPTO_DEV_PKA4xx) += pka4xx.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
index a33243c17b0..9d81d8f7571 100644
--- a/drivers/crypto/amcc/crypto4xx_alg.c
+++ b/drivers/crypto/amcc/crypto4xx_alg.c
@@ -20,18 +20,39 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
+#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/hash.h>
#include <crypto/internal/hash.h>
+#include <crypto/aead.h>
#include <linux/dma-mapping.h>
+#include <linux/pci.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/authenc.h>
+#include <net/ip.h>
+
#include "crypto4xx_reg_def.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
+/*
+ * Debug helpers for the ESP/AEAD paths: forward to print_hex_dump() /
+ * printk() when DEBUG_CRYPTESP is defined, otherwise expand to nothing.
+ * Note the disabled variants must take a plain variadic list --
+ * "(KERN_INFO arg...)" is not a valid macro parameter list and would
+ * break the build as soon as DEBUG_CRYPTESP were turned off.
+ */
+#define DEBUG_CRYPTESP 1
+#ifdef DEBUG_CRYPTESP
+# define ESP_PHD print_hex_dump
+# define ESP_PRINTK printk
+#else
+# define ESP_PHD(arg...)
+# define ESP_PRINTK(arg...)
+#endif
+
void set_dynamic_sa_command_0(struct dynamic_sa_ctl *sa, u32 save_h,
u32 save_iv, u32 ld_h, u32 ld_iv, u32 hdr_proc,
u32 h, u32 c, u32 pad_type, u32 op_grp, u32 op,
@@ -56,11 +77,11 @@ void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
u32 cfb, u32 esn, u32 sn_mask, u32 mute,
u32 cp_pad, u32 cp_pay, u32 cp_hdr)
{
- sa->sa_command_1.w = 0;
sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
- sa->sa_command_1.bf.crypto_mode9_8 = cm & 3;
+ sa->sa_command_1.bf.crypto_mode9_8 = (cm & 3);
sa->sa_command_1.bf.feedback_mode = cfb,
sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.hmac_muting = hmac_mc;
sa->sa_command_1.bf.extended_seq_num = esn;
sa->sa_command_1.bf.seq_num_mask = sn_mask;
sa->sa_command_1.bf.mutable_bit_proc = mute;
@@ -69,6 +90,423 @@ void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm, u32 hmac_mc,
sa->sa_command_1.bf.copy_hdr = cp_hdr;
}
+/** Table lookup for SA Hash Digest length and
+ * Hash Contents (based on Hash type)
+ */
+unsigned int crypto4xx_sa_hash_tbl[3][HASH_ALG_MAX_CNT] = {
+ /* Hash Contents */
+ { SA_HASH128_CONTENTS, SA_HASH160_CONTENTS, SA_HASH256_CONTENTS,
+ SA_HASH256_CONTENTS, SA_HASH512_CONTENTS, SA_HASH512_CONTENTS },
+ /* Digest len */
+ {4 * 4, 5 * 4, 7 * 4, 8 * 4, 12 * 4, 16 * 4},
+ /* SA Length */
+ { SA_HASH128_LEN, SA_HASH160_LEN, SA_HASH256_LEN, SA_HASH256_LEN,
+ SA_HASH512_LEN, SA_HASH512_LEN }
+};
+
+/** Table lookup for Hash Algorithms based on Hash type, used in
+ * crypto4xx_pre_compute_hmac()
+ */
+char *crypto4xx_hash_alg_map_tbl[HASH_ALG_MAX_CNT] = CRYPTO4XX_MAC_ALGS;
+
+/*
+ * Populate @sg (at most @sg_num entries) to describe @bufsize bytes at
+ * @data, splitting entries at page boundaries since the buffer may
+ * straddle pages.
+ * NOTE(review): if the buffer spans more pages than @sg_num entries the
+ * tail is silently dropped -- callers must size @sg accordingly.
+ */
+static void crypto4xx_sg_setbuf(unsigned char *data, size_t bufsize,
+ struct scatterlist *sg, int sg_num)
+{
+ int remainder_of_page;
+ int i = 0;
+
+ sg_init_table(sg, sg_num);
+ while (bufsize > 0 && i < sg_num) {
+ sg_set_buf(&sg[i], data, bufsize);
+ remainder_of_page = PAGE_SIZE - sg[i].offset;
+ if (bufsize > remainder_of_page) {
+ /* the buffer was split over multiple pages */
+ sg[i].length = remainder_of_page;
+ bufsize -= remainder_of_page;
+ data += remainder_of_page;
+ } else {
+ bufsize = 0;
+ }
+ i++;
+ }
+}
+
+/**
+ * crypto4xx_pre_compute_hmac - precompute HMAC inner/outer partial digests
+ * @ctx: context whose SA receives the partial digests
+ * @key: HMAC key
+ * @keylen: key length in bytes
+ * @bs: hash block size in bytes
+ * @ha: hash algorithm index into crypto4xx_hash_alg_map_tbl
+ * @digs: digest size hint (the size actually used comes from the
+ *        software transform and the @ha overrides below)
+ *
+ * Uses a software hash transform to hash key^ipad and key^opad and
+ * copies the resulting partial digests into the SA, so the engine can
+ * resume the HMAC without ever seeing the raw key.  Keys longer than
+ * @bs are first reduced by hashing.  Returns 0 or a negative errno.
+ */
+int crypto4xx_pre_compute_hmac(struct crypto4xx_ctx *ctx,
+ void *key,
+ unsigned int keylen,
+ unsigned int bs,
+ unsigned char ha,
+ unsigned char digs)
+{
+ u8 *ipad = NULL;
+ u8 *opad;
+ struct crypto_hash *child_hash = NULL;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ struct scatterlist asg[2];
+ struct crypto_tfm *child_tfm;
+ char *child_name = NULL;
+ int i, rc = 0;
+ int ds;
+
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+ child_name = crypto4xx_hash_alg_map_tbl[ha];
+ child_hash = crypto_alloc_hash(child_name, 0, 0);
+ if (IS_ERR(child_hash)) {
+ rc = PTR_ERR(child_hash);
+ printk(KERN_ERR "failed to load "
+ "transform for %s error %d\n",
+ child_name, rc);
+ return rc;
+ }
+
+ /* one buffer holds both pads: ipad in the first bs bytes,
+ * opad in the second */
+ ipad = kmalloc(bs * 2, GFP_KERNEL);
+ if (ipad == NULL) {
+ crypto_free_hash(child_hash);
+ return -ENOMEM;
+ }
+
+ opad = ipad + bs;
+ child_tfm = crypto_hash_tfm(child_hash);
+ ds = crypto_hash_digestsize(child_hash);
+ desc.tfm = child_hash;
+ desc.flags = 0;
+ /* over-long keys are reduced to their digest first */
+ if (keylen > bs) {
+ crypto4xx_sg_setbuf(key, keylen, asg, 2);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, asg, keylen);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_final(&desc, ipad);
+ keylen = ds;
+ } else {
+ memcpy(ipad, key, keylen);
+ }
+ memset(ipad + keylen, 0, bs-keylen);
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ /* partial hash of key^ipad -> SA inner digest */
+ sg_init_one(&sg[0], ipad, bs);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, sg, bs);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ /* SHA-224/384 carry full SHA-256/512 internal state, so the
+ * copied state is larger than the truncated digest */
+ if (ha == SA_HASH_ALG_SHA224)
+ ds = SHA256_DIGEST_SIZE;
+ else if (ha == SA_HASH_ALG_SHA384)
+ ds = SHA512_DIGEST_SIZE;
+
+ crypto_hash_partial(&desc, ipad);
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx), ipad, ds);
+
+ /* partial hash of key^opad -> SA outer digest */
+ sg_init_one(&sg[0], opad, bs);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ rc = crypto_hash_update(&desc, sg, bs);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ crypto_hash_partial(&desc, opad);
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_outer_digest(ctx), opad, ds);
+
+err_alg_hash_key:
+ kfree(ipad);
+ crypto_free_hash(child_hash);
+ return rc;
+}
+
+/**
+ * crypto4xx_pre_compute_ssl_mac - precompute SSL3-style inner/outer MAC state
+ * @ctx: context whose SA receives the partial digests
+ * @key: MAC key
+ * @keylen: key length in bytes
+ * @bs: hash block size in bytes
+ * @ha: hash algorithm id (SA_HASH_ALG_MD5 or SA_HASH_ALG_SHA1 only)
+ *
+ * Hashes key^ipad and key^opad with a software transform and stores the
+ * partial digests into the SA inner/outer digest fields.  Keys longer
+ * than the block size are first reduced by hashing them.
+ *
+ * Returns 0 on success, -EINVAL for an unsupported algorithm, -ENOMEM
+ * on allocation failure, or the error from the software hash.
+ */
+int crypto4xx_pre_compute_ssl_mac(struct crypto4xx_ctx *ctx,
+ void *key,
+ unsigned int keylen,
+ unsigned int bs,
+ unsigned char ha)
+
+{
+ u8 *ipad = NULL;
+ u8 *opad;
+ struct crypto_hash *child_hash = NULL;
+ struct hash_desc desc;
+ struct scatterlist sg[1];
+ struct crypto_tfm *child_tfm;
+ unsigned char *digest = NULL;
+ int padsize = 0; /* SSL3 pad length for the algorithm */
+ char *child_name = NULL;
+ int i, rc = 0;
+ int ds;
+
+ /* Only MD5 and SHA1 are defined for the SSL MAC; reject anything
+ * else instead of passing a NULL name to crypto_alloc_hash(). */
+ if (ha == SA_HASH_ALG_MD5) {
+ child_name = "md5";
+ padsize = 48;
+ } else if (ha == SA_HASH_ALG_SHA1) {
+ child_name = "sha1";
+ padsize = 40;
+ } else {
+ return -EINVAL;
+ }
+
+ digest = kmalloc(bs, GFP_KERNEL);
+ if (digest == NULL) {
+ rc = -ENOMEM;
+ goto err_nomem;
+ }
+
+ child_hash = crypto_alloc_hash(child_name, 0, 0);
+ if (IS_ERR(child_hash)) {
+ rc = PTR_ERR(child_hash);
+ printk(KERN_ERR
+ "failed to load transform for %s error %d\n",
+ child_name, rc);
+ goto err_alg;
+ }
+
+ child_tfm = crypto_hash_tfm(child_hash);
+ ds = crypto_hash_digestsize(child_hash);
+ desc.tfm = child_hash;
+ desc.flags = 0;
+
+ /* over-long keys are reduced to their digest first */
+ if (keylen > bs) {
+ sg_init_one(&sg[0], key, keylen);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, &sg[0], keylen);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_final(&desc, digest);
+ key = digest;
+ keylen = ds;
+ }
+
+ ipad = kmalloc(bs * 4, GFP_KERNEL);
+ if (ipad == NULL) {
+ rc = -ENOMEM;
+ goto err_alg_hash_key;
+ }
+
+ memcpy(ipad, key, keylen);
+ memset(ipad + keylen, 0, bs);
+ opad = ipad + bs;
+ memcpy(opad, ipad, bs);
+
+ for (i = 0; i < bs; i++) {
+ ipad[i] ^= 0x36;
+ opad[i] ^= 0x5c;
+ }
+
+ /* partial hash of key^ipad -> SA inner digest */
+ sg_init_one(&sg[0], ipad, bs);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, sg, bs);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ crypto_hash_partial(&desc, digest);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_inner_digest(ctx), digest, ds);
+
+ /* partial hash of key^opad -> SA outer digest */
+ sg_init_one(&sg[0], opad, bs);
+ rc = crypto_hash_init(&desc);
+ if (rc < 0)
+ goto err_alg_hash_key;
+ rc = crypto_hash_update(&desc, sg, bs);
+ if (rc < 0)
+ goto err_alg_hash_key;
+
+ crypto_hash_partial(&desc, digest);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_outer_digest(ctx), digest, ds);
+
+err_alg_hash_key:
+ kfree(ipad); /* kfree(NULL) is a no-op */
+ crypto_free_hash(child_hash);
+err_alg:
+ kfree(digest);
+err_nomem:
+ return rc;
+}
+
+/*
+ * Derive the GHASH key H = AES_K(0^128) with a software ECB-AES
+ * transform and store it in the SA inner digest field.
+ * Returns 0 or a negative errno.
+ */
+int crypto4xx_compute_gcm_hash_key_sw(struct crypto4xx_ctx *ctx,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_blkcipher *aes_tfm = NULL;
+ struct blkcipher_desc desc;
+ struct scatterlist sg[1];
+ char src[16];
+ int rc = 0;
+
+ aes_tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(aes_tfm)) {
+ printk(KERN_ERR "failed to load transform for %ld\n",
+ PTR_ERR(aes_tfm));
+ rc = PTR_ERR(aes_tfm);
+ return rc;
+ }
+ desc.tfm = aes_tfm;
+ desc.flags = 0;
+
+ /* encrypt an all-zero block to obtain H */
+ memset(src, 0, 16);
+ rc = crypto_blkcipher_setkey(aes_tfm, key, keylen);
+ if (rc) {
+ printk(KERN_ERR "setkey() failed flags=%x\n",
+ crypto_blkcipher_get_flags(aes_tfm));
+ goto out;
+ }
+
+ sg_init_one(sg, src, 16);
+ rc = crypto_blkcipher_encrypt(&desc, sg, sg, 16);
+ if (rc)
+ goto out;
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx), src, 16);
+
+out:
+ crypto_free_blkcipher(aes_tfm);
+ return rc;
+}
+
+/**
+ * 3DES/DES Functions
+ *
+ */
+/**
+ * crypto4xx_setkey_3des - build a DES/3DES SA for an ablkcipher tfm
+ * @cipher: tfm being keyed
+ * @key: DES (8-byte) or 3DES (24-byte) key
+ * @keylen: key length; selects the DES vs 3DES SA layout
+ * @cm: crypto mode (ECB/CBC/CFB/OFB)
+ * @fb: feedback mode matching @cm
+ *
+ * Rejects weak single-DES keys when the tfm requests weak-key checking.
+ * Builds the inbound SA (and a shared state record), then clones it for
+ * the outbound direction.  Returns 0 or a negative errno.
+ */
+static int crypto4xx_setkey_3des(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned char cm,
+ unsigned char fb)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc;
+
+ ctx->dev = my_alg->dev;
+
+ if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+ return -EINVAL;
+ }
+
+ /* des_ekey() returning 0 flags a weak single-DES key */
+ if (keylen == DES_KEY_SIZE) {
+ u32 tmp[32];
+ rc = des_ekey(tmp, key);
+ if (unlikely(rc == 0) &&
+ (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+ crypto_ablkcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_WEAK_KEY);
+ return -EINVAL;
+ }
+ }
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, keylen == 8 ? SA_DES_LEN : SA_3DES_LEN);
+ if (rc)
+ return rc;
+ /*
+ * state record will state in base ctx, so iv and
+ * hash result can be reused
+ * also don't need to alloc each packet coming
+ */
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ /* Setup SA */
+ ctx->direction = DIR_INBOUND;
+ ctx->hash_final = 0;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_DES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_DECRYPT, DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
+ fb, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ /* key material and sa_contents differ between DES and 3DES */
+ if (keylen == DES_KEY_SIZE) {
+ crypto4xx_memcpy_le(((struct dynamic_sa_des *) sa)->key,
+ key, keylen);
+ ((struct dynamic_sa_des *)sa)->ctrl.sa_contents =
+ SA_DES_CONTENTS;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_DES;
+ } else {
+ crypto4xx_memcpy_le(((struct dynamic_sa_3des *) sa)->key,
+ key, keylen);
+ ((struct dynamic_sa_3des *)sa)->ctrl.sa_contents =
+ SA_3DES_CONTENTS;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_3DES;
+ }
+
+ /* point the SA at the shared state record */
+ memcpy((void *)(ctx->sa_in +
+ get_dynamic_sa_offset_state_ptr_field(ctx)),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ ctx->is_hash = 0;
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+ /* clone the inbound SA for the outbound direction */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+/* Mode-specific setkey entry points delegating to crypto4xx_setkey_3des(). */
+
+/* 3DES/DES in 8-bit CFB mode */
+int crypto4xx_setkey_3des_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_3des(cipher, key, keylen,
+ CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_8BIT_CFB);
+}
+
+/* 3DES/DES in 64-bit OFB mode */
+int crypto4xx_setkey_3des_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_3des(cipher, key, keylen,
+ CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+/* 3DES/DES in CBC mode */
+int crypto4xx_setkey_3des_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_3des(cipher, key, keylen,
+ CRYPTO_MODE_CBC,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+/* 3DES/DES in ECB mode */
+int crypto4xx_setkey_3des_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_3des(cipher, key, keylen,
+ CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+
int crypto4xx_encrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
@@ -79,22 +517,54 @@ int crypto4xx_encrypt(struct ablkcipher_request *req)
ctx->pd_ctl = 0x1;
return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
- req->nbytes, req->info,
- get_dynamic_sa_iv_size(ctx));
+ req->nbytes, NULL, 0, req->info,
+ get_dynamic_sa_iv_size(ctx));
}
int crypto4xx_decrypt(struct ablkcipher_request *req)
{
struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ ctx->hash_final = 0;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, req->info,
+ get_dynamic_sa_iv_size(ctx));
+}
+
+/* CTR-mode encrypt: queue an outbound packet descriptor for the engine. */
+int crypto4xx_encrypt_ctr(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->hash_final = 0;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
+ ctx->direction = DIR_OUTBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0,
+ req->info,
+ crypto_ablkcipher_ivsize(ablkcipher));
+}
+
+/* CTR-mode decrypt: queue an inbound packet descriptor for the engine. */
+int crypto4xx_decrypt_ctr(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->hash_final = 0;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0,
+ req->info,
+ crypto_ablkcipher_ivsize(ablkcipher));
+}
/**
@@ -106,11 +576,15 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
unsigned char cm,
u8 fb)
{
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_ctl *sa;
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
int rc;
+ ctx->dev = my_alg->dev;
+
if (keylen != AES_KEYSIZE_256 &&
keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
crypto_ablkcipher_set_flags(cipher,
@@ -162,10 +636,17 @@ static int crypto4xx_setkey_aes(struct crypto_ablkcipher *cipher,
memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
sa = (struct dynamic_sa_ctl *) ctx->sa_out;
sa->sa_command_0.bf.dir = DIR_OUTBOUND;
-
+
return 0;
}
+/* AES in ECB mode (no feedback) -- delegates to crypto4xx_setkey_aes(). */
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_ECB,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
@@ -173,19 +654,716 @@ int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
CRYPTO_FEEDBACK_MODE_NO_FB);
}
+/**
+ * crypto4xx_setkey_aes_ctr - build an AES-CTR SA (RFC 3686-style key)
+ *
+ * The supplied key carries a trailing 4-byte nonce: it is stripped off
+ * the AES key proper and loaded into the state record, and the 32-bit
+ * block counter (state record word 3) is initialised to 1.
+ */
+int crypto4xx_setkey_aes_ctr(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ u32 cnt = 1;
+ int rc;
+ u32 cm = CRYPTO_MODE_AES_CTR;
+
+ ctx->dev = my_alg->dev;
+
+ /* last 4 key bytes are the CTR nonce, not AES key material */
+ keylen -= 4;
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ if (keylen != AES_KEYSIZE_256 &&
+ keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_128) {
+ crypto_ablkcipher_set_flags(cipher,
+ CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ ctx->hash_final = 0;
+ ctx->ctr_aes = 1;
+ /* Setup SA */
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_ENCRYPT,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, cm, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF, SA_SEQ_MASK_OFF,
+ SA_MC_ENABLE, SA_NOT_COPY_PAD,
+ SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+ sa->sa_contents = SA_AES_CONTENTS | (keylen << 2);
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ ctx->direction = DIR_INBOUND;
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+ /* nonce into state record word 0, counter=1 into word 3 */
+ crypto4xx_memcpy_le(ctx->state_record, key + keylen, 4);
+ crypto4xx_memcpy_le(ctx->state_record + 12, (void *)&cnt, 4);
+
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+
+ /* clone the inbound SA for the outbound direction */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+/* AES in 128-bit CFB mode -- delegates to crypto4xx_setkey_aes(). */
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_CFB,
+ CRYPTO_FEEDBACK_MODE_128BIT_CFB);
+}
+
+/* AES in 64-bit OFB mode -- delegates to crypto4xx_setkey_aes(). */
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_OFB,
+ CRYPTO_FEEDBACK_MODE_64BIT_OFB);
+}
+
+/* AES in ICM (integer counter) mode -- delegates to crypto4xx_setkey_aes(). */
+int crypto4xx_setkey_aes_icm(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto4xx_setkey_aes(cipher, key, keylen, CRYPTO_MODE_AES_ICM,
+ CRYPTO_FEEDBACK_MODE_NO_FB);
+}
+
+/**
+ * AES-GCM Functions
+ */
+/*
+ * Validate an AES-GCM key length: plain AES keys (16/24/32) and
+ * variants carrying a 4-byte appended nonce (20/36).
+ * NOTE(review): 30 looks suspicious (28 = 24+4 would be expected) --
+ * confirm against the hardware documentation before changing.
+ * Returns 0 if acceptable, -EINVAL otherwise.  (The unreachable
+ * trailing return after the switch has been removed: every case
+ * already returns.)
+ */
+static inline int crypto4xx_aes_gcm_validate_keylen(unsigned int keylen)
+{
+ switch (keylen) {
+ case 16:
+ case 20:
+ case 24:
+ case 30:
+ case 32:
+ case 36:
+ return 0;
+ default:
+ printk(KERN_ERR "crypto4xx_setkey_aes_gcm: "
+ "ERROR keylen = 0x%08x\n", keylen);
+ return -EINVAL;
+ }
+}
+
+/**
+ * crypto4xx_setkey_aes_gcm - build an AES-GCM SA for an aead tfm
+ *
+ * Validates the key length, allocates the SA and state record,
+ * precomputes the GHASH key in software, then fills in command words
+ * for hash-then-decrypt (inbound) and clones the SA as
+ * encrypt-then-hash for the outbound direction.
+ * Returns 0 or a negative errno; the SA is freed on any failure.
+ */
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen)
+
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ u32 cm = 4;
+
+ ctx->dev = my_alg->dev;
+
+ if (crypto4xx_aes_gcm_validate_keylen(keylen) != 0) {
+ printk(KERN_ERR "crypto4xx_setkey_aes_gcm:"
+ "ERROR keylen = 0x%08x\n", keylen);
+ crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_GCM_LEN + (keylen-16) / 4);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err;
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_contents = SA_AES_GCM_CONTENTS | (keylen << 2);
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+
+ ctx->direction = DIR_INBOUND;
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+
+ /* H = AES_K(0^128), computed with a software cipher */
+ rc = crypto4xx_compute_gcm_hash_key_sw(ctx, key, keylen);
+ if (rc) {
+ printk(KERN_ERR "GCM hash key setting failed = %d\n", rc);
+ goto err;
+ }
+
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ ctx->is_gcm = 1;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x11;
+
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH_DECRYPT,
+ DIR_INBOUND);
+
+ sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = (cm & 3);
+ sa->sa_command_1.bf.feedback_mode = 0;
+
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_command_1.bf.copy_payload = 1;
+
+ sa->sa_command_1.bf.copy_pad = 0;
+ sa->sa_command_1.bf.copy_hdr = 0;
+ sa->sa_command_1.bf.mutable_bit_proc = 1;
+ sa->sa_command_1.bf.seq_num_mask = 1;
+
+ /* outbound clone: encrypt-then-hash */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT_HASH;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+/* GCM encrypt: outbound descriptor; engine appends the ICV to the output. */
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+ ctx->append_icv = 1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, req->assoc, req->assoclen,
+ req->iv, crypto_aead_ivsize(aead));
+}
+
+/* GCM decrypt: inbound descriptor; the trailing ICV is excluded from len. */
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int len = req->cryptlen - crypto_aead_authsize(aead);
+
+ ctx->direction = DIR_INBOUND;
+ ctx->append_icv = 0;
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ len, req->assoc, req->assoclen,
+ req->iv, crypto_aead_ivsize(aead));
+}
+
+/* IV-generating GCM variants are not implemented. */
+int crypto4xx_givencrypt_aes_gcm(struct aead_givcrypt_request *req)
+{
+ return -ENOSYS;
+}
+
+int crypto4xx_givdecrypt_aes_gcm(struct aead_givcrypt_request *req)
+{
+ return -ENOSYS;
+}
+
+/**
+ * AES-CCM Functions
+ */
+/**
+ * crypto4xx_setauthsize_aes - validate and store the AEAD tag length
+ *
+ * Only tag sizes of 8, 10, 12 and 16 bytes are accepted; anything
+ * else is rejected with -EINVAL.
+ */
+int crypto4xx_setauthsize_aes(struct crypto_aead *ciper,
+ unsigned int authsize)
+{
+ struct aead_tfm *tfm = crypto_aead_crt(ciper);
+
+ switch (authsize) {
+ case 8:
+ case 12:
+ case 16:
+ case 10:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ tfm->authsize = authsize;
+ return 0;
+}
+
+/**
+ * crypto4xx_setkey_aes_ccm - program inbound/outbound SAs for AES-CCM
+ *
+ * Allocates a fresh SA pair (freeing any previous one) plus a state
+ * record, programs the inbound SA for hash-then-decrypt with CBC-MAC
+ * over AES-CTR, copies the key into the SA, then clones it into the
+ * outbound SA with the opcode flipped to encrypt-then-hash.
+ */
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ ctx->dev = my_alg->dev;
+
+ /* Re-keying: drop the SAs built by a previous setkey call. */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ /* SA grows by one word per 4 key bytes beyond AES-128. */
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_CCM_LEN + (keylen-16) / 4);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ /* Setup SA */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents = SA_AES_CCM_CONTENTS | (keylen << 2);
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_HASH_DECRYPT, DIR_INBOUND);
+
+ /* digest_len is patched per-request from the AEAD authsize. */
+ sa->sa_command_0.bf.digest_len = 0;
+ sa->sa_command_1.bf.key_len = keylen >> 3;
+ ctx->direction = DIR_INBOUND;
+ ctx->append_icv = 0;
+ ctx->is_gcm = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x11;
+
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+ /* Link the SA to its DMA-visible state record (32-bit pointer). */
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ /* Clone inbound SA, then re-issue commands for the outbound side. */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_CBC_MAC,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO, SA_OP_GROUP_BASIC,
+ SA_OPCODE_ENCRYPT_HASH, DIR_OUTBOUND);
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ return 0;
+}
+
+/**
+ * crypto4xx_encrypt_aes_ccm - AES-CCM encrypt entry point
+ *
+ * Patches the outbound SA with per-request values: the AAD offset (in
+ * 32-bit words), the tag length, and the counter-field width decoded
+ * from the CCM flags byte (iv[0]), then submits the descriptor.
+ */
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct dynamic_sa_ctl *sa;
+
+ ctx->direction = DIR_OUTBOUND;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ if (req->assoclen)
+ sa->sa_command_1.bf.hash_crypto_offset = req->assoclen >> 2;
+
+ sa->sa_command_0.bf.digest_len = (crypto_aead_authsize(aead) >> 2);
+ /* (iv[0] & 7) == 1 means a 2-byte counter field (L = 2). */
+ if ((req->iv[0] & 7) == 1)
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+
+ ctx->append_icv = 1;
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, req->assoc, req->assoclen,
+ req->iv, 16);
+}
+
+/**
+ * crypto4xx_decrypt_aes_ccm - AES-CCM decrypt entry point
+ *
+ * Rejects CCM flag bytes encoding unsupported counter-field lengths,
+ * then patches the inbound SA's tag length and counter width before
+ * submitting the descriptor.
+ */
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct dynamic_sa_ctl *sa;
+
+ /* Support only counter field length of 2 and 4 bytes */
+ if ((req->iv[0] & 0x7) != 1 && (req->iv[0] & 0x7) != 3) {
+ printk(KERN_ERR "algorithm AES-CCM "
+ "unsupported counter length %d\n",
+ req->iv[0] & 0x7);
+ return -EINVAL;
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ sa->sa_command_0.bf.digest_len = (crypto_aead_authsize(aead) >> 2);
+ if ((req->iv[0] & 7) == 1)
+ sa->sa_command_1.bf.crypto_mode9_8 = 1;
+ else
+ sa->sa_command_1.bf.crypto_mode9_8 = 0;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, req->assoc, req->assoclen,
+ req->iv, 16);
+}
+
+/* IV-generating CCM variants are not implemented by this driver. */
+int crypto4xx_givencrypt_aes_ccm(struct aead_givcrypt_request *req)
+{
+ return -ENOSYS;
+}
+
+int crypto4xx_givdecrypt_aes_ccm(struct aead_givcrypt_request *req)
+{
+ return -ENOSYS;
+}
+
/**
- * HASH SHA1 Functions
+ * Kasumi Functions
+ *
+ */
+/**
+ * crypto4xx_setkey_kasumi - program SAs for Kasumi or Kasumi-f8
+ * @cipher: ablkcipher transform being keyed
+ * @key:    128-bit key (only keylen == 16 is accepted)
+ * @keylen: key length in bytes
+ * @cm:     CRYPTO_MODE_KASUMI or CRYPTO_MODE_KASUMI_f8
+ *
+ * Builds the inbound SA for the selected mode (f8 additionally loads
+ * the IV from the state record and enables mutable-bit processing),
+ * copies the key in, then clones the SA for the outbound direction.
+ */
+int crypto4xx_setkey_kasumi(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned char cm)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ u32 sa_len = 0;
+ int rc;
+
+ if (keylen != 16) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ printk(KERN_ERR "%s: keylen fail\n", __func__);
+ return -EINVAL;
+ }
+
+ ctx->dev = my_alg->dev;
+
+ /* Create SA - SA is created here as the alg init function is
+ * common to many algorithm and it does not have the SA length
+ * as it is specify to an algorithm. See setkey function has
+ * to be called for encryption/decryption algorithm once,
+ * it is okay to do this here.
+ */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ if (cm == CRYPTO_MODE_KASUMI)
+ sa_len = SA_KASUMI_LEN;
+ else if (cm == CRYPTO_MODE_KASUMI_f8)
+ sa_len = SA_KASUMI_F8_LEN;
+
+ rc = crypto4xx_alloc_sa(ctx, sa_len);
+ if (rc)
+ return rc;
+
+ if (!ctx->state_record) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /* Setup SA - SA is a shared resource for request operation. As
+ * crypto alg and crypto mode can not be change, it should be
+ * ok to store them there. SA control words are not used by the
+ * hardware (configured in token instead), we use it to store
+ * software algorithm and mode selected.
+ */
+
+ if (cm == CRYPTO_MODE_KASUMI) {
+ sa->sa_contents = SA_KASUMI_CONTENTS;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_KASUMI;
+ sa->sa_command_0.bf.hash_alg = SA_HASH_ALG_NULL;
+ sa->sa_command_0.bf.pad_type = 3; /* set to zero padding */
+ sa->sa_command_0.bf.opcode = 0;
+ sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = (cm & 3);
+ sa->sa_command_1.bf.feedback_mode = 0;
+ } else {
+ sa->sa_contents = SA_KASUMI_F8_CONTENTS;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_KASUMI;
+ sa->sa_command_0.bf.hash_alg = SA_HASH_ALG_NULL;
+ sa->sa_command_0.bf.pad_type = 3;
+ sa->sa_command_0.bf.load_iv = SA_LOAD_IV_FROM_STATE;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ENCRYPT;
+ sa->sa_command_1.bf.crypto_mode31 = (cm & 4) >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = (cm & 3);
+ sa->sa_command_1.bf.feedback_mode = 0;
+ sa->sa_command_1.bf.mutable_bit_proc = 1;
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa->sa_command_1.bf.sa_rev = 1;
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+ ctx->is_hash = 0;
+
+ /* Link the SA to its DMA-visible state record (32-bit pointer). */
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+/* Thin wrappers selecting plain-Kasumi vs. Kasumi-f8 mode. */
+int crypto4xx_setkey_kasumi_p(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_kasumi(cipher, key, keylen,
+ CRYPTO_MODE_KASUMI);
+}
+
+int crypto4xx_setkey_kasumi_f8(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_kasumi(cipher, key, keylen,
+ CRYPTO_MODE_KASUMI_f8);
+}
+
+/**
+ * Kasumi and Kasumi f8 work with number of bits.
+ * The crypto engine can only take number bytes as source/destination length
+ * User should round up bit number to byte number. When receive the result
+ * packet and then mask off the extra bits in the last
+ * byte.
+ */
+int crypto4xx_encrypt_kasumi(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ /* Plain Kasumi: no IV, byte-granular length (see note above). */
+ ctx->direction = DIR_OUTBOUND;
+ ctx->pd_ctl = 0x1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+
+/**
+ * Kasumi and Kasumi f8 work with number of bits.
+ * The crypto engine can only take number bytes as source/destination length
+ * User should round up bit number to byte number.
+ * When receive the result packet and then mask off the extra bits in the last
+ * byte.
+ */
+/* NOTE(review): this takes an aead_request and uses req->cryptlen while
+ * its encrypt counterpart takes an ablkcipher_request and uses
+ * req->nbytes — verify against the alg registration table that the
+ * request type really is AEAD here; it looks like a copy/paste slip.
+ */
+int crypto4xx_decrypt_kasumi(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->pd_ctl = 0x1;
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/**
+ * Kasumi and Kasumi f8 work with number of bits.
+ * The crypto engine can only take number bytes as source/destination length
+ * The user should round up bit number to byte number.
+ * When receive the result packet and then mask
+ * off the extra bits in the last byte.
+ */
+int crypto4xx_encrypt_kasumi_f8(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ /* f8 mode carries a 64-bit IV (count/bearer/direction) in req->info. */
+ ctx->direction = DIR_OUTBOUND;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, req->info, 8);
+}
+
+/** Note:
+ * Kasumi and Kasumi f8 work with number of bits.
+ * The crypto engine can only take number bytes as source/destination length
+ * User should round up bit number to byte number.
+ * When receive the result packet and then mask off the extra bits in the last
+ * byte.
+ */
+int crypto4xx_decrypt_kasumi_f8(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ /* Mirror of encrypt_kasumi_f8, inbound direction. */
+ ctx->direction = DIR_INBOUND;
+ ctx->is_hash = 0;
+ ctx->pd_ctl = 0x1;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->nbytes, NULL, 0, req->info, 8);
+}
+
+/**
+ * ARC4 Functions
+ *
+ */
+/**
+ * crypto4xx_setkey_arc4 - program inbound/outbound SAs for ARC4
+ * @cipher: ablkcipher transform being keyed
+ * @key:    ARC4 key
+ * @keylen: key length in bytes (copied into a zeroed 16-byte SA field)
+ *
+ * Allocates an SA pair and, on first use, an ARC4 state record; sets
+ * init_arc4 so the first request re-initialises the hardware state.
+ */
+int crypto4xx_setkey_arc4(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc = 0;
+
+ ctx->dev = my_alg->dev;
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_ARC4_LEN);
+ if (rc)
+ return rc;
+
+ if (ctx->arc4_state_record == NULL) {
+ rc = crypto4xx_alloc_arc4_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ /* Propagate the allocator's error code. */
+ return rc;
+ }
+ }
+ /* Setup SA */
+ ctx->sa_len = SA_ARC4_LEN;
+ ctx->init_arc4 = 1;
+ ctx->direction = DIR_INBOUND;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ memset(((struct dynamic_sa_arc4 *)sa)->key, 0, 16);
+
+ crypto4xx_memcpy_le(((struct dynamic_sa_arc4 *)sa)->key, key, keylen);
+ sa->sa_contents = SA_ARC4_CONTENTS;
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, SA_LOAD_IV_FROM_STATE,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_NULL,
+ SA_CIPHER_ALG_ARC4, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_ENCRYPT,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF, SA_SEQ_MASK_OFF,
+ SA_MC_ENABLE, SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD, SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = keylen;
+ /* Use the byte/word-addressed SA base for the offset arithmetic, as
+ * every other setkey does; adding the offset to the struct pointer
+ * would scale by sizeof(struct dynamic_sa_ctl) and corrupt the SA.
+ */
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_arc4_state_ptr(ctx),
+ (void *)&ctx->arc4_state_record_dma_addr, 4);
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+}
+
+int crypto4xx_arc4_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ /* First request after setkey asks the engine to (re)build the
+ * ARC4 state (pd_ctl 9); subsequent requests reuse it (pd_ctl 1).
+ */
+ if (ctx->init_arc4) {
+ ctx->init_arc4 = 0;
+ ctx->pd_ctl = 9;
+ } else {
+ ctx->pd_ctl = 0x1;
+ }
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src,
+ req->dst,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+
+int crypto4xx_arc4_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ /* ARC4 is symmetric; identical to crypto4xx_arc4_encrypt. */
+ if (ctx->init_arc4) {
+ ctx->init_arc4 = 0;
+ ctx->pd_ctl = 9;
+ } else {
+ ctx->pd_ctl = 0x1;
+ }
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src,
+ req->dst,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+
+/**
+ * Support MD5/SHA/HMAC Hashing Algorithms
+ *
*/
static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
unsigned int sa_len,
unsigned char ha,
unsigned char hm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
- struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
- struct dynamic_sa_ctl *sa;
- struct dynamic_sa_hash160 *sa_in;
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
int rc;
ctx->dev = my_alg->dev;
@@ -200,6 +1378,7 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
if (rc)
return rc;
+
if (ctx->state_record_dma_addr == 0) {
crypto4xx_alloc_state_record(ctx);
if (!ctx->state_record_dma_addr) {
@@ -207,10 +1386,15 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
return -ENOMEM;
}
}
-
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+
+ /*
+ * Setup hash algorithm and hash mode
+ */
set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
SA_NO_HEADER_PROC, ha, SA_CIPHER_ALG_NULL,
@@ -221,13 +1405,12 @@ static int crypto4xx_hash_alg_init(struct crypto_tfm *tfm,
SA_SEQ_MASK_OFF, SA_MC_ENABLE,
SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
SA_NOT_COPY_HDR);
+
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+ sa->sa_contents = crypto4xx_sa_hash_tbl[0][ha];
ctx->direction = DIR_INBOUND;
- sa->sa_contents = SA_HASH160_CONTENTS;
- sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in;
- /* Need to zero hash digest in SA */
- memset(sa_in->inner_digest, 0, sizeof(sa_in->inner_digest));
- memset(sa_in->outer_digest, 0, sizeof(sa_in->outer_digest));
- sa_in->state_ptr = ctx->state_record_dma_addr;
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
return 0;
@@ -261,7 +1444,7 @@ int crypto4xx_hash_update(struct ahash_request *req)
return crypto4xx_build_pd(&req->base, ctx, req->src,
(struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ req->nbytes, NULL, 0, NULL, 0);
}
int crypto4xx_hash_final(struct ahash_request *req)
@@ -279,16 +1462,2627 @@ int crypto4xx_hash_digest(struct ahash_request *req)
return crypto4xx_build_pd(&req->base, ctx, req->src,
(struct scatterlist *) req->result,
- req->nbytes, NULL, 0);
+ req->nbytes, NULL, 0, NULL, 0);
}
/**
* SHA1 Algorithm
*/
+
+/* Plain (non-HMAC) MD5 hash transform init. */
+int crypto4xx_md5_alg_init(struct crypto_tfm *tfm)
+{
+ return crypto4xx_hash_alg_init(tfm, SA_HASH128_LEN, SA_HASH_ALG_MD5,
+ SA_HASH_MODE_HASH);
+}
+
+/**
+ * crypto4xx_hash_hmac_setkey - common HMAC setkey for MD5/SHA families
+ * @hash:       ahash transform being keyed
+ * @key:        HMAC key
+ * @keylen:     key length in bytes
+ * @sa_len:     SA length (words) for the chosen hash
+ * @ha:         SA hash algorithm code
+ * @hm:         SA hash mode (HMAC)
+ * @max_keylen: upper bound on accepted key length
+ *
+ * Builds the inbound SA, links the state record, pre-computes the
+ * inner/outer HMAC digests from the key, then clones the SA for the
+ * outbound direction. Returns 0 or a negative errno.
+ */
+int crypto4xx_hash_hmac_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned int sa_len,
+ unsigned char ha,
+ unsigned char hm,
+ unsigned int max_keylen)
+{
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct dynamic_sa_ctl *sa;
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ int ds = crypto_ahash_digestsize(hash);
+ int rc;
+
+ ctx->dev = my_alg->dev;
+
+ if (keylen > max_keylen) {
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ /* Return a proper errno, not a bare -1 (== -EPERM). */
+ return -EINVAL;
+ }
+
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ /* Create SA */
+ rc = crypto4xx_alloc_sa(ctx, sa_len);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err;
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ SA_NO_HEADER_PROC,
+ ha, SA_CIPHER_ALG_NULL, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, 0, hm,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+ sa->sa_contents = crypto4xx_sa_hash_tbl[0][ha];
+ ctx->direction = DIR_INBOUND;
+ memcpy((ctx->sa_in) + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&ctx->state_record_dma_addr, 4);
+
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ rc = crypto4xx_pre_compute_hmac(ctx, (void *)key, keylen, bs, ha, ds);
+ if (rc) {
+ printk(KERN_ERR "Hmac Initial Digest Calculation failed\n");
+ goto err;
+ }
+
+ ctx->hash_final = 1;
+ ctx->is_hash = 1;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+/* HMAC-MD5 setkey wrapper (max key 256 bytes). */
+int crypto4xx_md5_hmac_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_hash_hmac_setkey(hash, key, keylen, SA_HASH128_LEN,
+ SA_HASH_ALG_MD5, SA_HASH_MODE_HMAC,
+ 256);
+}
+
+/**
+ * SHA1 and SHA2 Algorithm
+ *
+ */
+
int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm)
{
return crypto4xx_hash_alg_init(tfm, SA_HASH160_LEN, SA_HASH_ALG_SHA1,
SA_HASH_MODE_HASH);
}
+/* HMAC-SHA1 setkey wrapper (max key 256 bytes). */
+int crypto4xx_sha1_hmac_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_hash_hmac_setkey(hash, key, keylen, SA_HASH160_LEN,
+ SA_HASH_ALG_SHA1, SA_HASH_MODE_HMAC,
+ 256);
+}
+
+/**
+ * crypto4xx_sha2_alg_init - init for SHA-224/256/384/512 transforms
+ *
+ * Selects the SA hash-algorithm code from the transform's digest size
+ * (defaulting to SHA-256) and defers to the common hash init with the
+ * SA length looked up from crypto4xx_sa_hash_tbl.
+ */
+int crypto4xx_sha2_alg_init(struct crypto_tfm *tfm)
+{
+ int ds = crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
+ u8 ha;
+
+ switch (ds) {
+ default:
+ case 256/8:
+ ha = SA_HASH_ALG_SHA256;
+ break;
+ case 224/8:
+ ha = SA_HASH_ALG_SHA224;
+ break;
+ case 512/8:
+ ha = SA_HASH_ALG_SHA512;
+ break;
+ case 384/8:
+ ha = SA_HASH_ALG_SHA384;
+ break;
+ }
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+
+ return crypto4xx_hash_alg_init(tfm,
+ crypto4xx_sa_hash_tbl[2][ha], ha, 0);
+}
+
+/**
+ * crypto4xx_sha2_hmac_setkey - HMAC setkey for the SHA-2 family
+ *
+ * Same digest-size dispatch as crypto4xx_sha2_alg_init, then the
+ * common HMAC setkey with a 512-byte maximum key length.
+ */
+int crypto4xx_sha2_hmac_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen)
+{
+ int ds = crypto_ahash_digestsize(hash);
+ unsigned char ha;
+
+ switch (ds) {
+ default:
+ case 256/8:
+ ha = SA_HASH_ALG_SHA256;
+ break;
+ case 224/8:
+ ha = SA_HASH_ALG_SHA224;
+ break;
+ case 512/8:
+ ha = SA_HASH_ALG_SHA512;
+ break;
+ case 384/8:
+ ha = SA_HASH_ALG_SHA384;
+ break;
+ }
+ BUG_ON(ha >= HASH_ALG_MAX_CNT);
+
+ return crypto4xx_hash_hmac_setkey(hash, key, keylen,
+ crypto4xx_sa_hash_tbl[2][ha],
+ ha,
+ SA_HASH_MODE_HMAC,
+ 512);
+}
+
+/**
+ * AES-XCBC-MAC Algorithm
+ *
+ */
+/**
+ * crypto4xx_xcbc_digest - derive the AES-XCBC-MAC subkeys into the SA
+ * @key:     user AES key
+ * @keylen:  key length in bytes
+ * @sa_hash: destination inside the SA for the derived key material
+ * @bs:      cipher block size (16 for AES)
+ *
+ * Encrypts the RFC 3566 constants 0x01.., 0x02.., 0x03.. with
+ * ECB(AES) under the user key and stores the three results at the
+ * SA offsets the hardware expects (+0, +32, +48 bytes).
+ * Returns 0 or a negative errno; all temporaries are released on
+ * every path (the original leaked @digest always and the tfm on
+ * error paths, as the free sat inside an #if 0 block).
+ */
+int crypto4xx_xcbc_digest(const unsigned char *key,
+ unsigned int keylen,
+ u8 *sa_hash, int bs)
+{
+ struct scatterlist sg[1];
+ struct crypto_blkcipher *aes_tfm = NULL;
+ struct blkcipher_desc desc;
+ int rc;
+ u8 *digest = NULL;
+
+ /* Load pre-computed key value into SA */
+ aes_tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(aes_tfm)) {
+ rc = PTR_ERR(aes_tfm);
+ printk(KERN_ERR "failed to load transform"
+ " for ecb(aes) error %d\n", rc);
+ return rc;
+ }
+ desc.tfm = aes_tfm;
+ desc.flags = 0;
+ rc = crypto_blkcipher_setkey(desc.tfm, key, keylen);
+ if (rc) {
+ printk(KERN_ERR "failed to load key error %d\n", rc);
+ goto err_alg;
+ }
+ digest = kmalloc(16, GFP_KERNEL);
+ if (digest == NULL) {
+ rc = -ENOMEM;
+ goto err_alg;
+ }
+
+ memset(digest, 0x01, bs);
+ sg_init_one(&sg[0], digest, bs);
+ rc = crypto_blkcipher_encrypt(&desc, sg, sg, bs);
+ if (rc < 0) {
+ printk(KERN_ERR "failed to hash key error %d\n", rc);
+ goto err_alg;
+ }
+
+ crypto4xx_memcpy_le((void *) sa_hash, digest, bs);
+
+ memset(digest, 0x02, bs);
+ sg_init_one(&sg[0], digest, bs);
+ rc = crypto_blkcipher_encrypt(&desc, sg, sg, bs);
+ if (rc < 0) {
+ printk(KERN_ERR "failed to hash key error %d\n", rc);
+ goto err_alg;
+ }
+
+ sa_hash += 32;
+ crypto4xx_memcpy_le((void *) sa_hash, digest, bs);
+
+ memset(digest, 0x03, bs);
+ sg_init_one(&sg[0], digest, bs);
+ rc = crypto_blkcipher_encrypt(&desc, sg, sg, bs);
+ if (rc < 0) {
+ printk(KERN_ERR "failed to hash key error %d\n", rc);
+ goto err_alg;
+ }
+
+ sa_hash += 16;
+ crypto4xx_memcpy_le((void *) sa_hash, digest, bs);
+
+ rc = 0;
+err_alg:
+ /* kfree(NULL) is a no-op; common cleanup for all paths. */
+ kfree(digest);
+ crypto_free_blkcipher(aes_tfm);
+ return rc;
+}
+
+/**
+ * crypto4xx_xcbc_setkey - program the SA pair for AES-XCBC-MAC-128
+ *
+ * Accepts only 128-bit keys, allocates SA and state record, programs
+ * the inbound SA for the XCBC hash, derives the three subkeys into the
+ * SA's inner-digest area via crypto4xx_xcbc_digest(), then clones the
+ * SA for the outbound direction.
+ */
+int crypto4xx_xcbc_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ struct dynamic_sa_ctl *sa;
+ u8 *sa_hash;
+ int rc = 0;
+
+ ctx->dev = my_alg->dev;
+
+ if (keylen != 128/8) {
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ /* Create SA */
+ rc = crypto4xx_alloc_sa(ctx, SA_AES128_XCBC_MAC_LEN);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ sa->sa_contents = SA_AES128_XCBC_MAC_CONTENTS;
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ SA_NO_HEADER_PROC,
+ SA_HASH_ALG_AES_XCBC_MAC_128,
+ SA_CIPHER_ALG_NULL, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+
+ /* Link the SA to its DMA-visible state record (32-bit pointer). */
+ memcpy((void *)(ctx->sa_in +
+ get_dynamic_sa_offset_state_ptr_field(ctx)),
+ (void *)&ctx->state_record_dma_addr, 4);
+ ctx->is_hash = 1;
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ /* Derived K1/K2/K3 land in the SA's inner-digest area. */
+ sa_hash = (u8 *)(&(((struct dynamic_sa_aes128_xcbc_mac *)
+ ctx->sa_in)->inner_digest));
+ rc = crypto4xx_xcbc_digest(key, keylen, sa_hash, bs);
+ if (rc) {
+ printk(KERN_ERR "XCBC Digest Calculation Failed %d\n", rc);
+ goto err;
+ }
+
+ ctx->is_hash = 1;
+ ctx->hash_final = 1;
+ ctx->pd_ctl = 0x11;
+
+ ctx->direction = DIR_INBOUND;
+
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+
+ return 0;
+err:
+ crypto4xx_free_sa(ctx);
+ return rc;
+}
+
+/**
+ * Kasumi F9 - Hash Algorithms
+ *
+ */
+/**
+ * crypto4xx_kasumi_f9_setkey - program the SA for Kasumi f9 integrity
+ *
+ * Accepts only 128-bit keys. The key is stored in the SA's
+ * inner-digest field (f9 keys the hash, not a cipher); only an
+ * inbound SA is built since f9 is a MAC, and bypass is set so the
+ * packet header prepended by the caller is skipped.
+ */
+int crypto4xx_kasumi_f9_setkey(struct crypto_ahash *hash,
+ const u8 *key, unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc;
+
+ ctx->dev = my_alg->dev;
+
+ if (keylen != 16) {
+ crypto_ahash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_KASUMI_F9_LEN);
+ if (rc)
+ return rc;
+
+ if (ctx->state_record_dma_addr == 0) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc) {
+ crypto4xx_free_sa(ctx);
+ return rc;
+ }
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ set_dynamic_sa_command_0(sa, SA_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_NOT_LOAD_HASH, SA_LOAD_IV_FROM_SA,
+ SA_NO_HEADER_PROC, SA_HASH_ALG_KASUMI_f9,
+ SA_CIPHER_ALG_NULL, SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_BASIC, SA_OPCODE_HASH,
+ DIR_INBOUND);
+ set_dynamic_sa_command_1(sa, 0, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_NOT_COPY_PAD, SA_NOT_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+ sa->sa_contents = SA_KASUMI_F9_CONTENTS;
+
+ ctx->direction = DIR_INBOUND;
+ memcpy((void *)(ctx->sa_in +
+ get_dynamic_sa_offset_state_ptr_field(ctx)),
+ (void *)&ctx->state_record_dma_addr, 4);
+
+ /* f9 integrity key lives in the inner-digest field of the SA. */
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx), key, keylen);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ ctx->is_hash = 1;
+ ctx->hash_final = 1;
+ ctx->pd_ctl = 0x11;
+ ctx->bypass = 4;
+
+ return 0;
+}
+
+/**
+ * crypto4xx_kasumi_f9_digest - run a Kasumi f9 MAC over the request
+ *
+ * The caller prepends a 16-byte count/fresh/direction header to the
+ * plaintext; the first 12 bytes of it are copied into the SA's
+ * outer-digest field before the descriptor is built.
+ *
+ * NOTE(review): dma_map_page(NULL, ...) followed by phys_to_virt() on
+ * the handle assumes a 1:1 DMA/physical mapping with no IOMMU and no
+ * matching dma_unmap_page — presumably valid on this 4xx platform,
+ * but worth confirming.
+ */
+int crypto4xx_kasumi_f9_digest(struct ahash_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct scatterlist *src = req->src;
+ struct dynamic_sa_ctl *sa;
+ dma_addr_t addr;
+
+ /*
+ * We have prepended count/fresh/direction/reserv total
+ * 16byte before the plaintext
+ * so, need to modify the length.
+ * We doing so, to make use of tcrypt.c's hash_test.
+ */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ addr = dma_map_page(NULL, sg_page(src), src->offset,
+ src->length, DMA_TO_DEVICE);
+ crypto4xx_memcpy_le((void *)sa +
+ get_dynamic_sa_offset_outer_digest(ctx),
+ phys_to_virt(addr), 12);
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src,
+ (struct scatterlist *)req->result,
+ req->nbytes, NULL, 0, NULL, 0);
+}
+/** IPSEC Related Routines */
+
+/**
+ * crypto4xx_setkey_esp_tunnel - common setkey for ESP tunnel-mode AEADs
+ *
+ * The key blob is an authenc-style rtattr: an esp_authenc_param header
+ * (SPI, sequence number, pad block size, encap UDP header length and
+ * the enc key length) followed by auth key then enc key. The function
+ * first programs the SA as a plain hash so the HMAC inner/outer
+ * digests can be pre-computed, then rewrites the command words for
+ * protocol-group ESP processing, and finally clones/adjusts the SA
+ * for the outbound direction.
+ *
+ * NOTE(review): the crypto4xx_alloc_sa() return value is ignored here
+ * (only the dma addresses are checked afterwards) and the badkey
+ * printk concatenates KERN_INFO with KERN_ERR — both look like
+ * oversights worth fixing in a follow-up.
+ */
+int crypto4xx_setkey_esp_tunnel(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ u32 cipher_alg,
+ u32 hashing,
+ u32 c_mode,
+ u32 sa_len,
+ u32 sa_contents,
+ u32 ds,
+ u32 bypass,
+ u32 hash_bs)
+
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+
+ struct esp_authenc_param {
+ __be32 spi;
+ __be32 seq;
+ __be16 pad_block_size;
+ __be16 encap_uhl;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+
+ param = RTA_DATA(rta);
+
+ ctx->spi = be32_to_cpu(param->spi);
+ ctx->seq = be32_to_cpu(param->seq);
+ ctx->pad_block_size = be16_to_cpu(param->pad_block_size);
+ ctx->encap_uhl = be16_to_cpu(param->encap_uhl);
+
+ ESP_PRINTK(KERN_INFO "%s: spi = 0x%08x, seq = %d, pad_size = %d, encap uhl = %d\n",__FUNCTION__,
+ ctx->spi, ctx->seq, ctx->pad_block_size, ctx->encap_uhl);
+
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ /* Skip the rtattr header; the rest is authkey || enckey. */
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ authkeylen = keylen - enckeylen;
+
+
+ printk(KERN_INFO "%s: enckeylen = %d, authkeylen = %d\n",
+ __FUNCTION__, enckeylen, authkeylen);
+#if 0
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key, authkeylen, false);
+
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key+authkeylen, enckeylen, false);
+#endif
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) {
+ crypto4xx_free_sa(ctx);
+ }
+
+ crypto4xx_alloc_sa(ctx, sa_len);
+ if (!ctx->sa_in_dma_addr || !ctx->sa_out_dma_addr)
+ goto err_nomem;
+
+ if (!ctx->state_record) {
+ crypto4xx_alloc_state_record(ctx);
+ if (!ctx->state_record_dma_addr)
+ goto err_nomem_sr;
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa = (struct dynamic_sa_ctl *)(ctx->sa_in);
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ sa->sa_command_0.w = 0;
+ sa->sa_command_0.bf.hash_alg = hashing;
+ sa->sa_command_0.bf.gather = 0;
+ sa->sa_command_0.bf.save_hash_state = 1;
+ sa->sa_command_0.bf.load_hash_state = 0;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_NULL;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_HASH;
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+ sa->sa_command_1.w = 0;
+ sa->sa_command_1.bf.hmac_muting = 0;
+ sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_contents = sa_contents;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void*)&(ctx->state_record_dma_addr), 4);
+
+ /* Pre-compute HMAC digests while the SA is in plain-hash mode. */
+ crypto4xx_pre_compute_hmac(ctx, (void *)key, authkeylen,
+ hash_bs, hashing, ds);
+
+ /*
+ * Now, setup command for ESP
+ */
+ sa->sa_command_0.bf.load_hash_state = 0;
+ sa->sa_command_0.bf.save_hash_state = 0;
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ sa->sa_command_0.bf.load_iv = 2;
+ sa->sa_command_0.bf.cipher_alg = cipher_alg;
+ sa->sa_command_0.bf.op_group = SA_OP_GROUP_PROTOCOL;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ESP;
+
+ sa->sa_command_1.bf.hmac_muting = 0;
+
+
+ printk(KERN_INFO "%s: keylen = %d, enckeylen = %d\n",
+ __FUNCTION__, keylen, enckeylen);
+
+ if (cipher_alg == SA_CIPHER_ALG_AES) {
+ if ( enckeylen == 16)
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_128;
+ else if ( enckeylen == 24)
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_192;
+ else
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_256;
+ } else {
+ sa->sa_command_1.bf.key_len = 0;
+ }
+
+ sa->sa_command_1.bf.crypto_mode31 = c_mode >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = c_mode & 3;
+ sa->sa_command_1.bf.feedback_mode = 0;
+ sa->sa_command_1.bf.copy_payload = 1;
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 1;
+
+ sa->sa_command_1.bf.seq_num_mask = 1;
+ sa->sa_command_1.bf.mutable_bit_proc = 0;
+
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key+authkeylen, enckeylen);
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_spi(ctx),
+ (void*)&(ctx->spi), 4);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_seq_num(ctx),
+ (void*)&(ctx->seq), 4);
+
+ /*
+ * Setup sa for inbound processing
+ */
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+ sa->sa_command_0.bf.load_iv = 1;
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 0;
+ sa->sa_command_1.bf.hash_crypto_offset = 6;
+
+
+ /*
+ * Setup sa for outbound processing
+ */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len*4);
+ sa = (struct dynamic_sa_ctl *)(ctx->sa_out);
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.load_iv = 3;
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 1;
+
+ ctx->bypass = bypass;
+ ctx->authenc = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = param->pad_block_size/4;
+ ctx->append_icv = 0;
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+
+err_nomem:
+ return -ENOMEM;
+badkey:
+ ESP_PRINTK(KERN_INFO KERN_ERR "%s: badkey\n",__FUNCTION__);
+ return -EINVAL;
+}
+
+/*
+ * Tunnel-mode ESP setkey helpers for AES-CBC.  Each wrapper only fixes
+ * the algorithm constants (hash algorithm, SA length/contents, digest
+ * size, bypass, hash block size) and forwards the key material to
+ * crypto4xx_setkey_esp_tunnel().
+ */
+int crypto4xx_setkey_tunnel_esp_cbc_aes_md5(struct crypto_aead *cipher,
+					    const u8 *key,
+					    unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_AES, SA_HASH_ALG_MD5,
+					   1, SA_ESP_MD5_SHA1_LEN,
+					   SA_ESP_MD5_SHA1_CONTENTS,
+					   16, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha1(struct crypto_aead *cipher,
+					     const u8 *key,
+					     unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_AES, SA_HASH_ALG_SHA1,
+					   1, SA_ESP_MD5_SHA1_LEN,
+					   SA_ESP_MD5_SHA1_CONTENTS,
+					   20, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha224(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_AES,
+					   SA_HASH_ALG_SHA224, 1,
+					   SA_ESP_SHA256_LEN,
+					   SA_ESP_SHA256_CONTENTS,
+					   28, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha256(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_AES,
+					   SA_HASH_ALG_SHA256, 1,
+					   SA_ESP_SHA256_LEN,
+					   SA_ESP_SHA256_CONTENTS,
+					   32, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha384(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_AES,
+					   SA_HASH_ALG_SHA384, 1,
+					   SA_ESP_SHA512_LEN,
+					   SA_ESP_SHA512_CONTENTS,
+					   48, 0, 128);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha512(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_AES,
+					   SA_HASH_ALG_SHA512, 1,
+					   SA_ESP_SHA512_LEN,
+					   SA_ESP_SHA512_CONTENTS,
+					   64, 0, 128);
+}
+/** DES and 3DES Related IPSEC Algorithms */
+/*
+ * Tunnel-mode ESP setkey helpers for single-DES CBC; constants-only
+ * wrappers around crypto4xx_setkey_esp_tunnel().
+ */
+int crypto4xx_setkey_tunnel_esp_cbc_des_md5(struct crypto_aead *cipher,
+					    const u8 *key,
+					    unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_DES, SA_HASH_ALG_MD5,
+					   1, SA_ESP_DES_MD5_SHA1_LEN,
+					   SA_ESP_DES_MD5_SHA1_CONTENTS,
+					   16, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha1(struct crypto_aead *cipher,
+					     const u8 *key,
+					     unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_DES, SA_HASH_ALG_SHA1,
+					   1, SA_ESP_DES_MD5_SHA1_LEN,
+					   SA_ESP_DES_MD5_SHA1_CONTENTS,
+					   20, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha224(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_DES,
+					   SA_HASH_ALG_SHA224, 1,
+					   SA_ESP_SHA256_LEN,
+					   SA_ESP_SHA256_CONTENTS,
+					   28, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha256(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_DES,
+					   SA_HASH_ALG_SHA256, 1,
+					   SA_ESP_SHA256_LEN,
+					   SA_ESP_SHA256_CONTENTS,
+					   32, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha384(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_DES,
+					   SA_HASH_ALG_SHA384, 1,
+					   SA_ESP_SHA512_LEN,
+					   SA_ESP_SHA512_CONTENTS,
+					   48, 0, 128);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha512(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_DES,
+					   SA_HASH_ALG_SHA512, 1,
+					   SA_ESP_SHA512_LEN,
+					   SA_ESP_SHA512_CONTENTS,
+					   64, 0, 128);
+}
+
+/*
+ * Tunnel-mode ESP setkey helpers for triple-DES CBC; constants-only
+ * wrappers around crypto4xx_setkey_esp_tunnel().
+ */
+int crypto4xx_setkey_tunnel_esp_cbc_3des_md5(struct crypto_aead *cipher,
+					     const u8 *key,
+					     unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_3DES, SA_HASH_ALG_MD5,
+					   1, SA_ESP_3DES_MD5_SHA1_LEN,
+					   SA_ESP_3DES_MD5_SHA1_CONTENTS,
+					   16, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha1(struct crypto_aead *cipher,
+					      const u8 *key,
+					      unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_3DES,
+					   SA_HASH_ALG_SHA1, 1,
+					   SA_ESP_3DES_MD5_SHA1_LEN,
+					   SA_ESP_3DES_MD5_SHA1_CONTENTS,
+					   20, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha224(struct crypto_aead *cipher,
+						const u8 *key,
+						unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_3DES,
+					   SA_HASH_ALG_SHA224, 1,
+					   SA_ESP_SHA256_LEN,
+					   SA_ESP_SHA256_CONTENTS,
+					   28, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha256(struct crypto_aead *cipher,
+						const u8 *key,
+						unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_3DES,
+					   SA_HASH_ALG_SHA256, 1,
+					   SA_ESP_SHA256_LEN,
+					   SA_ESP_SHA256_CONTENTS,
+					   32, 0, 64);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha384(struct crypto_aead *cipher,
+						const u8 *key,
+						unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_3DES,
+					   SA_HASH_ALG_SHA384, 1,
+					   SA_ESP_SHA512_LEN,
+					   SA_ESP_SHA512_CONTENTS,
+					   48, 0, 128);
+}
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha512(struct crypto_aead *cipher,
+						const u8 *key,
+						unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp_tunnel(cipher, key, keylen,
+					   SA_CIPHER_ALG_3DES,
+					   SA_HASH_ALG_SHA512, 1,
+					   SA_ESP_SHA512_LEN,
+					   SA_ESP_SHA512_CONTENTS,
+					   64, 0, 128);
+}
+
+/*
+ * Outbound (encrypt) path for tunnel-mode ESP.  Reached either from the
+ * plain aead encrypt hook (givreq == NULL) or from the givencrypt hook,
+ * in which case the aead_request embedded in givreq is used.  The SA
+ * pair was fully prepared at setkey time; only the per-request packet
+ * descriptor control word and direction are set here.
+ */
+int crypto4xx_encrypt_esp_tunnel(struct aead_givcrypt_request *givreq,
+ struct aead_request *req)
+{
+
+ struct crypto4xx_ctx *ctx;
+#if 0
+ struct scatterlist *sg;
+ struct iphdr *iph;
+ void * daddr;
+ struct dynamic_sa_ctl *sa;
+#endif
+ if (givreq)
+ req = &givreq->areq;
+
+ ctx = crypto_tfm_ctx(req->base.tfm);
+
+#if 0
+ /* Disabled experiment: read next_hdr out of the IP header in the
+ * destination SG list and fold it into pd_ctl. */
+ sg = req->dst;
+ daddr = kmap_atomic(sg_page(sg), KM_SOFTIRQ1);
+ iph = (struct iphdr *)(daddr + sg->offset - 20);
+ ctx->next_hdr = (u32)(iph->protocol);
+ kunmap_atomic(daddr, KM_SOFTIRQ1);
+
+
+ ctx->next_hdr = 4;
+ ctx->pd_ctl =( ctx->pad_ctl << 24) + 0x11 + (ctx->next_hdr << 8);
+#endif
+ ctx->hc_offset = 0;
+ /* pad_ctl rides in the top byte; 0x411 = engine control bits
+ * (exact bit meanings per SEC engine datasheet - not visible here) */
+ ctx->pd_ctl =( ctx->pad_ctl << 24) + 0x411;
+ ctx->direction = DIR_OUTBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+
+
+}
+
+/*
+ * Inbound (decrypt) path for tunnel-mode ESP.  The SA pair was built by
+ * crypto4xx_setkey_esp_tunnel(); this only programs the per-request
+ * descriptor control word, direction and hash/crypto offset before
+ * queueing the packet descriptor.
+ */
+int crypto4xx_decrypt_esp_tunnel(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ /* pad_ctl in top byte; 0x11 = engine control bits (per datasheet) */
+ ctx->pd_ctl =( ctx->pad_ctl << 24) + 0x11;
+ ctx->direction = DIR_INBOUND;
+ ctx->hc_offset = 6; /* matches hash_crypto_offset set in the inbound SA */
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+
+}
+/*
+ * Tunnel-mode ESP entry points registered with the crypto API; they
+ * adapt the aead/givcrypt request types onto the shared tunnel
+ * implementations above.
+ */
+int crypto4xx_encrypt_esp_cbc(struct aead_request *req)
+{
+ return crypto4xx_encrypt_esp_tunnel(NULL, req);
+}
+
+int crypto4xx_givencrypt_esp_cbc(struct aead_givcrypt_request *req)
+{
+ return crypto4xx_encrypt_esp_tunnel(req, NULL);
+}
+
+int crypto4xx_decrypt_esp_cbc(struct aead_request *req)
+{
+ return crypto4xx_decrypt_esp_tunnel(req);
+}
+
+/** Setkey Routine for IPSEC for Transport */
+/*
+ * Shared setkey for transport-mode ESP (all cipher/hash pairings).
+ *
+ * The authenc key blob starts with an rtattr-framed esp_authenc_param
+ * (spi, initial sequence number, pad block size, encap header length)
+ * followed by the HMAC key (authkeylen bytes) and the cipher key
+ * (enckeylen bytes).  Builds a matched SA pair: sa_in for inbound
+ * (decrypt) and sa_out for outbound (encrypt) processing.
+ *
+ * Returns 0 on success, -EINVAL for a malformed key blob, -ENOMEM when
+ * the SA or state record cannot be allocated.
+ */
+int crypto4xx_setkey_esp(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen, u32 cipher_alg,
+ u32 hashing, u32 c_mode, u32 sa_len,
+ u32 sa_contents, u32 ds,
+ u32 bypass, u32 hash_bs)
+
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+
+ /* Layout of the parameter header preceding the raw keys */
+ struct esp_authenc_param {
+ __be32 spi;
+ __be32 seq;
+ __be16 pad_block_size;
+ __be16 encap_uhl;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+
+ /* Validate the rtattr framing before touching the payload */
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+
+ param = RTA_DATA(rta);
+
+ ctx->spi = be32_to_cpu(param->spi);
+ ctx->seq = be32_to_cpu(param->seq);
+ ctx->pad_block_size = be16_to_cpu(param->pad_block_size);
+ ctx->encap_uhl = be16_to_cpu(param->encap_uhl);
+
+ ESP_PRINTK(KERN_INFO "%s: spi = 0x%08x, seq = %d, pad_size = %d, encap uhl = %d\n",__FUNCTION__,
+ ctx->spi, ctx->seq, ctx->pad_block_size, ctx->encap_uhl);
+
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ /* Skip past the rtattr header; the rest is auth key || enc key */
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ authkeylen = keylen - enckeylen;
+
+ ESP_PRINTK(KERN_INFO "%s: enckeylen = %d, authkeylen = %d\n",
+ __FUNCTION__, enckeylen, authkeylen);
+
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key, authkeylen, false);
+
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key+authkeylen, enckeylen, false);
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) {
+ crypto4xx_free_sa(ctx);
+ }
+
+ crypto4xx_alloc_sa(ctx, sa_len);
+ if (!ctx->sa_in_dma_addr || !ctx->sa_out_dma_addr)
+ goto err_nomem;
+
+ if (!ctx->state_record) {
+ crypto4xx_alloc_state_record(ctx);
+ if (!ctx->state_record_dma_addr)
+ goto err_nomem_sr;
+ }
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ sa->sa_command_0.w = 0;
+ sa->sa_command_0.bf.hash_alg = hashing;
+ sa->sa_command_0.bf.gather = 0;
+ sa->sa_command_0.bf.save_hash_state = 1;
+ sa->sa_command_0.bf.load_hash_state = 0;
+ sa->sa_command_0.bf.cipher_alg = SA_CIPHER_ALG_NULL;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_HASH;
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+ sa->sa_command_1.w = 0;
+ sa->sa_command_1.bf.hmac_muting = 0;
+ sa->sa_command_1.bf.sa_rev = 1;
+ sa->sa_contents = sa_contents;
+
+ ctx->direction = DIR_INBOUND;
+ /* Point the SA at the DMA-visible state record */
+ memcpy((ctx->sa_in) + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void*)&(ctx->state_record_dma_addr), 4);
+
+ /* Precompute the HMAC inner/outer digests into the SA; the SA is
+ * temporarily configured as a pure hash op for this step. */
+ crypto4xx_pre_compute_hmac(ctx, (void *)key, authkeylen,
+ hash_bs, hashing, ds);
+
+ /*
+ * Now, setup command for ESP
+ */
+ sa->sa_command_0.bf.load_hash_state = 0;
+ sa->sa_command_0.bf.save_hash_state = 0;
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+
+ sa->sa_command_0.bf.load_iv = 2;
+ sa->sa_command_0.bf.cipher_alg = cipher_alg;
+ sa->sa_command_0.bf.op_group = SA_OP_GROUP_PROTOCOL;
+ sa->sa_command_0.bf.opcode = SA_OPCODE_ESP;
+
+ sa->sa_command_1.bf.hmac_muting = 0;
+
+
+ ESP_PRINTK(KERN_INFO "%s: keylen = %d, enckeylen = %d\n",
+ __FUNCTION__, keylen, enckeylen);
+
+ /* AES key size is encoded in the SA; other ciphers use 0 */
+ if (cipher_alg == SA_CIPHER_ALG_AES) {
+ if ( enckeylen == 16)
+ {
+ ESP_PRINTK(KERN_INFO "%s: AES 128\n", __FUNCTION__);
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_128;
+ } else if ( enckeylen == 24){
+ ESP_PRINTK(KERN_INFO "%s: AES 192\n", __FUNCTION__);
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_192;
+ } else {
+ ESP_PRINTK(KERN_INFO "%s: AES 256\n", __FUNCTION__);
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_256;
+ }
+ } else {
+ sa->sa_command_1.bf.key_len = 0;
+ }
+
+ sa->sa_command_1.bf.crypto_mode31 = c_mode >> 2;
+ sa->sa_command_1.bf.crypto_mode9_8 = c_mode & 3;
+ sa->sa_command_1.bf.feedback_mode = 0;
+ sa->sa_command_1.bf.copy_payload = 1;
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 1;
+
+ sa->sa_command_1.bf.seq_num_mask = 1;
+ sa->sa_command_1.bf.mutable_bit_proc = 0;
+
+ sa->sa_command_0.bf.hdr_proc = 1;
+
+ /* Cipher key goes into the SA in little-endian word order */
+ crypto4xx_memcpy_le((void*)(ctx->sa_in) + get_dynamic_sa_offset_key_field(ctx),
+ key+authkeylen, enckeylen);
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_spi(ctx),
+ (void*)&(ctx->spi),
+ 4);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_seq_num(ctx),
+ (void*)&(ctx->seq), 4);
+
+
+ /* Inbound-specific overrides */
+ sa->sa_command_1.bf.copy_hdr = 0;
+ sa->sa_command_1.bf.hash_crypto_offset = 6;
+
+ sa->sa_command_0.bf.load_iv = 1;
+ sa->sa_command_0.bf.dir = DIR_INBOUND;
+
+ /* Clone the inbound SA and flip the outbound-specific fields */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ sa->sa_command_0.bf.dir = DIR_OUTBOUND;
+ sa->sa_command_0.bf.load_iv = 3;
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ sa->sa_command_1.bf.copy_hdr = 1;
+
+
+ ctx->bypass = bypass;
+ ctx->authenc = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = param->pad_block_size/4;
+ ctx->append_icv = 0;
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+
+err_nomem:
+ return -ENOMEM;
+badkey:
+ ESP_PRINTK(KERN_INFO "%s: badkey\n",__FUNCTION__);
+ return -EINVAL;
+}
+
+/** Encrypt/Decrypt Routines for IPSEC for Transport */
+/*
+ * Outbound transport-mode ESP.  Saves the original IP protocol from the
+ * source packet's IP header into ctx->next_hdr and rewrites the header's
+ * protocol field to 0x32 (ESP) in place, before queueing the packet
+ * descriptor.  Assumes the first src segment starts at the IP header -
+ * TODO confirm against callers.
+ */
+int crypto4xx_encrypt_esp(struct aead_givcrypt_request *givreq,
+ struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx;
+ struct scatterlist *sg;
+ struct iphdr *iph;
+ void * saddr;
+
+ if (givreq)
+ req = &givreq->areq;
+
+ ctx = crypto_tfm_ctx(req->base.tfm);
+
+ sg = req->src;
+ saddr = kmap_atomic(sg_page(sg), KM_SOFTIRQ1);
+ iph = (struct iphdr *)(saddr + sg->offset);
+ ctx->next_hdr = (u32)(iph->protocol);
+ iph->protocol = 0x32; /* IPPROTO_ESP */
+
+#if 1
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)saddr+ sg->offset, sg->length, false);
+
+ ESP_PRINTK(KERN_INFO "%s: next_hdr = %d\n",__FUNCTION__, ctx->next_hdr);
+#endif
+ kunmap_atomic(saddr, KM_SOFTIRQ1);
+
+ ctx->hc_offset = 0;
+ /* pad_ctl in top byte; 0x11 = engine control bits (per datasheet) */
+ ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+ ctx->direction = DIR_OUTBOUND;
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+
+}
+
+/*
+ * Inbound transport-mode ESP.  The #if 1 section only hex-dumps the
+ * first source segment for debugging; the real work is programming the
+ * descriptor control word and direction, then queueing the packet.
+ */
+int crypto4xx_decrypt_esp(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+#if 1
+ struct scatterlist *sg;
+ void * saddr;
+
+ sg = req->src;
+ saddr = kmap_atomic(sg_page(sg), KM_SOFTIRQ1);
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)saddr+sg->offset, sg->length, false);
+ kunmap_atomic(saddr, KM_SOFTIRQ1);
+#endif
+ ctx->hc_offset = 0;
+ /* pad_ctl in top byte; 0x11 = engine control bits (per datasheet) */
+ ctx->pd_ctl =( ctx->pad_ctl << 24) + 0x11;
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/**AES Transport Related Algorithms for IPSEC */
+/*
+ * Transport-mode ESP setkey helpers for AES-CBC; constants-only
+ * wrappers around crypto4xx_setkey_esp() (bypass = 5 for transport).
+ */
+int crypto4xx_setkey_transport_esp_cbc_aes_md5(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	/* Trace entry like every other transport setkey helper; this one
+	 * was the only wrapper in the family missing it. */
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_AES,
+				    SA_HASH_ALG_MD5, 1,
+				    SA_ESP_MD5_SHA1_LEN,
+				    SA_ESP_MD5_SHA1_CONTENTS, 16, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha1(struct crypto_aead *cipher,
+						const u8 *key,
+						unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_AES,
+				    SA_HASH_ALG_SHA1, 1,
+				    SA_ESP_MD5_SHA1_LEN,
+				    SA_ESP_MD5_SHA1_CONTENTS, 20, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha224(struct crypto_aead *cipher,
+						  const u8 *key,
+						  unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_AES,
+				    SA_HASH_ALG_SHA224, 1,
+				    SA_ESP_SHA256_LEN,
+				    SA_ESP_SHA256_CONTENTS, 28, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha256(struct crypto_aead *cipher,
+						  const u8 *key,
+						  unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_AES,
+				    SA_HASH_ALG_SHA256, 1,
+				    SA_ESP_SHA256_LEN,
+				    SA_ESP_SHA256_CONTENTS, 32, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha384(struct crypto_aead *cipher,
+						  const u8 *key,
+						  unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_AES,
+				    SA_HASH_ALG_SHA384, 1,
+				    SA_ESP_SHA512_LEN,
+				    SA_ESP_SHA512_CONTENTS, 48, 5, 128);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_aes_sha512(struct crypto_aead *cipher,
+						  const u8 *key,
+						  unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_AES,
+				    SA_HASH_ALG_SHA512, 1,
+				    SA_ESP_SHA512_LEN,
+				    SA_ESP_SHA512_CONTENTS, 64, 5, 128);
+}
+
+/**DES Transport Related Algorithms for IPSEC */
+/*
+ * Transport-mode ESP setkey helpers for single-DES CBC; constants-only
+ * wrappers around crypto4xx_setkey_esp() (bypass = 5 for transport).
+ */
+int crypto4xx_setkey_transport_esp_cbc_des_md5(struct crypto_aead *cipher,
+					       const u8 *key,
+					       unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_DES, SA_HASH_ALG_MD5, 1,
+				    SA_ESP_MD5_SHA1_LEN,
+				    SA_ESP_MD5_SHA1_CONTENTS, 16, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha1(struct crypto_aead *cipher,
+						const u8 *key,
+						unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_DES, SA_HASH_ALG_SHA1, 1,
+				    SA_ESP_MD5_SHA1_LEN,
+				    SA_ESP_MD5_SHA1_CONTENTS, 20, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha224(struct crypto_aead *cipher,
+						  const u8 *key,
+						  unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_DES, SA_HASH_ALG_SHA224, 1,
+				    SA_ESP_SHA256_LEN,
+				    SA_ESP_SHA256_CONTENTS, 28, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha256(struct crypto_aead *cipher,
+						  const u8 *key,
+						  unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_DES, SA_HASH_ALG_SHA256, 1,
+				    SA_ESP_SHA256_LEN,
+				    SA_ESP_SHA256_CONTENTS, 32, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha384(struct crypto_aead *cipher,
+						  const u8 *key,
+						  unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_DES, SA_HASH_ALG_SHA384, 1,
+				    SA_ESP_SHA512_LEN,
+				    SA_ESP_SHA512_CONTENTS, 48, 5, 128);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_des_sha512(struct crypto_aead *cipher,
+						  const u8 *key,
+						  unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_DES, SA_HASH_ALG_SHA512, 1,
+				    SA_ESP_SHA512_LEN,
+				    SA_ESP_SHA512_CONTENTS, 64, 5, 128);
+}
+
+/**3DES Transport Related Algorithms for IPSEC */
+/*
+ * Transport-mode ESP setkey helpers for triple-DES CBC; constants-only
+ * wrappers around crypto4xx_setkey_esp() (bypass = 5 for transport).
+ */
+int crypto4xx_setkey_transport_esp_cbc_3des_md5(struct crypto_aead *cipher,
+						const u8 *key,
+						unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_3DES, SA_HASH_ALG_MD5, 1,
+				    SA_ESP_MD5_SHA1_LEN,
+				    SA_ESP_MD5_SHA1_CONTENTS, 16, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha1(struct crypto_aead *cipher,
+						 const u8 *key,
+						 unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_3DES, SA_HASH_ALG_SHA1, 1,
+				    SA_ESP_MD5_SHA1_LEN,
+				    SA_ESP_MD5_SHA1_CONTENTS, 20, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha224(struct crypto_aead *cipher,
+						   const u8 *key,
+						   unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_3DES, SA_HASH_ALG_SHA224, 1,
+				    SA_ESP_SHA256_LEN,
+				    SA_ESP_SHA256_CONTENTS, 28, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha256(struct crypto_aead *cipher,
+						   const u8 *key,
+						   unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_3DES, SA_HASH_ALG_SHA256, 1,
+				    SA_ESP_SHA256_LEN,
+				    SA_ESP_SHA256_CONTENTS, 32, 5, 64);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha384(struct crypto_aead *cipher,
+						   const u8 *key,
+						   unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_3DES, SA_HASH_ALG_SHA384, 1,
+				    SA_ESP_SHA512_LEN,
+				    SA_ESP_SHA512_CONTENTS, 48, 5, 128);
+}
+
+int crypto4xx_setkey_transport_esp_cbc_3des_sha512(struct crypto_aead *cipher,
+						   const u8 *key,
+						   unsigned int keylen)
+{
+	ESP_PRINTK(KERN_INFO "%s: called\n", __FUNCTION__);
+	return crypto4xx_setkey_esp(cipher, key, keylen,
+				    SA_CIPHER_ALG_3DES, SA_HASH_ALG_SHA512, 1,
+				    SA_ESP_SHA512_LEN,
+				    SA_ESP_SHA512_CONTENTS, 64, 5, 128);
+}
+
+/*
+ * Transport-mode ESP entry points registered with the crypto API; they
+ * adapt the aead/givcrypt request types onto the shared
+ * crypto4xx_encrypt_esp()/crypto4xx_decrypt_esp() implementations.
+ */
+int crypto4xx_encrypt_transport_esp_cbc(struct aead_request *req)
+{
+ return crypto4xx_encrypt_esp(NULL, req);
+}
+
+int crypto4xx_givencrypt_transport_esp_cbc(struct aead_givcrypt_request *req)
+{
+ return crypto4xx_encrypt_esp(req, NULL);
+}
+
+int crypto4xx_decrypt_transport_esp_cbc(struct aead_request *req)
+{
+ return crypto4xx_decrypt_esp(req);
+}
+/** Setkey and Encrypt/Decrypt Functions for Macsec */
+/*
+ * Setkey for MACsec AES-GCM offload.  The key blob starts with an
+ * offload_param header (spi, sequence number, 64-bit IV) followed by
+ * the AES key; the final 16 bytes of the caller-supplied keylen are
+ * stripped before use (keylen -= 16 below - presumably the GHASH
+ * nonce/salt portion; TODO confirm against the caller's key layout).
+ * Builds sa_in for inbound and clones/adjusts it into sa_out.
+ *
+ * Returns 0 on success, -ENOMEM on any allocation failure.
+ * NOTE(review): a failure from crypto4xx_compute_gcm_hash_key_sw() also
+ * lands on err_nomem_sr and is reported as -ENOMEM regardless of rc.
+ */
+int crypto4xx_setkey_macsec_gcm(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct dynamic_sa_ctl *sa;
+ int rc;
+ struct offload_param {
+ __be32 spi;
+ __be32 seq;
+ __be32 iv_h;
+ __be32 iv_l;
+ } *param;
+
+
+ ctx->dev = my_alg->dev;
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) {
+ crypto4xx_free_sa(ctx);
+ }
+
+ crypto4xx_alloc_sa(ctx, SA_MACSEC_GCM_LEN );
+ if (!ctx->sa_in_dma_addr || !ctx->sa_out_dma_addr)
+ goto err_nomem;
+
+ ctx->direction = DIR_INBOUND;
+
+ if (!ctx->state_record) {
+ crypto4xx_alloc_state_record(ctx);
+ if (!ctx->state_record_dma_addr)
+ goto err_nomem_sr;
+ }
+
+ /* Peel the parameter header off the front of the key blob */
+ param = (struct offload_param *) key;
+ key += sizeof(struct offload_param);
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents = SA_MACSEC_GCM_CONTENTS;
+
+ keylen -= 16;
+ /* Derive the GHASH key H = AES_K(0) in software into the SA */
+ rc = crypto4xx_compute_gcm_hash_key_sw(ctx, key, keylen);
+ if (rc)
+ goto err_nomem_sr;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void*)&(ctx->state_record_dma_addr), 4);
+
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ set_dynamic_sa_command_0(sa,
+ SA_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_INPUT,
+ SA_HEADER_PROC,
+ SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES,
+ SA_PAD_TYPE_ZERO,
+ SA_OP_GROUP_PROTOCOL,
+ SA_OPCODE_EXT_PROT_MACSEC,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+
+ sa->sa_command_1.bf.byte_offset = 1;
+ sa->sa_command_1.bf.key_len = keylen >> 3; /* key length in 8-byte units */
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+
+ /*
+ * Setup sa for inbound processing
+ */
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq_mask[0] = 0x00000001;
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq_mask[1] = 0x00000000;
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq_mask[2] = 0x00000000;
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq_mask[3] = 0x00000000;
+
+ /* NOTE(review): param fields are declared __be32 but are converted
+ * with a mix of cpu_to_le32/cpu_to_be32 here - verify the intended
+ * wire/SA byte order for spi/seq/iv. */
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->spi = cpu_to_le32(param->spi);
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->seq = cpu_to_be32(param->seq);
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->iv[0] = cpu_to_le32(param->iv_h);
+ ((struct dynamic_sa_macsec_gcm *) ctx->sa_in)->iv[1] = cpu_to_le32(param->iv_l);
+
+
+ sa->sa_command_1.bf.copy_payload = 1;
+ sa->sa_command_1.bf.copy_pad = 1;
+ sa->sa_command_1.bf.copy_hdr = 1;
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+
+ /*
+ * Setup sa for outbound processing
+ */
+ ctx->direction = DIR_OUTBOUND;
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ set_dynamic_sa_command_0(sa,
+ SA_SAVE_HASH,
+ SA_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_SA,
+ SA_HEADER_PROC,
+ SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES,
+ 0,
+ SA_OP_GROUP_PROTOCOL,
+ SA_OPCODE_EXT_PROT_MACSEC,
+ DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ ctx->authenc = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->bypass = 0;
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+
+err_nomem:
+ return -ENOMEM;
+
+}
+
+/*
+ * MACsec outbound (encrypt) request path.  The SA pair was prepared by
+ * crypto4xx_setkey_macsec_gcm(); only per-request state is set here.
+ */
+int crypto4xx_encrypt_macsec(struct aead_request *req)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	/* pad_ctl in top byte; 0x11 = engine control bits (per datasheet) */
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+	ctx->direction = DIR_OUTBOUND;
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/*
+ * MACsec inbound (decrypt) request path; also bumps the per-device
+ * decrypt statistics counter.
+ */
+int crypto4xx_decrypt_macsec(struct aead_request *req)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto4xx_device *dev = ctx->dev;
+
+	dev->macsec_decrypt_num++;
+	ctx->pd_ctl = (ctx->pad_ctl << 24) + 0x11;
+	ctx->direction = DIR_INBOUND;
+	ctx->bypass = 0;
+
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/** DTLS/SSL/TLS Related Algorithms */
+/*
+ * Shared setkey for the DTLS record-protocol offload.  The key blob is
+ * rtattr-framed: an offload_param header (spi/epoch, 64-bit sequence
+ * number split high/low) followed by the HMAC key and the cipher key.
+ * Builds sa_in for inbound record decryption and clones it into sa_out
+ * for outbound, with per-direction command-word overrides.
+ *
+ * Returns 0 on success, -EINVAL for a malformed blob, -ENOMEM on
+ * allocation failure.
+ * NOTE(review): the op_grp parameter is used for the inbound SA only;
+ * the outbound SA hard-codes SA_OP_GROUP_EXTEND_PROTOCOL - confirm
+ * whether that asymmetry is intentional.
+ */
+static int crypto4xx_setkey_dtls(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ u32 hash_alg,
+ u32 cipher_alg,
+ u32 opcode,
+ u32 op_grp)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+ struct offload_param {
+ __be32 spi;
+ __be32 seq_h;
+ __be32 seq_l;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+ int rc;
+
+ /* Validate the rtattr framing before touching the payload */
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ /* Skip the header; the rest is auth key || enc key */
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+ authkeylen = keylen - enckeylen;
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+ rc = crypto4xx_alloc_sa(ctx, SA_DTLS_LEN)
+ if (rc)
+ goto err_nomem;
+
+ if (!ctx->state_record) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err_nomem_sr;
+ }
+
+ ctx->direction = DIR_INBOUND;
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ sa->sa_contents = SA_DTLS_CONTENTS;
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&(ctx->state_record_dma_addr), 4);
+
+ set_dynamic_sa_command_0(sa,
+ SA_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_INPUT,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_DTLS,
+ op_grp,
+ opcode,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_OFF,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.mutable_bit_proc = 0;
+
+ /* Precompute HMAC inner/outer digests, then install the cipher key
+ * in little-endian word order */
+ crypto4xx_pre_compute_hmac(ctx, (void *)key, authkeylen, bs, hash_alg,
+ authkeylen);
+ crypto4xx_memcpy_le((u32 *)((void *) sa +
+ get_dynamic_sa_offset_key_field(ctx)),
+ key + authkeylen, enckeylen);
+
+ if (cipher_alg == SA_CIPHER_ALG_AES)
+ sa->sa_command_1.bf.key_len = enckeylen >> 3; /* 8-byte units */
+
+ ((struct dynamic_sa_dtls *) sa)->spi.w = cpu_to_le32(param->spi);
+ ((struct dynamic_sa_dtls *) sa)->seq[1] = cpu_to_be32(param->seq_h);
+ ((struct dynamic_sa_dtls *) sa)->seq[0] = cpu_to_be32(param->seq_l);
+
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = 4;
+ ctx->append_icv = 0;
+ ctx->pd_ctl =( ctx->pad_ctl << 24) + 0x11;
+
+ /* Clone the inbound SA and flip the outbound-specific fields */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+ set_dynamic_sa_command_0(sa, SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA, /*SA_LOAD_IV_FROM_STATE */SA_GEN_IV,
+ SA_HEADER_PROC, hash_alg, cipher_alg,
+ SA_PAD_TYPE_DTLS, SA_OP_GROUP_EXTEND_PROTOCOL,
+ opcode, DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CBC, SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB, SA_EXTENDED_SN_ON,
+ SA_SEQ_MASK_OFF, SA_MC_ENABLE,
+ SA_COPY_PAD, SA_COPY_PAYLOAD, SA_COPY_HDR);
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+err_nomem:
+ return -ENOMEM;
+
+badkey:
+ /* NOTE(review): printk without a KERN_ log level - consider
+ * KERN_ERR like other error paths in this file. */
+ printk("%s: badkey\n",__FUNCTION__);
+ return -EINVAL;
+}
+
+/*
+ * DTLS setkey wrappers: each fixes one (hash, cipher) pairing and
+ * forwards to crypto4xx_setkey_dtls() with opcode SA_OPCODE_DTLS and
+ * the extended-protocol op group.
+ */
+int crypto4xx_setkey_dtls_aes_sha1(struct crypto_aead *cipher,
+				   const u8 *key,
+				   unsigned int keylen)
+{
+	return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+				     SA_CIPHER_ALG_AES, SA_OPCODE_DTLS,
+				     SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_dtls_des_sha1(struct crypto_aead *cipher,
+				   const u8 *key,
+				   unsigned int keylen)
+{
+	return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+				     SA_CIPHER_ALG_DES, SA_OPCODE_DTLS,
+				     SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_dtls_des3_sha1(struct crypto_aead *cipher,
+				    const u8 *key,
+				    unsigned int keylen)
+{
+	return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+				     SA_CIPHER_ALG_3DES, SA_OPCODE_DTLS,
+				     SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_dtls_null_md5(struct crypto_aead *cipher,
+				   const u8 *key,
+				   unsigned int keylen)
+{
+	return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_MD5,
+				     SA_CIPHER_ALG_NULL, SA_OPCODE_DTLS,
+				     SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+int crypto4xx_setkey_dtls_null_sha1(struct crypto_aead *cipher,
+				    const u8 *key,
+				    unsigned int keylen)
+{
+	return crypto4xx_setkey_dtls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+				     SA_CIPHER_ALG_NULL, SA_OPCODE_DTLS,
+				     SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+/** DTLS/SSL/TLS Related Encrypt/Decrypt Algorithms */
+
+/*
+ * DTLS record encrypt path: the SA pair carries everything; only the
+ * request direction is selected before queueing the descriptor.
+ */
+int crypto4xx_encrypt_dtls(struct aead_request *req)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->direction = DIR_OUTBOUND;
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/*
+ * DTLS record decrypt path; mirror image of the encrypt path above.
+ */
+int crypto4xx_decrypt_dtls(struct aead_request *req)
+{
+	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->direction = DIR_INBOUND;
+	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+				  req->cryptlen, NULL, 0, NULL, 0);
+}
+/* NOTE(review): file-scope flag with no visible user in this file -
+ * confirm callers before removing. */
+int tls;
+/** Setkey Functions for SSL/TLS */
+
+/*
+ * Common setkey for the engine's SSL/TLS/DTLS combined-mode (AEAD)
+ * transforms.  The key blob is an rtattr-framed authenc layout:
+ * {spi, seq_h, seq_l, enckeylen} header followed by auth key || enc key.
+ * Builds an inbound SA, then clones and repatches it for outbound.
+ * Returns 0, -ENOMEM on allocation failure, -EINVAL on a bad key blob.
+ */
+static int crypto4xx_setkey_ssl_tls(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ u32 hash_alg,
+ u32 cipher_alg,
+ u32 opcode,
+ u32 op_grp)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+ /* On-wire layout of the parameter head that precedes the keys. */
+ struct offload_param {
+ __be32 spi;
+ __be32 seq_h;
+ __be32 seq_l;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+ int rc;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+
+ param = RTA_DATA(rta);
+
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ /* NOTE(review): no check that keylen >= enckeylen; a malformed blob
+  * would make authkeylen wrap around — TODO confirm callers validate. */
+ authkeylen = keylen - enckeylen;
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr)
+ crypto4xx_free_sa(ctx);
+
+
+ rc = crypto4xx_alloc_sa(ctx, SA_DTLS_LEN );
+ if (rc)
+ goto err_nomem;
+
+ if (!ctx->state_record) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err_nomem_sr;
+ }
+
+ /* Build the inbound (decrypt/verify) SA first. */
+ ctx->direction = DIR_INBOUND;
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ /*
+ * Setup hash algorithm and hash mode
+ */
+ sa->sa_contents = SA_DTLS_CONTENTS;
+
+ ((struct dynamic_sa_dtls *)sa)->state_ptr= (u32)ctx->state_record_dma_addr;
+
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ set_dynamic_sa_command_0(sa,
+ SA_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_STATE,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_TLS,
+ op_grp,
+ opcode,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ /* SSL3 uses its keyed-MAC construction (digests loaded directly or
+  * precomputed); TLS uses true HMAC with hmac_muting enabled. */
+ if (opcode == SA_OPCODE_SSL) {
+ if (hash_alg == SA_HASH_ALG_SHA1) {
+ memcpy(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx),
+ (void*)key, authkeylen);
+ memcpy(ctx->sa_in +
+ get_dynamic_sa_offset_outer_digest(ctx),
+ (void*)key, authkeylen);
+ } else if (hash_alg == SA_HASH_ALG_MD5) {
+ crypto4xx_pre_compute_ssl_mac(ctx, (void*)key,
+ authkeylen, bs,hash_alg);
+ }
+ } else {
+ crypto4xx_pre_compute_hmac(ctx, (void *)key, authkeylen, bs,
+ hash_alg, authkeylen);
+ sa->sa_command_1.bf.hmac_muting = 1;
+ }
+
+ /* NOTE(review): spi is stored little-endian while the sequence words
+  * are big-endian — matches the dynamic_sa_dtls layout as written;
+  * confirm against the engine's SA format documentation. */
+ ((struct dynamic_sa_dtls *) sa)->spi.w = cpu_to_le32(param->spi);
+ ((struct dynamic_sa_dtls *) sa)->seq[1] = cpu_to_be32(param->seq_h);
+ ((struct dynamic_sa_dtls *) sa)->seq[0] = cpu_to_be32(param->seq_l);
+
+ crypto4xx_memcpy_le((u32 *) ((void *) sa +
+ get_dynamic_sa_offset_key_field(ctx)),
+ key + authkeylen, enckeylen);
+
+ /* key_len field is in 8-byte units for AES. */
+ if (cipher_alg == SA_CIPHER_ALG_AES)
+ sa->sa_command_1.bf.key_len = enckeylen >> 3;
+
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = 4;
+ ctx->append_icv = 0;
+ ctx->pd_ctl =( ctx->pad_ctl << 24) + 0x11;
+ ctx->direction = DIR_OUTBOUND;
+
+ /* Clone the inbound SA and repatch the command words for outbound. */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_STATE,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_TLS,
+ op_grp, opcode,
+ DIR_OUTBOUND);
+ set_dynamic_sa_command_1(sa,
+ CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_ON,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+err_nomem:
+ return -ENOMEM;
+badkey:
+ printk("%s: badkey\n",__FUNCTION__);
+ return -EINVAL;
+}
+extern int ssl_arc4;
+
+/*
+ * Setkey for SSL/TLS transforms using stateful ARC4.  Parses the same
+ * rtattr-framed authenc blob as crypto4xx_setkey_ssl_tls(), builds the
+ * inbound/outbound SAs, and seeds the hardware ARC4 state record with
+ * the standard RC4 key-scheduling algorithm (KSA).
+ * Returns 0, -ENOMEM on allocation failure, -EINVAL on a bad key blob.
+ */
+int crypto4xx_setkey_ssl_tls_arc4(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen,
+ u32 hash_alg,
+ u32 cipher_alg,
+ u32 opcode,
+ u32 op_grp)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ int bs = crypto_tfm_alg_blocksize(tfm);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+ int i, j = 0, k = 0;
+ u8 a;
+ /* Scratch copy of the cipher key for the KSA below.  Must be
+  * writable — it was previously declared const and then written
+  * through a cast, which is undefined behavior. */
+ u8 arc4_key[256];
+
+ struct offload_param {
+ __be32 spi;
+ __be32 seq_h;
+ __be32 seq_l;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+ unsigned int authkeylen;
+ int rc;
+
+ if (!RTA_OK(rta, keylen))
+ goto badkey;
+ if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+ goto badkey;
+
+ if (RTA_PAYLOAD(rta) < sizeof(*param))
+ goto badkey;
+
+ ctx->dev = my_alg->dev;
+ param = RTA_DATA(rta);
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ keylen -= RTA_ALIGN(rta->rta_len);
+
+ /* Reject blobs whose claimed cipher-key length is inconsistent or
+  * would overflow the local KSA scratch buffer. */
+ if (keylen < enckeylen || enckeylen > sizeof(arc4_key))
+ goto badkey;
+
+ authkeylen = keylen - enckeylen;
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) {
+ crypto4xx_free_sa(ctx);
+ }
+
+ rc = crypto4xx_alloc_sa(ctx, SA_SSL_ARC4_LEN);
+ if (rc)
+ return rc;
+
+ if (!ctx->state_record) {
+ rc = crypto4xx_alloc_state_record(ctx);
+ if (rc)
+ goto err_nomem_sr;
+ }
+
+ if (ctx->arc4_state_record == NULL) {
+ rc = crypto4xx_alloc_arc4_state_record(ctx);
+ if (rc)
+ goto err_nomem_arc4;
+ }
+
+ /* Build the inbound (decrypt/verify) SA first. */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+
+ ctx->direction = DIR_INBOUND;
+ ctx->init_arc4 = 1;
+ sa->sa_contents = SA_SSL_ARC4_CONTENTS;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void *)&(ctx->state_record_dma_addr), 4);
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH, SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_SA,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_TLS,
+ op_grp,
+ opcode,
+ DIR_INBOUND);
+
+ set_dynamic_sa_command_1(sa,
+ CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_ON,
+ SA_SEQ_MASK_OFF,
+ SA_MC_ENABLE,
+ SA_NOT_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ /* ARC4 is a stream cipher: run it statefully and persist the state
+  * record between packets. */
+ sa->sa_command_1.bf.arc4_stateful = 1;
+ sa->sa_command_1.bf.save_arc4_state = 1;
+
+ /* SSL3 keyed-MAC vs. TLS HMAC, as in crypto4xx_setkey_ssl_tls(). */
+ if (opcode == SA_OPCODE_SSL) {
+ if (hash_alg == SA_HASH_ALG_SHA1) {
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_inner_digest(ctx),
+ (void*) key, authkeylen);
+ crypto4xx_memcpy_le(ctx->sa_in +
+ get_dynamic_sa_offset_outer_digest(ctx),
+ (void*) key, authkeylen);
+ } else if (hash_alg == SA_HASH_ALG_MD5) {
+ crypto4xx_pre_compute_ssl_mac(ctx, (void*)key,
+ authkeylen, bs, hash_alg);
+ }
+ sa->sa_command_1.bf.hmac_muting = 0;
+ } else {
+ crypto4xx_pre_compute_hmac(ctx, (void*) key, authkeylen, bs,
+ hash_alg, authkeylen);
+ sa->sa_command_1.bf.hmac_muting = 1;
+ }
+
+ ((struct dynamic_sa_ssl_tls_arc4 *) sa)->arc4_state_ptr =
+ (u32)ctx->arc4_state_record_dma_addr;
+
+ /* Setting Key */
+ crypto4xx_memcpy_le((u32 *) ((void *) sa +
+ get_dynamic_sa_offset_key_field(ctx)),
+ key + authkeylen, enckeylen);
+
+ memcpy(arc4_key, key + authkeylen, enckeylen);
+
+ ((struct dynamic_sa_ssl_tls_arc4 *) sa)->spi.w =
+ cpu_to_le32(param->spi);
+ ((struct dynamic_sa_ssl_tls_arc4 *) sa)->seq[1] =
+ cpu_to_be32(param->seq_h);
+ ((struct dynamic_sa_ssl_tls_arc4 *) sa)->seq[0] =
+ cpu_to_be32(param->seq_l);
+ /* For stateful mode we need to initialize the ARC4 state record */
+ ((struct dynamic_sa_ssl_tls_arc4 *) ctx->sa_in)->ij.i = 1;
+ ((struct dynamic_sa_ssl_tls_arc4 *) ctx->sa_in)->ij.j = 0;
+
+ /* Standard RC4 KSA: identity permutation, then key-driven swaps. */
+ for (i = 0; i < 256; i++)
+ ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[i] = i;
+
+ for (i = 0; i < 256; i++) {
+ a = ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[i];
+ j = (j + arc4_key[k] + a) & 0xff;
+ ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[i] =
+ ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[j];
+ ((struct arc4_sr *) ctx->arc4_state_record)->arc4_state[j] = a;
+ if (++k >= enckeylen)
+ k = 0;
+ }
+
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ ctx->pad_ctl = 4;
+ ctx->append_icv = 0;
+ ctx->direction = DIR_OUTBOUND;
+ ctx->pd_ctl =( ctx->pad_ctl << 24) + 0x11;
+
+ /* Setup SA command for outbound process */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_SA,
+ SA_HEADER_PROC,
+ hash_alg,
+ cipher_alg,
+ SA_PAD_TYPE_TLS,
+ op_grp,
+ opcode,
+ DIR_OUTBOUND);
+
+ set_dynamic_sa_command_1(sa,
+ CRYPTO_MODE_CBC,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_ON,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ sa->sa_command_1.bf.arc4_stateful = 1;
+ sa->sa_command_1.bf.save_arc4_state = 1;
+
+ return 0;
+
+err_nomem_arc4:
+ crypto4xx_free_state_record(ctx);
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+ return -ENOMEM;
+badkey:
+ printk(KERN_ERR "%s: badkey\n", __func__);
+ return -EINVAL;
+}
+
+/* SSL3: AES-CBC cipher + SHA-1 MAC, via the generic SSL/TLS SA builder. */
+int crypto4xx_setkey_ssl_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_AES, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* SSL3: DES-CBC cipher + SHA-1 MAC. */
+int crypto4xx_setkey_ssl_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_DES, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+/* SSL3: 3DES-CBC cipher + SHA-1 MAC. */
+int crypto4xx_setkey_ssl_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_3DES, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* SSL3: stateful ARC4 cipher + SHA-1 MAC, via the ARC4 SA builder. */
+int crypto4xx_setkey_ssl_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_ARC4,
+ SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* SSL3: stateful ARC4 cipher + MD5 MAC. */
+int crypto4xx_setkey_ssl_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* SSL3: NULL cipher (authentication only) + MD5 MAC. */
+int crypto4xx_setkey_ssl_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* SSL3: NULL cipher (authentication only) + SHA-1 MAC. */
+int crypto4xx_setkey_ssl_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_SSL,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/** Encrypt/Decrypt Functions for SSL-AES */
+/* Outbound SSL record processing; the AES-CBC IV comes from req->iv. */
+int crypto4xx_encrypt_ssl_aes(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0,
+ req->iv, AES_BLOCK_SIZE);
+}
+
+/* Inbound SSL record processing (decrypt + MAC verify) for AES-CBC. */
+int crypto4xx_decrypt_ssl_aes(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0,
+ req->iv, AES_BLOCK_SIZE);
+}
+
+/** Encrypt/Decrypt Functions for SSL-DES */
+/* Outbound SSL record processing; DES/3DES-CBC IV comes from req->iv. */
+int crypto4xx_encrypt_ssl_des(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0,
+ req->iv, DES_BLOCK_SIZE);
+}
+
+/* Inbound SSL record processing (decrypt + MAC verify) for DES/3DES-CBC. */
+int crypto4xx_decrypt_ssl_des(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0,
+ req->iv, DES_BLOCK_SIZE);
+}
+
+/** Encrypt/Decrypt Functions for SSL-NULL */
+/* Outbound SSL record with NULL cipher: MAC only, so no IV is passed. */
+int crypto4xx_encrypt_ssl_null(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_OUTBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/* Inbound SSL record with NULL cipher: MAC verification only. */
+int crypto4xx_decrypt_ssl_null(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+ ctx->direction = DIR_INBOUND;
+
+ return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+}
+
+/* NOTE(review): this global is toggled around each build_pd call below
+ * and in the decrypt path; flipping a shared flag per-request is not
+ * SMP-safe — TODO confirm single-threaded use or move the flag into
+ * the per-tfm context. */
+int ssl_arc4 = 0;
+/** Encrypt/Decrypt Functions for SSL-ARC4 */
+int crypto4xx_encrypt_ssl_arc4(struct aead_request *req)
+{
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ int ret = 0;
+
+ ctx->direction = DIR_OUTBOUND;
+ ssl_arc4 = 1;
+ ret = crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+ ssl_arc4 = 0;
+ return ret;
+}
+
+/* Inbound SSL-ARC4 record processing.  Sets the module-wide ssl_arc4
+ * flag around the build_pd call (see SMP-safety note at the flag's
+ * definition). */
+int crypto4xx_decrypt_ssl_arc4(struct aead_request *req)
+{
+ int ret;
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ ssl_arc4 = 1;
+ ctx->direction = DIR_INBOUND;
+
+ ret = crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
+ req->cryptlen, NULL, 0, NULL, 0);
+ ssl_arc4 = 0;
+ return ret;
+}
+
+/** TLS and TLS V1 Setkey Functions */
+/* TLS 1.0: AES-CBC cipher + HMAC-SHA1. */
+int crypto4xx_setkey_tls_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_AES, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.0: DES-CBC cipher + HMAC-SHA1. */
+int crypto4xx_setkey_tls_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_DES, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.0: 3DES-CBC cipher + HMAC-SHA1. */
+int crypto4xx_setkey_tls_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_3DES, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.0: stateful ARC4 cipher + HMAC-SHA1. */
+int crypto4xx_setkey_tls_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.0: stateful ARC4 cipher + HMAC-MD5. */
+int crypto4xx_setkey_tls_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.0: NULL cipher (authentication only) + HMAC-MD5. */
+int crypto4xx_setkey_tls_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+/* TLS 1.0: NULL cipher (authentication only) + HMAC-SHA1. */
+int crypto4xx_setkey_tls_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_TLS,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.1: AES-CBC cipher + HMAC-SHA1.
+ * NOTE(review): this is the only TLS1.1 variant that sets the global
+ * 'tls' flag — confirm whether the other variants should too, or the
+ * flag is dead. */
+int crypto4xx_setkey_tls1_1_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ tls = 1;
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_AES, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.1: DES-CBC cipher + HMAC-SHA1. */
+int crypto4xx_setkey_tls1_1_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_DES, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.1: 3DES-CBC cipher + HMAC-SHA1. */
+int crypto4xx_setkey_tls1_1_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_3DES, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.1: stateful ARC4 cipher + HMAC-SHA1. */
+int crypto4xx_setkey_tls1_1_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.1: stateful ARC4 cipher + HMAC-MD5. */
+int crypto4xx_setkey_tls1_1_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls_arc4(cipher, key, keylen,
+ SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_ARC4, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/* TLS 1.1: NULL cipher (authentication only) + HMAC-MD5. */
+int crypto4xx_setkey_tls1_1_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_MD5,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+/* TLS 1.1: NULL cipher (authentication only) + HMAC-SHA1. */
+int crypto4xx_setkey_tls1_1_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ return crypto4xx_setkey_ssl_tls(cipher, key, keylen, SA_HASH_ALG_SHA1,
+ SA_CIPHER_ALG_NULL, SA_OPCODE_TLS1_1,
+ SA_OP_GROUP_EXTEND_PROTOCOL);
+}
+
+/*
+ * Setkey for RFC 4106 AES-GCM ESP (transport mode) protocol offload.
+ * The key blob carries {spi, seq, pad_block_size, encap_uhl, enckeylen}
+ * followed by the AES key (the final 4 bytes of the enc key are the
+ * GCM nonce salt, skipped via "key += 4" below).
+ * Builds inbound and outbound SAs with GHASH + AES-CTR.
+ * Returns 0, -ENOMEM on allocation failure, -EINVAL on a bad key blob.
+ */
+int crypto4xx_setkey_transport_esp_rfc4106_gcm(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_tfm *tfm = crypto_aead_tfm(cipher);
+ struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct crypto4xx_alg *my_alg = crypto_alg_to_crypto4xx_alg(alg);
+ struct rtattr *rta = (void *) key;
+ struct dynamic_sa_ctl *sa;
+ u32 rc;
+
+ /* NOTE(review): statement before declarations below — C90-style
+  * declaration-after-statement warning; consider reordering. */
+ ctx->dev = my_alg->dev;
+ struct esp_authenc_param {
+ __be32 spi;
+ __be32 seq;
+ __be16 pad_block_size;
+ __be16 encap_uhl;
+ struct crypto_authenc_key_param authenc_param;
+ } *param;
+
+ unsigned int enckeylen;
+
+ ESP_PRINTK("%s: keylen = %d\n",__FUNCTION__, keylen);
+ ESP_PHD(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)key, keylen, false);
+
+ /* NOTE(review): unlike the SSL/TLS setkeys, only RTA_OK is checked —
+  * rta_type and RTA_PAYLOAD size are not validated here. */
+ if (!RTA_OK(rta, keylen)) {
+ printk("%s: badkey 1\n",__FUNCTION__);
+ goto badkey;
+ }
+
+ param = RTA_DATA(rta);
+ /* NOTE(review): enckeylen is extracted but never used afterwards;
+  * keylen is recomputed from sizeof(*param) instead. */
+ enckeylen = be32_to_cpu(param->authenc_param.enckeylen);
+
+ key += RTA_ALIGN(rta->rta_len);
+ key += 4;
+ //keylen -= RTA_ALIGN(rta->rta_len);
+ keylen = keylen - sizeof(*param) - 4;
+
+ ctx->spi = be32_to_cpu(param->spi);
+ ctx->seq = be32_to_cpu(param->seq);
+ ctx->pad_block_size = be16_to_cpu(param->pad_block_size);
+ ctx->encap_uhl = be16_to_cpu(param->encap_uhl);
+
+ ESP_PRINTK("%s: spi = 0x%08x, seq = %d, pad_size = %d, encap uhl = %d\n",__FUNCTION__,
+ ctx->spi, ctx->seq, ctx->pad_block_size, ctx->encap_uhl);
+
+ /* Create SA */
+ if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) {
+ crypto4xx_free_sa(ctx);
+ }
+
+ rc = crypto4xx_alloc_sa(ctx, SA_ESP_GCM_LEN);
+ if (rc)
+ return rc;
+
+ /* NOTE(review): the return value of alloc_state_record is ignored;
+  * only the DMA address is checked — inconsistent with other paths. */
+ if (!ctx->state_record) {
+ crypto4xx_alloc_state_record(ctx);
+ if (!ctx->state_record_dma_addr)
+ goto err_nomem_sr;
+ }
+
+ /* Inbound (decrypt/verify) SA. */
+ sa = (struct dynamic_sa_ctl *) ctx->sa_in;
+ ctx->direction = DIR_INBOUND;
+ sa->sa_contents = SA_ESP_GCM_CONTENTS;
+ ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx);
+
+ /* GHASH subkey H = AES_K(0^128), computed in software. */
+ rc = crypto4xx_compute_gcm_hash_key_sw(ctx, key, keylen);
+ if (rc)
+ goto err_nomem_sr;
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx),
+ (void*)&(ctx->state_record_dma_addr), 4);
+
+ crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx),
+ key, keylen);
+
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_spi(ctx),
+ (void*)&(ctx->spi), 4);
+ memcpy(ctx->sa_in + get_dynamic_sa_offset_seq_num(ctx),
+ (void*)&(ctx->seq), 4);
+
+
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ SA_LOAD_IV_FROM_INPUT,
+ SA_HEADER_PROC,
+ SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES,
+ 0,
+ SA_OP_GROUP_PROTOCOL,
+ SA_OPCODE_ESP,
+ DIR_INBOUND);
+
+ /* digest_len units are 4-byte words: 3 => 12-byte (96-bit) ICV. */
+ sa->sa_command_0.bf.digest_len = 3;
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_NOT_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_128;
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+ /*
+ * Setup sa for outbound processing
+ */
+ memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4);
+ sa = (struct dynamic_sa_ctl *) ctx->sa_out;
+
+ set_dynamic_sa_command_0(sa,
+ SA_NOT_SAVE_HASH,
+ SA_NOT_SAVE_IV,
+ SA_LOAD_HASH_FROM_SA,
+ //SA_LOAD_IV_FROM_SA,
+ SA_LOAD_IV_GEN_IV,
+ SA_HEADER_PROC,
+ SA_HASH_ALG_GHASH,
+ SA_CIPHER_ALG_AES,
+ 0,
+ SA_OP_GROUP_PROTOCOL,
+ SA_OPCODE_ESP,
+ DIR_OUTBOUND);
+
+ sa->sa_command_0.bf.digest_len = 3;
+ set_dynamic_sa_command_1(sa, CRYPTO_MODE_AES_CTR,
+ SA_HASH_MODE_HASH,
+ CRYPTO_FEEDBACK_MODE_NO_FB,
+ SA_EXTENDED_SN_OFF,
+ SA_SEQ_MASK_ON,
+ SA_MC_ENABLE,
+ SA_COPY_PAD,
+ SA_COPY_PAYLOAD,
+ SA_COPY_HDR);
+
+ sa->sa_command_1.bf.key_len = SA_AES_KEY_LEN_128;
+ sa->sa_command_1.bf.hash_crypto_offset = 0;
+
+ ctx->bypass = 5;
+ ctx->authenc = 0;
+ ctx->hash_final = 1;
+ ctx->is_hash = 0;
+ /* NOTE(review): stray debug printk left in the setkey path. */
+ printk("param->pad_block_size = %d\n", param->pad_block_size);
+ //ctx->pad_ctl = param->pad_block_size / 4;
+ ctx->pad_ctl = 0x08;
+ ctx->append_icv = 0;
+
+ return 0;
+
+err_nomem_sr:
+ crypto4xx_free_sa(ctx);
+
+ return -ENOMEM;
+badkey:
+
+ ESP_PRINTK("%s: badkey\n",__FUNCTION__);
+ return -EINVAL;
+}
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 46e899ac924..8c00e30e9e6 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -28,17 +28,40 @@
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
+#include <crypto/internal/hash.h>
+#include <crypto/algapi.h>
+#include <asm/ppc4xx_ocm.h>
+#include <crypto/internal/hash.h>
+#include <crypto/algapi.h>
#include <crypto/aes.h>
+#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/ctr.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#define PPC4XX_SEC_VERSION_STR "0.5"
-
+/*
+ * Debug helper: hex-dump @size bytes at @dptr to the kernel log,
+ * 16 bytes per line with an extra gap every 8 bytes.
+ * Fix: the loop index is now unsigned to match @size (was a signed
+ * int compared against u_int, a -Wsign-compare hazard).
+ */
+void my_dump_Data(const u_char* dptr, u_int size)
+{
+ u_int i;
+ for (i = 0; i < size; i++) {
+ printk("0x%02x, ", dptr[i]);
+ if ((i+1) % 8 == 0)
+ printk(" ");
+ if ((i+1) % 16 == 0)
+ printk("\n");
+ }
+ printk("\n");
+}
+/* procfs directory for this driver; created later during probe. */
+static struct proc_dir_entry *proc_crypto4xx = NULL;
+/* NOTE(review): 'entry' is a mutable non-static global with a very
+ * generic name — consider making it static and renaming. */
+struct proc_dir_entry *entry;
/**
* PPC4xx Crypto Engine Initialization Routine
*/
@@ -72,16 +95,24 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
- writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
get_random_bytes(&rand_num, sizeof(rand_num));
writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
get_random_bytes(&rand_num, sizeof(rand_num));
writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
+ writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
+
ring_size.w = 0;
ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
ring_size.bf.ring_size = PPC4XX_NUM_PD;
writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
ring_ctrl.w = 0;
+
+ if (dev->core_dev->revb_ver == 1) {
+#ifdef CONFIG_SEC_HW_POLL
+ ring_ctrl.bf.ring_retry_divisor = CONFIG_SEC_HW_POLL_RETRY_FREQ;
+ ring_ctrl.bf.ring_poll_divisor = CONFIG_SEC_HW_RING_POLL_FREQ;
+#endif
+ }
writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
@@ -95,11 +126,19 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
+
+#ifdef CONFIG_SEC_PD_OCM
+ writel((dev->pdr_ocm_addr >> 32), dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
+ writel((dev->pdr_ocm_addr >> 32), dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
+#else
writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
+#endif
writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
+
writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
+
writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
/* un reset pe,sg and pdr */
@@ -112,13 +151,108 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
/*clear all pending interrupt*/
writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
- writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
- writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
- writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+
+ if (dev->core_dev->revb_ver == 1) {
+ writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
+ dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
+ /* For RevB, 460EX and 460ExR Rev B */
+ writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
+ dev->ce_base + CRYPTO4XX_INT_EN);
+ } else
+ writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
+}
+
+/*
+ * Debug helper: read and print the main crypto engine registers
+ * (control/status, ring configuration, SA/state pointers, SPI,
+ * sequence numbers, IV words and hash byte count).  Read-only with
+ * respect to hardware state apart from any read-to-clear semantics
+ * the registers themselves may have.
+ */
+void crypto4xx_dump_regs(struct crypto4xx_core_device* core_dev)
+{
+ u32 reg_dump;
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_CTRL_STAT);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_PD_CTRL_STAT = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SOURCE);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_Source_REG = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_DEST);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_Des_REG= 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SA);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SA_REG= 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_PE_DMA_CFG = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_RING_SIZE);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_RING_SIZE = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_RING_CTRL);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_RING_CTRL = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_IO_THRESHOLD = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_PE_DMA_STAT);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_PE_DMA_STAT= 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_PDR_BASE);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_PDR_BASE = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_PTR);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_PTR = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SA_CMD_0);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SA_CMD_0 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SA_CMD_1);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SA_CMD_1 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SPI);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SPI = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SEQ_NUM0);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SEQ_NUM_0 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_SEQ_NUM1);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_SEQ_NUM_1 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_IV + 0);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_IV + 0 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_IV + 4);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_IV + 4 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base +CRYPTO4XX_STATE_IV + 8);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_IV + 8 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_IV + 12);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_IV + 12 = 0x%08x\n", reg_dump);
+
+ reg_dump = readl(core_dev->dev->ce_base + CRYPTO4XX_STATE_HASH_BYTE_CNT_0);
+ printk("crypto4xx_dump_regs: CRYPTO4XX_STATE_HASH_BYTE_CNT_0 = 0x%08x\n", reg_dump);
+
+}
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
+#ifdef CONFIG_SEC_SA_OCM
+ ctx->sa_out = ocm_alloc(&ctx->sa_out_ocm_addr, size * 4, 4,
+ OCM_NON_CACHED, "sec_sa_out");
+ ctx->sa_out_dma_addr = (u32)ctx->sa_out_ocm_addr;
+ printk("OCM Allocation done for SA Out %llx\n", (unsigned long long)ctx->sa_out_ocm_addr);
+ if (ctx->sa_out == NULL)
+ return -ENOMEM;
+ ctx->sa_in = ocm_alloc(&ctx->sa_in_ocm_addr, size * 4, 4,
+ OCM_NON_CACHED, "sec_sa_in");
+ if (ctx->sa_in == NULL) {
+ ocm_free(ctx->sa_out);
+ return -ENOMEM;
+ }
+ ctx->sa_in_dma_addr = (u32)ctx->sa_in_ocm_addr;
+ //printk("OCM Allocation done for SA In %llx\n", (unsigned long long)ctx->sa_in_ocm_addr);
+ memset(ctx->sa_in, 0, size * 4);
+ memset(ctx->sa_out, 0, size * 4);
+ ctx->sa_len = size;
+ return 0;
+#endif
ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
&ctx->sa_in_dma_addr, GFP_ATOMIC);
if (ctx->sa_in == NULL)
@@ -142,6 +276,14 @@ int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
+#ifdef CONFIG_SEC_SA_OCM
+ ocm_free(ctx->sa_out);
+ ocm_free(ctx->sa_in);
+ ctx->sa_in_dma_addr = 0;
+ ctx->sa_out_dma_addr = 0;
+ ctx->sa_len = 0;
+ return;
+#endif
if (ctx->sa_in != NULL)
dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
ctx->sa_in, ctx->sa_in_dma_addr);
@@ -151,16 +293,28 @@ void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
ctx->sa_in_dma_addr = 0;
ctx->sa_out_dma_addr = 0;
+ ctx->sa_in = NULL;
+ ctx->sa_out = NULL;
ctx->sa_len = 0;
}
u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
+#ifdef CONFIG_SEC_SA_OCM
+ ctx->state_record = ocm_alloc(&ctx->state_record_ocm_addr,
+ sizeof(struct sa_state_record), 4,
+ OCM_NON_CACHED, "sec_state_record");
+ if (ctx->state_record == NULL)
+ return -ENOMEM;
+ ctx->state_record_dma_addr = (u32)ctx->state_record_ocm_addr;
+#else
ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
sizeof(struct sa_state_record),
&ctx->state_record_dma_addr, GFP_ATOMIC);
- if (!ctx->state_record_dma_addr)
+
+ if (!ctx->state_record_dma_addr || !ctx->state_record)
return -ENOMEM;
+#endif
memset(ctx->state_record, 0, sizeof(struct sa_state_record));
return 0;
@@ -168,14 +322,82 @@ u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
+#ifdef CONFIG_SEC_SA_OCM
+ if (ctx->state_record != NULL)
+ ocm_free(ctx->state_record);
+#else
if (ctx->state_record != NULL)
dma_free_coherent(ctx->dev->core_dev->device,
sizeof(struct sa_state_record),
ctx->state_record,
ctx->state_record_dma_addr);
+#endif
+ ctx->state_record = NULL;
ctx->state_record_dma_addr = 0;
}
+/*
+ * Allocate and zero the per-context ARC4 state record, either from
+ * on-chip memory (CONFIG_SEC_SA_OCM) or from coherent DMA memory.
+ * Returns 0 or -ENOMEM.
+ */
+u32 crypto4xx_alloc_arc4_state_record(struct crypto4xx_ctx *ctx)
+{
+#ifdef CONFIG_SEC_SA_OCM
+ ctx->arc4_state_record = ocm_alloc(&ctx->arc4_state_ocm_addr,
+ sizeof(struct arc4_sr), 4,
+ OCM_NON_CACHED, "sec_state_arc4_record");
+ if (ctx->arc4_state_record == NULL)
+ return -ENOMEM;
+ ctx->arc4_state_record_dma_addr = (u32)ctx->arc4_state_ocm_addr;
+#else
+ ctx->arc4_state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
+ sizeof(struct arc4_sr),
+ /* &dma_addr */ &ctx->arc4_state_record_dma_addr,
+ GFP_ATOMIC);
+
+ /* NOTE(review): only the DMA address is checked here, not the
+  * returned virtual pointer — confirm both are always set together. */
+ if (!ctx->arc4_state_record_dma_addr)
+ return -ENOMEM;
+#endif
+ memset(ctx->arc4_state_record, 0, sizeof(struct arc4_sr));
+
+ return 0;
+}
+
+/*
+ * Release the ARC4 state record allocated by
+ * crypto4xx_alloc_arc4_state_record() and clear the context fields so
+ * a later free or re-allocation is safe.
+ */
+void crypto4xx_free_arc4_state_record(struct crypto4xx_ctx *ctx)
+{
+
+ if (ctx->arc4_state_record != NULL) {
+#ifdef CONFIG_SEC_SA_OCM
+ ocm_free(ctx->arc4_state_record);
+
+#else
+ dma_free_coherent(ctx->dev->core_dev->device,
+ sizeof(struct arc4_sr),
+ ctx->arc4_state_record,
+ ctx->arc4_state_record_dma_addr);
+#endif
+ }
+ ctx->arc4_state_record = NULL;
+ ctx->arc4_state_record_dma_addr = 0;
+}
+
+/* Last observed packet size, exported for the proc read below. */
+int datalen_check;
+/*
+ * Legacy /proc read handler (old read_proc signature): reports the ring
+ * control register, the packet count for the last datalen_check size,
+ * and the interrupt count.  Single-shot read (*eof = 1).
+ */
+static int crypto4xx_device_read_procmem(char *buffer, char **start, off_t offset,
+ int count, int *eof, void *data)
+{
+ struct crypto4xx_core_device *core_dev = (struct crypto4xx_core_device *)data;
+ int len = 0;
+ u32 ring_ctrl_val;
+
+ ring_ctrl_val = readl(core_dev->dev->ce_base + CRYPTO4XX_RING_CTRL);
+
+ len += sprintf(buffer + len, "ring_ctrl_val = 0x%08x\n", ring_ctrl_val);
+ len += sprintf(buffer + len,
+ "Crypto4xx Controller on AMCC PPC 460EX Canyonlands Board\n");
+ len += sprintf(buffer + len,
+ "%u packets received for packetsize = %d\n", core_dev->dev->pkt_cnt,
+ datalen_check);
+ len += sprintf(buffer + len,
+ "%lld interrupts received\n", core_dev->irq_cnt);
+ *eof = 1;
+ return len;
+}
/**
* alloc memory for the gather ring
* no need to alloc buf for the ring
@@ -185,20 +407,37 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
int i;
struct pd_uinfo *pd_uinfo;
+
+#ifdef CONFIG_SEC_PD_OCM
+ int pd_size;
+ pd_size = sizeof(struct ce_pd) * PPC4XX_NUM_PD;
+ dev->pdr = ocm_alloc(&dev->pdr_ocm_addr, pd_size, 4,
+ OCM_NON_CACHED, "sec_pd");
+ dev->pdr_pa = (u32)dev->pdr_ocm_addr;
+ printk(KERN_INFO "Security OCM Allocation done for packet Descriptor: %llx,\n"
+ "Virtual OCM Address: %p, OCM Allocation size: %d\n",
+ (unsigned long long)dev->pdr_ocm_addr, dev->pdr, pd_size);
+ if (dev->pdr == NULL) {
+ printk("PD Allocation failed on OCM\n");
+ return -ENOMEM;
+ }
+#else
dev->pdr = dma_alloc_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
&dev->pdr_pa, GFP_ATOMIC);
if (!dev->pdr)
return -ENOMEM;
-
+#endif
dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
GFP_KERNEL);
if (!dev->pdr_uinfo) {
+#ifndef CONFIG_SEC_PD_OCM
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr,
dev->pdr_pa);
return -ENOMEM;
+#endif
}
memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
@@ -233,10 +472,14 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
+#ifndef CONFIG_SEC_PD_OCM
if (dev->pdr != NULL)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr, dev->pdr_pa);
+#else
+ ocm_free(dev->pdr);
+#endif
if (dev->shadow_sa_pool)
dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
@@ -245,6 +488,7 @@ static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
+ dev->pkt_cnt = 0;
kfree(dev->pdr_uinfo);
}
@@ -526,7 +770,7 @@ static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
(*idx)++;
return 0;
- }
+ }
}
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
@@ -589,9 +833,25 @@ static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
struct sa_state_record *state_record =
(struct sa_state_record *) pd_uinfo->sr_va;
- if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
- memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
- SA_HASH_ALG_SHA1_DIGEST_SIZE);
+ switch (sa->sa_command_0.bf.hash_alg) {
+ case SA_HASH_ALG_KASUMI_f9:
+ crypto4xx_memcpy_le((void *)pd_uinfo->dest_va,
+ (u8 *)state_record->save_digest, 8);
+ break;
+ case SA_HASH_ALG_AES_XCBC_MAC_128:
+ crypto4xx_memcpy_le((void *)pd_uinfo->dest_va,
+ (u8 *) state_record->save_digest, 16);
+ break;
+ case SA_HASH_ALG_MD5:
+ crypto4xx_memcpy_le((void *)pd_uinfo->dest_va,
+ (u8 *) state_record->save_digest,
+ SA_HASH_ALG_MD5_DIGEST_SIZE);
+ break;
+ default:
+ memcpy((void *)pd_uinfo->dest_va,
+ state_record->save_digest,
+ crypto4xx_sa_hash_tbl[1][sa->sa_command_0.bf.hash_alg]);
+ break;
}
return 0;
@@ -616,6 +876,57 @@ static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
}
}
+/**
+ * crypto4xx_append_icv_to_end - copy the engine-computed ICV/digest into
+ * the tail of a destination scatterlist.
+ * @dev:    owning crypto4xx device (supplies the DMA device)
+ * @dst:    destination scatterlist array
+ * @sr:     state record holding the digest computed by the engine
+ * @offset: byte offset of the ICV within the overall dst buffer
+ * @len:    number of ICV bytes to copy
+ *
+ * Walks @dst until @offset bytes are consumed, then copies (byte-swapped
+ * via crypto4xx_memcpy_le) sr->save_digest into the remaining @len bytes,
+ * possibly spanning several scatterlist entries.
+ *
+ * FIX(review): the previous code advanced the cursor with "sg = &sg[i]"
+ * after "i++", which steps i entries forward from the *current* element
+ * rather than indexing from the array base, skipping entries once i > 1;
+ * it also never reset @offset after a partial copy.  Both are corrected
+ * by indexing from @dst and zeroing @offset once the first chunk lands.
+ * NOTE(review): dma_map_page() is used here only to obtain a flushable
+ * kernel view (phys_to_virt) and is never unmapped, and each chunk copies
+ * from the start of save_digest -- preserved as-is, but both are worth
+ * confirming against the DMA API and multi-entry-ICV expectations.
+ */
+void crypto4xx_append_icv_to_end(struct crypto4xx_device *dev,
+ struct scatterlist *dst,
+ struct sa_state_record *sr,
+ u32 offset,
+ u32 len)
+{
+ struct scatterlist *sg;
+ int i = 0;
+ u32 cp_len;
+ dma_addr_t addr;
+
+ sg = &dst[i];
+ while (len) {
+ /* Skip whole entries that lie entirely before the ICV offset. */
+ while (sg->length < offset) {
+ offset -= sg->length;
+ i++;
+ sg = &dst[i];
+ }
+ /* at here, icv could be in this sg,
+ * or icv could be in the next sg
+ */
+ if (sg->length > offset) {
+ /* icv should be in middle of this sg */
+ addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+ sg->offset,
+ sg->length, DMA_TO_DEVICE);
+ cp_len = (sg->length-offset >= len) ? len :
+ sg->length-offset;
+ len -= cp_len;
+ crypto4xx_memcpy_le((u32 *)(phys_to_virt(addr)
+ + offset),
+ (u8 *)sr->save_digest, cp_len);
+ } else {
+ /* offset == sg->length: start from begin of next sg */
+ i++;
+ sg = &dst[i];
+ offset = 0;
+ addr = dma_map_page(dev->core_dev->device, sg_page(sg),
+ sg->offset,
+ sg->length, DMA_FROM_DEVICE);
+ cp_len = (sg->length >= len) ? len : sg->length;
+ len -= cp_len;
+ crypto4xx_memcpy_le((u32 *) (phys_to_virt(addr)
+ + offset),
+ (u8 *) sr->save_digest, cp_len);
+ }
+ /* Move to the next entry; any remaining ICV bytes start at
+ * its beginning, so the consumed offset no longer applies. */
+ i++;
+ sg = &dst[i];
+ offset = 0;
+ }
+}
+
static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
struct pd_uinfo *pd_uinfo,
struct ce_pd *pd)
@@ -637,6 +948,11 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
dst->offset, dst->length, DMA_FROM_DEVICE);
}
crypto4xx_ret_sg_desc(dev, pd_uinfo);
+
+ if (pd->pd_ctl.bf.status & 0xff) {
+ printk("ablkcipher return err status = 0x%08x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ }
if (ablk_req->base.complete != NULL)
ablk_req->base.complete(&ablk_req->base, 0);
@@ -644,7 +960,8 @@ static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
}
static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
- struct pd_uinfo *pd_uinfo)
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
{
struct crypto4xx_ctx *ctx;
struct ahash_request *ahash_req;
@@ -656,24 +973,101 @@ static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
crypto_tfm_ctx(ahash_req->base.tfm));
crypto4xx_ret_sg_desc(dev, pd_uinfo);
/* call user provided callback function x */
+ if (pd->pd_ctl.bf.status & 0xff) {
+ printk("ahash return err status = 0x%08x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ }
if (ahash_req->base.complete != NULL)
ahash_req->base.complete(&ahash_req->base, 0);
return 0;
}
-static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
+/* Completion handler for AEAD requests popped off the packet descriptor
+ * ring.  Copies scattered output back (or re-syncs the destination page),
+ * optionally appends the ICV, releases the gather/scatter descriptors,
+ * decodes any hardware error bits, and invokes the caller's callback.
+ * NOTE(review): hardware errors are only printk'd -- the request is still
+ * completed with status 0, so callers never see the failure.  Confirm
+ * whether -EBADMSG should be propagated for authentication failures.
+ */
+static u32 crypto4xx_aead_done(struct crypto4xx_device *dev,
+ struct pd_uinfo *pd_uinfo,
+ struct ce_pd *pd)
+{
+ struct aead_request *aead_req;
+ struct crypto4xx_ctx *ctx;
+ struct scatterlist *dst;
+ dma_addr_t addr;
+ struct crypto_aead *aead;
+
+ aead_req = container_of(pd_uinfo->async_req,
+ struct aead_request, base);
+ aead = crypto_aead_reqtfm(aead_req);
+ ctx = crypto_tfm_ctx(aead_req->base.tfm);
+
+ if (pd_uinfo->using_sd) {
+ /* Output landed in scatter descriptors: gather it back
+ * into the caller's destination scatterlist. */
+ crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
+ pd->pd_ctl_len.bf.pkt_len,
+ aead_req->dst);
+ } else {
+ /* Engine wrote directly to dest_va; map the page so the
+ * CPU view is coherent with what the device wrote.
+ * NOTE(review): mapping is never unmapped here. */
+ dst = pd_uinfo->dest_va;
+ addr = dma_map_page(dev->core_dev->device, sg_page(dst),
+ dst->offset,
+ dst->length, DMA_FROM_DEVICE);
+ }
+
+ if (ctx->append_icv != 0) {
+ /* Tack the computed ICV onto the end of the ciphertext. */
+ dst = pd_uinfo->dest_va;
+ crypto4xx_append_icv_to_end(dev, dst,
+ (struct sa_state_record *)
+ pd_uinfo->sr_va,
+ aead_req->cryptlen,
+ crypto_aead_authsize(aead));
+ }
+ /* Return the gather/scatter ring slots before completing. */
+ crypto4xx_ret_sg_desc(dev, pd_uinfo);
+ /* call user provided callback function x */
+
+ if (pd->pd_ctl.bf.status & 0xff) {
+ /* Decode the per-bit error status reported by the engine. */
+ if (pd->pd_ctl.bf.status & 1)
+ printk("authentication error\n");
+ if (pd->pd_ctl.bf.status & 2)
+ printk("pad fail error\n");
+ if (pd->pd_ctl.bf.status & 4)
+ printk("seqnum fail\n");
+ if (pd->pd_ctl.bf.status & 8)
+ printk("error _notify\n");
+ printk("aead return err status = 0x%08x\n",
+ pd->pd_ctl.bf.status & 0xff);
+ printk("pd pad_ctl = 0x%08x\n", pd->pd_ctl.bf.pd_pad_ctl);
+ }
+
+#if 0
+ /* Dead debug code: hex-dump of the completed destination buffer. */
+ void * saddr;
+ dst = pd_uinfo->dest_va;
+ printk("dumping aead_done length = %d\n", dst->length);
+ saddr = kmap_atomic(sg_page(dst), KM_SOFTIRQ1);
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
+ 16, 1,
+ (void*)saddr+dst->offset, dst->length, false);
+ kunmap_atomic(saddr, KM_SOFTIRQ1);
+#endif
+ if (aead_req->base.complete != NULL)
+ aead_req->base.complete(&aead_req->base, 0);
+ return 0;
+}
+
+/* Dispatch completion of packet descriptor @idx to the handler matching
+ * the request's algorithm type (AEAD / ablkcipher / ahash).  Exported
+ * (no longer static) so the tasklet/poll path can call it.
+ * NOTE(review): an unrecognized alg type silently returns 0. */
+u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
 {
 	struct ce_pd *pd;
 	struct pd_uinfo *pd_uinfo;
 	pd = dev->pdr + sizeof(struct ce_pd)*idx;
 	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
+
 	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
+ CRYPTO_ALG_TYPE_AEAD)
+ return crypto4xx_aead_done(dev, pd_uinfo, pd);
+ else if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
 			CRYPTO_ALG_TYPE_ABLKCIPHER)
 		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
- else
- return crypto4xx_ahash_done(dev, pd_uinfo);
+ else if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
+ CRYPTO_ALG_TYPE_AHASH)
+ return crypto4xx_ahash_done(dev, pd_uinfo, pd);
+
+ return 0;
 }
/**
@@ -769,18 +1163,23 @@ static u32 get_next_sd(u32 current)
else
return 0;
}
-
+extern int ssl_arc4;
+extern int tls;
+int var = 0;
u32 crypto4xx_build_pd(struct crypto_async_request *req,
struct crypto4xx_ctx *ctx,
struct scatterlist *src,
struct scatterlist *dst,
unsigned int datalen,
+ struct scatterlist *assoc,
+ u32 aad_len,
void *iv, u32 iv_len)
{
struct crypto4xx_device *dev = ctx->dev;
dma_addr_t addr, pd_dma, sd_dma, gd_dma;
struct dynamic_sa_ctl *sa;
struct scatterlist *sg;
+ struct scatterlist *aad;
struct ce_gd *gd;
struct ce_pd *pd;
u32 num_gd, num_sd;
@@ -790,13 +1189,19 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
unsigned long flags;
struct pd_uinfo *pd_uinfo = NULL;
unsigned int nbytes = datalen, idx;
- unsigned int ivlen = 0;
u32 gd_idx = 0;
+ unsigned int aadlen = 0;
+ datalen_check = datalen;
/* figure how many gd is needed */
- num_gd = get_sg_count(src, datalen);
- if (num_gd == 1)
- num_gd = 0;
+ if (aad_len) {
+ num_gd = get_sg_count(assoc, aad_len) +
+ get_sg_count(src, datalen);
+ } else {
+ num_gd = get_sg_count(src, datalen);
+ if (num_gd == 1)
+ num_gd = 0;
+ }
/* figure how many sd is needed */
if (sg_is_last(dst) || ctx->is_hash) {
@@ -852,8 +1257,8 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
pd_uinfo->num_gd = num_gd;
pd_uinfo->num_sd = num_sd;
+ writel(0, ctx->dev->ce_base + CRYPTO4XX_SA_UADDR);
if (iv_len || ctx->is_hash) {
- ivlen = iv_len;
pd->sa = pd_uinfo->sa_pa;
sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
if (ctx->direction == DIR_INBOUND)
@@ -864,17 +1269,48 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
memcpy((void *) sa + ctx->offset_to_sr_ptr,
&pd_uinfo->sr_pa, 4);
- if (iv_len)
- crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
+ if (iv_len) {
+ if (ctx->ctr_aes) {
+ /* First the nonce */
+ memcpy(pd_uinfo->sr_va, ctx->state_record,
+ CTR_RFC3686_NONCE_SIZE);
+ /* Copy the IV that is passed through
+ * each operation
+ */
+ crypto4xx_memcpy_le(pd_uinfo->sr_va +
+ CTR_RFC3686_NONCE_SIZE, iv, iv_len);
+ } else
+ crypto4xx_memcpy_le(pd_uinfo->sr_va,
+ iv, iv_len);
+ }
+ if (ctx->is_gcm || ctx->ctr_aes) {
+ u32 seq = 1;
+ /*For GCM and CTR(AES) algs adding the counter value*/
+ crypto4xx_memcpy_le(pd_uinfo->sr_va + 12,
+ (void *)&seq, 4);
+ }
} else {
if (ctx->direction == DIR_INBOUND) {
+#ifdef CONFIG_SEC_SA_OCM
+ writel((ctx->sa_in_ocm_addr >> 32),
+ ctx->dev->ce_base + CRYPTO4XX_SA_UADDR);
+#endif
pd->sa = ctx->sa_in_dma_addr;
sa = (struct dynamic_sa_ctl *) ctx->sa_in;
- } else {
+ } else {
+#ifdef CONFIG_SEC_SA_OCM
+ writel((ctx->sa_out_ocm_addr >> 32),
+ ctx->dev->ce_base + CRYPTO4XX_SA_UADDR);
+#endif
pd->sa = ctx->sa_out_dma_addr;
sa = (struct dynamic_sa_ctl *) ctx->sa_out;
}
}
+
+ //u32 sa_upper = readl(ctx->dev->ce_base + CRYPTO4XX_SA_UADDR);
+ //printk("Dumping the Upper SA address = 0x%x\n", sa_upper);
+ //printk("Dumping the lower SA address = 0x%x\n", pd->sa);
+
pd->sa_len = ctx->sa_len;
if (num_gd) {
/* get first gd we are going to use */
@@ -886,6 +1322,35 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
/* enable gather */
sa->sa_command_0.bf.gather = 1;
idx = 0;
+ if (aad_len) {
+ aadlen = aad_len;
+ aad = assoc;
+ /* walk the sg, and setup gather array for aad*/
+ while (aadlen) {
+ sg = &aad[idx];
+ addr = dma_map_page(dev->core_dev->device,
+ sg_page(sg), sg->offset,
+ sg->length, DMA_TO_DEVICE);
+
+ gd->ptr = addr;
+ gd->ctl_len.len = sg->length;
+ gd->ctl_len.done = 0;
+ gd->ctl_len.ready = 1;
+
+ if (sg->length >= aadlen)
+ break;
+
+ aadlen -= sg->length;
+
+ gd_idx = get_next_gd(gd_idx);
+ gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+ idx++;
+ }
+ /* prepare gd for src */
+ gd_idx = get_next_gd(gd_idx);
+ gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
+ }
+ idx = 0;
src = &src[0];
/* walk the sg, and setup gather array */
while (nbytes) {
@@ -970,13 +1435,24 @@ u32 crypto4xx_build_pd(struct crypto_async_request *req,
}
}
- sa->sa_command_1.bf.hash_crypto_offset = 0;
+ sa->sa_command_1.bf.hash_crypto_offset = (aad_len >> 2);
pd->pd_ctl.w = ctx->pd_ctl;
- pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
+ pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) |
+ (datalen + aad_len);
+ if (ctx->next_hdr)
+ pd->pd_ctl.bf.next_hdr = ctx->next_hdr;
pd_uinfo->state = PD_ENTRY_INUSE;
wmb();
/* write any value to push engine to read a pd */
- writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+ if (dev->core_dev->revb_ver == 1) {
+#ifndef CONFIG_SEC_HW_POLL
+ writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+#endif
+ } else
+ writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
+
+
+ dev->pkt_cnt++;
return -EINPROGRESS;
}
@@ -995,6 +1471,8 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
ctx->sa_in_dma_addr = 0;
ctx->sa_out_dma_addr = 0;
ctx->sa_len = 0;
+ ctx->is_gcm = 0;
+ ctx->append_icv = 0;
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
default:
@@ -1004,6 +1482,9 @@ static int crypto4xx_alg_init(struct crypto_tfm *tfm)
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct crypto4xx_ctx));
break;
+ case CRYPTO_ALG_TYPE_AEAD:
+ tfm->crt_aead.reqsize = sizeof(struct crypto4xx_ctx);
+ break;
}
return 0;
@@ -1015,6 +1496,7 @@ static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
crypto4xx_free_sa(ctx);
crypto4xx_free_state_record(ctx);
+ crypto4xx_free_arc4_state_record(ctx);
}
int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
@@ -1098,6 +1580,14 @@ static void crypto4xx_bh_tasklet_cb(unsigned long data)
}
}
}
+#define SRAM_OCM_ADDR_ERR 0x0B4
+#define SRAM_OCM_STATUS0 0x0B5
+#define SRAM_OCM_STATUS1 0X0B6
+
+#define PLBA0_ESRL 0x0082
+#define PLBA0_ESRH 0x0083
+#define PLBA0_EARL 0x0084
+#define PLBA0_EARH 0x0085
/**
* Top Half of isr.
@@ -1106,33 +1596,140 @@ static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
 	struct device *dev = (struct device *)data;
 	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
+ //u32 int_status;
 
 	if (core_dev->dev->ce_base == 0)
 		return 0;
- writel(PPC4XX_INTERRUPT_CLR,
- core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ //int_status = readl(core_dev->dev->ce_base + CRYPTO4XX_INT_UNMASK_STAT);
+ //printk("Interrupt status = 0x%08x\n", int_status);
+
+ /* Ack the engine interrupt; RevB silicon (460EX/460EXr Rev B) uses a
+ * different clear mask than earlier revisions. */
+ if (core_dev->revb_ver == 1) {
+ writel(PPC4XX_INTERRUPT_CLR_REVB,
+ core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ } else {
+ writel(PPC4XX_INTERRUPT_CLR,
+ core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
+ }
+
+ /* Diagnostic counter surfaced via the procfs dump above. */
+ core_dev->irq_cnt++;
+ /* Defer descriptor processing to the bottom-half tasklet. */
 	tasklet_schedule(&core_dev->tasklet);
 
 	return IRQ_HANDLED;
 }
/**
* Supported Crypto Algorithms
*/
struct crypto4xx_alg_common crypto4xx_alg[] = {
+ /* Crypto DES ECB, CBC, modes */
+#if 1
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "ppc4xx-cbc-des",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_3des_cbc,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(des)",
+ .cra_driver_name = "ppc4xx-ecb-des",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .setkey = crypto4xx_setkey_3des_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+
+ /* Crypto 3DES ECB, CBC, CFB, and OFB modes */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "ppc4xx-cbc-3des",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_3des_cbc,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(des3_ede)",
+ .cra_driver_name = "ppc4xx-ecb-3des",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .setkey = crypto4xx_setkey_3des_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
/* Crypto AES modes */
{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-ppc4xx",
- .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct crypto4xx_ctx),
- .cra_type = &crypto_ablkcipher_type,
- .cra_init = crypto4xx_alg_init,
- .cra_exit = crypto4xx_alg_exit,
- .cra_module = THIS_MODULE,
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
.cra_u = {
.ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
@@ -1144,8 +1741,2184 @@ struct crypto4xx_alg_common crypto4xx_alg[] = {
}
}
}},
-};
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ofb(aes)",
+ .cra_driver_name = "ppc4xx-ofb-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_aes_ofb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cfb(aes)",
+ .cra_driver_name = "ppc4xx-cfb-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_aes_cfb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ /* Crypto AES ECB, CBC, CTR, GCM, CCM, and GMAC modes */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ppc4xx-ecb-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .setkey = crypto4xx_setkey_aes_ecb,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "ppc4xx-ctr-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = CTR_RFC3686_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = CTR_RFC3686_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_aes_ctr,
+ .encrypt = crypto4xx_encrypt_ctr,
+ .decrypt = crypto4xx_decrypt_ctr,
+ }
+ }
+ }},
+
+ /* AEAD Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "ppc4xx-gcm-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .maxauthsize = 16,
+ .ivsize = 12,
+ .setkey = crypto4xx_setkey_aes_gcm,
+ .setauthsize = crypto4xx_setauthsize_aes,
+ .encrypt = crypto4xx_encrypt_aes_gcm,
+ .decrypt = crypto4xx_decrypt_aes_gcm,
+ .givencrypt = crypto4xx_givencrypt_aes_gcm,
+ .givdecrypt = crypto4xx_givdecrypt_aes_gcm,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ppc4xx-ccm-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = 16,
+ .setkey = crypto4xx_setkey_aes_ccm,
+ .setauthsize = crypto4xx_setauthsize_aes,
+ .encrypt = crypto4xx_encrypt_aes_ccm,
+ .decrypt = crypto4xx_decrypt_aes_ccm,
+ .givencrypt = crypto4xx_givencrypt_aes_ccm,
+ .givdecrypt = crypto4xx_givdecrypt_aes_ccm,
+ }
+ }
+ }},
+
+ /* Hash MD5 */
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SA_HASH_ALG_MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "md5",
+ .cra_driver_name = "ppc4xx-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_md5_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+#endif
+#if 1
+ /* Hash MD5-HMAC */
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_md5_hmac_setkey,
+ .halg.digestsize = SA_HASH_ALG_MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "ppc4xx-hmac-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ /* Hash SHA1, SHA2 and HMAC */
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "ppc4xx-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha1_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+#endif
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha1_hmac_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "ppc4xx-hmac-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha224",
+ .cra_driver_name = "ppc4xx-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha2_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha2_hmac_setkey,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "ppc4xx-hmac-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "ppc4xx-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha2_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha2_hmac_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "ppc4xx-hmac-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha384",
+ .cra_driver_name = "ppc4xx-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha2_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha2_hmac_setkey,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "ppc4xx-hmac-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "sha512",
+ .cra_driver_name = "ppc4xx-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_init = crypto4xx_sha2_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_sha2_hmac_setkey,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "ppc4xx-hmac-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+ /* Hash XCBC, GHASH, and Kasumi F9 */
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_hash_digest,
+ .setkey = crypto4xx_xcbc_setkey,
+ .halg.digestsize = 16,
+ .halg.base = {
+ .cra_name = "xcbc(aes)",
+ .cra_driver_name = "ppc4xx-xcbc-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+
+ /* Crypto Kasumi and Kasumi F8 */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "kasumi",
+ .cra_driver_name = "ppc4xx-kasumi",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = KASUMI_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = KASUMI_KEY_SIZE,
+ .max_keysize = KASUMI_KEY_SIZE,
+ .ivsize = KASUMI_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_kasumi_p,
+ .encrypt = crypto4xx_encrypt,
+ .decrypt = crypto4xx_decrypt,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "f8(kasumi)",
+ .cra_driver_name = "ppc4xx-f8-kasumi",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = KASUMI_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = KASUMI_KEY_SIZE,
+ .max_keysize = KASUMI_KEY_SIZE,
+ .ivsize = KASUMI_BLOCK_SIZE,
+ .setkey = crypto4xx_setkey_kasumi_f8,
+ .encrypt = crypto4xx_encrypt_kasumi_f8,
+ .decrypt = crypto4xx_decrypt_kasumi_f8,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AHASH, .u.hash = {
+ .init = crypto4xx_hash_init,
+ .update = crypto4xx_hash_update,
+ .final = crypto4xx_hash_final,
+ .digest = crypto4xx_kasumi_f9_digest,
+ .setkey = crypto4xx_kasumi_f9_setkey,
+ .halg.digestsize = 8,
+ .halg.base = {
+ .cra_name = "f9(kasumi)",
+ .cra_driver_name = "ppc4xx-f9-kasumi",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = KASUMI_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ahash_type,
+ .cra_module = THIS_MODULE,
+ }
+ }},
+#endif
+#if 1
+ /* Crypto ARC4 - stateless */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "ecb(arc4)",
+ .cra_driver_name = "ppc4xx-arc4",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = 1,
+ .max_keysize = 16,
+ .setkey = crypto4xx_setkey_arc4,
+ .encrypt = crypto4xx_arc4_encrypt,
+ .decrypt = crypto4xx_arc4_decrypt,
+ }
+ }
+ }},
+	/* Crypto ARC4 - stateful */
+ { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
+ .cra_name = "cbc(arc4)",
+ .cra_driver_name = "ppc4xx-arc4",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .ablkcipher = {
+ .min_keysize = 1,
+ .max_keysize = 16,
+ .setkey = crypto4xx_setkey_arc4,
+ .encrypt = crypto4xx_arc4_encrypt,
+ .decrypt = crypto4xx_arc4_decrypt,
+ }
+ }
+ }},
+#endif
+#if 1
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(md5),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u ={
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha1),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+#endif
+#if 1
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha224),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+#endif
+#if 1
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha256),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha384),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha512),cbc(aes))))",
+ .cra_driver_name = "tunnel-esp-cbc-aes-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_aes_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(md5),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha1),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha224),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha256),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha384),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha512),cbc(des))))",
+ .cra_driver_name = "tunnel-esp-cbc-des-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_des_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(md5),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(md5),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha1),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha224),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha256),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha384),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tunnel(esp(authenc(hmac(sha512),cbc(des3_ede))))",
+ .cra_driver_name = "tunnel-esp-cbc-3des-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size is 8 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tunnel_esp_cbc_3des_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_esp_cbc,
+ .decrypt = crypto4xx_decrypt_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_esp_cbc,
+ }
+ }
+ }},
+
+ /** IPSec transport combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(md5),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha1),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha224),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha256),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha384),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha512),cbc(aes))))",
+ .cra_driver_name = "transport-esp-cbc-aes-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_aes_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec transport combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(md5),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha1),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha224),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha256),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha384),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha512),cbc(des))))",
+ .cra_driver_name = "transport-esp-cbc-des-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_des_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec transport combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(md5),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 16, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha1),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 20, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha224),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha224",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 28, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha224,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+#endif
+#if 1
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha256),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha256",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 32, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha256,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ /* IPSec combined hash and crypto Algorithms */
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha384),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha384",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize 	= 8,	/* 64-bit block (3DES) */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 48, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha384,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(authenc(hmac(sha512),cbc(des3_ede))))",
+ .cra_driver_name = "transport-esp-cbc-3des-sha512",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 64, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_cbc_3des_sha512,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "transport(esp(rfc4106(gcm(aes))))",
+ .cra_driver_name = "transport-esp-rfc4106-gcm-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0, /* Hardware requires 16 bytes aligned */
+ .cra_type = &crypto_aead_type,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 8, /* IV size for crypto */
+ .maxauthsize = 12, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_transport_esp_rfc4106_gcm,
+ .setauthsize = crypto4xx_setauthsize_aes,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "transport-esp-rfc4104-gcm-aes",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY_IPSEC-100,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 16,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_u = {.aead = {
+			.ivsize 	= 8,	/* IV size is 8 bytes */
+ .setkey = crypto4xx_setkey_transport_esp_rfc4106_gcm,
+ .setauthsize = crypto4xx_setauthsize_aes,
+ .encrypt = crypto4xx_encrypt_transport_esp_cbc,
+ .decrypt = crypto4xx_decrypt_transport_esp_cbc,
+ .givencrypt = crypto4xx_givencrypt_transport_esp_cbc,
+ }
+ }}},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "macsec(gcm)",
+ .cra_driver_name = "macsec-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64,
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_macsec_gcm,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_macsec,
+ .decrypt = crypto4xx_decrypt_macsec,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(aes-sha1)",
+ .cra_driver_name = "dtls-ppc4xx",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(des-sha1)",
+ .cra_driver_name = "ppc4xx-dtls-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(des3-sha1)",
+ .cra_driver_name = "ppc4xx-dtls-des3-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_des3_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(NULL-md5)",
+ .cra_driver_name = "ppc4xx-dtls-null-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 0,	/* no IV (NULL cipher) */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_null_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "dtls(NULL-sha1)",
+ .cra_driver_name = "ppc4xx-dtls-null-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 0,	/* no IV (NULL cipher) */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_dtls_null_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(aes-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 64 bits ... 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0xF,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_aes,
+ .decrypt = crypto4xx_decrypt_ssl_aes,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(des-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_des,
+ .decrypt = crypto4xx_decrypt_ssl_des,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(des3-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-des3-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_des3_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_des,
+ .decrypt = crypto4xx_decrypt_ssl_des,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(arc4-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-arc4-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_arc4_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(arc4-md5)",
+ .cra_driver_name = "ppc4xx-ssl-arc4-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_arc4_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(NULL-md5)",
+ .cra_driver_name = "ppc4xx-ssl-null-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 0,	/* no IV (NULL cipher) */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_null_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_null,
+ .decrypt = crypto4xx_decrypt_ssl_null,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "ssl(NULL-sha1)",
+ .cra_driver_name = "ppc4xx-ssl-null-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 0,	/* no IV (NULL cipher) */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_ssl_null_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_null,
+ .decrypt = crypto4xx_decrypt_ssl_null,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+#if 1
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(aes-sha1)",
+ .cra_driver_name = "ppc4xx-tls-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_aes,
+ .decrypt = crypto4xx_decrypt_ssl_aes,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(des-sha1)",
+ .cra_driver_name = "ppc4xx-tls-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_des,
+ .decrypt = crypto4xx_decrypt_ssl_des,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(des3-sha1)",
+ .cra_driver_name = "ppc4xx-tls-des3-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_des3_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_des,
+ .decrypt = crypto4xx_decrypt_ssl_des,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(arc4-sha1)",
+ .cra_driver_name = "ppc4xx-tls-arc4-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_arc4_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(arc4-md5)",
+ .cra_driver_name = "ppc4xx-tls-arc4-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_arc4_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(NULL-md5)",
+ .cra_driver_name = "ppc4xx-tls-null-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 0,	/* no IV (NULL cipher) */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_null_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_null,
+ .decrypt = crypto4xx_decrypt_ssl_null,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls(NULL-sha1)",
+ .cra_driver_name = "ppc4xx-tls-null-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 0,	/* no IV (NULL cipher) */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls_null_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_null,
+ .decrypt = crypto4xx_decrypt_ssl_null,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(aes-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-aes-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_aes_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(des-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-des-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_des_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(des3-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-des3-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 8,	/* IV size is 8 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_des3_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(arc4-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-arc4-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_arc4_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(arc4-md5)",
+ .cra_driver_name = "ppc4xx-tls1.1-arc4-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+ .ivsize = 16, /* IV size is 16 bytes */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_arc4_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_ssl_arc4,
+ .decrypt = crypto4xx_decrypt_ssl_arc4,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(NULL-md5)",
+ .cra_driver_name = "ppc4xx-tls1.1-null-md5",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 0,	/* no IV (NULL cipher) */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_null_md5,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+
+ { .type = CRYPTO_ALG_TYPE_AEAD, .u.cipher = {
+ .cra_name = "tls1_1(NULL-sha1)",
+ .cra_driver_name = "ppc4xx-tls1.1-null-sha1",
+ .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
+ .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = 64, /* 128-bits block */
+ .cra_ctxsize = sizeof(struct crypto4xx_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_aead_type,
+ .cra_init = crypto4xx_alg_init,
+ .cra_exit = crypto4xx_alg_exit,
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .aead = {
+				.ivsize 	= 0,	/* no IV (NULL cipher) */
+ .maxauthsize = 128, /* Max auth data size in bytes */
+ .setkey = crypto4xx_setkey_tls1_1_null_sha1,
+ .setauthsize = NULL,
+ .encrypt = crypto4xx_encrypt_dtls,
+ .decrypt = crypto4xx_decrypt_dtls,
+ .givencrypt = NULL,
+ .givdecrypt = NULL,
+ }
+ }
+ }},
+#endif
+};
+#define CRYPTO4XX_CONSOLE_DRIVER_NAME "crypto4xx"
/**
* Module Initialization Routine
*/
@@ -1156,6 +3929,9 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
struct resource res;
struct device *dev = &ofdev->dev;
struct crypto4xx_core_device *core_dev;
+ unsigned int pvr;
+ unsigned short min;
+ int revb_ver = 1; /* RevB of crypto core */
rc = of_address_to_resource(ofdev->node, 0, &res);
if (rc)
@@ -1172,6 +3948,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
mtdcri(SDR0, PPC405EX_SDR0_SRST,
mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
+ revb_ver = 0;
} else if (of_find_compatible_node(NULL, NULL,
"amcc,ppc460sx-crypto")) {
mtdcri(SDR0, PPC460SX_SDR0_SRST,
@@ -1193,8 +3970,23 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
if (!core_dev->dev)
goto err_alloc_dev;
+ /* Older version of 460EX/GT does not support H/W based security intr coalescing */
+ pvr = mfspr(SPRN_PVR);
+ printk("Reading pvr value = %x\n", pvr);
+ if ((pvr & 0xfffffff0) == 0x130218A0) {
+ min = PVR_MIN(pvr);
+ if (min < 4) {
+ printk(KERN_INFO "RevA 460EX/GT ... h/w bug in security intr coal\n");
+ revb_ver = 0;
+ } else {
+ printk(KERN_INFO "RevB h/w security interrupt coalescing supported ...\n");
+ }
+ }
+
+ core_dev->revb_ver = revb_ver;
core_dev->dev->core_dev = core_dev;
core_dev->device = dev;
+ core_dev->irq_cnt = 0ll;
spin_lock_init(&core_dev->lock);
INIT_LIST_HEAD(&core_dev->dev->alg_list);
rc = crypto4xx_build_pdr(core_dev->dev);
@@ -1209,6 +4001,25 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
if (rc)
goto err_build_sdr;
+ proc_crypto4xx = proc_mkdir("driver/crypto4xx", NULL);
+ if(proc_crypto4xx == NULL) {
+ printk(KERN_ERR
+ "%s: Error creating proc entry\n",__FUNCTION__);
+ return -ENOMEM;
+ }
+
+ entry = create_proc_read_entry("crypto4xx",
+ 0,
+ proc_crypto4xx,
+ crypto4xx_device_read_procmem,
+ core_dev);
+ if(entry == NULL) {
+ printk(KERN_CRIT
+ "%s: crypto4xx: create_proc_read_entry failed!\n",
+ __FUNCTION__);
+ return -ENOMEM;
+ }
+
/* Init tasklet for bottom half processing */
tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
(unsigned long) dev);
@@ -1216,7 +4027,7 @@ static int __init crypto4xx_probe(struct of_device *ofdev,
/* Register for Crypto isr, Crypto Engine IRQ */
core_dev->irq = irq_of_parse_and_map(ofdev->node, 0);
rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
- core_dev->dev->name, dev);
+ "CRYPTO", dev);
if (rc)
goto err_request_irq;
@@ -1269,8 +4080,12 @@ static int __exit crypto4xx_remove(struct of_device *ofdev)
/* Un-register with Linux CryptoAPI */
crypto4xx_unregister_alg(core_dev->dev);
/* Free all allocated memory */
- crypto4xx_stop_all(core_dev);
+ remove_proc_entry("crypto4xx", entry) ;
+ entry = NULL;
+ remove_proc_entry("driver/crypto4xx", proc_crypto4xx);
+ proc_crypto4xx = NULL;
+ crypto4xx_stop_all(core_dev);
return 0;
}
diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
index da9cbe3b9fc..658a2416294 100644
--- a/drivers/crypto/amcc/crypto4xx_core.h
+++ b/drivers/crypto/amcc/crypto4xx_core.h
@@ -32,10 +32,16 @@
#define PPC405EX_CE_RESET 0x00000008
#define CRYPTO4XX_CRYPTO_PRIORITY 300
-#define PPC4XX_LAST_PD 63
-#define PPC4XX_NUM_PD 64
+
+//#define PPC4XX_LAST_PD 1022
+//#define PPC4XX_NUM_PD 1023
+
+#define PPC4XX_LAST_PD 511
+#define PPC4XX_NUM_PD 512
+
#define PPC4XX_LAST_GD 1023
#define PPC4XX_NUM_GD 1024
+
#define PPC4XX_LAST_SD 63
#define PPC4XX_NUM_SD 64
#define PPC4XX_SD_BUFFER_SIZE 2048
@@ -76,7 +82,9 @@ struct crypto4xx_device {
void *pdr; /* base address of packet
descriptor ring */
dma_addr_t pdr_pa; /* physical address used to
- program ce pdr_base_register */
+ program ce pdr_base_register */
+ phys_addr_t pdr_ocm_addr;
+
void *gdr; /* gather descriptor ring */
dma_addr_t gdr_pa; /* physical address used to
program ce gdr_base_register */
@@ -100,6 +108,9 @@ struct crypto4xx_device {
void *pdr_uinfo;
struct list_head alg_list; /* List of algorithm supported
by this device */
+ u32 pkt_cnt;
+ u32 macsec_decrypt_num;
+
};
struct crypto4xx_core_device {
@@ -108,18 +119,31 @@ struct crypto4xx_core_device {
struct crypto4xx_device *dev;
u32 int_status;
u32 irq;
+ u64 irq_cnt;
struct tasklet_struct tasklet;
spinlock_t lock;
+ struct timer_list crypto4xx_timer;
+ int revb_ver;
};
struct crypto4xx_ctx {
struct crypto4xx_device *dev;
void *sa_in;
dma_addr_t sa_in_dma_addr;
+ phys_addr_t sa_in_ocm_addr;
+
void *sa_out;
dma_addr_t sa_out_dma_addr;
+ phys_addr_t sa_out_ocm_addr;
+
+ void *arc4_state_record;
+ dma_addr_t arc4_state_record_dma_addr;
+ phys_addr_t arc4_state_ocm_addr;
+
void *state_record;
dma_addr_t state_record_dma_addr;
+ phys_addr_t state_record_ocm_addr;
+
u32 sa_len;
u32 offset_to_sr_ptr; /* offset to state ptr, in dynamic sa */
u32 direction;
@@ -127,9 +151,20 @@ struct crypto4xx_ctx {
u32 save_iv;
u32 pd_ctl_len;
u32 pd_ctl;
+ u32 append_icv;
+ u32 is_gcm;
+ u32 ctr_aes;
u32 bypass;
+ u32 init_arc4;
u32 is_hash;
u32 hash_final;
+ u32 spi;
+ u32 seq;
+ u32 pad_block_size;
+ u32 encap_uhl;
+ u32 pad_ctl;
+ u32 authenc;
+ u32 hc_offset;
};
struct crypto4xx_req_ctx {
@@ -166,6 +201,7 @@ static inline struct crypto4xx_alg *crypto_alg_to_crypto4xx_alg(
return container_of(x, struct crypto4xx_alg, alg.u.cipher);
}
+extern void my_dump_Data(const u_char* dptr, u_int size);
extern int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size);
extern void crypto4xx_free_sa(struct crypto4xx_ctx *ctx);
extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
@@ -173,9 +209,15 @@ extern u32 crypto4xx_alloc_sa_rctx(struct crypto4xx_ctx *ctx,
extern void crypto4xx_free_sa_rctx(struct crypto4xx_ctx *rctx);
extern void crypto4xx_free_ctx(struct crypto4xx_ctx *ctx);
extern u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx);
+extern void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx);
+extern u32 crypto4xx_alloc_arc4_state_record(struct crypto4xx_ctx *ctx);
+extern void crypto4xx_free_arc4_state_record(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx);
extern u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_offset_arc4_state_ptr(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_offset_seq_num(struct crypto4xx_ctx *ctx);
+u32 get_dynamic_sa_offset_spi(struct crypto4xx_ctx *ctx);
extern void crypto4xx_memcpy_le(unsigned int *dst,
const unsigned char *buf, int len);
extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
@@ -183,9 +225,15 @@ extern u32 crypto4xx_build_pd(struct crypto_async_request *req,
struct scatterlist *src,
struct scatterlist *dst,
unsigned int datalen,
+ struct scatterlist *assoc,
+ u32 aad_len,
void *iv, u32 iv_len);
extern int crypto4xx_setkey_aes_cbc(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen);
+extern int crypto4xx_setkey_3des_cbc(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+extern int crypto4xx_setkey_3des_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
extern int crypto4xx_encrypt(struct ablkcipher_request *req);
extern int crypto4xx_decrypt(struct ablkcipher_request *req);
extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
@@ -193,4 +241,315 @@ extern int crypto4xx_hash_digest(struct ahash_request *req);
extern int crypto4xx_hash_final(struct ahash_request *req);
extern int crypto4xx_hash_update(struct ahash_request *req);
extern int crypto4xx_hash_init(struct ahash_request *req);
+extern int crypto4xx_md5_alg_init(struct crypto_tfm *tfm);
+extern int crypto4xx_hash_hmac_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen,
+ unsigned int sa_len,
+ unsigned char ha,
+ unsigned char hm,
+ unsigned int max_keylen);
+extern int crypto4xx_md5_hmac_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen);
+extern int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
+extern int crypto4xx_sha2_alg_init(struct crypto_tfm *tfm);
+extern int crypto4xx_sha2_hmac_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen);
+extern int crypto4xx_sha1_hmac_setkey(struct crypto_ahash *hash, const u8 *key,
+ unsigned int keylen);
+extern u32 get_dynamic_sa_offset_inner_digest(struct crypto4xx_ctx *ctx);
+extern u32 get_dynamic_sa_offset_outer_digest(struct crypto4xx_ctx *ctx);
+extern int crypto4xx_pre_compute_hmac(struct crypto4xx_ctx *ctx,
+ void *key,
+ unsigned int keylen,
+ unsigned int bs,
+ unsigned char ha,
+ unsigned char digs);
+int crypto4xx_setkey_aes_ecb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ofb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_cfb(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ctr(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_gcm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_setkey_aes_ccm(struct crypto_aead *cipher,
+ const u8 *key, unsigned int keylen);
+
+int crypto4xx_encrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_decrypt_aes_gcm(struct aead_request *req);
+int crypto4xx_encrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_decrypt_aes_ccm(struct aead_request *req);
+int crypto4xx_encrypt_ctr(struct ablkcipher_request *req);
+int crypto4xx_decrypt_ctr(struct ablkcipher_request *req);
+int crypto4xx_setauthsize_aes(struct crypto_aead *ciper,
+ unsigned int authsize);
+int crypto4xx_givencrypt_aes_ccm(struct aead_givcrypt_request *req);
+int crypto4xx_givencrypt_aes_gcm(struct aead_givcrypt_request *req);
+int crypto4xx_givdecrypt_aes_ccm(struct aead_givcrypt_request *req);
+int crypto4xx_givdecrypt_aes_gcm(struct aead_givcrypt_request *req);
+int crypto4xx_setkey_kasumi_f8(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+int crypto4xx_encrypt_kasumi_f8(struct ablkcipher_request *req);
+int crypto4xx_decrypt_kasumi_f8(struct ablkcipher_request *req);
+int crypto4xx_setkey_kasumi_p(struct crypto_ablkcipher *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_kasumi_f9_digest(struct ahash_request *req);
+int crypto4xx_kasumi_f9_setkey(struct crypto_ahash *hash,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_xcbc_setkey(struct crypto_ahash *hash,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_arc4(struct crypto_ablkcipher *cipher,
+ const u8 *key, unsigned int keylen);
+int crypto4xx_arc4_decrypt(struct ablkcipher_request *req);
+int crypto4xx_arc4_encrypt(struct ablkcipher_request *req);
+u32 crypto4xx_alloc_arc4_state_record(struct crypto4xx_ctx *ctx);
+int crypto4xx_setauthsize_aes_ccm(struct crypto_aead *ciper,
+ unsigned int authsize);
+
+/* From crypto/md5.c */
+extern void md5_get_immediate_hash(struct crypto_tfm *tfm, u8 *data);
+extern unsigned int crypto4xx_sa_hash_tbl[3][6];
+
+/** IPSec Generic Tunnel Related Routine Declarations */
+int crypto4xx_encrypt_esp_cbc(struct aead_request *req);
+int crypto4xx_decrypt_esp_cbc(struct aead_request *req);
+int crypto4xx_givencrypt_esp_cbc(struct aead_givcrypt_request *req);
+
+/** IPSec Tunnel AES Routine Declarations */
+int crypto4xx_setkey_tunnel_esp_cbc_aes_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_aes_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+/** DES Tunnel Mode ipsec Related Algorithms */
+int crypto4xx_setkey_tunnel_esp_cbc_des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+/** 3DES Tunnel Mode IPSEC Related Algorithms */
+int crypto4xx_setkey_tunnel_esp_cbc_3des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+int crypto4xx_setkey_tunnel_esp_cbc_3des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** Generic Transport Mode IPSEC Related Algorithms */
+int crypto4xx_encrypt_transport_esp_cbc(struct aead_request *req);
+int crypto4xx_givencrypt_transport_esp_cbc(struct aead_givcrypt_request *req);
+int crypto4xx_decrypt_transport_esp_cbc(struct aead_request *req);
+
+/** AES Transport Mode IPSEC Related Algorithms */
+int crypto4xx_setkey_transport_esp_cbc_aes_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_aes_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** DES Transport Mode IPSEC Related Algorithms */
+int crypto4xx_setkey_transport_esp_cbc_des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** 3DES Transport Mode IPSEC Related Algorithms */
+int crypto4xx_setkey_transport_esp_cbc_3des_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha224(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha256(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha384(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_transport_esp_cbc_3des_sha512(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+/** MACsec Related Declarations */
+int crypto4xx_encrypt_macsec(struct aead_request *req);
+int crypto4xx_decrypt_macsec(struct aead_request *req);
+int crypto4xx_setkey_macsec_gcm(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** DTLS/SSL/TLS Related Setkey Algorithms */
+int crypto4xx_setkey_dtls_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_dtls_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_dtls_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_dtls_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_dtls_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_ssl_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+int crypto4xx_setkey_tls_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+int crypto4xx_setkey_tls_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_aes_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_des_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_des3_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_arc4_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_arc4_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_null_md5(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+int crypto4xx_setkey_tls1_1_null_sha1(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
+
+/** DTLS/SSL/TLS Related Encrypt/Decrypt Algorithms */
+int crypto4xx_encrypt_dtls(struct aead_request *req);
+int crypto4xx_decrypt_dtls(struct aead_request *req);
+int crypto4xx_encrypt_ssl_aes(struct aead_request *req);
+int crypto4xx_decrypt_ssl_aes(struct aead_request *req);
+int crypto4xx_encrypt_ssl_des(struct aead_request *req);
+int crypto4xx_decrypt_ssl_des(struct aead_request *req);
+int crypto4xx_encrypt_ssl_arc4(struct aead_request *req);
+int crypto4xx_decrypt_ssl_arc4(struct aead_request *req);
+int crypto4xx_encrypt_ssl_null(struct aead_request *req);
+int crypto4xx_decrypt_ssl_null(struct aead_request *req);
+int crypto4xx_setkey_transport_esp_rfc4106_gcm(struct crypto_aead *cipher,
+ const u8 *key,
+ unsigned int keylen);
#endif
diff --git a/drivers/crypto/amcc/crypto4xx_reg_def.h b/drivers/crypto/amcc/crypto4xx_reg_def.h
index 7d4edb00261..d964f5d5551 100644
--- a/drivers/crypto/amcc/crypto4xx_reg_def.h
+++ b/drivers/crypto/amcc/crypto4xx_reg_def.h
@@ -54,6 +54,10 @@
#define CRYPTO4XX_SEQ_RD 0x00000408
#define CRYPTO4XX_SEQ_MASK_RD 0x0000040C
+#define CRYPTO4XX_SPI 0x000106B0
+#define CRYPTO4XX_SEQ_NUM0 0x000106B4
+#define CRYPTO4XX_SEQ_NUM1 0x000106B8
+
#define CRYPTO4XX_SA_CMD_0 0x00010600
#define CRYPTO4XX_SA_CMD_1 0x00010604
@@ -112,6 +116,7 @@
#define CRYPTO4XX_PRNG_LFSR_L 0x00070030
#define CRYPTO4XX_PRNG_LFSR_H 0x00070034
+
/**
* Initilize CRYPTO ENGINE registers, and memory bases.
*/
@@ -121,18 +126,21 @@
#define PPC4XX_PD_SIZE 6
#define PPC4XX_CTX_DONE_INT 0x2000
#define PPC4XX_PD_DONE_INT 0x8000
+#define PPC4XX_TMO_ERR_INT 0x40000
#define PPC4XX_BYTE_ORDER 0x22222
#define PPC4XX_INTERRUPT_CLR 0x3ffff
+#define PPC4XX_INTERRUPT_CLR_REVB 0x7ffff
#define PPC4XX_PRNG_CTRL_AUTO_EN 0x3
#define PPC4XX_DC_3DES_EN 1
-#define PPC4XX_INT_DESCR_CNT 4
+#define PPC4XX_INT_DESCR_CNT 7
#define PPC4XX_INT_TIMEOUT_CNT 0
+#define PPC4XX_INT_TIMEOUT_CNT_REVB 0x3FF
#define PPC4XX_INT_CFG 1
/**
* all follow define are ad hoc
*/
-#define PPC4XX_RING_RETRY 100
-#define PPC4XX_RING_POLL 100
+#define PPC4XX_RING_RETRY 1
+#define PPC4XX_RING_POLL 1
#define PPC4XX_SDR_SIZE PPC4XX_NUM_SD
#define PPC4XX_GDR_SIZE PPC4XX_NUM_GD
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
index 466fd94cd4a..fa4ff7ac66a 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -84,6 +84,119 @@ u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
return sizeof(struct dynamic_sa_ctl) + offset * 4;
}
+u32 get_dynamic_sa_offset_arc4_state_ptr(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size
+ + cts.bf.spi
+ + cts.bf.seq_num0
+ + cts.bf.seq_num1
+ + cts.bf.seq_num_mask0
+ + cts.bf.seq_num_mask1
+ + cts.bf.seq_num_mask2
+ + cts.bf.seq_num_mask3
+ + cts.bf.iv0
+ + cts.bf.iv1
+ + cts.bf.iv2
+ + cts.bf.iv3
+ + cts.bf.state_ptr
+ + cts.bf.arc4_ij_ptr;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_inner_digest(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+ offset = cts.bf.key_size;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_outer_digest(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+
+ offset = cts.bf.key_size
+ + cts.bf.inner_size;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_spi(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_seq_num(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size
+ + cts.bf.spi;
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
+u32 get_dynamic_sa_offset_seq_num_mask(struct crypto4xx_ctx *ctx)
+{
+ u32 offset;
+ union dynamic_sa_contents cts;
+
+ if (ctx->direction == DIR_INBOUND)
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
+ else
+ cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
+
+ offset = cts.bf.key_size
+ + cts.bf.inner_size
+ + cts.bf.outer_size
+ + cts.bf.spi
+ + cts.bf.seq_num0
+ + cts.bf.seq_num1;
+
+ return sizeof(struct dynamic_sa_ctl) + offset * 4;
+}
+
u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
{
union dynamic_sa_contents cts;
@@ -92,6 +205,7 @@ u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx)
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
else
cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
+
return (cts.bf.iv0 + cts.bf.iv1 + cts.bf.iv2 + cts.bf.iv3) * 4;
}
diff --git a/drivers/crypto/amcc/crypto4xx_sa.h b/drivers/crypto/amcc/crypto4xx_sa.h
index 4b83ed7e557..5350c4efbc5 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.h
+++ b/drivers/crypto/amcc/crypto4xx_sa.h
@@ -50,12 +50,42 @@ union dynamic_sa_contents {
u32 w;
} __attribute__((packed));
+#define SA_OPCODE_ESP 0
+#define SA_OPCODE_AH 1
+#define SA_OPCODE_SSL 4
+#define SA_OPCODE_TLS 5
+#define SA_OPCODE_SRTP 7
+#define SA_OPCODE_DTLS 1
+#define SA_OPCODE_TLS1_1 6
+
+#define SA_OP_GROUP_BASIC 0
+#define SA_OP_GROUP_PROTOCOL 1
+#define SA_OP_GROUP_EXTEND_PROTOCOL 3
+
+#define SA_OPCODE_EXT_PROT_DTLS 1
+#define SA_OPCODE_EXT_PROT_MACSEC 2
+#define SA_OPCODE_EXT_PROT_SSL 4
+#define SA_OPCODE_EXT_PROT_TLS10 5
+#define SA_OPCODE_EXT_PROT_TLS11 6
+
#define DIR_OUTBOUND 0
#define DIR_INBOUND 1
-#define SA_OP_GROUP_BASIC 0
#define SA_OPCODE_ENCRYPT 0
#define SA_OPCODE_DECRYPT 0
+#define SA_OPCODE_ENCRYPT_HASH 1
+#define SA_OPCODE_HASH_DECRYPT 1
#define SA_OPCODE_HASH 3
+#define SA_OPCODE_HASH_ENCRYPT 4
+#define SA_OPCODE_DECRYPT_HASH 4
+
+#define SA_OPCODE_ESP 0
+#define SA_OPCODE_AH 1
+#define SA_OPCODE_SSL 4
+#define SA_OPCODE_TLS 5
+#define SA_OPCODE_SRTP 7
+#define SA_OPCODE_DTLS 1
+#define SA_OPCODE_TLS1_1 6
+
#define SA_CIPHER_ALG_DES 0
#define SA_CIPHER_ALG_3DES 1
#define SA_CIPHER_ALG_ARC4 2
@@ -65,8 +95,17 @@ union dynamic_sa_contents {
#define SA_HASH_ALG_MD5 0
#define SA_HASH_ALG_SHA1 1
+#define SA_HASH_ALG_SHA224 2
+#define SA_HASH_ALG_SHA256 3
+#define SA_HASH_ALG_SHA384 4
+#define SA_HASH_ALG_SHA512 5
+#define HASH_ALG_MAX_CNT 6
+#define SA_HASH_ALG_AES_XCBC_MAC_128 8
+#define SA_HASH_ALG_KASUMI_f9 9
+#define SA_HASH_ALG_GHASH 12
+#define SA_HASH_ALG_GMAC 13
+#define SA_HASH_ALG_CBC_MAC 14
#define SA_HASH_ALG_NULL 15
-#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
#define SA_LOAD_HASH_FROM_SA 0
#define SA_LOAD_HASH_FROM_STATE 2
@@ -84,9 +123,22 @@ union dynamic_sa_contents {
#define SA_SAVE_HASH 1
#define SA_NOT_SAVE_IV 0
#define SA_SAVE_IV 1
+#define SA_GEN_IV 3
+
#define SA_HEADER_PROC 1
#define SA_NO_HEADER_PROC 0
+#define SA_HASH_ALG_MD5_DIGEST_SIZE 16
+#define SA_HASH_ALG_SHA1_DIGEST_SIZE 20
+#define SA_HASH_ALG_SHA224_DIGEST_SIZE 28
+#define SA_HASH_ALG_SHA256_DIGEST_SIZE 32
+#define SA_HASH_ALG_SHA384_DIGEST_SIZE 48
+#define SA_HASH_ALG_SHA512_DIGEST_SIZE 64
+
+#define CRYPTO4XX_CRYPTO_PRIORITY_IPSEC 300
+
+#define CRYPTO4XX_MAC_ALGS { "md5", "sha1", \
+ "sha224", "sha256", "sha384", "sha512" }
union sa_command_0 {
struct {
u32 scatter:1;
@@ -111,7 +163,13 @@ union sa_command_0 {
} __attribute__((packed));
#define CRYPTO_MODE_ECB 0
+#define CRYPTO_MODE_KASUMI 0
#define CRYPTO_MODE_CBC 1
+#define CRYPTO_MODE_OFB 2
+#define CRYPTO_MODE_CFB 3
+#define CRYPTO_MODE_AES_CTR 4
+#define CRYPTO_MODE_KASUMI_f8 4
+#define CRYPTO_MODE_AES_ICM 5
#define CRYPTO_FEEDBACK_MODE_NO_FB 0
#define CRYPTO_FEEDBACK_MODE_64BIT_OFB 0
@@ -124,7 +182,7 @@ union sa_command_0 {
#define SA_AES_KEY_LEN_256 4
#define SA_REV2 1
-/**
+/*
* The follow defines bits sa_command_1
* In Basic hash mode this bit define simple hash or hmac.
* In IPsec mode, this bit define muting control.
@@ -177,13 +235,46 @@ struct dynamic_sa_ctl {
/**
* State Record for Security Association (SA)
*/
-struct sa_state_record {
+struct sa_state_record {
u32 save_iv[4];
u32 save_hash_byte_cnt[2];
u32 save_digest[16];
} __attribute__((packed));
/**
+ * Arc4 State Record for Security Association (SA)
+ */
+struct arc4_sr {
+ u32 arc4_state[64];
+} __attribute__((packed));
+
+/**
+ * Security Association (SA) for DES
+ */
+struct dynamic_sa_des {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[2];
+ u32 iv[2];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_DES_LEN (sizeof(struct dynamic_sa_des)/4)
+#define SA_DES_CONTENTS 0x26000022
+
+/**
+ * Security Association (SA) for 3DES
+ */
+struct dynamic_sa_3des {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 iv[2]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_3DES_LEN (sizeof(struct dynamic_sa_3des)/4)
+#define SA_3DES_CONTENTS 0x26000062
+
+/**
* Security Association (SA) for AES128
*
*/
@@ -194,11 +285,10 @@ struct dynamic_sa_aes128 {
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
-
#define SA_AES128_LEN (sizeof(struct dynamic_sa_aes128)/4)
#define SA_AES128_CONTENTS 0x3e000042
-/*
+/**
* Security Association (SA) for AES192
*/
struct dynamic_sa_aes192 {
@@ -208,7 +298,6 @@ struct dynamic_sa_aes192 {
u32 state_ptr;
u32 reserved;
} __attribute__((packed));
-
#define SA_AES192_LEN (sizeof(struct dynamic_sa_aes192)/4)
#define SA_AES192_CONTENTS 0x3e000062
@@ -228,6 +317,19 @@ struct dynamic_sa_aes256 {
#define SA_AES_CONTENTS 0x3e000002
/**
+ * Security Association (SA) for HASH128: HMAC-MD5
+ */
+struct dynamic_sa_hash128 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[4];
+ u32 outer_digest[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_HASH128_LEN (sizeof(struct dynamic_sa_hash128)/4)
+#define SA_HASH128_CONTENTS 0x20008402
+
+/**
* Security Association (SA) for HASH160: HMAC-SHA1
*/
struct dynamic_sa_hash160 {
@@ -240,4 +342,418 @@ struct dynamic_sa_hash160 {
#define SA_HASH160_LEN (sizeof(struct dynamic_sa_hash160)/4)
#define SA_HASH160_CONTENTS 0x2000a502
+/**
+ * Security Association (SA) for HASH256: HMAC-SHA224, HMAC-SHA256
+ */
+struct dynamic_sa_hash256 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[8];
+ u32 outer_digest[8];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_HASH256_LEN (sizeof(struct dynamic_sa_hash256)/4)
+#define SA_HASH256_CONTENTS 0x20010802
+
+/*
+ * Security Association (SA) for HASH512: HMAC-SHA512
+ */
+struct dynamic_sa_hash512 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[16];
+ u32 outer_digest[16];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_HASH512_LEN (sizeof(struct dynamic_sa_hash512)/4)
+#define SA_HASH512_CONTENTS 0x20021002
+
+/**
+ * Security Association (SA) for AES128_XCBC_MAC
+ */
+struct dynamic_sa_aes128_xcbc_mac {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[8];
+ u32 outer_digest[8];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES128_XCBC_MAC_LEN (sizeof(struct dynamic_sa_aes128_xcbc_mac)/4)
+#define SA_AES128_XCBC_MAC_CONTENTS 0x3e010842
+
+/**
+ * Security Association (SA) for AES128_GCM
+ */
+struct dynamic_sa_aes128_gcm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[4];
+ u32 outer_digest[4];
+ u32 spi;
+ u32 seq;
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES128_GCM_LEN (sizeof(struct dynamic_sa_aes128_gcm)/4)
+#define SA_AES128_GCM_CONTENTS 0x3e0c8442
+
+/**
+ * Security Association (SA) for AES192_XCBC_MAC
+ */
+struct dynamic_sa_aes192_xcbc_mac {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 inner_digest[8];
+ u32 outer_digest[8];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES192_XCBC_MAC_LEN (sizeof(struct dynamic_sa_aes192_xcbc_mac)/4)
+#define SA_AES192_XCBC_MAC_CONTENTS 0x3e010862
+
+/**
+ * Security Association (SA) for AES192_GCM
+ */
+struct dynamic_sa_aes192_gcm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 inner_digest[4];
+ u32 outer_digest[4];
+ u32 spi;
+ u32 seq;
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES192_GCM_LEN (sizeof(struct dynamic_sa_aes192_gcm)/4)
+#define SA_AES192_GCM_CONTENTS 0x3e0c8462
+
+
+/**
+ * Security Association (SA) for AES256_XCBC_MAC
+ */
+struct dynamic_sa_aes256_xcbc_mac {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[8];
+ u32 outer_digest[8];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES256_XCBC_MAC_LEN (sizeof(struct dynamic_sa_aes256_xcbc_mac)/4)
+#define SA_AES256_XCBC_MAC_CONTENTS 0x3e010882
+
+/**
+ * Security Association (SA) for AES256_GCM
+ */
+struct dynamic_sa_aes256_gcm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[4];
+ u32 outer_digest[4];
+ u32 spi;
+ u32 seq;
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES256_GCM_LEN (sizeof(struct dynamic_sa_aes256_gcm)/4)
+#define SA_AES256_GCM_CONTENTS 0x3e0c8482
+#define SA_AES_GCM_CONTENTS 0x3e0c8402
+
+/**
+ * Security Association (SA) for Kasumi
+ */
+struct dynamic_sa_kasumi {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_KASUMI_LEN (sizeof(struct dynamic_sa_kasumi)/4)
+#define SA_KASUMI_CONTENTS 0x20000042
+
+/**
+ * Security Association (SA) for Kasumi f8
+ */
+struct dynamic_sa_kasumi_f8 {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 iv[2];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_KASUMI_F8_LEN (sizeof(struct dynamic_sa_kasumi_f8)/4)
+#define SA_KASUMI_F8_CONTENTS 0x26000042
+
+#define KASUMI_BLOCK_SIZE 8
+#define KASUMI_KEY_SIZE 16
+
+/**
+ * Security Association (SA) for Kasumi f9
+ */
+struct dynamic_sa_kasumi_f9 {
+ struct dynamic_sa_ctl ctrl;
+ u32 inner_digest[4];
+ u32 outter_digest[3];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_KASUMI_F9_LEN (sizeof(struct dynamic_sa_kasumi_f9)/4)
+#define SA_KASUMI_F9_CONTENTS 0x20006402
+
+/**
+ * Security Association (SA) for AES256 CCM
+ */
+struct dynamic_sa_aes256_ccm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES256_CCM_LEN (sizeof(struct dynamic_sa_aes256_ccm)/4)
+#define SA_AES256_CCM_CONTENTS 0x3e000082
+#define SA_AES_CCM_CONTENTS 0x3e000002
+
+/**
+ * Security Association (SA) for AES192 CCM
+ */
+struct dynamic_sa_aes192_ccm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES192_CCM_LEN (sizeof(struct dynamic_sa_aes192_ccm)/4)
+#define SA_AES192_CCM_CONTENTS 0x3e000062
+
+/**
+ * Security Association (SA) for AES128 CCM
+ */
+struct dynamic_sa_aes128_ccm {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 iv[4]; /* for CBC, OFC, and CFB mode */
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+#define SA_AES128_CCM_LEN (sizeof(struct dynamic_sa_aes128_ccm)/4)
+#define SA_AES128_CCM_CONTENTS 0x3e000042
+
+/**
+ * Security Association (SA) for ARC4
+ */
+struct arc4_ij_ptr {
+ u32 rsv:16;
+ u32 j:8;
+ u32 i:8;
+} __attribute__((packed));
+
+struct dynamic_sa_arc4 {
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ struct arc4_ij_ptr ij;
+ u32 arc4_state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ARC4_LEN (sizeof(struct dynamic_sa_arc4)/4)
+#define SA_ARC4_CONTENTS 0xc0000042
+
+/**
+ * Security Association (SA) for IPsec ESP md5 or ESP sha1
+ */
+struct dynamic_sa_esp_md5_sha1
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_MD5_SHA1_LEN sizeof(struct dynamic_sa_esp_md5_sha1)/4
+#define SA_ESP_MD5_SHA1_CONTENTS 0x3ffca582
+
+struct dynamic_sa_esp_des_md5_sha1
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[2];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_DES_MD5_SHA1_LEN sizeof(struct dynamic_sa_esp_des_md5_sha1)/4
+#define SA_ESP_DES_MD5_SHA1_CONTENTS 0x3ffca522
+
+/**
+ * Security Association (SA) for IPsec ESP 3des md5 sha1
+ */
+struct dynamic_sa_esp_3des_md5_sha1
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[6];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_3DES_MD5_SHA1_LEN sizeof(struct dynamic_sa_esp_3des_md5_sha1)/4
+#define SA_ESP_3DES_MD5_SHA1_CONTENTS 0x3ffca562
+
+/**
+ * Security Association (SA) for IPsec ESP sha512
+ */
+struct dynamic_sa_esp_sha512
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[16];
+ u32 outter_digest[16];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_SHA512_LEN sizeof(struct dynamic_sa_esp_sha512)/4
+#define SA_ESP_SHA512_CONTENTS 0x3ffe1082
+
+
+/**
+ * Security Association (SA) for IPsec ESP gcm
+ */
+struct dynamic_sa_esp_gcm
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[4];
+ u32 outter_digest[4];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_GCM_LEN sizeof(struct dynamic_sa_esp_gcm)/4
+#define SA_ESP_GCM_CONTENTS 0x3ffc8442
+/**
+ * Security Association (SA) for IPsec ESP aes sha256
+ */
+struct dynamic_sa_esp_sha256
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[8];
+ u32 outter_digest[8];
+ u32 spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_ESP_SHA256_LEN sizeof(struct dynamic_sa_esp_sha256)/4
+#define SA_ESP_SHA256_CONTENTS 0x3ffd0882
+
+/**
+ * Security Association (SA) for MACsec GCM
+ */
+struct dynamic_sa_macsec_gcm
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[4];
+ u32 spi;
+ u32 seq;
+ u32 seq_mask[4];
+ u32 iv[2];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_MACSEC_GCM_LEN sizeof(struct dynamic_sa_macsec_gcm)/4
+#define SA_MACSEC_GCM_CONTENTS 0x27ec0442
+
+/**
+ * Security Association (SA) for DTLS
+ */
+union dynamic_sa_dtls_spi
+{
+ struct {
+ u32 rsv:8;
+ u32 version:16;
+ u32 type:8;
+ }bf;
+ u32 w;
+}__attribute__((packed));
+
+struct dynamic_sa_dtls
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[8];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ union dynamic_sa_dtls_spi spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ u32 reserved;
+} __attribute__((packed));
+
+#define SA_DTLS_LEN sizeof(struct dynamic_sa_dtls)/4
+#define SA_DTLS_CONTENTS 0x3ffca582
+
+/**
+ *
+ * Security Association (SA) for SSL/TLS ARC4
+ */
+ struct dynamic_sa_ssl_tls_arc4
+{
+ struct dynamic_sa_ctl ctrl;
+ u32 key[4];
+ u32 inner_digest[5];
+ u32 outter_digest[5];
+ union dynamic_sa_dtls_spi spi;
+ u32 seq[2];
+ u32 seq_mask[4];
+ u32 iv[4];
+ u32 state_ptr;
+ struct arc4_ij_ptr ij;
+ u32 arc4_state_ptr;
+} __attribute__((packed));
+
+//typedef struct dynamic_sa_ssl_tls_arc4 dynamic_sa_ssl_tls_arc4_t;
+#define SA_SSL_ARC4_LEN sizeof(struct dynamic_sa_ssl_tls_arc4)/4
+#define SA_SSL_ARC4_CONTENTS 0xfffca542
+
#endif
diff --git a/drivers/crypto/pka_4xx.c b/drivers/crypto/pka_4xx.c
new file mode 100644
index 00000000000..4dea3eb4b3c
--- /dev/null
+++ b/drivers/crypto/pka_4xx.c
@@ -0,0 +1,1333 @@
+/*******************************************************************************
+ *
+ * Copyright (c) 2008 Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Detail Description:
+ * This file defines ioctl structures for the Linux CryptoAPI interface. It
+ * provides user space applications accesss into the Linux CryptoAPI
+ * functionalities.
+ *
+ * @file pka4xx.c
+ *
+ * This file provides access and implementation of the high layer API to the
+ * PKA registers.
+ *
+ *******************************************************************************
+ */
+#include <asm/delay.h>
+#include <asm/dcr-native.h>
+#include <linux/irq.h>
+#include "pka_4xx_access.h"
+#include <crypto/pka_4xx.h>
+#include "pka_4xx_firmware.h"
+
+/**
+ * PKA Functions
+ *
+ */
+/* # of time to poll for synchronous operation */
+#define PKA4XX_POLL_DONE_MAX_CNT 5000
+
+/*
+ * Write/read a PKA CSR and, on failure, return the RC_* error from the
+ * enclosing function.  Both macros require a local "int rc" in scope.
+ * Fix: no trailing semicolon after while(0) -- the call site supplies
+ * it, so the macros now compose safely in unbraced if/else bodies.
+ */
+#define PKA4XX_CSR_WRITE_RETURN(a, v) \
+	do { \
+		rc = pka4xx_csr_hw_write32((a), (v)); \
+		if (rc != RC_OK) \
+			return rc; \
+	} while(0)
+
+#define PKA4XX_CSR_READ_RETURN(a, v) \
+	do { \
+		rc = pka4xx_csr_hw_read32((a), (v)); \
+		if (rc != RC_OK) \
+			return rc; \
+	} while(0)
+
+
+/* Round (x) up in place to the next multiple of power-of-two (a). */
+#define PKA_ALIGN(x, a) do { \
+			(x) += ((a)-1); \
+			(x) &= ~((a)-1); \
+		} while(0)
+/* Expression form of PKA_ALIGN: yields the rounded-up value. */
+#define PKA_ALIGN_RVAL(x, a) (((x) + ((a)-1)) & (~((a)-1)))
+
+/* Firmware image loaded into the PKA sequencer (defaults to v1.3). */
+static u32 pkt_firmware_sizedw = PKA_FIRMWARE_1_3_SIZEDW;
+static const u32 *pka_firmware = pka_firmware_1_3;
+
+/* Debug capture of recent async expmod results (filled in the IRQ handler). */
+u32 msg_buf[20][10];
+int msg_idx;
+
+/**
+ * Load operand vectors A and B into PKA RAM for a PKCP-class operation
+ * (mul/div/mod/add/sub/compare): programs the A/B length and pointer
+ * CSRs, copies both vectors in, and points CPTR at the first free
+ * 8-byte-aligned address after the operands.
+ *
+ * Returns the PKA RAM byte address of the C (result) region.
+ * NOTE(review): on a CSR access failure the *_RETURN macros make this
+ * u32-returning function return the RC_* error code instead of an
+ * address; callers cannot distinguish the two.
+ */
+u32 pka4xx_pkcp_set_vec(u32 vecA_cnt,
+			u32 *vecA,
+			u32 vecB_cnt,
+			u32 *vecB)
+{
+	u32 addr;
+	int rc, i;	/* rc is required by the CSR_*_RETURN macros */
+	u32 val32;	/* debug read-back only */
+
+	addr = PKA_RAM_ADDR;
+	/* Set PKA RAM address and load input A - multiplicand */
+	PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, vecA_cnt);
+	PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+	PKA4XX_CSR_READ_RETURN(PKA_APTR_ADDR, &val32);
+	PKA4XX_CSR_READ_RETURN(PKA_ALENGTH_ADDR, &val32);
+	for(i = 0; i < vecA_cnt; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, vecA[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr %08X val %08X", addr, val32);
+	}
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Align 8-byte but use 2 as it is DWORD */
+	/* Set PKA RAM address and load for input B - multiplier */
+	PKA4XX_CSR_WRITE_RETURN(PKA_BLENGTH_ADDR, vecB_cnt);
+	PKA4XX_CSR_WRITE_RETURN(PKA_BPTR_ADDR, addr >> 2);
+	PKA4XX_CSR_READ_RETURN(PKA_BPTR_ADDR, &val32);
+	PKA4XX_CSR_READ_RETURN(PKA_BLENGTH_ADDR, &val32);
+	for(i = 0; i < vecB_cnt; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, vecB[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO,
+			"addr %08X val %08X", addr, val32);
+	}
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Set PKA RAM address for output C - product */
+	PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+
+	return addr;
+}
+
+/**
+ * Load the three equal-length operands of an ADDSUB (A + C - B style)
+ * operation into PKA RAM and program the A/B/C pointer CSRs; only
+ * ALENGTH is written -- presumably the ADDSUB command uses the A
+ * length for all operands (TODO confirm against the PKA datasheet).
+ *
+ * Returns the PKA RAM byte address programmed into DPTR (result).
+ */
+u32 pka4xx_addsub_set_vec(u32 input_cnt,
+			u32 *addendA,
+			u32 *subtrahend,
+			u32 *addendC)
+{
+	u32 addr;
+	int rc, i;	/* rc is required by the CSR_*_RETURN macros */
+
+	addr = PKA_RAM_ADDR;
+	/* Set PKA RAM address and load input A - addendA */
+	PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, input_cnt);
+	PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+	for(i = 0; i < input_cnt; i++, addr += 4)
+		PKA4XX_CSR_WRITE_RETURN(addr, addendA[i]);
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Set PKA RAM address and load input B - subtrahend */
+	PKA4XX_CSR_WRITE_RETURN(PKA_BPTR_ADDR, addr >> 2);
+	for(i = 0; i < input_cnt; i++, addr += 4)
+		PKA4XX_CSR_WRITE_RETURN(addr, subtrahend[i]);
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Set PKA RAM address and load input C - addendC */
+	PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+	for(i = 0; i < input_cnt; i++, addr += 4)
+		PKA4XX_CSR_WRITE_RETURN(addr, addendC[i]);
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Set PKA RAM address for output - result */
+	PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+
+	return addr;
+}
+
+
+/**
+ * Load the input vector for a left/right shift operation, program the
+ * shift count CSR, and point CPTR at the result area.
+ *
+ * Returns the PKA RAM byte address of the C (result) region.
+ */
+u32 pka4xx_shift_set_vec(u32 input_cnt,
+			u32 *input,
+			u8 shift)
+{
+	u32 addr;
+	int rc, i;	/* rc is required by the CSR_*_RETURN macros */
+
+	addr = PKA_RAM_ADDR;
+	/* Set PKA RAM address and load input A - input */
+	PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, input_cnt);
+	PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+	for(i = 0; i < input_cnt; i++, addr += 4)
+		PKA4XX_CSR_WRITE_RETURN(addr, input[i]);
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Set shift value */
+	PKA4XX_CSR_WRITE_RETURN(PKA_SHIFT_ADDR, shift);
+	/* Set PKA RAM address for output - result */
+	PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+	/* Save callback for asynchronous operation */
+
+	return addr;
+}
+
+/**
+ * Load all vectors for a CRT modular exponentiation:
+ *   A region: expP then expQ (exp_len words each)
+ *   B region: modP then modQ (mod_inverse_len words each, with the
+ *             extra scratch DWORDs the sequencer firmware requires)
+ *   C region: inverseQ
+ *   D region: the input (2 * mod_inverse_len words), which the
+ *             operation overwrites with the result in place.
+ *
+ * Returns the PKA RAM byte address of the D (input/result) region.
+ * NOTE(review): modP is followed by 8 spare bytes ("mm") but modQ by
+ * only 4 -- asymmetry assumed intentional per firmware requirements;
+ * confirm against the PKA firmware notes before changing.
+ */
+u32 pka4xx_expmod_crt_set_vec(u32 exp_len,
+			u32 *expP,
+			u32 *expQ,
+			u32 mod_inverse_len,
+			u32 *modP,
+			u32 *modQ,
+			u32 *inverseQ,
+			u32 *input)
+{
+	u32 addr;
+	u32 oaddr_start = 0x00000000;	/* offset (in words) into PKA RAM */
+	u32 Daddr;
+	int i, rc;	/* rc is required by the CSR_*_RETURN macros */
+	u32 val32;	/* debug read-back only */
+
+	addr = PKA_RAM_ADDR + (oaddr_start << 2);
+
+	PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, exp_len);
+	PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+	for(i = 0; i < exp_len; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, expP[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X expP val 0x%08X\n",
+			addr, val32);
+	}
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+	/* expQ immediately follows expP in the A region */
+	for(i = 0; i < exp_len; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, expQ[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X expQ val 0x%08X\n",
+			addr, val32);
+	}
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+	/* Set PKA RAM address and load input modP and modQ */
+	PKA4XX_CSR_WRITE_RETURN(PKA_BLENGTH_ADDR, mod_inverse_len);
+	PKA4XX_CSR_WRITE_RETURN(PKA_BPTR_ADDR, addr >> 2);
+	for(i = 0; i < mod_inverse_len; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, modP[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X modP val 0x%08X\n",
+			addr, val32);
+	}
+	addr += 8;/*mm */ /* Require 1 extra DWORD */
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+	for(i = 0; i < mod_inverse_len; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, modQ[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X mod Q val 0x%08X\n",
+			addr, val32);
+	}
+	addr += 4; /* Require 1 extra DWORD */
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+	/* Set PKA RAM address and load input inverseQ */
+	PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+	for(i = 0; i < mod_inverse_len; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, inverseQ[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X invQ val 0x%08X\n",
+			addr, val32);
+	}
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+
+	/* Set PKA RAM address for output - result */
+	PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+	Daddr = addr;
+	/* D holds the 2*mod_inverse_len-word input; result lands here too */
+	for(i = 0; i < (mod_inverse_len<<1); i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, input[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X input val 0x%08X\n",
+			addr, val32);
+	}
+
+	return Daddr;
+}
+
+/**
+ * Load vectors for a plain modular exponentiation:
+ *   A = exponent, B = modulus (+1 scratch DWORD),
+ *   C = base (+1 scratch DWORD), D = result area.
+ *
+ * Returns the PKA RAM byte address of the D (result) region.
+ */
+u32 pka4xx_expmod_set_vec(u32 base_mod_cnt, u32 *base,
+			u32 *modulus,
+			u32 exponent_cnt,
+			u32 *exponent)
+{
+	u32 addr;
+	u32 oaddr_start = 0x00000000;	/* offset (in words) into PKA RAM */
+	u32 val32;	/* debug read-back only */
+	int rc, i;	/* rc is required by the CSR_*_RETURN macros */
+
+	addr = PKA_RAM_ADDR + (oaddr_start << 2);
+
+	PKA4XX_CSR_WRITE_RETURN(PKA_ALENGTH_ADDR, exponent_cnt);
+	PKA4XX_CSR_WRITE_RETURN(PKA_APTR_ADDR, addr >> 2);
+	for(i = 0; i < exponent_cnt; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, exponent[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X A val 0x%08X",
+			addr, val32);
+	}
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Set PKA RAM address and load input B - modulus */
+	PKA4XX_CSR_WRITE_RETURN(PKA_BLENGTH_ADDR, base_mod_cnt);
+	PKA4XX_CSR_WRITE_RETURN(PKA_BPTR_ADDR, addr >> 2);
+	for(i = 0; i < base_mod_cnt; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, modulus[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X B val 0x%08X",
+			addr, val32);
+	}
+	addr += 4; /* Require 1 extra DWORD */
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Set PKA RAM address and load input C - base */
+	PKA4XX_CSR_WRITE_RETURN(PKA_CPTR_ADDR, addr >> 2);
+	for(i = 0; i < base_mod_cnt; i++, addr += 4) {
+		PKA4XX_CSR_WRITE_RETURN(addr, base[i]);
+		PKA4XX_CSR_READ_RETURN(addr, &val32);
+		LPRINTF(LL_INFO, "addr 0x%08X C val 0x%08X",
+			addr, val32);
+	}
+	addr += 4; /* Require 1 extra DWORD */
+	PKA_ALIGN(addr, 8); /* Align 8-byte */
+	/* Set PKA RAM address for output - result */
+	PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+
+	return addr;
+}
+
+/*
+ * Deliver the completion callback for a finished op.  The callback
+ * pointer is cleared before the call so the descriptor can be reused
+ * (e.g. re-queued from inside the callback) without being re-fired.
+ */
+void pka4xx_process_completed_event (struct pka4xx_op *op)
+{
+	pka4xx_cb done_cb;
+
+	done_cb = op->cb;
+	op->cb = NULL;
+	if (done_cb != NULL)
+		done_cb(op->ctx, RC_OK);
+}
+
+/*
+ * Tasklet: drain the completed-op list and invoke user callbacks.
+ * The item is unlinked and the lock dropped BEFORE the callback runs,
+ * so a callback may submit new PKA work without deadlocking on
+ * ctx->lock; the list_for_each_safe walk stays valid because only the
+ * already-removed node is touched while unlocked.
+ */
+void pka4xx_tasklet_cb (unsigned long data)
+{
+	struct list_head *pos;
+	struct list_head *tmp;
+
+	unsigned long flags;
+
+	spin_lock_irqsave(&pka_get_ctx()->lock, flags);
+
+	list_for_each_safe(pos, tmp, &pka_get_ctx()->completed_event_queue) {
+		struct pka4xx_op *item;
+		item = list_entry(pos, struct pka4xx_op, next);
+		list_del(pos);
+		/* drop the lock around the user callback */
+		spin_unlock_irqrestore(&pka_get_ctx()->lock,flags);
+		pka4xx_process_completed_event(item);
+		spin_lock_irqsave(&pka_get_ctx()->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&pka_get_ctx()->lock,flags);
+}
+
+/* Non-zero when the pending-op ring holds at least one queued op. */
+static u8 pka4xx_pending_op(void)
+{
+	struct pka4xx_ctx *ctx = pka_get_ctx();
+
+	return (ctx->op_head == ctx->op_tail) ? 0 : 1;
+}
+
+/*
+ * Reserve the descriptor at the current ring tail, or NULL when the
+ * ring is full (one slot is left unused so "full" is distinguishable
+ * from "empty").  The tail itself is advanced later, in
+ * pka4xx_start_op() on the submit path.
+ */
+static struct pka4xx_op * pka4xx_get_op_item(void)
+{
+	u32 tail;
+
+	/* position the tail would move to if this op is queued */
+	if (pka_get_ctx()->op_tail == PKA4XX_PENDING_OP_MAX-1)
+		tail = 0;
+	else
+		tail = pka_get_ctx()->op_tail + 1;
+
+	if (tail == pka_get_ctx()->op_head) {
+		printk(LL_ERR "No free descriptor available for operation "
+			"queuing\n");
+		return NULL;
+	}
+	return &pka_get_ctx()->op[pka_get_ctx()->op_tail];
+}
+
+/*
+ * Program the PKA core for the given queued op and set the RUN bit.
+ *
+ * interrupt_mode != 0: called from the IRQ handler to launch the next
+ * queued op; the ring tail is NOT advanced and the hardware is always
+ * programmed.
+ * interrupt_mode == 0: called from the submit path; the tail is
+ * advanced here, and the hardware is only touched when the ring was
+ * previously empty ("restart") -- otherwise the op waits its turn and
+ * the IRQ handler starts it.
+ *
+ * Returns RC_OK or a CSR access error code.
+ */
+static int pka4xx_start_op(struct pka4xx_op *op, int interrupt_mode)
+{
+	int rc;
+	u8 restart = 0;
+	u32 Daddr;
+	u32 Caddr, addr;
+	u32 val32;	/* debug read-back only */
+
+	if (!interrupt_mode) {
+		/* ring was empty iff head == tail before this enqueue */
+		restart = !(pka_get_ctx()->op_head != pka_get_ctx()->op_tail);
+
+		if (pka_get_ctx()->op_tail == PKA4XX_PENDING_OP_MAX-1)
+			pka_get_ctx()->op_tail = 0;
+		else
+			pka_get_ctx()->op_tail++;
+	}
+
+	if (restart || interrupt_mode) {
+		switch(op->opcode) {
+		case 0: /* Canceled */
+			return RC_OK;
+		case PKA_FUNCTION_DIV:
+			/* Passing to pka4xx_div_set_vec the order of
+			 * dividend_cnt, dividend, divisor_cnt, divisor
+			 */
+			LPRINTF(LL_INFO, "Starting async Div PKA operation \n");
+			Caddr = pka4xx_pkcp_set_vec(op->async_pkcp.vecA_cnt,
+						op->async_pkcp.vecA,
+						op->async_pkcp.vecB_cnt,
+						op->async_pkcp.vecB);
+			op->ramC_addr = Caddr;
+			/* quotient (D) goes after the divisor_cnt+1-word
+			 * remainder (C) region */
+			addr = Caddr;
+			addr += (op->async_pkcp.vecB_cnt + 1) * 4;
+			PKA_ALIGN(addr, 8); /* Align 8-byte */
+			/* Select PKA RAM address for output D - quotient */
+			PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+			PKA4XX_CSR_READ_RETURN(PKA_DPTR_ADDR, &val32);
+			op->ramD_addr = addr;
+			break;
+		case PKA_FUNCTION_MUL:
+		case PKA_FUNCTION_MOD:
+		case PKA_FUNCTION_ADD:
+		case PKA_FUNCTION_SUB:
+		case PKA_FUNCTION_COMPARE:
+			Caddr = pka4xx_pkcp_set_vec(op->async_pkcp.vecA_cnt,
+						op->async_pkcp.vecA,
+						op->async_pkcp.vecB_cnt,
+						op->async_pkcp.vecB);
+			op->ramC_addr = Caddr;
+			break;
+		case PKA_FUNCTION_ADDSUB:
+			LPRINTF(LL_INFO, "Starting async ADDSUB PKA operation\n");
+			Daddr = pka4xx_addsub_set_vec(op->async_pkcp.vecA_cnt,
+						op->async_pkcp.vecA,
+						op->async_pkcp.vecB,
+						op->async_pkcp.vec_addsub_C);
+			op->ramD_addr = Daddr;
+			break;
+		case PKA_FUNCTION_RSHIFT:
+		case PKA_FUNCTION_LSHIFT:
+			Caddr = pka4xx_shift_set_vec(op->async_pkcp.vecA_cnt,
+						op->async_pkcp.vecA,
+						op->async_pkcp.shift_val);
+			op->ramC_addr = Caddr;
+			break;
+		case PKA_FUNCTION_SEQOP_EXPMOD_ACT2:
+		case PKA_FUNCTION_SEQOP_EXPMOD_ACT4:
+		case PKA_FUNCTION_SEQOP_EXPMOD_VAR:
+			Daddr = pka4xx_expmod_set_vec
+					(op->async_expmod.base_mod_cnt,
+					op->async_expmod.base,
+					op->async_expmod.modulus,
+					op->async_expmod.exp_cnt,
+					op->async_expmod.exp);
+			op->ramD_addr = Daddr;
+			break;
+		case PKA_FUNCTION_SEQOP_EXPMOD_CRT:
+			/* No pending operation before adding this operation
+			 * id restart = 1
+			 */
+			Daddr = pka4xx_expmod_crt_set_vec
+					(op->async_expmod_crt.exp_len,
+					op->async_expmod_crt.expP,
+					op->async_expmod_crt.expQ,
+					op->async_expmod_crt.mod_inverse_len,
+					op->async_expmod_crt.modP,
+					op->async_expmod_crt.modQ,
+					op->async_expmod_crt.inverseQ,
+					op->async_expmod_crt.input);
+			op->ramD_addr = Daddr;
+			break;
+		default:
+			printk(LL_ERR "No operation in async mode\n");
+			return RC_OK;
+		}
+		/* resultC_cnt doubles as the odd-power count for these ops */
+		if (op->opcode == PKA_FUNCTION_SEQOP_EXPMOD_VAR ||
+		    op->opcode == PKA_FUNCTION_SEQOP_EXPMOD_CRT) {
+			PKA4XX_CSR_WRITE_RETURN(PKA_SHIFT_ADDR, op->resultC_cnt);
+		}
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_RUN | op->opcode);
+	}
+	return RC_OK;
+}
+
+/**
+ * PKA interrupt: the sequencer finished the op at the head of the
+ * pending ring.  Harvest its result(s) from PKA RAM, advance the head,
+ * queue the op on the completed list for the tasklet, and start the
+ * next pending op (if any) in interrupt mode.
+ *
+ * Fixes: return IRQ_NONE (not the bare int 0) for a spurious
+ * interrupt, and stop leaking RC_* error codes out of an irqreturn_t
+ * handler through the PKA4XX_CSR_READ_RETURN macro.
+ */
+irqreturn_t pka4xx_irq_handler(int irq, void * id)
+{
+	int rc;
+	u32 i;
+	u32 val;
+	struct pka4xx_op *op;
+	struct pka4xx_op *next_op;
+	unsigned long flags;
+
+	if (!pka4xx_pending_op()) {
+		LPRINTF(LL_INFO,
+			"No pending op in pka4xx_irq_handler !!\n");
+		return IRQ_NONE;	/* spurious: nothing was queued */
+	}
+	op = &pka_get_ctx()->op[pka_get_ctx()->op_head];
+	switch(op->opcode) {
+	case 0: /* Canceled */
+		op->cb = NULL;
+		break;
+	case PKA_FUNCTION_COMPARE:
+		rc = pka4xx_csr_hw_read32(PKA_COMPARE_ADDR, &val);
+		if (rc != RC_OK)
+			return IRQ_NONE; /* CSR read failed; leave op queued */
+		if (val & PKA_COMPARE_EQUAL)
+			*op->resultC_addr = 0;
+		else if (val & PKA_COMPARE_LESSTHAN)
+			*op->resultC_addr = -1;
+		else
+			*op->resultC_addr = 1;
+		break;
+	case PKA_FUNCTION_SEQOP_EXPMOD_ACT2:
+	case PKA_FUNCTION_SEQOP_EXPMOD_ACT4:
+	case PKA_FUNCTION_SEQOP_EXPMOD_VAR:
+	case PKA_FUNCTION_SEQOP_EXPMOD_CRT:
+		/* copy result out of PKA RAM; msg_buf is a debug capture */
+		for(i = 0; i < op->resultD_cnt; op->ramD_addr += 4) {
+			pka4xx_csr_hw_read32(op->ramD_addr,
+					&op->resultD_addr[i]);
+			msg_buf[msg_idx][i] = op->resultD_addr[i];
+			LPRINTF(LL_INFO, "res expmod 0x%08x",
+				msg_buf[msg_idx][i]);
+			i++;
+		}
+		break;
+	case PKA_FUNCTION_ADDSUB:
+		for(i = 0; i < op->resultD_cnt; op->ramD_addr += 4)
+			pka4xx_csr_hw_read32(op->ramD_addr,
+					&op->resultD_addr[i++]);
+		break;
+	case PKA_FUNCTION_DIV:
+		/* C = remainder, D = quotient */
+		for(i = 0; i < op->resultC_cnt; op->ramC_addr += 4)
+			pka4xx_csr_hw_read32(op->ramC_addr,
+					&op->resultC_addr[i++]);
+		for(i = 0; i < op->resultD_cnt; op->ramD_addr += 4)
+			pka4xx_csr_hw_read32(op->ramD_addr,
+					&op->resultD_addr[i++]);
+		break;
+	default:
+		for(i = 0; i < op->resultC_cnt; op->ramC_addr += 4)
+			pka4xx_csr_hw_read32(op->ramC_addr,
+					&op->resultC_addr[i++]);
+		break;
+	}
+
+	/* Advance the ring head (wraps past the last slot) */
+	pka_get_ctx()->op_head =
+		(pka_get_ctx()->op_head + 1) % PKA4XX_PENDING_OP_MAX;
+
+	next_op = &pka_get_ctx()->op[pka_get_ctx()->op_head];
+
+	spin_lock_irqsave(&pka_get_ctx()->lock, flags);
+	list_add_tail(&op->next, &pka_get_ctx()->completed_event_queue);
+	spin_unlock_irqrestore(&pka_get_ctx()->lock,flags);
+
+	if (!pka4xx_pending_op()) {
+		LPRINTF(LL_INFO, "No pending op in pka4xx_irq_handler\n");
+		tasklet_schedule(&pka_get_ctx()->tasklet);
+		return IRQ_HANDLED;
+	}
+	pka4xx_start_op(next_op, 1);
+	tasklet_schedule(&pka_get_ctx()->tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Poll (up to PKA4XX_POLL_DONE_MAX_CNT x 1us) for the sequencer to
+ * clear the RUN bit.  Returns RC_OK on completion, RC_EBUSY on
+ * timeout, or a CSR access error code.
+ */
+static int pka4xx_wait2complete(void)
+{
+	int rc;
+	u32 val;
+	u32 tried = 0;
+
+	do {
+		udelay(1);
+		PKA4XX_CSR_READ_RETURN(PKA_FUNCTION_ADDR, &val);
+		if (!(val & PKA_FUNCTION_RUN)) {
+			return RC_OK;
+		}
+		tried++;
+	} while (tried < PKA4XX_POLL_DONE_MAX_CNT);
+
+	/* Fix: restore the comma after LL_INFO -- every other LPRINTF call
+	 * site passes the level as a separate first argument. */
+	LPRINTF(LL_INFO, "Returning busy after tried count = %d", tried);
+	return RC_EBUSY;
+}
+
+/**
+ * Big-number multiply: product = multiplicand * multiplier.
+ *
+ * cb == NULL: synchronous -- start the op, poll to completion, read
+ * back product (multiplicand_cnt + multiplier_cnt words), return RC_OK.
+ * cb != NULL: queue the op, return RC_EINPROGRESS; *op_id (if non-NULL)
+ * receives the descriptor id and cb(ctx, status) fires on completion.
+ * Returns RC_EBUSY when the engine/queue cannot take the op.
+ */
+int pka4xx_mul(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u32 multiplicand_cnt, u32 *multiplicand,
+	u32 multiplier_cnt, u32 *multiplier,
+	u32 *product)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op *pka_op;
+	u32 i;
+
+#ifdef PPR_PKA_DEBUG
+	if (multiplicand_cnt > PKA4XX_VECTOR_MAXSIZE ||
+	    multiplier_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+	if (cb == NULL) {
+		/* synchronous mode cannot overtake queued async ops */
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		addr = pka4xx_pkcp_set_vec(multiplicand_cnt, multiplicand,
+					multiplier_cnt, multiplier);
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_RUN | PKA_FUNCTION_MUL);
+		rc = pka4xx_wait2complete();
+
+		if (rc != RC_OK)
+			return rc;
+		/* product width = sum of operand widths */
+		multiplicand_cnt += multiplier_cnt;
+		for(i = 0; i < multiplicand_cnt; i++) {
+			PKA4XX_CSR_READ_RETURN(addr, &product[i]);
+			LPRINTF(LL_INFO, "result addr 0x%08x value 0x%08x",
+				addr, product[i]);
+			addr += 4;
+		}
+		return RC_OK;
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_MUL;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = multiplicand_cnt+multiplier_cnt;
+	pka_op->resultC_addr = product;
+	pka_op->async_pkcp.vecA_cnt = multiplicand_cnt;
+	pka_op->async_pkcp.vecA = multiplicand;
+	pka_op->async_pkcp.vecB_cnt = multiplier_cnt;
+	pka_op->async_pkcp.vecB = multiplier;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Big-number divide: remainder (divisor_cnt words) and quotient
+ * (dividend_cnt - divisor_cnt + 1 words).
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ * Returns RC_EBUSY when the engine/queue cannot take the op.
+ */
+int pka4xx_div(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u32 dividend_cnt, u32 *dividend,
+	u32 divisor_cnt, u32 *divisor,
+	u32 *remainder, u32 *quotient)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op *pka_op;
+	u32 resultC_addr;
+	u32 resultD_addr;
+	u32 i;
+	u32 val32;	/* debug read-back only */
+
+#ifdef PPR_PKA_DEBUG
+	if (dividend_cnt > PKA4XX_VECTOR_MAXSIZE ||
+	    divisor_cnt > PKA4XX_VECTOR_MAXSIZE ||
+	    divisor_cnt > dividend_cnt)
+		return RC_INVALID_PARM;
+#endif
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		resultC_addr = pka4xx_pkcp_set_vec(dividend_cnt, dividend,
+						divisor_cnt, divisor);
+		/* quotient (D) goes after the divisor_cnt+1-word C region */
+		addr = resultC_addr;
+		addr += (divisor_cnt + 1) * 4;
+		PKA_ALIGN(addr, 8); /* Align 8-byte */
+		/* Select PKA RAM address for output D - quotient */
+		PKA4XX_CSR_WRITE_RETURN(PKA_DPTR_ADDR, addr >> 2);
+		PKA4XX_CSR_READ_RETURN(PKA_DPTR_ADDR, &val32);
+		resultD_addr = addr;
+
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_RUN | PKA_FUNCTION_DIV);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i < divisor_cnt; i++) {
+			PKA4XX_CSR_READ_RETURN(resultC_addr, &remainder[i]);
+			LPRINTF(LL_INFO, "C remaider : 0x%08x",
+				remainder[i]);
+			resultC_addr += 4;
+		}
+		/* quotient width = dividend_cnt - divisor_cnt + 1 words */
+		dividend_cnt -= divisor_cnt;
+		for(i = 0; i <= dividend_cnt /* Use = for + 1 */; ) {
+			PKA4XX_CSR_READ_RETURN(resultD_addr,
+					&quotient[i++]);
+			resultD_addr += 4;
+		}
+
+		return RC_OK;
+	}
+	/* Setting params for Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_DIV;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = divisor_cnt;
+	pka_op->resultD_cnt = dividend_cnt-divisor_cnt+1;
+	pka_op->resultC_addr = remainder;
+	pka_op->resultD_addr = quotient;
+	pka_op->async_pkcp.vecA_cnt = dividend_cnt;
+	pka_op->async_pkcp.vecA = dividend;
+	pka_op->async_pkcp.vecB_cnt = divisor_cnt;
+	pka_op->async_pkcp.vecB = divisor;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Big-number modulo: remainder (divisor_cnt words) = dividend % divisor.
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ * Returns RC_EBUSY when the engine/queue cannot take the op.
+ */
+int pka4xx_mod(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u32 dividend_cnt, u32 *dividend,
+	u32 divisor_cnt, u32 *divisor,
+	u32 *remainder)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op *pka_op;
+	u32 i;
+
+#ifdef PPR_PKA_DEBUG
+	if (dividend_cnt > PKA4XX_VECTOR_MAXSIZE ||
+	    divisor_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		addr = pka4xx_pkcp_set_vec(dividend_cnt, dividend,
+					divisor_cnt, divisor);
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_RUN | PKA_FUNCTION_MOD);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i < divisor_cnt; ) {
+			PKA4XX_CSR_READ_RETURN(addr, &remainder[i++]);
+			addr += 4;
+		}
+		return RC_OK;
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_MOD;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = divisor_cnt;
+	pka_op->resultC_addr = remainder;
+	pka_op->async_pkcp.vecA_cnt = dividend_cnt;
+	pka_op->async_pkcp.vecA = dividend;
+	pka_op->async_pkcp.vecB_cnt = divisor_cnt;
+	pka_op->async_pkcp.vecB = divisor;
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Big-number add: sum = addendA + addendB; the result is read as
+ * max(addendA_cnt, addendB_cnt) + 1 words (room for the carry).
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ * Returns RC_EBUSY when the engine/queue cannot take the op.
+ */
+int pka4xx_add(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u32 addendA_cnt, u32 *addendA,
+	u32 addendB_cnt, u32 *addendB, u32 *sum)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op *pka_op;
+	u32 result_len;
+	u32 i;
+
+#ifdef PPR_PKA_DEBUG
+	if (addendA_cnt > PKA4XX_VECTOR_MAXSIZE ||
+	    addendB_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+	/* +1 word for the final carry */
+	result_len = addendA_cnt > addendB_cnt ? (addendA_cnt+1) :
+						(addendB_cnt+1);
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		addr = pka4xx_pkcp_set_vec(addendA_cnt, addendA,
+					addendB_cnt, addendB);
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_ADD | PKA_FUNCTION_RUN);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i < result_len; ) {
+			PKA4XX_CSR_READ_RETURN(addr, &sum[i++]);
+			addr += 4;
+		}
+		LPRINTF(LL_INFO, "result = %d,addr = 0x%08x",
+			*sum, (unsigned int)addr);
+		return RC_OK;
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_ADD;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = result_len;
+	pka_op->resultC_addr = sum;
+	pka_op->async_pkcp.vecA_cnt = addendA_cnt;
+	pka_op->async_pkcp.vecA = addendA;
+	pka_op->async_pkcp.vecB_cnt = addendB_cnt;
+	pka_op->async_pkcp.vecB = addendB;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Big-number subtract: difference = minuend - subtrahend; the result
+ * is read as max(minuend_cnt, subtrahend_cnt) words.
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ * Returns RC_EBUSY when the engine/queue cannot take the op.
+ */
+int pka4xx_sub(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u32 minuend_cnt, u32 *minuend,
+	u32 subtrahend_cnt, u32 *subtrahend, u32 *difference)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op *pka_op;
+	u32 result_len;
+	u32 i;
+
+#ifdef PPR_PKA_DEBUG
+	if (minuend_cnt > PKA4XX_VECTOR_MAXSIZE ||
+	    subtrahend_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+	result_len = minuend_cnt > subtrahend_cnt ? minuend_cnt :
+						subtrahend_cnt;
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		addr = pka4xx_pkcp_set_vec(minuend_cnt, minuend,
+					subtrahend_cnt, subtrahend);
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_SUB | PKA_FUNCTION_RUN);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i < result_len; ) {
+			PKA4XX_CSR_READ_RETURN(addr, &difference[i++]);
+			addr += 4;
+		}
+		return RC_OK;
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_SUB;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = result_len;
+	pka_op->resultC_addr = difference;
+	pka_op->async_pkcp.vecA_cnt = minuend_cnt;
+	pka_op->async_pkcp.vecA = minuend;
+	pka_op->async_pkcp.vecB_cnt = subtrahend_cnt;
+	pka_op->async_pkcp.vecB = subtrahend;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Combined add/subtract over equal-length operands; result is read as
+ * input_cnt + 1 words.  Per operand naming this computes
+ * addendA + addendC - subtrahend -- TODO confirm against the PKA
+ * datasheet.
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ */
+int pka4xx_addsub(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u32 input_cnt, u32 *addendA,
+	u32 *addendC, u32 *subtrahend, u32 *result)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op * pka_op;
+	u32 i;
+
+#ifdef PPR_PKA_DEBUG
+	if (input_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		addr = pka4xx_addsub_set_vec(input_cnt, addendA,
+					subtrahend, addendC);
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_ADDSUB | PKA_FUNCTION_RUN);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i <= input_cnt /* Use = for + 1 */; ) {
+			PKA4XX_CSR_READ_RETURN(addr, &result[i++]);
+			addr += 4;
+		}
+		return RC_OK;
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_ADDSUB;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultD_cnt = input_cnt+1;
+	pka_op->resultD_addr = result;
+	pka_op->async_pkcp.vecA_cnt = input_cnt;
+	pka_op->async_pkcp.vecA = addendA;
+	pka_op->async_pkcp.vecB_cnt = 0;	/* B length unused by ADDSUB */
+	pka_op->async_pkcp.vecB = subtrahend;
+	pka_op->async_pkcp.vec_addsub_C = addendC;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Right-shift a big-number vector by "shift" bits; result is
+ * input_cnt words.
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ * Returns RC_EBUSY when the engine/queue cannot take the op.
+ */
+int pka4xx_rshift(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u32 input_cnt, u32 *input,
+	u8 shift, u32 *result)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op *pka_op;
+	u32 i;
+
+#ifdef PPR_PKA_DEBUG
+	if (input_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		addr = pka4xx_shift_set_vec(input_cnt, input, shift);
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_RSHIFT | PKA_FUNCTION_RUN);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i < input_cnt;) {
+			PKA4XX_CSR_READ_RETURN(addr, &result[i++]);
+			addr += 4;
+		}
+		return RC_OK;
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_RSHIFT;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = input_cnt;
+	pka_op->resultC_addr = result;
+	pka_op->async_pkcp.vecA_cnt = input_cnt;
+	pka_op->async_pkcp.vecA = input;
+	pka_op->async_pkcp.shift_val = shift;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Left-shift a big-number vector by "shift" bits; result is
+ * input_cnt words (shift == 0) or input_cnt + 1 words (overflow word).
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ * Returns RC_EBUSY when the engine/queue cannot take the op.
+ */
+int pka4xx_lshift(pka4xx_cb cb, void *ctx,
+	u32 *op_id, u32 input_cnt,
+	u32 *input, u8 shift,
+	u32 *result)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op *pka_op;
+	u32 result_len;
+	u32 i;
+
+#ifdef PPR_PKA_DEBUG
+	if (input_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+	/* a non-zero shift can spill into one extra word */
+	result_len = shift == 0 ? input_cnt : (input_cnt+1);
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		addr = pka4xx_shift_set_vec(input_cnt, input, shift);
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_LSHIFT | PKA_FUNCTION_RUN);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i < result_len; ) {
+			PKA4XX_CSR_READ_RETURN(addr, &result[i++]);
+			addr += 4;
+		}
+		return RC_OK;
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_LSHIFT;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = result_len;
+	pka_op->resultC_addr = result;
+	pka_op->async_pkcp.vecA_cnt = input_cnt;
+	pka_op->async_pkcp.vecA = input;
+	pka_op->async_pkcp.shift_val = shift;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Big-number compare: *result = 0 (equal), -1 (input1 < input2) or
+ * 1 (input1 > input2), decoded from the PKA compare status register.
+ *
+ * cb == NULL: synchronous (poll, decode, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS; the IRQ handler decodes
+ * the status into *result before the callback fires.
+ */
+int pka4xx_compare(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u32 input1_cnt, u32 *input1,
+	u32 input2_cnt, u32 *input2,
+	int *result)
+{
+	int rc;
+	struct pka4xx_op *pka_op;
+	u32 val;
+
+#ifdef PPR_PKA_DEBUG
+	if (input1_cnt > PKA4XX_VECTOR_MAXSIZE ||
+	    input2_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		pka4xx_pkcp_set_vec(input1_cnt, input1, input2_cnt,
+				input2);
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					PKA_FUNCTION_COMPARE | PKA_FUNCTION_RUN);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		PKA4XX_CSR_READ_RETURN(PKA_COMPARE_ADDR, &val);
+		if (val & PKA_COMPARE_EQUAL)
+			*result = 0;
+		else if (val & PKA_COMPARE_LESSTHAN)
+			*result = -1;
+		else
+			*result = 1;
+		return RC_OK;
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = PKA_FUNCTION_COMPARE;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = 1;
+	/* int* stored in the u32* slot; IRQ handler writes 0/-1/1 here */
+	pka_op->resultC_addr = (u32 *)result;
+	pka_op->async_pkcp.vecA_cnt = input1_cnt;
+	pka_op->async_pkcp.vecA = input1;
+	pka_op->async_pkcp.vecB_cnt = input2_cnt;
+	pka_op->async_pkcp.vecB = input2;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Modular exponentiation: result = base ^ exponent mod modulus
+ * (base_mod_cnt result words).  odd_pwr_cnt selects the firmware
+ * variant: 2 -> ACT2, 8 -> ACT4, anything else -> VAR with the count
+ * programmed into the SHIFT register.
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ */
+int pka4xx_expmod(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u8 odd_pwr_cnt,
+	u32 base_mod_cnt, u32 *base,
+	u32 *modulus,
+	u32 exponent_cnt, u32 *exponent,
+	u32 *result)
+{
+	int rc;
+	u32 addr; /* Address of PKA RAM */
+	struct pka4xx_op * pka_op;
+	u32 cmd;
+	u32 i;
+
+#ifdef PPR_PKA_DEBUG
+	if (odd_pwr_cnt > 16 || odd_pwr_cnt == 0 ||
+	    base_mod_cnt > PKA4XX_VECTOR_MAXSIZE ||
+	    exponent_cnt > PKA4XX_VECTOR_MAXSIZE)
+		return RC_INVALID_PARM;
+#endif
+
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	/* Select the sequencer variant for the requested odd-power count */
+	if (odd_pwr_cnt == 2) {
+		cmd = PKA_FUNCTION_SEQOP_EXPMOD_ACT2;
+	} else if (odd_pwr_cnt == 8) {
+		cmd = PKA_FUNCTION_SEQOP_EXPMOD_ACT4;
+	} else {
+		PKA4XX_CSR_WRITE_RETURN(PKA_SHIFT_ADDR, odd_pwr_cnt);
+		cmd = PKA_FUNCTION_SEQOP_EXPMOD_VAR;
+	}
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		addr = pka4xx_expmod_set_vec(base_mod_cnt, base, modulus,
+					exponent_cnt, exponent);
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+					cmd | PKA_FUNCTION_RUN);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i < base_mod_cnt; i++) {
+			PKA4XX_CSR_READ_RETURN(addr, &result[i]);
+			LPRINTF(LL_INFO, "output = 0x%08x ",
+				result[i]);
+			addr += 4;
+		}
+		return RC_OK;
+
+	}
+	/* Asynchronous operation */
+	pka_op->opcode = cmd;
+	pka_op->cb = cb;
+	pka_op->ctx = ctx;
+	pka_op->resultC_cnt = odd_pwr_cnt; /* Save odd power cnt in here */
+	pka_op->resultD_cnt = base_mod_cnt;
+	pka_op->resultC_addr = NULL;
+	pka_op->resultD_addr = result;
+	pka_op->async_expmod.base = base;
+	pka_op->async_expmod.exp = exponent;
+	pka_op->async_expmod.modulus = modulus;
+	pka_op->async_expmod.base_mod_cnt = base_mod_cnt;
+	pka_op->async_expmod.exp_cnt = exponent_cnt;
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+
+	return RC_EINPROGRESS;
+}
+
+/**
+ * CRT modular exponentiation (RSA private-key form): computes the
+ * 2 * mod_inverse_len-word result from (expP, expQ, modP, modQ,
+ * inverseQ, input).  odd_pwr_cnt is programmed into the SHIFT register
+ * for the sequencer firmware.
+ *
+ * cb == NULL: synchronous (poll and read back, return RC_OK);
+ * cb != NULL: queued, returns RC_EINPROGRESS and completes via cb.
+ */
+int pka4xx_expmod_crt(pka4xx_cb cb, void *ctx, u32 *op_id,
+	u8 odd_pwr_cnt,
+	u32 exp_len, u32 *expP, u32 *expQ,
+	u32 mod_inverse_len, u32 *modP, u32 *modQ,
+	u32 *inverseQ, u32 *input,
+	u32 *result)
+{
+	int rc;
+	struct pka4xx_op *pka_op;
+	u32 i;
+	u32 Daddr;	/* only set/used on the synchronous path */
+
+#ifdef PPR_PKA_DEBUG
+	if (exp_len > PKA4XX_VECTOR_MAXSIZE ||
+	    mod_inverse_len > PKA4XX_VECTOR_MAXSIZE ||
+	    odd_pwr_cnt > 16)
+		return RC_INVALID_PARM;
+#endif
+	if (cb == NULL) {
+		if (pka4xx_pending_op())
+			return RC_EBUSY;
+		pka_op = NULL;
+	} else {
+		pka_op = pka4xx_get_op_item();
+		if (pka_op == NULL)
+			return RC_EBUSY;
+	}
+
+	if (!cb) {
+		/* synchronous: vectors go straight into PKA RAM */
+		Daddr = pka4xx_expmod_crt_set_vec(exp_len, expP, expQ,
+						mod_inverse_len,
+						modP, modQ,
+						inverseQ, input);
+	} else {
+		/* Asynchronous operation */
+		pka_op->opcode = PKA_FUNCTION_SEQOP_EXPMOD_CRT;
+		pka_op->cb = cb;
+		pka_op->ctx = ctx;
+		pka_op->resultD_cnt = mod_inverse_len<<1;
+		pka_op->resultC_cnt = odd_pwr_cnt; /* Use result C cnt for pwr cnt */
+		pka_op->resultD_addr = result;
+		pka_op->async_expmod_crt.expP = expP;
+		pka_op->async_expmod_crt.expQ = expQ;
+		pka_op->async_expmod_crt.modP = modP;
+		pka_op->async_expmod_crt.modQ = modQ;
+		pka_op->async_expmod_crt.inverseQ = inverseQ;
+		pka_op->async_expmod_crt.exp_len = exp_len;
+		pka_op->async_expmod_crt.mod_inverse_len = mod_inverse_len;
+		pka_op->async_expmod_crt.input = input;
+	}
+
+	/* Save callback for asynchronous operation */
+	if (!cb) {
+		/* Start the operation */
+		PKA4XX_CSR_WRITE_RETURN(PKA_SHIFT_ADDR, odd_pwr_cnt);
+		PKA4XX_CSR_WRITE_RETURN(PKA_FUNCTION_ADDR,
+				PKA_FUNCTION_SEQOP_EXPMOD_CRT | PKA_FUNCTION_RUN);
+		rc = pka4xx_wait2complete();
+		if (rc != RC_OK)
+			return rc;
+		for(i = 0; i < (mod_inverse_len<<1); i++) {
+			PKA4XX_CSR_READ_RETURN(Daddr, &result[i]);
+			LPRINTF(LL_INFO, "D addr : 0x%08x val 0x%08x",
+				Daddr, result[i]);
+			Daddr += 4;
+		}
+		return RC_OK;
+	}
+
+	if (op_id)
+		*op_id = pka_op->id;
+	pka4xx_start_op(pka_op, 0);
+
+	return RC_EINPROGRESS;
+}
+
+/**
+ * Initialize the PKA: reset descriptor ring and completion list, load
+ * the sequencer firmware (core held in reset during the load), release
+ * the sequencer, and hook up the tasklet + interrupt.
+ *
+ * Fix: kill the tasklet on the request_irq() failure path so an armed
+ * tasklet cannot outlive a failed init.
+ *
+ * Returns RC_OK, an RC_* CSR error, or the request_irq() errno.
+ */
+int pka4xx_hw_init(void)
+{
+	int rc;
+	u32 i;
+	int result;
+	u32 prog_addr;
+
+	printk(LL_INFO "Initializing PKA...\n");
+
+	/* Initialize context variable */
+	for(i = 0; i < PKA4XX_PENDING_OP_MAX; i++) {
+		pka_get_ctx()->op[i].id = i+1;	/* ids are 1-based */
+		pka_get_ctx()->op[i].opcode = 0;	/* 0 == canceled/idle */
+	}
+	INIT_LIST_HEAD(&pka_get_ctx()->completed_event_queue);
+
+	/* Load PKA firmware */
+	LPRINTF(LL_INFO, "Loading PKA firmware PKA RAM Addr: 0x%08X size "
+		"(DW): %d...",
+		pka_get_ctx()->csr_paddr,
+		pkt_firmware_sizedw);
+
+	/* Put PKA Sequencer into reset to access firmware area */
+	rc = pka4xx_csr_hw_write32(PKA_SEQ_CTRL_ADDR, PKA_SEQ_CTRL_RESET);
+	if (rc != RC_OK) {
+		LPRINTF(LL_ERR,
+			"Failed to put PKA Sequencer into reset error 0x%08X",
+			rc);
+		return rc;
+	}
+	/* Now, load the firmware */
+	prog_addr = PKA_PROGRAM_ADDR;
+	for(i = 0; i < pkt_firmware_sizedw; i++, prog_addr += 4) {
+		rc = pka4xx_csr_hw_write32(prog_addr, pka_firmware[i]);
+
+		if (rc != RC_OK) {
+			LPRINTF(LL_ERR,
+				"Failed to load PKA firmware error 0x%08X", rc);
+			return rc;
+		}
+	}
+	/* Put PKA Sequencer into normal operation */
+	rc = pka4xx_csr_hw_write32(PKA_SEQ_CTRL_ADDR, 0);
+	if (rc != RC_OK) {
+		LPRINTF(LL_ERR,
+			"Failed to take PKA Sequencer out of reset error 0x%08X",
+			rc);
+		return rc;
+	}
+
+	/* Register for interrupt */
+	tasklet_init(&pka_get_ctx()->tasklet,
+		pka4xx_tasklet_cb, (unsigned long)pka_get_ctx()->op);
+
+	result = request_irq(pka_get_ctx()->irq, pka4xx_irq_handler,
+			0, "PKA", NULL);
+	if (result != 0) {
+		/* Fix: don't leave the tasklet armed after a failed init */
+		tasklet_kill(&pka_get_ctx()->tasklet);
+		return result;
+	}
+
+	set_irq_type(pka_get_ctx()->irq, IRQ_TYPE_EDGE_RISING);
+	/* Comment this out to enable interrupt mode -- Now doing only polling mode */
+	/* disable_irq(pka_get_ctx()->irq); */
+
+	return RC_OK;
+}
+
+/*
+ * Tear down the PKA interrupt path set up by pka4xx_hw_init().
+ * Fix: also kill the completion tasklet so a late-scheduled instance
+ * cannot run after the IRQ has been freed.
+ */
+int pka4xx_hw_deinit(void)
+{
+	disable_irq(pka_get_ctx()->irq);
+	free_irq(pka_get_ctx()->irq, NULL);
+	tasklet_kill(&pka_get_ctx()->tasklet);
+	return RC_OK;
+}
diff --git a/drivers/crypto/pka_4xx_access.c b/drivers/crypto/pka_4xx_access.c
new file mode 100644
index 00000000000..c2452bdc749
--- /dev/null
+++ b/drivers/crypto/pka_4xx_access.c
@@ -0,0 +1,201 @@
+/*******************************************************************************
+ *
+ * Copyright (c) 2008 Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Detail Description:
+ * This file provides access to, and the Linux platform glue for, the
+ * AMCC 4xx PKA (Public Key Accelerator) hardware: CSR mapping, register
+ * read/write helpers, and OF driver registration.
+ *
+ * @file pka_4xx_access.c
+ *
+ * This file provides access and implementation of the PKA hardware
+ * under Linux.
+ *
+ *******************************************************************************
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <asm/io.h>
+#include <crypto/pka_4xx.h>
+#include "pka_4xx_access.h"
+
+#define PKA4XX_VER_STR "0.1"
+
+/* Driver-wide singleton: the PKA context plus the OF resources backing it. */
+struct pka4xx_dev {
+	struct pka4xx_ctx ctx;
+	struct resource csr_res;
+	struct resource pka_ram_res;
+};
+
+/* Thin carrier so probe() can hand its OF device to pka4xx_config_set(). */
+struct hal_config {
+	struct of_device *ofdev;
+};
+
+/* Positional initializer for struct pka4xx_ctx (declared in
+ * crypto/pka_4xx.h, not visible here): one free-RAM slot spanning
+ * PKA4XX_RAM_FREE_SIZE/4 words.  NOTE(review): field meanings are
+ * assumed from the header -- designated initializers would be safer;
+ * confirm against struct pka4xx_ctx before reordering. */
+static struct pka4xx_dev pka_dev = {
+	.ctx = {
+		0,
+		{ { 1, PKA4XX_RAM_FREE_SIZE/4, 0 },
+		{ 0, 0, 0 } },
+		0, 0
+	}
+};
+
+/* Accessor for the module-wide PKA context embedded in pka_dev. */
+struct pka4xx_ctx *pka_get_ctx(void)
+{
+	struct pka4xx_dev *dev = &pka_dev;
+
+	return &dev->ctx;
+}
+
+/**
+ * pka4xx_config_set - map the PKA CSR window and resolve its interrupt.
+ * @cfg: wrapper holding the OF device whose node carries reg/interrupts.
+ *
+ * On success pka_dev.ctx.csr, .csr_paddr and .irq are valid.
+ * Returns -ENODEV if the reg property cannot be translated, -ENOMEM if
+ * the ioremap fails, and -EINVAL if no interrupt is wired up (the CSR
+ * mapping is undone in that case so no resource leaks).
+ */
+int pka4xx_config_set(struct hal_config *cfg)
+{
+	struct device_node *pka_np = cfg->ofdev->node;
+	int rc = 0;
+
+	rc = of_address_to_resource(pka_np, 0, &pka_dev.csr_res);
+	if (rc) {
+		LPRINTF(LL_INFO, "error getting address to resource");
+		return -ENODEV;
+	}
+	/* Keep both the bus address (for logging) and the CPU mapping. */
+	pka_dev.ctx.csr_paddr = pka_dev.csr_res.start;
+	pka_dev.ctx.csr = ioremap(pka_dev.csr_res.start,
+		pka_dev.csr_res.end - pka_dev.csr_res.start + 1);
+
+	if (pka_dev.ctx.csr == NULL) {
+		LPRINTF(LL_ERR,
+			"unable to ioremap 0x%02X_%08X size %d",
+			(u32) (pka_dev.csr_res.start >> 32),
+			(u32) pka_dev.csr_res.start,
+			(u32) (pka_dev.csr_res.end - pka_dev.csr_res.start + 1));
+		return -ENOMEM;
+	}
+
+	pka_dev.ctx.irq = of_irq_to_resource(pka_np, 0, NULL);
+
+	if (pka_dev.ctx.irq == NO_IRQ) {
+		/* Un-map CSR so a failed probe leaves no stale mapping */
+		iounmap(pka_dev.ctx.csr);
+		pka_dev.ctx.csr = NULL;
+		LPRINTF(LL_ERR, "no irq");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Undo pka4xx_config_set(): drop the CSR mapping.  Always returns 0. */
+int pka4xx_config_clear(void)
+{
+	iounmap(pka_dev.ctx.csr);
+	return 0;
+}
+
+/* Read one 32-bit little-endian CSR at byte offset @reg_addr into
+ * *@data_val.  Always returns 0. */
+int pka4xx_csr_hw_read32(u32 reg_addr, u32 *data_val)
+{
+	volatile unsigned __iomem *addr =
+		(volatile unsigned __iomem *) (pka_dev.ctx.csr + reg_addr);
+
+	*data_val = in_le32(addr);
+	return 0;
+}
+
+/* Write @data_val to the 32-bit little-endian CSR at byte offset
+ * @reg_addr.  Always returns 0. */
+int pka4xx_csr_hw_write32(u32 reg_addr, u32 data_val)
+{
+	volatile unsigned __iomem *addr =
+		(volatile unsigned __iomem *) (pka_dev.ctx.csr + reg_addr);
+
+	out_le32(addr, data_val);
+	return 0;
+}
+
+/**
+ * Setup Driver with platform registration
+ */
+/**
+ * pka4xx_probe - OF platform probe: map resources, then bring up the PKA.
+ *
+ * pka4xx_config_set() must succeed before pka4xx_hw_init() touches any
+ * register.  On hw_init failure the CSR mapping is released via the err
+ * path; config_set cleans up after itself, so a plain return suffices there.
+ */
+static int __devinit pka4xx_probe(struct of_device *ofdev,
+				  const struct of_device_id *match)
+{
+	struct hal_config hw_cfg;
+	int rc;
+
+	hw_cfg.ofdev = ofdev;
+	rc = pka4xx_config_set(&hw_cfg);
+	if (rc != 0)
+		return rc;
+
+	printk(KERN_INFO "AMCC 4xx PKA v%s @0x%02X_%08X size %d IRQ %d\n",
+		PKA4XX_VER_STR,
+		(u32) (pka_dev.csr_res.start >> 32),
+		(u32) pka_dev.csr_res.start,
+		(u32) (pka_dev.csr_res.end - pka_dev.csr_res.start + 1),
+		pka_dev.ctx.irq);
+
+	rc = pka4xx_hw_init();
+	if (rc < 0) {
+		LPRINTF(LL_ERR, "failed to initialize PKA");
+		goto err;
+	}
+	printk(KERN_INFO "PKA Driver Successfully Initialized\n");
+	return rc;
+
+err:
+	pka4xx_config_clear();
+	return rc;
+}
+
+/* OF platform remove: quiesce the hardware first, then drop the mapping. */
+static int __devexit pka4xx_remove(struct of_device *dev)
+{
+	pka4xx_hw_deinit();
+	pka4xx_config_clear();
+	return 0;
+}
+
+/* Match both the bare and the "amcc,"-prefixed compatible strings. */
+static struct of_device_id pka4xx_match[] = {
+	{ .compatible = "ppc4xx-pka", },
+	{ .compatible = "amcc,ppc4xx-pka", },
+	{ },
+};
+
+/* OF platform glue binding probe/remove to the match table above. */
+static struct of_platform_driver pka4xx_driver = {
+	.name = "ppc4xx-pka",
+	.match_table = pka4xx_match,
+	.probe = pka4xx_probe,
+	.remove = pka4xx_remove,
+};
+
+/* Module entry: register the OF platform driver; probe runs on match. */
+static int __init mod_init(void)
+{
+	return of_register_platform_driver(&pka4xx_driver);
+}
+
+/* Module exit: unregister the driver; remove() runs for bound devices. */
+static void __exit mod_exit(void)
+{
+	of_unregister_platform_driver(&pka4xx_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("AMCC 4xx Public Key Accelerator");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(pka4xx_mul);
+EXPORT_SYMBOL_GPL(pka4xx_div);
+EXPORT_SYMBOL_GPL(pka4xx_mod);
+EXPORT_SYMBOL_GPL(pka4xx_add);
+EXPORT_SYMBOL_GPL(pka4xx_sub);
+EXPORT_SYMBOL_GPL(pka4xx_addsub);
+EXPORT_SYMBOL_GPL(pka4xx_rshift);
+EXPORT_SYMBOL_GPL(pka4xx_lshift);
+EXPORT_SYMBOL_GPL(pka4xx_compare);
+EXPORT_SYMBOL_GPL(pka4xx_expmod);
+EXPORT_SYMBOL_GPL(pka4xx_expmod_crt);
+
diff --git a/drivers/crypto/pka_4xx_access.h b/drivers/crypto/pka_4xx_access.h
new file mode 100644
index 00000000000..b7baa664f8c
--- /dev/null
+++ b/drivers/crypto/pka_4xx_access.h
@@ -0,0 +1,86 @@
+/*******************************************************************************
+ *
+ * Copyright (c) 2008 Loc Ho <lho@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Detail Description:
+ * This header declares the access layer for the AMCC 4xx PKA hardware:
+ * logging macros, return-code definitions, and shared constants used by
+ * pka_4xx_access.c.
+ *
+ * @file pka_4xx_access.h
+ *
+ * This module provides access to the AMCC SoC PKA hardware under Linux.
+ *
+ *******************************************************************************
+ */
+#ifndef __PKA4XX_ACCESS_H__
+#define __PKA4XX_ACCESS_H__
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/errno.h>
+
+#ifndef AHB_BASE_ADDR_LO
+ /* FIXME */
+# define AHB_BASE_ADDR_LO 0
+#endif
+
+/* Debugging Flags */
+
+#ifndef LL_EMERG
+#define LL_EMERG KERN_EMERG
+#define LL_ALERT KERN_ALERT
+#define LL_CRIT KERN_CRIT
+#define LL_ERR KERN_ERR
+#define LL_WARNING KERN_WARNING
+#define LL_NOTICE KERN_NOTICE
+#define LL_INFO KERN_INFO
+#define LL_DEBUG KERN_DEBUG
+#define LL_EXTRADEBUG KERN_DEBUG
+#endif
+
+#define PKA4XX_HDR "PKA4XX: "
+/* #define PKA4XX_DEBUG */
+
+#if !defined(PKA4XX_DEBUG)
+/* Compiled out entirely when debugging is disabled. */
+# define LPRINTF(ll, fmt, ...)
+#else
+/* do { ... } while (0) with NO trailing semicolon: the original expansion
+ * ended in "while(0);", which defeats the idiom and makes
+ * "if (x) LPRINTF(...); else ..." a syntax error (CERT PRE10-C). */
+# define LPRINTF(ll, fmt, ...) \
+	do { \
+		printk(ll PKA4XX_HDR fmt "\n", ##__VA_ARGS__); \
+	} while (0)
+#endif
+
+#ifndef RC_OK
+ #define RC_OK 0
+ #define RC_INVALID_PARM -EINVAL
+ #define RC_NODEV -ENODEV
+ #define RC_NO_IMPLEMENTATION -ENOSYS
+ #define RC_ENOMEM -ENOMEM
+ #define RC_EINPROGRESS -EINPROGRESS
+ #define RC_EALREADY -EALREADY
+ #define RC_EBUSY -EBUSY
+ #define RC_EIO -EIO
+
+ /* Error code base specify to AMCC */
+ #define RC_ERROR_BASE 5000
+ #define RC_HWERROR -(RC_ERROR_BASE+0)
+ #define RC_FATAL -(RC_ERROR_BASE+1)
+#endif /* RC_OK */
+
+#ifndef ASSERT
+ #define ASSERT(x)
+#endif /* ASSERT */
+
+#endif
diff --git a/drivers/crypto/pka_4xx_firmware.h b/drivers/crypto/pka_4xx_firmware.h
new file mode 100644
index 00000000000..64f79dada4b
--- /dev/null
+++ b/drivers/crypto/pka_4xx_firmware.h
@@ -0,0 +1,515 @@
+/*
+// ASMSEQ Generic Sequencer Assembler V1.15
+// EIP28 sequencer firmware including CRT V1.0
+// Written by KLa/AVe, October 2005
+//==========================================
+*/
+#define PKA_FIRMWARE_1_3_SIZEDW 506
+const unsigned int pka_firmware_1_3[PKA_FIRMWARE_1_3_SIZEDW] =
+ { 0x007001C9,
+ 0x008C0200,
+ 0x007000C9,
+ 0x0068401C,
+ 0x00210008,
+ 0x00601FEE,
+ 0x003CA000,
+ 0x00C80011,
+ 0x00210002,
+ 0x003C6000,
+ 0x00C80011,
+ 0x00694018,
+ 0x003C2000,
+ 0x00C80011,
+ 0x003C7000,
+ 0x00CC00A3,
+ 0x002C0001,
+ 0x0070001D,
+ 0x00611FEC,
+ 0x006A4010,
+ 0x006B4014,
+ 0x00621FE6,
+ 0x00631FE2,
+ 0x00684000,
+ 0x00694004,
+ 0x00601FE4,
+ 0x00611FE0,
+ 0x006A4008,
+ 0x006B400C,
+ 0x00621FE8,
+ 0x00631FEA,
+ 0x00C800A8,
+ 0x00601FD2,
+ 0x00611FD0,
+ 0x00631FD6,
+ 0x00691FE2,
+ 0x00621FD4,
+ 0x00004000,
+ 0x00240002,
+ 0x0034FFFE,
+ 0x00601FDC,
+ 0x00054000,
+ 0x006A1FD6,
+ 0x00614010,
+ 0x00624000,
+ 0x00060000,
+ 0x00621FD8,
+ 0x00060000,
+ 0x00624008,
+ 0x00621FDA,
+ 0x00218200,
+ 0x008701AD,
+ 0x006B1FD0,
+ 0x00060000,
+ 0x00070000,
+ 0x00634004,
+ 0x00624008,
+ 0x008701AD,
+ 0x00020000,
+ 0x00681FD6,
+ 0x00691FDA,
+ 0x00068000,
+ 0x008701B9,
+ 0x00681FDC,
+ 0x006A1FD0,
+ 0x00691FD2,
+ 0x006B1FE6,
+ 0x00060000,
+ 0x00270001,
+ 0x0037FFFE,
+ 0x0005C000,
+ 0x00611FE4,
+ 0x00681FD8,
+ 0x00621FE0,
+ 0x00601FE8,
+ 0x00601FEA,
+ 0x008500A7,
+ 0x006A1FDC,
+ 0x00691FD6,
+ 0x00681FDA,
+ 0x008701B9,
+ 0x00691FD8,
+ 0x00681FD6,
+ 0x00068000,
+ 0x008701B9,
+ 0x00691FD2,
+ 0x006A1FD0,
+ 0x00611FE4,
+ 0x00621FE0,
+ 0x008500A7,
+ 0x00681FD6,
+ 0x006A1FD8,
+ 0x00691FE2,
+ 0x00604004,
+ 0x00624000,
+ 0x00624008,
+ 0x00044000,
+ 0x0030FFFE,
+ 0x00500000,
+ 0x00500000,
+ 0x00064000,
+ 0x0032FFFE,
+ 0x00520000,
+ 0x00520000,
+ 0x00250001,
+ 0x00614010,
+ 0x00614014,
+ 0x00218020,
+ 0x008701AD,
+ 0x00681FD0,
+ 0x00691FE2,
+ 0x00604004,
+ 0x00614010,
+ 0x00614014,
+ 0x00250001,
+ 0x002A0002,
+ 0x008C0400,
+ 0x004B8000,
+ 0x006A1FD8,
+ 0x003FFFFF,
+ 0x0015C000,
+ 0x00064000,
+ 0x00624008,
+ 0x00218010,
+ 0x008701AD,
+ 0x00691FD4,
+ 0x006A1FDA,
+ 0x00614004,
+ 0x00624008,
+ 0x00218001,
+ 0x008701AD,
+ 0x00624000,
+ 0x00604004,
+ 0x00691FD8,
+ 0x006B1FE2,
+ 0x00614008,
+ 0x0007C000,
+ 0x00634010,
+ 0x00218200,
+ 0x008701AD,
+ 0x00624008,
+ 0x00691FD8,
+ 0x006B1FDC,
+ 0x00614000,
+ 0x00070000,
+ 0x00681FE2,
+ 0x00634004,
+ 0x00604010,
+ 0x00218001,
+ 0x008701AD,
+ 0x00624004,
+ 0x006A1FD6,
+ 0x00040000,
+ 0x00624000,
+ 0x00624008,
+ 0x00604014,
+ 0x00218018,
+ 0x008701AD,
+ 0x00691FD4,
+ 0x00621FEA,
+ 0x00611FE8,
+ 0x00200001,
+ 0x008001E9,
+ 0x007000C8,
+ 0x00200013,
+ 0x008C0400,
+ 0x008001F7,
+ 0x00611FDE,
+ 0x007000C8,
+ 0x00691FE0,
+ 0x006A1FE2,
+ 0x00004000,
+ 0x008701B9,
+ 0x00020000,
+ 0x0030FFFE,
+ 0x00490000,
+ 0x00310001,
+ 0x00200003,
+ 0x00D401DA,
+ 0x00200007,
+ 0x008C0400,
+ 0x006B4024,
+ 0x000B8000,
+ 0x00C801DA,
+ 0x0006C000,
+ 0x00270002,
+ 0x00631FE2,
+ 0x00681FEA,
+ 0x002E0001,
+ 0x0004C000,
+ 0x002C0001,
+ 0x00601FF8,
+ 0x0032FFFE,
+ 0x00520000,
+ 0x00520000,
+ 0x00691FE4,
+ 0x006A1FE6,
+ 0x00004000,
+ 0x008701B9,
+ 0x00681FE0,
+ 0x00210010,
+ 0x00220000,
+ 0x0030FFFE,
+ 0x00480000,
+ 0x00230001,
+ 0x00330001,
+ 0x00D400D3,
+ 0x0033FFFF,
+ 0x00070000,
+ 0x00330001,
+ 0x003A0001,
+ 0x00320001,
+ 0x002DFFFF,
+ 0x00CC00CD,
+ 0x00621FFC,
+ 0x00018000,
+ 0x00072000,
+ 0x00681FE0,
+ 0x00631FFE,
+ 0x0030FFFE,
+ 0x00240002,
+ 0x00480000,
+ 0x00072000,
+ 0x006B1FFE,
+ 0x002E0001,
+ 0x00078000,
+ 0x00681FE0,
+ 0x00210010,
+ 0x00220000,
+ 0x0030FFFE,
+ 0x00480000,
+ 0x00330001,
+ 0x00D400ED,
+ 0x0033FFFF,
+ 0x00070000,
+ 0x00330001,
+ 0x003A0001,
+ 0x00320001,
+ 0x002DFFFF,
+ 0x00CC00E7,
+ 0x00621FFE,
+ 0x00691FE4,
+ 0x008C0400,
+ 0x00684024,
+ 0x006A4028,
+ 0x00180000,
+ 0x00D001C6,
+ 0x003E000F,
+ 0x00238000,
+ 0x00138000,
+ 0x00631FF2,
+ 0x00320003,
+ 0x00360002,
+ 0x00030000,
+ 0x0033FFFE,
+ 0x00078000,
+ 0x00631FF4,
+ 0x004BC000,
+ 0x00084000,
+ 0x00631FF0,
+ 0x0030FFFE,
+ 0x00048000,
+ 0x00601FF6,
+ 0x0018C000,
+ 0x003C0001,
+ 0x00C801D1,
+ 0x00681FF8,
+ 0x006A1FE2,
+ 0x002E0001,
+ 0x00068000,
+ 0x008701B1,
+ 0x006A1FE2,
+ 0x00681FF8,
+ 0x00691FE8,
+ 0x00048000,
+ 0x002A0001,
+ 0x008701B9,
+ 0x00691FE0,
+ 0x00681FF8,
+ 0x00614004,
+ 0x00691FE2,
+ 0x00604000,
+ 0x00290001,
+ 0x00614014,
+ 0x00054000,
+ 0x00250001,
+ 0x006A1FF8,
+ 0x00614010,
+ 0x00064000,
+ 0x00260003,
+ 0x00624008,
+ 0x00218200,
+ 0x008701AD,
+ 0x006B1FEC,
+ 0x00018000,
+ 0x00681FEA,
+ 0x006A1FE2,
+ 0x002B0002,
+ 0x00D0013F,
+ 0x00631FFA,
+ 0x008701B9,
+ 0x00010000,
+ 0x0087019A,
+ 0x00681FF8,
+ 0x00260001,
+ 0x00068000,
+ 0x00048000,
+ 0x00010000,
+ 0x006A1FE2,
+ 0x00240001,
+ 0x00048000,
+ 0x008701B9,
+ 0x00691FEA,
+ 0x0087019A,
+ 0x006B1FFA,
+ 0x002B0001,
+ 0x00631FFA,
+ 0x00D40133,
+ 0x00681FEA,
+ 0x00691FF8,
+ 0x00058000,
+ 0x00058000,
+ 0x00250002,
+ 0x008701B9,
+ 0x00200000,
+ 0x0080017A,
+ 0x00681FEA,
+ 0x00010000,
+ 0x0087019A,
+ 0x00200000,
+ 0x00040000,
+ 0x006B1FF0,
+ 0x00691FF2,
+ 0x00174000,
+ 0x00C80150,
+ 0x00240001,
+ 0x00691FEC,
+ 0x00084000,
+ 0x000C4000,
+ 0x00D0017A,
+ 0x00601FF0,
+ 0x00300001,
+ 0x00D40155,
+ 0x00601FFA,
+ 0x00347FFF,
+ 0x00C80161,
+ 0x00601FFA,
+ 0x00681FEA,
+ 0x00010000,
+ 0x0087019A,
+ 0x00681FFA,
+ 0x00300001,
+ 0x00800158,
+ 0x00681FF0,
+ 0x00300001,
+ 0x00D40162,
+ 0x00347FFF,
+ 0x00240003,
+ 0x00691FE2,
+ 0x00250001,
+ 0x00072000,
+ 0x00691FEA,
+ 0x00058000,
+ 0x006A1FE2,
+ 0x00681FEA,
+ 0x0087019A,
+ 0x00681FF0,
+ 0x00300001,
+ 0x00D00176,
+ 0x00601FF0,
+ 0x00681FEA,
+ 0x00010000,
+ 0x0087019A,
+ 0x0080016E,
+ 0x006B1FF4,
+ 0x004BC000,
+ 0x00631FF0,
+ 0x00200000,
+ 0x00691FF2,
+ 0x00310001,
+ 0x00D40189,
+ 0x006B1FF6,
+ 0x002B0002,
+ 0x00D40183,
+ 0x00140000,
+ 0x00C8018D,
+ 0x00800154,
+ 0x00631FF6,
+ 0x006B1FF4,
+ 0x002B0002,
+ 0x00631FF4,
+ 0x004BC000,
+ 0x00631FF0,
+ 0x00611FF2,
+ 0x00140000,
+ 0x00CC014A,
+ 0x00800146,
+ 0x00681FF8,
+ 0x00691FEA,
+ 0x008701B9,
+ 0x00048000,
+ 0x008701B1,
+ 0x00681FEA,
+ 0x00691FF8,
+ 0x006A1FE2,
+ 0x008701A5,
+ 0x002EFFFF,
+ 0x00621FE2,
+ 0x00200001,
+ 0x008001DA,
+ 0x00604000,
+ 0x00624010,
+ 0x00614004,
+ 0x00624014,
+ 0x00691FF8,
+ 0x00614008,
+ 0x00218001,
+ 0x008C0480,
+ 0x00BC01BE,
+ 0x0061401C,
+ 0x00691FF8,
+ 0x00614008,
+ 0x00624010,
+ 0x00691FE0,
+ 0x00614000,
+ 0x002107FF,
+ 0x00614004,
+ 0x0060400C,
+ 0x00218004,
+ 0x008C0480,
+ 0x00BC01BE,
+ 0x0061401C,
+ 0x008B2000,
+ 0x00010000,
+ 0x0030FFFE,
+ 0x00500000,
+ 0x00500000,
+ 0x00500000,
+ 0x00500000,
+ 0x00300002,
+ 0x002A0002,
+ 0x00614000,
+ 0x00624010,
+ 0x00604008,
+ 0x00218808,
+ 0x008001AD,
+ 0x00707FC8,
+ 0x0070001D,
+ 0x00200000,
+ 0x00010000,
+ 0x00220001,
+ 0x008701B9,
+ 0x0020000F,
+ 0x008001DA,
+ 0x00681FEA,
+ 0x00691FE2,
+ 0x0030FFFE,
+ 0x00500001,
+ 0x0031FFFF,
+ 0x002DFFFF,
+ 0x00500000,
+ 0x002DFFFF,
+ 0x00CC01CC,
+ 0x00200005,
+ 0x008001DA,
+ 0x00691FE8,
+ 0x00681FEA,
+ 0x001C4000,
+ 0x00C801D9,
+ 0x0070001D,
+ 0x00681FEA,
+ 0x006A1FE2,
+ 0x008701B9,
+ 0x00200009,
+ 0x00691FEE,
+ 0x0070001F,
+ 0x003D9000,
+ 0x00CC01E0,
+ 0x00691FDE,
+ 0x00890000,
+ 0x00300002,
+ 0x00D001E8,
+ 0x00601FEC,
+ 0x00681FEA,
+ 0x006A1FE2,
+ 0x00010000,
+ 0x008701B9,
+ 0x00681FEC,
+ 0x0030FFFE,
+ 0x006A1FE4,
+ 0x006B1FE6,
+ 0x00624000,
+ 0x00634010,
+ 0x006A1FE0,
+ 0x006B1FE2,
+ 0x00624004,
+ 0x00634014,
+ 0x006A1FE8,
+ 0x006B1FEE,
+ 0x00624008,
+ 0x00377FFF,
+ 0x008C0400,
+ 0x0063401C,
+ 0x006440C9,
+ 0x00800001,
+ 0x00780003
+};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b401dadad4a..3d1b57d4c1b 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -111,6 +111,46 @@ config SH_DMAE
help
Enable support for the Renesas SuperH DMA controllers.
+config AMCC_PPC460EX_460GT_ADMA
+ tristate "AMCC PPC460EX/GT ADMA support"
+ depends on 460EX || 460GT
+ select ASYNC_CORE
+ select DMA_ENGINE
+ select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ default y
+ ---help---
+	  Enable support for the AMCC PPC460EX/GT RAID engines.
+
+config AMCC_PPC460EX_460GT_4CHAN_DMA
+ tristate "AMCC PPC460EX PPC460GT PLB DMA support"
+ depends on 460EX || 460GT || APM82181
+ select DMA_ENGINE
+ default y
+
+config AMCC_PPC460EX_460GT_PLB_ADMA
+ tristate "AMCC PPC460EX/GT 4Channel PLB ADMA support"
+ depends on 460EX || 460GT
+ select ASYNC_CORE
+ select DMA_ENGINE
+ select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ default y
+ ---help---
+ Enable support for the AMCC PPC460Ex PLB engines.
+
+config APM82181_ADMA
+	tristate "APM82181 Asynchronous DMA support"
+ depends on APM82181
+ select ASYNC_CORE
+ select ASYNC_TX_DMA
+ select DMA_ENGINE
+ select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ default y
+ ---help---
+	  Enable support for the APM82181 Asynchronous DMA engines.
+
+config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ bool
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index eca71ba78ae..3637a70241f 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,6 +3,9 @@ obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_AMCC_PPC460EX_460GT_ADMA) += ppc460ex-adma.o
+obj-$(CONFIG_AMCC_PPC460EX_460GT_4CHAN_DMA) += ppc460ex_4chan_dma.o ppc460ex_4chan_sgdma.o
+obj-$(CONFIG_APM82181_ADMA) += apm82181-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
@@ -10,3 +13,4 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
+#amcc_ppc460ex_4chan_dma-objs := ppc460ex_4chan_dma.o ppc460ex_4chan_sgdma.o
diff --git a/drivers/dma/apm82181-adma.c b/drivers/dma/apm82181-adma.c
new file mode 100644
index 00000000000..5800ca15e56
--- /dev/null
+++ b/drivers/dma/apm82181-adma.c
@@ -0,0 +1,2433 @@
+/*
+ * Copyright(c) 2010 Applied Micro Circuits Corporation(AMCC). All rights reserved.
+ *
+ * Author: Tai Tri Nguyen <ttnguyen@appliedmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports the asynchrounous DMA copy and RAID engines available
+ * on the AppliedMicro APM82181 Processor.
+ * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ * ADMA driver written by D.Williams.
+ */
+#define ADMA_DEBUG
+#undef ADMA_DEBUG
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/of_platform.h>
+#include <linux/proc_fs.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/apm82181-adma.h>
+
+#define PPC4XX_EDMA "apm82181-adma: "
+#ifdef ADMA_DEBUG
+#define DBG(string, args...) \
+ printk(PPC4XX_EDMA string ,##args)
+#define INFO DBG("<%s> -- line %d\n",__func__,__LINE__);
+#define ADMA_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+#else
+#define DBG(string, args...) \
+ {if (0) printk(KERN_INFO PPC4XX_EDMA string ,##args); 0; }
+#define INFO DBG("");
+#define ADMA_HEXDUMP(b, l) \
+ {if (0) print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 8, 1, (b), (l), false); 0;}
+#endif
+
+#define MEM_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+
+/* The list of channels exported by apm82181 ADMA */
+struct list_head
+ppc_adma_chan_list = LIST_HEAD_INIT(ppc_adma_chan_list);
+
+/* This flag is set when want to refetch the xor chain in the interrupt
+ * handler
+ */
+static u32 do_xor_refetch = 0;
+
+/* Pointers to last submitted to DMA0/1/2/3 and XOR CDBs
+ * (indexed by device id: 4 PLB DMA engines + 1 XOR core = 5 slots) */
+static apm82181_desc_t *chan_last_sub[5];
+static apm82181_desc_t *chan_first_cdb[5];
+
+/* Pointer to last linked and submitted xor CB */
+static apm82181_desc_t *xor_last_linked = NULL;
+static apm82181_desc_t *xor_last_submit = NULL;
+
+/* /proc interface is used here to verify the h/w RAID 5 capabilities
+ */
+static struct proc_dir_entry *apm82181_proot;
+
+/* These are used in enable & check routines
+ */
+static u32 apm82181_xor_verified;
+static u32 apm82181_memcpy_verified[4];
+static apm82181_ch_t *apm82181_dma_tchan[5];
+static struct completion apm82181_r5_test_comp;
+
+/* Forward declaration; defined below with the channel-status helpers. */
+static inline int apm82181_chan_is_busy(apm82181_ch_t *chan);
+#if 0
+static phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
+{
+ phys_addr_t page_4gb = 0;
+
+ return (page_4gb | addr);
+}
+#endif
+/**
+ * apm82181_adma_device_estimate - estimate the efficiency of processing
+ * the operation given on this channel. It's assumed that 'chan' is
+ * capable to process 'cap' type of operation.
+ * @chan: channel to use
+ * @cap: type of transaction
+ * @src_lst: array of source pointers
+ * @src_cnt: number of source operands
+ * @src_sz: size of each source operand
+ */
+/**
+ * apm82181_adma_estimate - score this channel for the given operation.
+ *
+ * An idle channel earns the higher score (2).  A busy PLB DMA channel is
+ * rejected outright (-1); a busy XOR channel is still usable (1).
+ * (The original "likely(ef)" test was on a constant 1 and thus always true.)
+ */
+int apm82181_adma_estimate (struct dma_chan *chan,
+	enum dma_transaction_type cap, struct page **src_lst,
+	int src_cnt, size_t src_sz)
+{
+	if (!apm82181_chan_is_busy(to_apm82181_adma_chan(chan)))
+		return 2;
+
+	if (chan->chan_id != APM82181_XOR_ID)
+		return -1;
+
+	return 1;
+}
+
+/******************************************************************************
+ * Command (Descriptor) Blocks low-level routines
+ ******************************************************************************/
+/**
+ * apm82181_desc_init_interrupt - initialize the descriptor for INTERRUPT
+ * pseudo operation
+ */
+/**
+ * apm82181_desc_init_interrupt - initialize the descriptor for INTERRUPT
+ * pseudo operation
+ *
+ * Only the XOR core supports a pure-interrupt command block (a NOP with
+ * Command Block Complete Enable set); requesting it on a PLB DMA channel
+ * is a driver bug, hence BUG().
+ */
+static inline void apm82181_desc_init_interrupt (apm82181_desc_t *desc,
+							apm82181_ch_t *chan)
+{
+	xor_cb_t *p;
+
+	switch (chan->device->id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		BUG();
+		break;
+	case APM82181_XOR_ID:
+		p = desc->hw_desc;
+		memset (desc->hw_desc, 0, sizeof(xor_cb_t));
+		/* NOP with Command Block Complete Enable */
+		p->cbc = XOR_CBCR_CBCE_BIT;
+		break;
+	default:
+		printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
+				__FUNCTION__);
+		break;
+	}
+}
+
+/**
+ * apm82181_desc_init_xor - initialize the descriptor for XOR operation
+ */
+/**
+ * apm82181_desc_init_xor - prepare a descriptor for an XOR operation.
+ *
+ * Zeroes the hardware command block, records operand counts in the s/w
+ * descriptor, and sets the target bit plus source count.  An interrupt on
+ * completion is requested only when DMA_PREP_INTERRUPT is set.
+ */
+static inline void apm82181_desc_init_xor(apm82181_desc_t *desc, int src_cnt,
+		unsigned long flags)
+{
+	xor_cb_t *cb = desc->hw_desc;
+
+	memset(cb, 0, sizeof(xor_cb_t));
+	desc->hw_next = NULL;
+	desc->src_cnt = src_cnt;
+	desc->dst_cnt = 1;
+
+	cb->cbc = XOR_CBCR_TGT_BIT | src_cnt;
+	if (flags & DMA_PREP_INTERRUPT)
+		cb->cbc |= XOR_CBCR_CBCE_BIT;
+}
+
+/**
+ * apm82181_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ */
+/**
+ * apm82181_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ *
+ * Builds a PLB DMA command block for a single mem-to-mem transfer with
+ * incrementing source/destination addresses.  The interrupt request is
+ * latched in the s/w descriptor flags; the hardware tcie bit is set later
+ * when the CDB is pushed to the engine.
+ */
+static inline void apm82181_desc_init_memcpy(apm82181_desc_t *desc,
+		unsigned long flags)
+{
+	dma_cdb_t *hw_desc = desc->hw_desc;
+
+	memset(hw_desc, 0, sizeof(dma_cdb_t));
+	desc->hw_next = NULL;
+	desc->src_cnt = 1;
+	desc->dst_cnt = 1;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		set_bit(APM82181_DESC_INT, &desc->flags);
+	else
+		clear_bit(APM82181_DESC_INT, &desc->flags);
+	/* dma configuration for running */
+	hw_desc->ctrl.tm = 2; /* soft init mem-mem mode */
+	hw_desc->ctrl.pw = 4; /* transfer width 128 bits (16 bytes, see
+			       * plbdma_get_transfer_width) */
+	hw_desc->ctrl.ben = 1;/* buffer enable */
+	hw_desc->ctrl.sai = 1;/* increase source addr */
+	hw_desc->ctrl.dai = 1;/* increase dest addr */
+	hw_desc->ctrl.tce = 1;/* chan stops when TC is reached */
+	hw_desc->ctrl.cp = 3; /* highest priority */
+	hw_desc->ctrl.sl = 0; /* source is in PLB */
+	hw_desc->ctrl.pl = 0; /* dest is in PLB */
+	hw_desc->cnt.tcie = 0;/* no terminal-count interrupt at init time */
+	hw_desc->cnt.etie = 0; /* NOTE(review): clears this bit although the
+				* original comment claimed "enable" -- confirm
+				* etie semantics against the DMA spec */
+	hw_desc->cnt.eie = 1; /* enable error interrupt */
+	hw_desc->cnt.link = 0;/* not link to next cdb */
+	hw_desc->cnt.sgl = 0;
+	hw_desc->ctrl.ce =1; /* enable channel */
+	hw_desc->ctrl.cie =1; /* enable int channel */
+}
+
+/**
+ * apm82181_desc_init_memset - initialize the descriptor for MEMSET operation
+ */
+/**
+ * apm82181_desc_init_memset - prepare a descriptor for a MEMSET operation.
+ *
+ * Only software bookkeeping is done here: the hardware CDB is zeroed and
+ * the interrupt request is latched in the descriptor flags.  @value is
+ * currently unused at this stage.
+ */
+static inline void apm82181_desc_init_memset(apm82181_desc_t *desc, int value,
+		unsigned long flags)
+{
+	memset(desc->hw_desc, 0, sizeof(dma_cdb_t));
+	desc->hw_next = NULL;
+	desc->src_cnt = 1;
+	desc->dst_cnt = 1;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		set_bit(APM82181_DESC_INT, &desc->flags);
+	else
+		clear_bit(APM82181_DESC_INT, &desc->flags);
+}
+
+
+
+/**
+ * apm82181_desc_set_src_addr - set source address into the descriptor
+ */
+/**
+ * apm82181_desc_set_src_addr - store a (possibly 64-bit) source address
+ * into the hardware descriptor, split into high/low 32-bit halves.
+ * @src_idx selects the operand slot on the XOR engine; PLB DMA has one.
+ */
+static inline void apm82181_desc_set_src_addr( apm82181_desc_t *desc,
+				apm82181_ch_t *chan, int src_idx, dma_addr_t addr)
+{
+	dma_cdb_t *cdb;
+	xor_cb_t *xcb;
+
+	switch (chan->device->id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		cdb = desc->hw_desc;
+		cdb->src_hi = (u32)(addr >> 32);
+		cdb->src_lo = (u32)addr;
+		break;
+	case APM82181_XOR_ID:
+		xcb = desc->hw_desc;
+		xcb->ops[src_idx].h = (u32)(addr >>32);
+		xcb->ops[src_idx].l = (u32)addr;
+		break;
+	}
+}
+
+/* Route a source address to the group head of a descriptor chain. */
+static void apm82181_adma_set_src(apm82181_desc_t *sw_desc,
+		dma_addr_t addr, int index)
+{
+	apm82181_ch_t *chan = to_apm82181_adma_chan(sw_desc->async_tx.chan);
+	apm82181_desc_t *head = sw_desc->group_head;
+
+	if (head)
+		apm82181_desc_set_src_addr(head, chan, index, addr);
+}
+
+/**
+ * apm82181_desc_set_dest_addr - set destination address into the descriptor
+ */
+/**
+ * apm82181_desc_set_dest_addr - set destination address into the descriptor
+ *
+ * Splits @addr into high/low 32-bit halves.  @index is unused here.
+ * NOTE(review): the XOR path uses "cbtal |= ..." while every other store
+ * is a plain assignment; harmless after the memset in the init helpers,
+ * but confirm whether OR-ing was intentional before reusing descriptors.
+ */
+static inline void apm82181_desc_set_dest_addr(apm82181_desc_t *desc,
+			apm82181_ch_t *chan, dma_addr_t addr, u32 index)
+{
+	dma_cdb_t *dma_hw_desc;
+	xor_cb_t *xor_hw_desc;
+
+	switch (chan->device->id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		dma_hw_desc = desc->hw_desc;
+		dma_hw_desc->dest_hi = (u32)(addr >> 32);
+		dma_hw_desc->dest_lo = (u32)addr;
+		break;
+	case APM82181_XOR_ID:
+		xor_hw_desc = desc->hw_desc;
+		xor_hw_desc->cbtah = (u32)(addr >> 32);
+		xor_hw_desc->cbtal |= (u32)addr;
+		break;
+	}
+}
+
+/* Decode the PLB DMA pw field: 0..4 map to 1,2,4,8,16 bytes per beat;
+ * any other encoding yields 0 (invalid). */
+static int plbdma_get_transfer_width(dma_cdb_t *dma_hw_desc)
+{
+	static const int pw_bytes[] = { 1, 2, 4, 8, 16 };
+	unsigned int pw = dma_hw_desc->ctrl.pw;
+
+	return (pw <= 4) ? pw_bytes[pw] : 0;
+}
+/**
+ * apm82181_desc_set_byte_count - set number of data bytes involved
+ * into the operation
+ */
+/**
+ * apm82181_desc_set_byte_count - set number of data bytes involved
+ * into the operation
+ *
+ * PLB DMA counts in transfer-width units, so the byte count is converted
+ * to a terminal count; the XOR engine takes bytes directly.
+ * NOTE(review): if ctrl.pw ever held an invalid encoding,
+ * plbdma_get_transfer_width() returns 0 and the division would fault --
+ * all in-file initializers set pw=4, but confirm before reuse.  Also the
+ * DBG format uses %08x for a size_t argument.
+ */
+static inline void apm82181_desc_set_byte_count(apm82181_desc_t *desc,
+					apm82181_ch_t *chan, size_t byte_count)
+{
+	dma_cdb_t *dma_hw_desc;
+	xor_cb_t *xor_hw_desc;
+	int terminal_cnt, transfer_width = 0;
+
+	DBG("<%s> byte_count %08x\n", __func__,byte_count);
+	switch (chan->device->id){
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		dma_hw_desc = desc->hw_desc;
+		transfer_width = plbdma_get_transfer_width(dma_hw_desc);
+		terminal_cnt = byte_count/transfer_width;
+		dma_hw_desc->cnt.tc = terminal_cnt;
+		break;
+	case APM82181_XOR_ID:
+		xor_hw_desc = desc->hw_desc;
+		xor_hw_desc->cbbc = byte_count;
+		break;
+	}
+}
+
+/**
+ * apm82181_xor_set_link - set link address in xor CB
+ */
+/**
+ * apm82181_xor_set_link - set link address in xor CB
+ *
+ * Chains @next_desc after @prev_desc in hardware: stores the next CB's
+ * physical address (split hi/lo) and raises the link bit.  A NULL next
+ * descriptor or a missing physical address is a driver bug -> BUG().
+ */
+static inline void apm82181_xor_set_link (apm82181_desc_t *prev_desc,
+					apm82181_desc_t *next_desc)
+{
+	xor_cb_t *xor_hw_desc = prev_desc->hw_desc;
+
+	if (unlikely(!next_desc || !(next_desc->phys))) {
+		printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
+			__func__, next_desc,
+			next_desc ? next_desc->phys : 0);
+		BUG();
+	}
+	DBG("<%s>:next_desc->phys %llx\n", __func__,next_desc->phys);
+	xor_hw_desc->cbs = 0;
+	xor_hw_desc->cblal = (u32)next_desc->phys;
+	xor_hw_desc->cblah = (u32)(next_desc->phys >> 32);
+	xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
+}
+
+/**
+ * apm82181_desc_set_link - set the address of descriptor following this
+ * descriptor in chain
+ */
+/**
+ * apm82181_desc_set_link - set the address of descriptor following this
+ * descriptor in chain
+ *
+ * Software chaining (hw_next) is done for every engine; hardware chaining
+ * additionally happens only on the XOR core, and only when @prev_desc is
+ * not the last CB already handed to hardware (linking behind a submitted
+ * CB is deferred to the refetch path).  Interrupts are masked so the
+ * chain state and xor_last_linked stay consistent with the IRQ handler.
+ */
+static inline void apm82181_desc_set_link(apm82181_ch_t *chan,
+	apm82181_desc_t *prev_desc, apm82181_desc_t *next_desc)
+{
+	unsigned long flags;
+	apm82181_desc_t *tail = next_desc;
+
+	if (unlikely(!prev_desc || !next_desc ||
+		(prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
+		/* If previous next is overwritten something is wrong.
+		 * though we may refetch from append to initiate list
+		 * processing; in this case - it's ok.
+		 */
+		printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
+			"prev->hw_next=0x%p\n", __FUNCTION__, prev_desc,
+			next_desc, prev_desc ? prev_desc->hw_next : 0);
+		BUG();
+	}
+
+	local_irq_save(flags);
+
+	/* do s/w chaining both for DMA and XOR descriptors */
+	prev_desc->hw_next = next_desc;
+
+	switch (chan->device->id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		break;
+	case APM82181_XOR_ID:
+		/* bind descriptor to the chain: remember the new chain tail */
+		while (tail->hw_next)
+			tail = tail->hw_next;
+		xor_last_linked = tail;
+
+		if (prev_desc == xor_last_submit)
+			/* do not link to the last submitted CB */
+			break;
+		apm82181_xor_set_link (prev_desc, next_desc);
+		break;
+	default:
+		BUG();
+	}
+
+	local_irq_restore(flags);
+}
+
+/**
+ * apm82181_desc_get_src_addr - extract the source address from the descriptor
+ */
+/**
+ * apm82181_desc_get_src_addr - low 32 bits of the source address, or 0
+ * for engines (e.g. XOR) whose CDB is not a dma_cdb_t.
+ */
+static inline u32 apm82181_desc_get_src_addr(apm82181_desc_t *desc,
+					apm82181_ch_t *chan, int src_idx)
+{
+	dma_cdb_t *cdb = desc->hw_desc;
+
+	switch (chan->device->id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		return cdb->src_lo;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * apm82181_desc_get_dest_addr - extract the destination address from the
+ * descriptor
+ */
+/**
+ * apm82181_desc_get_dest_addr - low 32 bits of the destination address,
+ * or 0 for engines (e.g. XOR) whose CDB is not a dma_cdb_t.
+ */
+static inline u32 apm82181_desc_get_dest_addr(apm82181_desc_t *desc,
+					apm82181_ch_t *chan, int idx)
+{
+	dma_cdb_t *cdb = desc->hw_desc;
+
+	switch (chan->device->id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		return cdb->dest_lo;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * apm82181_desc_get_byte_count - extract the byte count from the descriptor
+ */
+/**
+ * apm82181_desc_get_byte_count - extract the byte count from the descriptor
+ *
+ * Inverse of apm82181_desc_set_byte_count() for the PLB DMA engines: the
+ * hardware stores a terminal count in transfer-width units, so multiply
+ * back by the width to recover bytes.  Returns 0 for other engines,
+ * matching the setter's dma_cdb_t-only handling here.
+ *
+ * Fix: the original fell off the end of a non-void function (its return
+ * statement was commented out), which is undefined behavior and returned
+ * garbage to callers.
+ */
+static inline u32 apm82181_desc_get_byte_count(apm82181_desc_t *desc,
+					apm82181_ch_t *chan)
+{
+	dma_cdb_t *dma_hw_desc = desc->hw_desc;
+
+	switch (chan->device->id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		return dma_hw_desc->cnt.tc *
+			plbdma_get_transfer_width(dma_hw_desc);
+	default:
+		return 0;
+	}
+}
+
+
+/**
+ * apm82181_desc_get_link - get the address of the descriptor that
+ * follows this one
+ */
+/**
+ * apm82181_desc_get_link - physical address of the chained descriptor,
+ * or 0 when this descriptor ends the chain.
+ */
+static inline u32 apm82181_desc_get_link(apm82181_desc_t *desc,
+					apm82181_ch_t *chan)
+{
+	return desc->hw_next ? desc->hw_next->phys : 0;
+}
+
+/**
+ * apm82181_desc_is_aligned - check alignment
+ */
+/* 1 if the slot index is aligned to @num_slots (a power of two), else 0. */
+static inline int apm82181_desc_is_aligned(apm82181_desc_t *desc,
+		int num_slots)
+{
+	return !(desc->idx & (num_slots - 1));
+}
+
+
+
+/******************************************************************************
+ * ADMA channel low-level routines
+ ******************************************************************************/
+
+static inline phys_addr_t apm82181_chan_get_current_descriptor(apm82181_ch_t *chan);
+static inline void apm82181_chan_append(apm82181_ch_t *chan);
+
+/*
+ * apm82181_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
+ */
+/*
+ * apm82181_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
+ *
+ * PLB DMA: read the status DCR, log errors, and write the value back to
+ * clear the latched bits (write-one-to-clear).  XOR: ack the status
+ * register; on a Read PLB Timeout the current CB is re-armed, and if the
+ * core has gone idle with CBs still pending, the s/w chain is refetched.
+ */
+static inline void apm82181_adma_device_clear_eot_status (apm82181_ch_t *chan)
+{
+	u32 val ;
+	int idx = chan->device->id;
+	volatile xor_regs_t *xor_reg;
+	INFO;
+	switch (idx) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		val = mfdcr(DCR_DMA2P40_SR);
+		if(val & DMA_SR_RI(idx)){
+			printk(KERN_ERR "Err occurred, DMA%d status: 0x%x\n", idx, val);
+		}
+		/* TC reached int, write back to clear */
+		mtdcr(DCR_DMA2P40_SR, val);
+		break;
+	case APM82181_XOR_ID:
+		/* reset status bits to ack*/
+		xor_reg = chan->device->xor_base;
+
+		val = xor_reg->sr;
+		DBG("XOR engine status: 0x%08x\n", val);
+		xor_reg->sr = val;
+
+		if (val & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
+			if (val & XOR_IE_RPTIE_BIT) {
+				/* Read PLB Timeout Error.
+				 * Try to resubmit the CB
+				 */
+				INFO;
+				xor_reg->cblalr = xor_reg->ccbalr;
+				xor_reg->crsr |= XOR_CRSR_XAE_BIT;
+			} else
+				printk (KERN_ERR "XOR ERR 0x%x status\n", val);
+			break;
+		}
+
+		/* if the XORcore is idle, but there are unprocessed CBs
+		 * then refetch the s/w chain here
+		 */
+		if (!(xor_reg->sr & XOR_SR_XCP_BIT) && do_xor_refetch) {
+			apm82181_chan_append(chan);
+		}
+		break;
+	}
+}
+
+/*
+ * apm82181_chan_is_busy - poll the engine status.
+ *
+ * Returns 1 while the channel is still processing, 0 when it is idle.
+ */
+
+static inline int apm82181_chan_is_busy(apm82181_ch_t *chan)
+{
+	volatile xor_regs_t *xor_reg = chan->device->xor_base;
+	int id = chan->device->id;
+
+	switch (id) {
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		/* per-channel busy bit in the shared DMA status register */
+		return (mfdcr(DCR_DMA2P40_SR) & DMA_SR_CB(id)) ? 1 : 0;
+	case APM82181_XOR_ID:
+		/* the XORcore has its own "core processing" bit */
+		return (xor_reg->sr & XOR_SR_XCP_BIT) ? 1 : 0;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+/**
+ * apm82181_dma_put_desc - put PLB DMA 0/1/2/3 descriptor to FIFO
+ *
+ * Terminates the CDB (sg_hi/sg_lo all-ones so the engine does not chain
+ * past it), programs the channel's scatter/gather address registers with
+ * the CDB's physical address, and kicks the S/G engine.  The descriptor
+ * is also recorded in chan_last_sub[] and as chan->current_cdb_addr, the
+ * software shadow used by apm82181_chan_get_current_descriptor().
+ * The DCR write order below is intentional; do not reorder.
+ */
+static inline void apm82181_dma_put_desc(apm82181_ch_t *chan,
+ apm82181_desc_t *desc)
+{
+ dma_cdb_t *cdb = desc->hw_desc;
+ u32 sg_cmd = 0;
+
+ /* Enable TC interrupt */
+ if(test_bit(APM82181_DESC_INT, &desc->flags))
+ cdb->cnt.tcie = 1;
+ else
+ cdb->cnt.tcie = 0;
+ /* Not link to next cdb */
+ cdb->sg_hi = 0xffffffff;
+ cdb->sg_lo = 0xffffffff;
+
+ chan_last_sub[chan->device->id] = desc;
+
+ /* Update new cdb addr */
+ mtdcr(DCR_DMA2P40_SGHx(chan->device->id), (u32)(desc->phys >> 32));
+ mtdcr(DCR_DMA2P40_SGLx(chan->device->id), (u32)desc->phys);
+
+ INFO;
+ DBG("slot id: %d addr: %llx\n", desc->idx, desc->phys);
+ DBG("S/G addr H: %08x addr L: %08x\n",
+ mfdcr(DCR_DMA2P40_SGHx(chan->device->id)),
+ mfdcr(DCR_DMA2P40_SGLx(chan->device->id)));
+ ADMA_HEXDUMP(cdb, 96);
+ /* Enable S/G */
+ sg_cmd |= (DMA_SGC_SSG(chan->device->id) | DMA_SGC_EM_ALL);
+ sg_cmd |= DMA_SGC_SGL(chan->device->id, 0); /* S/G addr in PLB */
+
+ mtdcr(DCR_DMA2P40_SGC, sg_cmd);
+ DBG("S/G addr H: %08x addr L: %08x\n",
+ mfdcr(DCR_DMA2P40_SGHx(chan->device->id)),
+ mfdcr(DCR_DMA2P40_SGLx(chan->device->id)));
+ /* need to use variable for logging current CDB */
+ chan->current_cdb_addr = desc->phys;
+
+}
+
+/**
+ * apm82181_chan_append - update the h/w chain in the channel
+ *
+ * Flushes descriptors queued in software onto the hardware.  For the PLB
+ * DMA channels each CDB is pushed to the FIFO individually; for the XOR
+ * core the last linked CB is patched to interrupt, and the new list is
+ * either refetched immediately (core idle) or deferred to the interrupt
+ * handler via do_xor_refetch (core running).  Runs with local interrupts
+ * disabled because it races with the EOT interrupt path.
+ */
+static inline void apm82181_chan_append(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *iter;
+ volatile xor_regs_t *xor_reg;
+ phys_addr_t cur_desc;
+ xor_cb_t *xcb;
+ unsigned long flags;
+ INFO;
+
+ local_irq_save(flags);
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ cur_desc = apm82181_chan_get_current_descriptor(chan);
+ DBG("current_desc %llx\n", cur_desc);
+ if (likely(cur_desc)) {
+ INFO;
+ iter = chan_last_sub[chan->device->id];
+ BUG_ON(!iter);
+ } else {
+ INFO;
+ /* first peer */
+ iter = chan_first_cdb[chan->device->id];
+ BUG_ON(!iter);
+ INFO;
+ apm82181_dma_put_desc(chan, iter);
+ chan->hw_chain_inited = 1;
+ }
+
+ /* is there something new to append */
+ if (!iter->hw_next)
+ break;
+
+ /* flush descriptors from the s/w queue to fifo */
+ list_for_each_entry_continue(iter, &chan->chain, chain_node) {
+ apm82181_dma_put_desc(chan, iter);
+ if (!iter->hw_next)
+ break;
+ }
+ break;
+ case APM82181_XOR_ID:
+ /* update h/w links and refetch */
+ if (!xor_last_submit->hw_next)
+ break;
+ xor_reg = chan->device->xor_base;
+ /* the last linked CDB has to generate an interrupt
+ * that we'd be able to append the next lists to h/w
+ * regardless of the XOR engine state at the moment of
+ * appending of these next lists
+ */
+ xcb = xor_last_linked->hw_desc;
+ xcb->cbc |= XOR_CBCR_CBCE_BIT;
+
+ if (!(xor_reg->sr & XOR_SR_XCP_BIT)) {
+ /* XORcore is idle. Refetch now */
+ do_xor_refetch = 0;
+ apm82181_xor_set_link(xor_last_submit,
+ xor_last_submit->hw_next);
+
+ xor_last_submit = xor_last_linked;
+ xor_reg->crsr |= XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT;
+ } else {
+ /* XORcore is running. Refetch later in the handler */
+ do_xor_refetch = 1;
+ }
+
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * apm82181_chan_get_current_descriptor - get the currently executed descriptor
+ *
+ * Returns 0 until the first CDB has been pushed to hardware.  For PDMA
+ * channels the value is the software shadow written by
+ * apm82181_dma_put_desc(); for the XOR core it is read back from the
+ * current-CB address registers (high word then low word).
+ */
+static inline phys_addr_t apm82181_chan_get_current_descriptor(apm82181_ch_t *chan)
+{
+ phys_addr_t curr_cdb_addr;
+ volatile xor_regs_t *xor_reg;
+ int idx = chan->device->id;
+
+ if (unlikely(!chan->hw_chain_inited))
+ /* h/w descriptor chain is not initialized yet */
+ return 0;
+ switch(idx){
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ curr_cdb_addr = chan->current_cdb_addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_reg = chan->device->xor_base;
+ curr_cdb_addr = (dma_addr_t)xor_reg->ccbahr;
+ curr_cdb_addr = (curr_cdb_addr << 32) | xor_reg->ccbalr;
+ break;
+ default:
+ BUG();
+ }
+ return curr_cdb_addr;
+}
+
+
+/******************************************************************************
+ * ADMA device level
+ ******************************************************************************/
+
+static int apm82181_adma_alloc_chan_resources(struct dma_chan *chan);
+static dma_cookie_t apm82181_adma_tx_submit(
+ struct dma_async_tx_descriptor *tx);
+
+static void apm82181_adma_set_dest(
+ apm82181_desc_t *tx,
+ dma_addr_t addr, int index);
+
+/**
+ * apm82181_get_group_entry - get group entry with index idx
+ * @tdesc: is the last allocated slot in the group.
+ * @entry_idx: 0-based position of the wanted entry in the group list.
+ *
+ * BUGs when the index is outside the group (src_cnt + dst_cnt entries).
+ */
+static inline apm82181_desc_t *
+apm82181_get_group_entry ( apm82181_desc_t *tdesc, u32 entry_idx)
+{
+	apm82181_desc_t *iter = tdesc->group_head;
+	int i = 0;
+
+	/*
+	 * entry_idx is unsigned, so the previous "entry_idx < 0" half of
+	 * this check was always false (tautological comparison); only the
+	 * upper bound is meaningful.  Also use KERN_ERR and %u.
+	 */
+	if (entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
+		printk(KERN_ERR "%s: entry_idx %u, src_cnt %d, dst_cnt %d\n",
+			__func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
+		BUG();
+	}
+	list_for_each_entry(iter, &tdesc->group_list, chain_node) {
+		if (i++ == entry_idx)
+			break;
+	}
+	return iter;
+}
+
+/**
+ * apm82181_adma_free_slots - flags descriptor slots for reuse
+ * @slot: first slot of the group to release
+ * @chan: owning channel
+ * Caller must hold &apm82181_chan->lock while calling this function
+ *
+ * Clearing slots_per_op is what marks a slot free for the allocator
+ * (see apm82181_adma_alloc_slots).
+ */
+static void apm82181_adma_free_slots(apm82181_desc_t *slot,
+	apm82181_ch_t *chan)
+{
+	int n;
+
+	for (n = slot->slots_per_op; n; n--) {
+		slot->slots_per_op = 0;
+		slot = list_entry(slot->slot_node.next,
+				apm82181_desc_t, slot_node);
+	}
+}
+
+/**
+ * apm82181_adma_unmap - dma_unmap_page() the addresses of one descriptor
+ *
+ * Honors the DMA_COMPL_SKIP_DEST/SRC_UNMAP flags on the descriptor.
+ * NOTE(review): src_cnt/dst_cnt are hard-coded to 1 here rather than read
+ * from the descriptor — presumably every CDB carries exactly one source
+ * and one destination; confirm against the prep_* paths.
+ */
+static void
+apm82181_adma_unmap(apm82181_ch_t *chan, apm82181_desc_t *desc)
+{
+ u32 src_cnt, dst_cnt;
+ dma_addr_t addr;
+ /*
+ * get the number of sources & destination
+ * included in this descriptor and unmap
+ * them all
+ */
+ src_cnt = 1;
+ dst_cnt = 1;
+
+ /* unmap destinations */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ while (dst_cnt--) {
+ addr = apm82181_desc_get_dest_addr(
+ desc, chan, dst_cnt);
+ dma_unmap_page(&chan->device->ofdev->dev,
+ addr, desc->unmap_len,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ /* unmap sources */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ while (src_cnt--) {
+ addr = apm82181_desc_get_src_addr(
+ desc, chan, src_cnt);
+ dma_unmap_page(&chan->device->ofdev->dev,
+ addr, desc->unmap_len,
+ DMA_TO_DEVICE);
+ }
+ }
+
+}
+/**
+ * apm82181_adma_run_tx_complete_actions - call functions to be called
+ * upon complete
+ *
+ * Fires the client callback (must not sleep), unmaps the group's DMA
+ * addresses when the channel requires it, and kicks any dependent
+ * transactions.  Returns the completed cookie (or the @cookie passed in
+ * when this descriptor carried none).
+ */
+static dma_cookie_t apm82181_adma_run_tx_complete_actions(
+ apm82181_desc_t *desc,
+ apm82181_ch_t *chan,
+ dma_cookie_t cookie)
+{
+ int i;
+ //enum dma_data_direction dir;
+ INFO;
+ BUG_ON(desc->async_tx.cookie < 0);
+ if (desc->async_tx.cookie > 0) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ /* call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (desc->async_tx.callback)
+ desc->async_tx.callback(
+ desc->async_tx.callback_param);
+
+ /* unmap dma addresses
+ * (unmap_single vs unmap_page?)
+ *
+ * actually, ppc's dma_unmap_page() functions are empty, so
+ * the following code is just for the sake of completeness
+ */
+ if (chan && chan->needs_unmap && desc->group_head &&
+ desc->unmap_len) {
+ apm82181_desc_t *unmap = desc->group_head;
+ /* assume 1 slot per op always */
+ u32 slot_count = unmap->slot_cnt;
+
+ /* Run through the group list and unmap addresses */
+ for (i = 0; i < slot_count; i++) {
+ BUG_ON(!unmap);
+ apm82181_adma_unmap(chan, unmap);
+ unmap = unmap->hw_next;
+ }
+ desc->group_head = NULL;
+ }
+ }
+
+ /* run dependent operations */
+ dma_run_dependencies(&desc->async_tx);
+
+ return cookie;
+}
+
+/**
+ * apm82181_adma_clean_slot - clean up CDB slot (if ack is set)
+ *
+ * Returns 1 (without freeing) when the descriptor must be kept: it is
+ * the chain tail we append to, or the CDB the hardware is executing.
+ * Returns 0 after unlinking and freeing the slot, or when the client
+ * has not acked it yet.
+ */
+static int apm82181_adma_clean_slot(apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ /* the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx))
+ return 0;
+
+ /* leave the last descriptor in the chain
+ * so we can append to it
+ */
+ if (list_is_last(&desc->chain_node, &chan->chain) ||
+ desc->phys == apm82181_chan_get_current_descriptor(chan))
+ return 1;
+
+ dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
+ desc->phys, desc->idx, desc->slots_per_op);
+
+ list_del(&desc->chain_node);
+ apm82181_adma_free_slots(desc, chan);
+ return 0;
+}
+
+/**
+ * __apm82181_adma_slot_cleanup - this is the common clean-up routine
+ * which runs through the channel CDBs list until reach the descriptor
+ * currently processed. When routine determines that all CDBs of group
+ * are completed then corresponding callbacks (if any) are called and slots
+ * are freed.
+ *
+ * Caller must hold chan->lock (see apm82181_adma_tasklet and
+ * apm82181_adma_slot_cleanup).  Grouped descriptors are only completed
+ * as a whole; the walk stops at the descriptor the hardware is still
+ * executing.
+ */
+static void __apm82181_adma_slot_cleanup(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *iter, *_iter, *group_start = NULL;
+ dma_cookie_t cookie = 0;
+ phys_addr_t current_desc = apm82181_chan_get_current_descriptor(chan);
+ int busy = apm82181_chan_is_busy(chan);
+ int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
+
+ DBG("apm82181 adma%d: %s\n",
+ chan->device->id, __FUNCTION__);
+ DBG("current_desc %llx\n", current_desc);
+
+ if (!current_desc) {
+ /* There were no transactions yet, so
+ * nothing to clean
+ */
+ return;
+ }
+
+ /* free completed slots from the chain starting with
+ * the oldest descriptor
+ */
+ list_for_each_entry_safe(iter, _iter, &chan->chain,
+ chain_node) {
+ DBG(" cookie: %d slot: %d "
+ "busy: %d this_desc: %llx next_desc: %x cur: %llx ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy, iter->phys,
+ apm82181_desc_get_link(iter, chan), current_desc,
+ async_tx_test_ack(&iter->async_tx));
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+
+ /* do not advance past the current descriptor loaded into the
+ * hardware channel,subsequent descriptors are either in process
+ * or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /* stop the search if we reach the current descriptor and the
+ * channel is busy, or if it appears that the current descriptor
+ * needs to be re-read (i.e. has been appended to)
+ */
+ if (iter->phys == current_desc) {
+ BUG_ON(seen_current++);
+ if (busy || apm82181_desc_get_link(iter, chan)) {
+ /* not all descriptors of the group have
+ * been completed; exit.
+ */
+ break;
+ }
+ }
+
+ /* detect the start of a group transaction */
+ if (!slot_cnt && !slots_per_op) {
+ slot_cnt = iter->slot_cnt;
+ slots_per_op = iter->slots_per_op;
+ if (slot_cnt <= slots_per_op) {
+ /* single-slot operation: no group tracking needed */
+ slot_cnt = 0;
+ slots_per_op = 0;
+ }
+ }
+
+ if (slot_cnt) {
+ if (!group_start)
+ group_start = iter;
+ slot_cnt -= slots_per_op;
+ }
+
+ /* all the members of a group are complete */
+ if (slots_per_op != 0 && slot_cnt == 0) {
+ apm82181_desc_t *grp_iter, *_grp_iter;
+ int end_of_chain = 0;
+
+ /* clean up the group */
+ slot_cnt = group_start->slot_cnt;
+ grp_iter = group_start;
+ list_for_each_entry_safe_from(grp_iter, _grp_iter,
+ &chan->chain, chain_node) {
+
+ cookie = apm82181_adma_run_tx_complete_actions(
+ grp_iter, chan, cookie);
+
+ slot_cnt -= slots_per_op;
+ end_of_chain = apm82181_adma_clean_slot(
+ grp_iter, chan);
+ if (end_of_chain && slot_cnt) {
+ /* Should wait for ZeroSum complete */
+ if (cookie > 0)
+ chan->completed_cookie = cookie;
+ return;
+ }
+
+ if (slot_cnt == 0 || end_of_chain)
+ break;
+ }
+
+ /* the group should be complete at this point */
+ BUG_ON(slot_cnt);
+
+ slots_per_op = 0;
+ group_start = NULL;
+ if (end_of_chain)
+ break;
+ else
+ continue;
+ } else if (slots_per_op) /* wait for group completion */
+ continue;
+
+ cookie = apm82181_adma_run_tx_complete_actions(iter, chan,
+ cookie);
+
+ if (apm82181_adma_clean_slot(iter, chan))
+ break;
+ }
+
+ BUG_ON(!seen_current);
+
+ if (cookie > 0) {
+ chan->completed_cookie = cookie;
+ DBG("completed cookie %d\n", cookie);
+ }
+
+}
+
+/**
+ * apm82181_adma_tasklet - clean up watch-dog initiator
+ * @data: the apm82181_ch_t this tasklet was armed with
+ *
+ * Deferred-work entry point: reaps completed CDBs under the channel lock.
+ */
+static void apm82181_adma_tasklet(unsigned long data)
+{
+	apm82181_ch_t *ch = (apm82181_ch_t *)data;
+
+	spin_lock(&ch->lock);
+	INFO;
+	__apm82181_adma_slot_cleanup(ch);
+	spin_unlock(&ch->lock);
+}
+
+/**
+ * apm82181_adma_slot_cleanup - clean up scheduled initiator
+ *
+ * Process-context wrapper: takes the channel lock with bottom halves
+ * disabled (the tasklet takes the same lock) and reaps completed CDBs.
+ */
+static void apm82181_adma_slot_cleanup (apm82181_ch_t *chan)
+{
+ spin_lock_bh(&chan->lock);
+ __apm82181_adma_slot_cleanup(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * apm82181_adma_alloc_slots - allocate free slots (if any)
+ *
+ * Searches all_slots for @num_slots contiguous free slots (a slot is free
+ * when its slots_per_op is 0).  The first pass resumes from last_used;
+ * on failure a second pass restarts from the list head.  On success the
+ * group is threaded onto the returned tail's group_list and the tail gets
+ * cookie -EBUSY; on total failure the cleanup tasklet is scheduled to
+ * reclaim slots and NULL is returned.
+ * Caller must hold chan->lock.
+ */
+static apm82181_desc_t *apm82181_adma_alloc_slots(
+ apm82181_ch_t *chan, int num_slots,
+ int slots_per_op)
+{
+ apm82181_desc_t *iter = NULL, *_iter, *alloc_start = NULL;
+ struct list_head chain = LIST_HEAD_INIT(chain);
+ int slots_found, retry = 0;
+
+
+ BUG_ON(!num_slots || !slots_per_op);
+ /* start search from the last allocated descrtiptor
+ * if a contiguous allocation can not be found start searching
+ * from the beginning of the list
+ */
+retry:
+ slots_found = 0;
+ if (retry == 0)
+ iter = chan->last_used;
+ else
+ iter = list_entry(&chan->all_slots, apm82181_desc_t,
+ slot_node);
+ prefetch(iter);
+ DBG("---iter at %p idx %d\n ",iter,iter->idx);
+ list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
+ slot_node) {
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+ if (iter->slots_per_op) {
+ /* slot in use: restart the contiguous run */
+ slots_found = 0;
+ continue;
+ }
+
+ /* start the allocation if the slot is correctly aligned */
+ if (!slots_found++)
+ alloc_start = iter;
+ if (slots_found == num_slots) {
+ apm82181_desc_t *alloc_tail = NULL;
+ apm82181_desc_t *last_used = NULL;
+ iter = alloc_start;
+ while (num_slots) {
+ int i;
+
+ /* pre-ack all but the last descriptor */
+ if (num_slots != slots_per_op) {
+ async_tx_ack(&iter->async_tx);
+ }
+ list_add_tail(&iter->chain_node, &chain);
+ alloc_tail = iter;
+ iter->async_tx.cookie = 0;
+ iter->hw_next = NULL;
+ iter->flags = 0;
+ iter->slot_cnt = num_slots;
+ for (i = 0; i < slots_per_op; i++) {
+ iter->slots_per_op = slots_per_op - i;
+ last_used = iter;
+ iter = list_entry(iter->slot_node.next,
+ apm82181_desc_t,
+ slot_node);
+ }
+ num_slots -= slots_per_op;
+ }
+ alloc_tail->group_head = alloc_start;
+ alloc_tail->async_tx.cookie = -EBUSY;
+ list_splice(&chain, &alloc_tail->group_list);
+ chan->last_used = last_used;
+ DBG("---slot allocated at %llx idx %d, hw_desc %p tx_ack %d\n",
+ alloc_tail->phys, alloc_tail->idx, alloc_tail->hw_desc,
+ async_tx_test_ack(&alloc_tail->async_tx));
+ return alloc_tail;
+ }
+ }
+ if (!retry++)
+ goto retry;
+#ifdef ADMA_DEBUG
+ /* NOTE(review): declaration after statements/labels — gnu89/gnu99
+ * accepts it, but hoisting it to the top would be cleaner.
+ */
+ static int empty_slot_cnt;
+ if(!(empty_slot_cnt%100))
+ printk(KERN_INFO"No empty slots trying to free some\n");
+ empty_slot_cnt++;
+#endif
+ /* try to free some slots if the allocation fails */
+ tasklet_schedule(&chan->irq_tasklet);
+ return NULL;
+}
+
+/**
+ * apm82181_chan_xor_slot_count - get the number of slots necessary for
+ * XOR operation
+ * @len: transfer length in bytes; must not exceed
+ *       APM82181_ADMA_XOR_MAX_BYTE_COUNT (BUGs otherwise)
+ * @src_cnt: number of XOR source operands
+ * @slots_per_op: out — slots needed per operation
+ */
+static inline int apm82181_chan_xor_slot_count(size_t len, int src_cnt,
+	int *slots_per_op)
+{
+	int slot_cnt;
+
+	/* each XOR descriptor provides up to 16 source operands */
+	slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
+
+	if (likely(len <= APM82181_ADMA_XOR_MAX_BYTE_COUNT))
+		return slot_cnt;
+
+	/* %zu: len is a size_t; the old "%d" specifier was mismatched */
+	printk(KERN_ERR "%s: len %zu > max %d !!\n",
+		__func__, len, APM82181_ADMA_XOR_MAX_BYTE_COUNT);
+	BUG();
+	return slot_cnt;
+}
+
+/**
+ * apm82181_desc_init_null_xor - initialize the descriptor for NULL XOR
+ * pseudo operation
+ *
+ * Zeroes the hardware CB and marks the s/w descriptor as a no-op with
+ * no sources, one destination and no successor.
+ */
+static inline void apm82181_desc_init_null_xor(apm82181_desc_t *desc)
+{
+	xor_cb_t *cb = desc->hw_desc;
+
+	memset(cb, 0, sizeof(*cb));
+	desc->hw_next = NULL;
+	desc->src_cnt = 0;
+	desc->dst_cnt = 1;
+}
+/**
+ * apm82181_chan_set_first_xor_descriptor - initi XORcore chain
+ *
+ * Seeds the XOR engine's CB-list address registers with @next_desc and
+ * enables CB-list chaining.  Register write order matters: mode first
+ * (64-bit addressing), then the list address, then the link enable bit.
+ * Warns if the core is already running, since reprogramming the list
+ * under a live core is unsafe.
+ */
+static inline void apm82181_chan_set_first_xor_descriptor(apm82181_ch_t *chan,
+ apm82181_desc_t *next_desc)
+{
+ volatile xor_regs_t *xor_reg;
+
+ xor_reg = chan->device->xor_base;
+
+ if (xor_reg->sr & XOR_SR_XCP_BIT)
+ printk(KERN_INFO "%s: Warn: XORcore is running "
+ "when try to set the first CDB!\n",
+ __func__);
+
+ xor_last_submit = xor_last_linked = next_desc;
+
+ xor_reg->crsr = XOR_CRSR_64BA_BIT;
+
+ xor_reg->cblalr = next_desc->phys;
+ xor_reg->cblahr = 0;
+ xor_reg->cbcr |= XOR_CBCR_LNK_BIT;
+
+ chan->hw_chain_inited = 1;
+}
+/**
+ * apm82181_chan_start_null_xor - initiate the first XOR operation (DMA engines
+ * use FIFOs (as opposite to chains used in XOR) so this is a XOR
+ * specific operation)
+ *
+ * Allocates a slot group, initializes it as a NULL XOR, primes the cookie
+ * counters so completed_cookie trails the first real cookie, programs the
+ * engine with the CB and starts it.  Called once from channel-resource
+ * allocation for the XOR channel.
+ */
+static void apm82181_chan_start_null_xor(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *sw_desc, *group_start;
+ dma_cookie_t cookie;
+ int slot_cnt, slots_per_op;
+ volatile xor_regs_t *xor_reg = chan->device->xor_base;
+
+ dev_dbg(chan->device->common.dev,
+ "apm82181 adma%d: %s\n", chan->device->id, __func__);
+ INFO;
+ spin_lock_bh(&chan->lock);
+ slot_cnt = apm82181_chan_xor_slot_count(0, 2, &slots_per_op);
+ sw_desc = apm82181_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ INFO;
+ group_start = sw_desc->group_head;
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ async_tx_ack(&sw_desc->async_tx);
+ apm82181_desc_init_null_xor(group_start);
+ INFO;
+
+ cookie = chan->common.cookie;
+ cookie++;
+ if (cookie <= 1)
+ cookie = 2;
+
+ /* initialize the completed cookie to be less than
+ * the most recently used cookie
+ */
+ chan->completed_cookie = cookie - 1;
+ chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+
+ /* channel should not be busy */
+ BUG_ON(apm82181_chan_is_busy(chan));
+
+ /* set the descriptor address */
+ apm82181_chan_set_first_xor_descriptor(chan, sw_desc);
+
+ /* run the descriptor */
+ xor_reg->crsr = XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT;
+ } else
+ printk(KERN_ERR "apm82181 adma%d"
+ " failed to allocate null descriptor\n",
+ chan->device->id);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * apm82181_adma_alloc_chan_resources - allocate pools for CDB slots
+ *
+ * Carves the channel's coherent descriptor pool into CDB-sized slots,
+ * wraps each in an apm82181_desc_t, and (first call only) records the
+ * channel in apm82181_dma_tchan[] and starts the NULL XOR for the XOR
+ * engine.  Returns the number of slots, or -ENOMEM when none could be
+ * allocated.
+ */
+static int apm82181_adma_alloc_chan_resources(struct dma_chan *chan)
+{
+	apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+	apm82181_desc_t *slot = NULL;
+	char *hw_desc;
+	int i, db_sz;
+	int init = apm82181_chan->slots_allocated ? 0 : 1;
+
+	chan->chan_id = apm82181_chan->device->id;
+
+	/* Allocate descriptor slots */
+	i = apm82181_chan->slots_allocated;
+	if (apm82181_chan->device->id != APM82181_XOR_ID)
+		db_sz = sizeof (dma_cdb_t);
+	else
+		db_sz = sizeof (xor_cb_t);
+
+	for (; i < (apm82181_chan->device->pool_size/db_sz); i++) {
+		slot = kzalloc(sizeof(apm82181_desc_t), GFP_KERNEL);
+		if (!slot) {
+			/* NOTE(review): the i-- side effect makes the later
+			 * report and the return value count one slot fewer
+			 * than were actually set up — confirm intent.
+			 */
+			printk(KERN_INFO "APM82181/GT ADMA Channel only initialized"
+				" %d descriptor slots", i--);
+			break;
+		}
+
+		hw_desc = (char *) apm82181_chan->device->dma_desc_pool_virt;
+		slot->hw_desc = (void *) &hw_desc[i * db_sz];
+		dma_async_tx_descriptor_init(&slot->async_tx, chan);
+		slot->async_tx.tx_submit = apm82181_adma_tx_submit;
+		INIT_LIST_HEAD(&slot->chain_node);
+		INIT_LIST_HEAD(&slot->slot_node);
+		INIT_LIST_HEAD(&slot->group_list);
+		slot->phys = apm82181_chan->device->dma_desc_pool + i * db_sz;
+		slot->idx = i;
+		spin_lock_bh(&apm82181_chan->lock);
+		apm82181_chan->slots_allocated++;
+		list_add_tail(&slot->slot_node, &apm82181_chan->all_slots);
+		spin_unlock_bh(&apm82181_chan->lock);
+	}
+
+	if (i && !apm82181_chan->last_used) {
+		apm82181_chan->last_used =
+			list_entry(apm82181_chan->all_slots.next,
+				apm82181_desc_t,
+				slot_node);
+	}
+
+	printk("apm82181 adma%d: allocated %d descriptor slots\n",
+		apm82181_chan->device->id, i);
+
+	/* initialize the channel and the chain with a null operation */
+	if (init) {
+		/*
+		 * BUGFIX: this assignment used to sit inside the switch
+		 * body, before the first case label, where it was
+		 * unreachable dead code.  Hoist it so the flag is
+		 * actually cleared before the channel is published.
+		 */
+		apm82181_chan->hw_chain_inited = 0;
+		switch (apm82181_chan->device->id)
+		{
+		case APM82181_PDMA0_ID:
+			apm82181_dma_tchan[0] = apm82181_chan;
+			break;
+		case APM82181_PDMA1_ID:
+			apm82181_dma_tchan[1] = apm82181_chan;
+			break;
+		case APM82181_PDMA2_ID:
+			apm82181_dma_tchan[2] = apm82181_chan;
+			break;
+		case APM82181_PDMA3_ID:
+			apm82181_dma_tchan[3] = apm82181_chan;
+			break;
+		case APM82181_XOR_ID:
+			apm82181_dma_tchan[4] = apm82181_chan;
+			apm82181_chan_start_null_xor(apm82181_chan);
+			break;
+		default:
+			BUG();
+		}
+		apm82181_chan->needs_unmap = 1;
+	}
+
+	return (i > 0) ? i : -ENOMEM;
+}
+
+/**
+ * apm82181_desc_assign_cookie - assign a cookie
+ *
+ * Hands @desc the next monotonically increasing cookie, skipping the
+ * reserved non-positive range on wrap-around.
+ */
+static dma_cookie_t apm82181_desc_assign_cookie(apm82181_ch_t *chan,
+	apm82181_desc_t *desc)
+{
+	dma_cookie_t next = chan->common.cookie + 1;
+
+	if (next < 0)
+		next = 1;	/* wrapped: restart past the reserved values */
+	chan->common.cookie = desc->async_tx.cookie = next;
+	return next;
+}
+
+
+/**
+ * apm82181_adma_check_threshold - append CDBs to h/w chain if threshold
+ * has been achieved
+ */
+static void apm82181_adma_check_threshold(apm82181_ch_t *chan)
+{
+	dev_dbg(chan->device->common.dev, "apm82181 adma%d: pending: %d\n",
+		chan->device->id, chan->pending);
+	INFO;
+	if (chan->pending < APM82181_ADMA_THRESHOLD)
+		return;
+
+	chan->pending = 0;
+	apm82181_chan_append(chan);
+}
+
+/**
+ * apm82181_adma_tx_submit - submit new descriptor group to the channel
+ * (it's not necessary that descriptors will be submitted to the h/w
+ * chains too right now)
+ *
+ * Assigns a cookie, splices the group onto the channel's s/w chain (and
+ * links it to the previous tail's hardware chain when one exists), then
+ * lets apm82181_adma_check_threshold() decide whether to flush to h/w.
+ */
+static dma_cookie_t apm82181_adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ apm82181_desc_t *sw_desc = tx_to_apm82181_adma_slot(tx);
+ apm82181_ch_t *chan = to_apm82181_adma_chan(tx->chan);
+ apm82181_desc_t *group_start, *old_chain_tail;
+ int slot_cnt;
+ int slots_per_op;
+ dma_cookie_t cookie;
+ group_start = sw_desc->group_head;
+ slot_cnt = group_start->slot_cnt;
+ slots_per_op = group_start->slots_per_op;
+ INFO;
+ spin_lock_bh(&chan->lock);
+ cookie = apm82181_desc_assign_cookie(chan, sw_desc);
+
+ if (unlikely(list_empty(&chan->chain))) {
+ /* first peer */
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ chan_first_cdb[chan->device->id] = group_start;
+ } else {
+ /* isn't first peer, bind CDBs to chain */
+ old_chain_tail = list_entry(chan->chain.prev,
+ apm82181_desc_t, chain_node);
+ list_splice_init(&sw_desc->group_list,
+ &old_chain_tail->chain_node);
+ /* fix up the hardware chain */
+ apm82181_desc_set_link(chan, old_chain_tail, group_start);
+ }
+
+ /* increment the pending count by the number of operations */
+ chan->pending += slot_cnt / slots_per_op;
+ apm82181_adma_check_threshold(chan);
+ spin_unlock_bh(&chan->lock);
+
+ DBG("apm82181 adma%d:cookie: %d slot: %d tx %p\n",
+ chan->device->id, sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
+ return cookie;
+}
+/**
+ * apm82181_adma_prep_dma_xor - prepare CDB for a XOR operation
+ *
+ * Allocates a slot group under the channel lock and programs it with the
+ * destination, all sources and the byte count.  Returns NULL on zero
+ * length or when no slots are free; BUGs when @len exceeds the engine
+ * maximum.
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_xor(
+	struct dma_chan *chan, dma_addr_t dma_dest,
+	dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+	unsigned long flags)
+{
+	apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+	apm82181_desc_t *sw_desc, *group_start;
+	int slot_cnt, slots_per_op;
+
+#ifdef ADMA_DEBUG
+	printk("\n%s(%d):\n\tsrc: ", __func__,
+		apm82181_chan->device->id);
+	/* cast: dma_addr_t width is platform-dependent, pin it for %llx */
+	for (slot_cnt=0; slot_cnt < src_cnt; slot_cnt++)
+		printk("0x%llx ", (unsigned long long)dma_src[slot_cnt]);
+	printk("\n\tdst: 0x%llx\n", (unsigned long long)dma_dest);
+#endif
+	if (unlikely(!len))
+		return NULL;
+	BUG_ON(unlikely(len > APM82181_ADMA_XOR_MAX_BYTE_COUNT));
+
+	/* %zu: len is a size_t; the old "%u" specifier was mismatched */
+	dev_dbg(apm82181_chan->device->common.dev,
+		"apm82181 adma%d: %s src_cnt: %d len: %zu int_en: %d\n",
+		apm82181_chan->device->id, __func__, src_cnt, len,
+		flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+	spin_lock_bh(&apm82181_chan->lock);
+	slot_cnt = apm82181_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+	sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+		slots_per_op);
+	if (sw_desc) {
+		group_start = sw_desc->group_head;
+		apm82181_desc_init_xor(group_start, src_cnt, flags);
+		apm82181_adma_set_dest(group_start, dma_dest, 0);
+		while (src_cnt--)
+			apm82181_adma_set_src(group_start,
+				dma_src[src_cnt], src_cnt);
+		apm82181_desc_set_byte_count(group_start, apm82181_chan, len);
+		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
+	}
+	spin_unlock_bh(&apm82181_chan->lock);
+
+	return sw_desc ? &sw_desc->async_tx : NULL;
+}
+/**
+ * apm82181_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
+ *
+ * The resulting descriptor transfers nothing; its only purpose is to
+ * raise a completion interrupt.
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_interrupt(
+	struct dma_chan *chan, unsigned long flags)
+{
+	apm82181_ch_t *ch = to_apm82181_adma_chan(chan);
+	apm82181_desc_t *desc;
+
+	dev_dbg(ch->device->common.dev,
+		"apm82181 adma%d: %s\n", ch->device->id,
+		__FUNCTION__);
+	spin_lock_bh(&ch->lock);
+	/* a single slot suffices for the interrupt pseudo-op */
+	desc = apm82181_adma_alloc_slots(ch, 1, 1);
+	if (desc) {
+		apm82181_desc_t *head = desc->group_head;
+
+		apm82181_desc_init_interrupt(head, ch);
+		head->unmap_len = 0;
+		desc->async_tx.flags = flags;
+	}
+	spin_unlock_bh(&ch->lock);
+
+	return desc ? &desc->async_tx : NULL;
+}
+
+/**
+ * apm82181_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
+ *
+ * Returns NULL on zero length or when no slots are free; BUGs when @len
+ * exceeds the engine maximum.  DMA_PREP_INTERRUPT is forced on so every
+ * copy completion is signalled.
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_memcpy(
+	struct dma_chan *chan, dma_addr_t dma_dest,
+	dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+	apm82181_desc_t *sw_desc, *group_start;
+	int slot_cnt, slots_per_op;
+	if (unlikely(!len))
+		return NULL;
+	BUG_ON(unlikely(len > APM82181_ADMA_DMA_MAX_BYTE_COUNT));
+
+	spin_lock_bh(&apm82181_chan->lock);
+
+	/* %zu: len is a size_t; the old "%u" specifier was mismatched */
+	dev_dbg(apm82181_chan->device->common.dev,
+		"apm82181 adma%d: %s len: %zu int_en %d \n",
+		apm82181_chan->device->id, __FUNCTION__, len,
+		flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+	slot_cnt = slots_per_op = 1;
+	sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+		slots_per_op);
+	if (sw_desc) {
+		group_start = sw_desc->group_head;
+		flags |= DMA_PREP_INTERRUPT;
+		apm82181_desc_init_memcpy(group_start, flags);
+		apm82181_adma_set_dest(group_start, dma_dest, 0);
+		apm82181_adma_set_src(group_start, dma_src, 0);
+		apm82181_desc_set_byte_count(group_start, apm82181_chan, len);
+		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
+	}
+	spin_unlock_bh(&apm82181_chan->lock);
+	return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * apm82181_adma_prep_dma_memset - prepare CDB for a MEMSET operation
+ *
+ * Fills @len bytes at @dma_dest with @value.  Returns NULL on zero
+ * length or when no slots are free; BUGs when @len exceeds the engine
+ * maximum.
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_memset(
+	struct dma_chan *chan, dma_addr_t dma_dest, int value,
+	size_t len, unsigned long flags)
+{
+	apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+	apm82181_desc_t *sw_desc, *group_start;
+	int slot_cnt, slots_per_op;
+	if (unlikely(!len))
+		return NULL;
+	BUG_ON(unlikely(len > APM82181_ADMA_DMA_MAX_BYTE_COUNT));
+
+	spin_lock_bh(&apm82181_chan->lock);
+
+	/* %d for the int fill value, %zu for the size_t length — the old
+	 * "%u"/"%u" specifiers were mismatched
+	 */
+	dev_dbg(apm82181_chan->device->common.dev,
+		"apm82181 adma%d: %s cal: %d len: %zu int_en %d\n",
+		apm82181_chan->device->id, __FUNCTION__, value, len,
+		flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+	slot_cnt = slots_per_op = 1;
+	sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+		slots_per_op);
+	if (sw_desc) {
+		group_start = sw_desc->group_head;
+		apm82181_desc_init_memset(group_start, value, flags);
+		apm82181_adma_set_dest(group_start, dma_dest, 0);
+		apm82181_desc_set_byte_count(group_start, apm82181_chan, len);
+		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
+	}
+	spin_unlock_bh(&apm82181_chan->lock);
+
+	return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+
+/**
+ * apm82181_adma_set_dest - set destination address into descriptor
+ *
+ * For the PDMA channels the destination lives in the group head's CDB;
+ * for the XOR core the group entry at @index is selected first.  BUGs on
+ * an out-of-range index or an unknown channel id.
+ */
+static void apm82181_adma_set_dest(apm82181_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ apm82181_ch_t *chan = to_apm82181_adma_chan(sw_desc->async_tx.chan);
+ BUG_ON(index >= sw_desc->dst_cnt);
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ /* to do: support transfers lengths >
+ * APM82181_ADMA_DMA/XOR_MAX_BYTE_COUNT
+ */
+ apm82181_desc_set_dest_addr(sw_desc->group_head,
+ // chan, 0x8, addr, index); // Enabling HB bus
+ chan, addr, index);
+ break;
+ case APM82181_XOR_ID:
+ sw_desc = apm82181_get_group_entry(sw_desc, index);
+ apm82181_desc_set_dest_addr(sw_desc, chan,
+ addr, index);
+ break;
+ default:
+ BUG();
+ }
+}
+
+
+/**
+ * apm82181_adma_free_chan_resources - free the resources allocated
+ *
+ * Reaps completed work first, then unlinks any still-chained descriptors
+ * (counting them as in-use) and frees every slot.  One leftover chained
+ * descriptor is expected — the chain tail is deliberately retained by
+ * apm82181_adma_clean_slot(); more than one is reported.
+ */
+static void apm82181_adma_free_chan_resources(struct dma_chan *chan)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *iter, *_iter;
+ int in_use_descs = 0;
+
+ apm82181_adma_slot_cleanup(apm82181_chan);
+
+ spin_lock_bh(&apm82181_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &apm82181_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe_reverse(iter, _iter,
+ &apm82181_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ apm82181_chan->slots_allocated--;
+ }
+ apm82181_chan->last_used = NULL;
+
+ dev_dbg(apm82181_chan->device->common.dev,
+ "apm82181 adma%d %s slots_allocated %d\n",
+ apm82181_chan->device->id,
+ __FUNCTION__, apm82181_chan->slots_allocated);
+ spin_unlock_bh(&apm82181_chan->lock);
+
+ /* one is ok since we left it on there on purpose */
+ if (in_use_descs > 1)
+ printk(KERN_ERR "GT: Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+}
+
+/**
+ * apm82181_adma_is_complete - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ * @done: out (optional) - last completed cookie
+ * @used: out (optional) - last issued cookie
+ *
+ * Checks completion from the cached cookies first; if not yet complete,
+ * runs the slot cleanup to reap finished CDBs and re-reads the cookies
+ * before answering.  The check/clean/re-check order is deliberate.
+ */
+static enum dma_status apm82181_adma_is_complete(struct dma_chan *chan,
+ dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+ enum dma_status ret;
+
+ last_used = chan->cookie;
+ last_complete = apm82181_chan->completed_cookie;
+
+ if (done)
+ *done= last_complete;
+ if (used)
+ *used = last_used;
+
+ ret = dma_async_is_complete(cookie, last_complete, last_used);
+ if (ret == DMA_SUCCESS)
+ return ret;
+
+ apm82181_adma_slot_cleanup(apm82181_chan);
+
+ last_used = chan->cookie;
+ last_complete = apm82181_chan->completed_cookie;
+
+ if (done)
+ *done= last_complete;
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/**
+ * apm82181_adma_eot_handler - end of transfer interrupt handler
+ *
+ * Schedules the cleanup tasklet (XOR channel only) and acknowledges the
+ * interrupt at the engine.
+ */
+static irqreturn_t apm82181_adma_eot_handler(int irq, void *data)
+{
+	apm82181_ch_t *ch = data;
+
+	dev_dbg(ch->device->common.dev,
+		"apm82181 adma%d: %s\n", ch->device->id, __FUNCTION__);
+	INFO;
+	if (ch->device->id == APM82181_XOR_ID)
+		tasklet_schedule(&ch->irq_tasklet);
+	apm82181_adma_device_clear_eot_status(ch);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * apm82181_adma_err_handler - DMA error interrupt handler;
+ * do the same things as a eot handler
+ *
+ * NOTE(review): compiled out (#if 0); kept for reference. If ever
+ * re-enabled, note it schedules the tasklet unconditionally, without
+ * the XOR-id check the EOT handler applies - confirm that is intended.
+ */
+#if 0
+static irqreturn_t apm82181_adma_err_handler(int irq, void *data)
+{
+	apm82181_ch_t *chan = data;
+	dev_dbg(chan->device->common.dev,
+		"apm82181 adma%d: %s\n", chan->device->id, __FUNCTION__);
+	tasklet_schedule(&chan->irq_tasklet);
+	apm82181_adma_device_clear_eot_status(chan);
+
+	return IRQ_HANDLED;
+}
+#endif
+/**
+ * apm82181_test_callback - called when test operation has been done
+ *
+ * Completion callback attached to the self-test descriptors submitted
+ * by apm82181_test_xor()/apm82181_test_dma(); signals any waiter on
+ * apm82181_r5_test_comp.
+ */
+static void apm82181_test_callback (void *unused)
+{
+	complete(&apm82181_r5_test_comp);
+}
+
+/**
+ * apm82181_adma_issue_pending - flush all pending descriptors to h/w
+ */
+static void apm82181_adma_issue_pending(struct dma_chan *chan)
+{
+	apm82181_ch_t *apm_chan = to_apm82181_adma_chan(chan);
+
+	DBG("apm82181 adma%d: %s %d \n", apm_chan->device->id,
+		__FUNCTION__, apm_chan->pending);
+	/* nothing queued since the last flush -> nothing to do */
+	if (!apm_chan->pending)
+		return;
+	apm_chan->pending = 0;
+	apm82181_chan_append(apm_chan);
+}
+
+/**
+ * apm82181_pdma_remove - remove the asynch device
+ *
+ * Tears down what probe set up: unregisters from the dmaengine core,
+ * releases the IRQs and the MEM region, then frees the channels, the
+ * global channel references and the device structure itself.
+ */
+static int __devexit apm82181_pdma_remove(struct platform_device *dev)
+{
+	apm82181_dev_t *device = platform_get_drvdata(dev);
+	struct dma_chan *chan, *_chan;
+	struct ppc_dma_chan_ref *ref, *_ref;
+	apm82181_ch_t *apm82181_chan;
+	struct resource *res;
+	int i;
+
+	dma_async_device_unregister(&device->common);
+
+	for (i = 0; i < 3; i++) {
+		/* platform_get_irq() returns a negative errno when the
+		 * resource is missing; the old 'u32 irq' made that check
+		 * impossible and a bogus value could reach free_irq().
+		 */
+		int irq = platform_get_irq(dev, i);
+		if (irq >= 0)
+			free_irq(irq, device);
+	}
+
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (res)
+		/* resource bounds are inclusive: size = end - start + 1 */
+		release_mem_region(res->start, res->end - res->start + 1);
+
+	list_for_each_entry_safe(chan, _chan, &device->common.channels,
+				device_node) {
+		apm82181_chan = to_apm82181_adma_chan(chan);
+		list_del(&chan->device_node);
+		kfree(apm82181_chan);
+	}
+
+	list_for_each_entry_safe(ref, _ref, &ppc_adma_chan_list, node) {
+		list_del(&ref->node);
+		kfree(ref);
+	}
+
+	kfree(device);
+
+	return 0;
+}
+
+/* One-time XOR engine setup: soft-reset the core, select 64-bit
+ * addressing, and unmask the command-block/interrupt error sources.
+ */
+static inline void xor_hw_init (apm82181_dev_t *adev)
+{
+	volatile xor_regs_t *xor_reg = adev->xor_base;
+	/* Reset XOR */
+	xor_reg->crsr = XOR_CRSR_XASR_BIT;
+	xor_reg->crrr = XOR_CRSR_64BA_BIT;
+
+	/* enable XOR engine interrupts */
+	xor_reg->ier = XOR_IE_CBCIE_BIT |
+		XOR_IE_ICBIE_BIT | XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT;
+}
+
+/*
+ * Per channel probe: bind one PLB DMA channel (ids 0-3) or the XOR
+ * engine (id 4), allocate its descriptor pool, request its IRQ and
+ * register it with the dmaengine core.
+ */
+static int __devinit apm82181_dma_per_chan_probe(struct of_device *ofdev,
+	const struct of_device_id *match)
+{
+	/* request_irq() names, indexed by adev->id (0..3 PLB DMA, 4 XOR) */
+	static const char * const irq_names[] = {
+		"adma-chan0", "adma-chan1", "adma-chan2", "adma-chan3",
+		"adma-xor",
+	};
+	int ret = 0, irq;
+	const u32 *index, *dcr_regs, *pool_size;
+	apm82181_plb_dma_t *pdma;
+	apm82181_dev_t *adev;
+	apm82181_ch_t *chan;
+	struct ppc_dma_chan_ref *ref;
+	struct device_node *np = ofdev->node;
+	struct resource res;
+	int len;
+
+	INFO;
+	pdma = dev_get_drvdata(ofdev->dev.parent);
+	BUG_ON(!pdma);
+	if ((adev = kzalloc(sizeof(*adev), GFP_KERNEL)) == NULL) {
+		printk("ERROR:No Free memory for allocating dma channels\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+	adev->dev = &ofdev->dev;
+	index = of_get_property(np, "cell-index", NULL);
+	if(!index) {
+		printk(KERN_ERR "adma-channel: Device node %s has missing or invalid "
+			"cell-index property\n", np->full_name);
+		/* was: fell through with ret == 0, reporting bogus success */
+		ret = -ENODEV;
+		goto err;
+	}
+	adev->id = (int)*index;
+	/* The XOR engine/PLB DMA 4 channels have different resources/pool_sizes */
+	if (adev->id != APM82181_XOR_ID){
+		dcr_regs = of_get_property(np, "dcr-reg", &len);
+		if (!dcr_regs || (len != 2 * sizeof(u32))) {
+			printk(KERN_ERR "plb_dma channel%d: Can't get DCR register base !",
+				adev->id);
+			ret = -ENODEV;
+			goto err;
+		}
+		adev->dcr_base = dcr_regs[0];
+
+		pool_size = of_get_property(np, "pool_size", NULL);
+		if(!pool_size) {
+			printk(KERN_ERR "plb_dma channel%d: Device node %s has missing or "
+				"invalid pool_size property\n", adev->id, np->full_name);
+			ret = -ENODEV;
+			goto err;
+		}
+		adev->pool_size = *pool_size;
+	} else {
+		if (of_address_to_resource(np, 0, &res)) {
+			printk(KERN_ERR "adma_xor channel%d %s: could not get resource address.\n",
+				adev->id,np->full_name);
+			ret = -ENODEV;
+			goto err;
+		}
+
+		DBG("XOR resource start = %llx end = %llx\n", res.start, res.end);
+		adev->xor_base = ioremap(res.start, res.end - res.start + 1);
+		if (!adev->xor_base){
+			printk(KERN_ERR "XOR engine registers memory mapping failed.\n");
+			ret = -ENOMEM;
+			goto err;
+		}
+		adev->pool_size = PAGE_SIZE << 1;
+	}
+
+	adev->pdma = pdma;
+	adev->ofdev = ofdev;
+	dev_set_drvdata(&(ofdev->dev),adev);
+
+	switch (adev->id){
+	case APM82181_PDMA0_ID:
+	case APM82181_PDMA1_ID:
+	case APM82181_PDMA2_ID:
+	case APM82181_PDMA3_ID:
+		dma_cap_set(DMA_MEMCPY,adev->cap_mask);
+		break;
+	case APM82181_XOR_ID:
+		dma_cap_set(DMA_XOR,adev->cap_mask);
+		dma_cap_set(DMA_INTERRUPT,adev->cap_mask);
+		break;
+	default:
+		BUG();
+	}
+	/* XOR h/w configuration */
+	if(adev->id == APM82181_XOR_ID)
+		xor_hw_init(adev);
+	/* allocate coherent memory for hardware descriptors
+	 * note: writecombine gives slightly better performance, but
+	 * requires that we explicitly drain the write buffer
+	 */
+	if ((adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
+		adev->pool_size, &adev->dma_desc_pool, GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+
+	adev->common.cap_mask = adev->cap_mask;
+	INIT_LIST_HEAD(&adev->common.channels);
+	/* set base routines */
+	adev->common.device_alloc_chan_resources =
+		apm82181_adma_alloc_chan_resources;
+	adev->common.device_free_chan_resources =
+		apm82181_adma_free_chan_resources;
+	adev->common.device_is_tx_complete = apm82181_adma_is_complete;
+	adev->common.device_issue_pending = apm82181_adma_issue_pending;
+	adev->common.dev = &ofdev->dev;
+
+	/* set prep routines based on capability */
+	if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
+		adev->common.device_prep_dma_memcpy =
+			apm82181_adma_prep_dma_memcpy;
+	}
+	if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
+		adev->common.device_prep_dma_memset =
+			apm82181_adma_prep_dma_memset;
+	}
+
+	if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
+		adev->common.device_prep_dma_interrupt =
+			apm82181_adma_prep_dma_interrupt;
+	}
+
+	if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
+		adev->common.max_xor = XOR_MAX_OPS;
+		adev->common.device_prep_dma_xor =
+			apm82181_adma_prep_dma_xor;
+	}
+
+	/* create a channel */
+	if ((chan = kzalloc(sizeof(*chan), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto err_chan_alloc;
+	}
+	tasklet_init(&chan->irq_tasklet, apm82181_adma_tasklet,
+		(unsigned long)chan);
+
+	/* irq_of_parse_and_map() returns 0 (NO_IRQ) on failure, never a
+	 * negative number, so the old 'irq >= 0' test was always true and
+	 * a failed mapping ended up in request_irq(0, ...).
+	 */
+	irq = irq_of_parse_and_map(np, 0);
+	if (adev->id < 0 || adev->id >= ARRAY_SIZE(irq_names))
+		BUG();
+	if (irq) {
+		ret = request_irq(irq, apm82181_adma_eot_handler,
+			IRQF_DISABLED, irq_names[adev->id], chan);
+		if (ret) {
+			printk("Failed to request IRQ %d\n",irq);
+			ret = -EIO;
+			goto err_irq;
+		}
+	}
+
+	spin_lock_init(&chan->lock);
+	chan->device = adev;
+	INIT_LIST_HEAD(&chan->chain);
+	INIT_LIST_HEAD(&chan->all_slots);
+	chan->common.device = &adev->common;
+	list_add_tail(&chan->common.device_node, &adev->common.channels);
+	adev->common.chancnt++;
+
+	printk( "AMCC(R) APM82181 ADMA Engine found [%d]: "
+		"( capabilities: %s%s%s%s%s%s%s)\n",
+		adev->id,
+		dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq_xor " : "",
+		dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " :
+		"",
+		dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
+		dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " :
+		"",
+		dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
+		dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
+		dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "int " : "");
+	INFO;
+	ret = dma_async_device_register(&adev->common);
+	if (ret) {
+		dev_err(&ofdev->dev, "failed to register dma async device");
+		/* don't leak the IRQ we just requested */
+		if (irq)
+			free_irq(irq, chan);
+		goto err_irq;
+	}
+	INFO;
+	ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+	if (ref) {
+		INFO;
+		ref->chan = &chan->common;
+		INIT_LIST_HEAD(&ref->node);
+		list_add_tail(&ref->node, &ppc_adma_chan_list);
+	} else
+		dev_warn(&ofdev->dev, "failed to allocate channel reference!\n");
+
+	goto out;
+err_irq:
+	kfree(chan);
+err_chan_alloc:
+	dma_free_coherent(&ofdev->dev, adev->pool_size,
+			adev->dma_desc_pool_virt, adev->dma_desc_pool);
+err_dma_alloc:
+	if (adev->xor_base)
+		iounmap(adev->xor_base);
+err:
+	kfree(adev);
+out:
+	return ret;
+}
+
+/* top-level ADMA device-tree node, bound by apm82181_pdma_probe() */
+static struct of_device_id dma_4chan_match[] =
+{
+	{
+		.compatible = "amcc,apm82181-adma",
+	},
+	{},
+};
+
+/* child nodes (four PLB DMA channels plus the XOR engine), each bound
+ * individually by apm82181_dma_per_chan_probe() */
+static struct of_device_id dma_per_chan_match[] = {
+	{.compatible = "amcc,apm82181-dma-4channel",},
+	{.compatible = "amcc,xor",},
+	{},
+};
+/*
+ * apm82181_pdma_probe - probe the asynch device
+ *
+ * Allocates the parent context and walks the child nodes so the
+ * per-channel probe can bind each DMA/XOR channel.
+ */
+static int __devinit apm82181_pdma_probe(struct of_device *ofdev,
+	const struct of_device_id *match)
+{
+	int ret = 0;
+	apm82181_plb_dma_t *pdma;
+
+	if ((pdma = kzalloc(sizeof(*pdma), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	pdma->dev = &ofdev->dev;
+	pdma->ofdev = ofdev;
+	printk(PPC4XX_EDMA "Probing AMCC APM82181 ADMA engines...\n");
+
+	dev_set_drvdata(&(ofdev->dev),pdma);
+	/* was ignored: propagate child-bus probe failure and do not leak
+	 * pdma (or leave stale drvdata) when it fails */
+	ret = of_platform_bus_probe(ofdev->node, dma_per_chan_match,
+				&ofdev->dev);
+	if (ret) {
+		dev_set_drvdata(&(ofdev->dev), NULL);
+		kfree(pdma);
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * apm82181_test_xor - verify the XOR engine works: perform one DMA XOR
+ * operation with 3 source pages into a destination page. The sources
+ * are chosen so that their XOR is all-zeroes, which is what we check
+ * for in the destination afterwards.
+ */
+static int apm82181_test_xor (apm82181_ch_t *chan)
+{
+	apm82181_desc_t *sw_desc, *group_start;
+	struct page *pg_src[3], *pg_dest;
+	char *a;
+	dma_addr_t dma_src_addr[3];
+	dma_addr_t dma_dst_addr;
+	int rval = -EFAULT, i;
+	int len = PAGE_SIZE, src_cnt = 3;
+	int slot_cnt, slots_per_op;
+	INFO;
+	printk("ADMA channel %d XOR testing\n",chan->device->id);
+	for(i = 0; i < 3; i++){
+		pg_src[i] = alloc_page(GFP_KERNEL);
+		if (!pg_src[i]) {
+			/* release the pages already allocated (old code
+			 * leaked them) */
+			while (--i >= 0)
+				__free_page(pg_src[i]);
+			return -ENOMEM;
+		}
+	}
+	pg_dest = alloc_page(GFP_KERNEL);
+	if (!pg_dest) {
+		for(i = 0; i < 3; i++)
+			__free_page(pg_src[i]);
+		return -ENOMEM;
+	}
+	/* Fill the test page with ones */
+	memset(page_address(pg_src[0]), 0xDA, len);
+	memset(page_address(pg_src[1]), 0xDA, len);
+	memset(page_address(pg_src[2]), 0x00, len);
+	memset(page_address(pg_dest), 0xA5, len);
+	for(i = 0; i < 3; i++){
+		a = page_address(pg_src[i]);
+		printk("The virtual addr of src %d =%x\n",i, (unsigned int)a);
+		MEM_HEXDUMP(a,50);
+	}
+	a = page_address(pg_dest);
+	printk("The virtual addr of dest=%x\n", (unsigned int)a);
+	MEM_HEXDUMP(a,50);
+
+	for(i = 0; i < 3; i++){
+		dma_src_addr[i] = dma_map_page(chan->device->dev, pg_src[i], 0, len,
+				DMA_BIDIRECTIONAL);
+	}
+	dma_dst_addr = dma_map_page(chan->device->dev, pg_dest, 0, len,
+			DMA_BIDIRECTIONAL);
+	printk("dma_src_addr[0]: %llx; dma_src_addr[1]: %llx;\n "
+		"dma_src_addr[2]: %llx; dma_dst_addr %llx, len: %x\n", dma_src_addr[0],
+		dma_src_addr[1], dma_src_addr[2], dma_dst_addr, len);
+
+	spin_lock_bh(&chan->lock);
+	slot_cnt = apm82181_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+	sw_desc = apm82181_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+	if (sw_desc) {
+		group_start = sw_desc->group_head;
+		apm82181_desc_init_xor(group_start, src_cnt, DMA_PREP_INTERRUPT);
+		/* Setup addresses */
+		while (src_cnt--)
+			apm82181_adma_set_src(group_start,
+				dma_src_addr[src_cnt], src_cnt);
+		apm82181_adma_set_dest(group_start, dma_dst_addr, 0);
+		apm82181_desc_set_byte_count(group_start, chan, len);
+		sw_desc->unmap_len = PAGE_SIZE;
+	} else {
+		rval = -EFAULT;
+		spin_unlock_bh(&chan->lock);
+		goto exit;
+	}
+	spin_unlock_bh(&chan->lock);
+
+	printk("Submit CDB...\n");
+	MEM_HEXDUMP(sw_desc->hw_desc, 96);
+	async_tx_ack(&sw_desc->async_tx);
+	sw_desc->async_tx.callback = apm82181_test_callback;
+	sw_desc->async_tx.callback_param = NULL;
+
+	init_completion(&apm82181_r5_test_comp);
+	apm82181_adma_tx_submit(&sw_desc->async_tx);
+	apm82181_adma_issue_pending(&chan->common);
+	//wait_for_completion(&apm82181_r5_test_comp);
+	/* wait for a while so that dma transaction finishes */
+	mdelay(100);
+	/* Now check if the test page zeroed */
+	a = page_address(pg_dest);
+	/* XOR result at destination */
+	MEM_HEXDUMP(a,50);
+	if ((*(u32*)a) == 0x00000000 && memcmp(a, a+4, PAGE_SIZE-4)==0) {
+		/* page dest XOR is correct as expected - RAID-5 enabled */
+		rval = 0;
+	} else {
+		/* RAID-5 was not enabled */
+		rval = -EINVAL;
+	}
+
+exit:
+	dma_unmap_page(chan->device->dev, dma_src_addr[0], PAGE_SIZE, DMA_BIDIRECTIONAL);
+	dma_unmap_page(chan->device->dev, dma_src_addr[1], PAGE_SIZE, DMA_BIDIRECTIONAL);
+	dma_unmap_page(chan->device->dev, dma_src_addr[2], PAGE_SIZE, DMA_BIDIRECTIONAL);
+	dma_unmap_page(chan->device->dev, dma_dst_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+	__free_page(pg_src[0]);
+	__free_page(pg_src[1]);
+	__free_page(pg_src[2]);
+	__free_page(pg_dest);
+	return rval;
+}
+
+
+/*
+ * apm82181_test_dma - test whether the PLB DMA memcpy capability works.
+ * For this we perform one DMA memcpy operation from a source page to a
+ * destination page and compare their contents afterwards.
+ */
+static int apm82181_test_dma (apm82181_ch_t *chan)
+{
+	apm82181_desc_t *sw_desc;
+	struct page *pg_src, *pg_dest;
+	char *a, *d;
+	dma_addr_t dma_src_addr;
+	dma_addr_t dma_dst_addr;
+	int rval = -EFAULT;
+	int len = PAGE_SIZE;
+
+	printk("PLB DMA channel %d memcpy testing\n",chan->device->id);
+	pg_src = alloc_page(GFP_KERNEL);
+	if (!pg_src)
+		return -ENOMEM;
+	pg_dest = alloc_page(GFP_KERNEL);
+	if (!pg_dest) {
+		/* old code leaked pg_src here */
+		__free_page(pg_src);
+		return -ENOMEM;
+	}
+	/* Fill the test page with ones */
+	memset(page_address(pg_src), 0x77, len);
+	memset(page_address(pg_dest), 0xa5, len);
+	a = page_address(pg_src);
+	printk("The virtual addr of src =%x\n", (unsigned int)a);
+	MEM_HEXDUMP(a,50);
+	a = page_address(pg_dest);
+	printk("The virtual addr of dest=%x\n", (unsigned int)a);
+	MEM_HEXDUMP(a,50);
+	dma_src_addr = dma_map_page(chan->device->dev, pg_src, 0, len,
+			DMA_BIDIRECTIONAL);
+	dma_dst_addr = dma_map_page(chan->device->dev, pg_dest, 0, len,
+			DMA_BIDIRECTIONAL);
+	printk("dma_src_addr: %llx; dma_dst_addr %llx\n", dma_src_addr, dma_dst_addr);
+
+	spin_lock_bh(&chan->lock);
+	sw_desc = apm82181_adma_alloc_slots(chan, 1, 1);
+	if (sw_desc) {
+		/* 1 src, 1 dst, int_ena */
+		apm82181_desc_init_memcpy(sw_desc, DMA_PREP_INTERRUPT);
+		/* Setup adresses */
+		apm82181_adma_set_src(sw_desc, dma_src_addr, 0);
+		apm82181_adma_set_dest(sw_desc, dma_dst_addr, 0);
+		apm82181_desc_set_byte_count(sw_desc, chan, len);
+		sw_desc->unmap_len = PAGE_SIZE;
+	} else {
+		rval = -EFAULT;
+		spin_unlock_bh(&chan->lock);
+		goto exit;
+	}
+	spin_unlock_bh(&chan->lock);
+
+	printk("Submit CDB...\n");
+	MEM_HEXDUMP(sw_desc->hw_desc, 96);
+	async_tx_ack(&sw_desc->async_tx);
+	sw_desc->async_tx.callback = apm82181_test_callback;
+	sw_desc->async_tx.callback_param = NULL;
+
+	init_completion(&apm82181_r5_test_comp);
+	apm82181_adma_tx_submit(&sw_desc->async_tx);
+	apm82181_adma_issue_pending(&chan->common);
+	//wait_for_completion(&apm82181_r5_test_comp);
+
+	a = page_address(pg_src);
+	d = page_address(pg_dest);
+	if (!memcmp(a, d, len)) {
+		rval = 0;
+	} else {
+		rval = -EINVAL;
+	}
+
+	a = page_address(pg_src);
+	printk("\nAfter DMA done:");
+	printk("\nsrc %x value:\n", (unsigned int)a);
+	MEM_HEXDUMP(a,96);
+	a = page_address(pg_dest);
+	printk("\ndest%x value:\n", (unsigned int)a);
+	MEM_HEXDUMP(a,96);
+
+exit:
+	/* the old code freed the pages while they were still DMA-mapped */
+	dma_unmap_page(chan->device->dev, dma_src_addr, PAGE_SIZE,
+			DMA_BIDIRECTIONAL);
+	dma_unmap_page(chan->device->dev, dma_dst_addr, PAGE_SIZE,
+			DMA_BIDIRECTIONAL);
+	__free_page(pg_src);
+	__free_page(pg_dest);
+	return rval;
+}
+
+/* parent OF driver for the top-level ADMA node */
+static struct of_platform_driver apm82181_pdma_driver = {
+	.name = "apm82181_plb_dma",
+	.match_table = dma_4chan_match,
+
+	.probe = apm82181_pdma_probe,
+	/* no .remove wired up - apm82181_pdma_remove() is currently unused */
+	//.remove = apm82181_pdma_remove,
+};
+/* per-channel OF driver; NOTE(review): not static, but no external
+ * users are visible here - could likely be made static. */
+struct of_platform_driver apm82181_dma_per_chan_driver = {
+	.name = "apm82181-dma-4channel",
+	.match_table = dma_per_chan_match,
+	.probe = apm82181_dma_per_chan_probe,
+};
+
+/* /proc read handler: reports whether the XOR self-test has passed.
+ * Legacy read_proc interface; off/count are ignored, which is
+ * acceptable for this short single-buffer output.
+ */
+static int apm82181_xor_read (char *page, char **start, off_t off,
+	int count, int *eof, void *data)
+{
+	char *p = page;
+
+	p += sprintf(p, "%s\n",
+		apm82181_xor_verified ?
+		"APM82181 ASYNC XOR capability are VERIFIED.\n" :
+		"APM82181 ASYNC XOR capability are NOT VERIFIED.\n");
+
+	return p - page;
+}
+
+/* /proc write handler: any 2-byte write (e.g. "1\n") triggers the XOR
+ * engine self-test and records the outcome in apm82181_xor_verified.
+ */
+static int apm82181_xor_write (struct file *file, const char __user *buffer,
+	unsigned long count, void *data)
+{
+	/* -EFAULT means a bad user address; a wrong length is -EINVAL */
+	if(count != 2)
+		return -EINVAL;
+	/* Verify does it really work now */
+	if (!apm82181_test_xor(apm82181_dma_tchan[4])) {
+		/* APM82181 RAID-5 XOR has been activated successfully */;
+		printk("APM82181 ADMA XOR engine has been verified "
+			"successfully\n");
+		apm82181_xor_verified = 1;
+	} else {
+		/* APM82181 RAID-5 memcpy hasn't been activated! */;
+		printk("APM82181 ADMA XOR engine hasn't been "
+			"verified yet\n");
+		apm82181_xor_verified = 0;
+	}
+
+	return count;
+}
+
+/* /proc read handler: report the per-channel memcpy self-test status.
+ * The old version printk'ed to the kernel log and returned 0, so
+ * reading the proc file yielded nothing - a read_proc handler must
+ * write into @page and return the number of bytes produced.
+ */
+static int apm82181_dma_read (char *page, char **start, off_t off,
+	int count, int *eof, void *data)
+{
+	char *p = page;
+	int i;
+
+	p += sprintf(p, "APM82181 ASYNC MEMCPY capability\n");
+	for(i = 0; i < 4; i++){
+		p += sprintf(p, "\tPLB DMA channel %d: %s ", i,
+			apm82181_memcpy_verified[i] ?
+			"VERIFIED.\n" : "NOT VERIFIED.\n");
+	}
+	return p - page;
+}
+
+/* /proc write handler: expects exactly two bytes, a channel digit plus
+ * newline (e.g. "2\n"); runs the memcpy self-test on that channel.
+ */
+static int apm82181_dma_write (struct file *file, const char __user *buffer,
+	unsigned long count, void *data)
+{
+	/* room for 2 user bytes plus a NUL terminator */
+	char tmp[3];
+	u32 val;
+
+	/* wrong length is -EINVAL; -EFAULT is reserved for bad pointers */
+	if(count != 2)
+		return -EINVAL;
+
+	if (copy_from_user(tmp, buffer, count))
+		return -EFAULT;
+	/* simple_strtoul() needs a NUL-terminated string; the old 2-byte
+	 * buffer was unterminated and could be read past its end */
+	tmp[count] = '\0';
+	val = simple_strtoul(tmp, NULL, 10); /* decimal base */
+	if(!(val == 0 || val == 1 || val == 2 || val == 3 )) {
+		printk("Error! Wrong channel id, please choose 1 valid id [0/1/2/3]\n");
+		return -EINVAL;
+	}
+
+	/* Verify does it really work now */
+	if (!apm82181_test_dma(apm82181_dma_tchan[val])) {
+		/* APM82181 RAID-5 memcpy has been activated successfully */;
+		printk("APM82181 PLBDMA MEMCPY channel %d has been verified "
+			"successfully\n", val);
+		apm82181_memcpy_verified[val] = 1;
+	} else {
+		/* APM82181 RAID-5 memcpy hasn't been activated! */;
+		printk("APM82181 PLBDMA MEMCPY channel %d hasn't been "
+			"verified yet\n", val);
+		apm82181_memcpy_verified[val] = 0;
+	}
+
+	return count;
+}
+
+/* Register the per-channel OF driver; probing does the real work. */
+static int __init apm82181_adma_per_chan_init (void)
+{
+	return of_register_platform_driver(&apm82181_dma_per_chan_driver);
+}
+
+/* Register the parent ADMA driver and, on success, create the /proc
+ * entries used to trigger the memcpy and XOR self-tests.
+ */
+static int __init apm82181_adma_init (void)
+{
+	int rval;
+	struct proc_dir_entry *p;
+
+	rval = of_register_platform_driver(&apm82181_pdma_driver);
+
+	if (rval == 0) {
+		/* Create /proc entries */
+		apm82181_proot = proc_mkdir(APM82181_DMA_PROC_ROOT, NULL);
+		if (!apm82181_proot) {
+			printk(KERN_ERR "%s: failed to create %s proc "
+				"directory\n",__FUNCTION__,APM82181_DMA_PROC_ROOT);
+			/* User will not be able to enable h/w RAID-6 */
+			/* driver registration itself succeeded, so still
+			 * return success - the proc entries are optional */
+			return rval;
+		}
+
+		/* ADMA MEMCPY verification entry */
+		p = create_proc_entry("adma_memcopy_test", 0, apm82181_proot);
+		if (p) {
+			p->read_proc = apm82181_dma_read;
+			p->write_proc = apm82181_dma_write;
+		}
+		/* ADMA XOR capability verification entry */
+		p = create_proc_entry("adma_xor_test", 0, apm82181_proot);
+		if (p) {
+			p->read_proc = apm82181_xor_read;
+			p->write_proc = apm82181_xor_write;
+		}
+	}
+	return rval;
+}
+
+#if 0
+/* Compiled out: there is no unregister path for the per-channel driver
+ * either, so the module currently has no clean unload path.
+ */
+static void __exit apm82181_adma_exit (void)
+{
+	of_unregister_platform_driver(&apm82181_pdma_driver);
+	return;
+}
+module_exit(apm82181_adma_exit);
+#endif
+
+/* NOTE(review): two module_init() calls work as two initcalls when the
+ * driver is built in, but fail to build as a loadable module (duplicate
+ * init_module) - confirm this driver is always built in.
+ */
+module_init(apm82181_adma_per_chan_init);
+module_init(apm82181_adma_init);
+
+MODULE_AUTHOR("Tai Tri Nguyen<ttnguyen@appliedmicro.com>");
+MODULE_DESCRIPTION("APM82181 ADMA Engine Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8f99354082c..0177c3ff6ff 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -680,6 +680,7 @@ int dma_async_device_register(struct dma_device *device)
struct dma_chan* chan;
atomic_t *idr_ref;
+ printk( "--------------- %s: %i-------------------------\n",__FUNCTION__,__LINE__);
if (!device)
return -ENODEV;
@@ -858,14 +859,10 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- unsigned long flags;
dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
- flags = DMA_CTRL_ACK |
- DMA_COMPL_SRC_UNMAP_SINGLE |
- DMA_COMPL_DEST_UNMAP_SINGLE;
- tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, DMA_CTRL_ACK);
if (!tx) {
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
@@ -907,12 +904,10 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
dma_addr_t dma_dest, dma_src;
dma_cookie_t cookie;
int cpu;
- unsigned long flags;
dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
- flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
- tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
+ tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, DMA_CTRL_ACK);
if (!tx) {
dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
diff --git a/drivers/dma/ppc460ex-adma.c b/drivers/dma/ppc460ex-adma.c
new file mode 100644
index 00000000000..2ef1e9d6052
--- /dev/null
+++ b/drivers/dma/ppc460ex-adma.c
@@ -0,0 +1,5409 @@
+/*
+ * Copyright(c) 2006 DENX Engineering. All rights reserved.
+ *
+ * Author: Yuri Tikhonov <yur@emcraft.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports the asynchrounous DMA copy and RAID engines available
+ * on the AMCC PPC460ex Processors.
+ * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ * ADMA driver written by D.Williams.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/of_platform.h>
+#include <linux/proc_fs.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/ppc460ex_adma.h>
+#include <asm/ppc460ex_xor.h>
+#include <asm/ppc4xx_ocm.h>
+/* The list of channels exported by ppc460ex ADMA */
+struct list_head
+ppc_adma_chan_list = LIST_HEAD_INIT(ppc_adma_chan_list);
+
+/* This flag is set when want to refetch the xor chain in the interrupt
+ * handler
+ */
+static u32 do_xor_refetch = 0;
+
+/* Pointers to last submitted to DMA0, DMA1 CDBs */
+static ppc460ex_desc_t *chan_last_sub[3];
+static ppc460ex_desc_t *chan_first_cdb[3];
+
+/* Pointer to last linked and submitted xor CB */
+static ppc460ex_desc_t *xor_last_linked = NULL;
+static ppc460ex_desc_t *xor_last_submit = NULL;
+
+/* This array is used in data-check operations for storing a pattern */
+static char ppc460ex_qword[16];
+
+/* Since RXOR operations use the common register (MQ0_CF2H) for setting-up
+ * the block size in transactions, then we do not allow to activate more than
+ * only one RXOR transactions simultaneously. So use this var to store
+ * the information about is RXOR currently active (PPC460EX_RXOR_RUN bit is
+ * set) or not (PPC460EX_RXOR_RUN is clear).
+ */
+static unsigned long ppc460ex_rxor_state;
+
+/* /proc interface is used here to enable the h/w RAID-6 capabilities
+ */
+static struct proc_dir_entry *ppc460ex_proot;
+static struct proc_dir_entry *ppc460ex_pqroot;
+
+/* These are used in enable & check routines
+ */
+static u32 ppc460ex_r6_enabled;
+static u32 ppc460ex_r5_enabled;
+static ppc460ex_ch_t *ppc460ex_r6_tchan;
+static ppc460ex_ch_t *ppc460ex_r5_tchan;
+static struct completion ppc460ex_r6_test_comp;
+static struct completion ppc460ex_r5_test_comp;
+
+static int ppc460ex_adma_dma2rxor_prep_src (ppc460ex_desc_t *desc,
+ ppc460ex_rxor_cursor_t *cursor, int index,
+ int src_cnt, u32 addr);
+static void ppc460ex_adma_dma2rxor_set_src (ppc460ex_desc_t *desc,
+ int index, dma_addr_t addr);
+static void ppc460ex_adma_dma2rxor_set_mult (ppc460ex_desc_t *desc,
+ int index, u8 mult);
+#if 1
+/* Debug trace helper: prints "<func> Line:N" when DCR 0x60 reads
+ * nonzero (used here as a runtime debug switch).
+ */
+static inline void pr_dma(int x, char *str)
+{
+	if(mfdcr(0x60)) {
+		printk("<%s> Line:%d\n",str,x);
+	}
+}
+#else
+/* no-op variant when tracing is compiled out */
+static inline void pr_dma(int x, char *str)
+{
+}
+#endif
+/* Identity fixup: no large-physical-address translation is applied on
+ * this platform, so the address is returned unchanged.
+ */
+phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
+{
+	return addr;
+}
+/***********************************************************************
+ * HW specific initialization
+ * ********************************************************************/
+static u64 ppc460ex_adma_dmamask = DMA_32BIT_MASK;
+
+/* DMA and XOR platform devices' resources */
+
+static struct resource ppc460ex_dma_1_resources[] = {
+	{
+		/* start/end filled in at runtime by
+		 * ppc460ex_configure_raid_devices() */
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = DMA1_CS_FIFO_NEED_SERVICE_IRQ,
+		.end = DMA1_CS_FIFO_NEED_SERVICE_IRQ,
+		.flags = IORESOURCE_IRQ
+	},
+	{
+		.start = DMA_ERROR_IRQ,
+		.end = DMA_ERROR_IRQ,
+		.flags = IORESOURCE_IRQ
+	}
+};
+
+
+/* DMA and XOR platform devices' data */
+
+/* DMA0,1 engines use FIFO to maintain CDBs, so we
+ * should allocate the pool accordingly to size of this
+ * FIFO. Thus, the pool size depends on the FIFO depth:
+ * how much CDBs pointers FIFO may contaun then so much
+ * CDBs we should provide in pool.
+ * That is
+ * CDB size = 32B;
+ * CDBs number = (DMA0_FIFO_SIZE >> 3);
+ * Pool size = CDBs number * CDB size =
+ * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2.
+ *
+ * As far as the XOR engine is concerned, it does not
+ * use FIFOs but uses linked list. So there is no dependency
+ * between pool size to allocate and the engine configuration.
+ */
+
+static struct ppc460ex_adma_platform_data ppc460ex_dma_1_data = {
+	.hw_id  = PPC460EX_DMA1_ID,
+	/* one 32B CDB per FIFO pointer: (FIFO_SIZE >> 3) << 5 */
+	.pool_size = DMA1_FIFO_SIZE << 2,
+};
+
+/* DMA and XOR platform devices definitions */
+#if 1
+static struct platform_device ppc460ex_dma_1_channel = {
+	.name = "PPC460EX-ADMA",
+	.id = PPC460EX_DMA1_ID,
+	.num_resources = ARRAY_SIZE(ppc460ex_dma_1_resources),
+	.resource = ppc460ex_dma_1_resources,
+	.dev = {
+		.dma_mask = &ppc460ex_adma_dmamask,
+		.coherent_dma_mask = DMA_64BIT_MASK,
+		.platform_data = (void *) &ppc460ex_dma_1_data,
+	},
+};
+#endif
+
+/*
+ * Init DMA0/1 and XOR engines; allocate memory for DMAx FIFOs; set platform_device
+ * memory resources addresses
+ */
+static void ppc460ex_configure_raid_devices(void)
+{
+	void *fifo_buf;
+	volatile i2o_regs_t *i2o_reg;
+	volatile dma_regs_t *dma_reg1;
+	/*
+	 * volatile dma_regs_t *dma_reg0, *dma_reg1;
+	 volatile xor_regs_t *xor_reg;
+	 */
+	u32 mask;
+
+	/*
+	 * Map registers and allocate fifo buffer
+	 */
+	if (!(i2o_reg = ioremap(I2O_MMAP_BASE, I2O_MMAP_SIZE))) {
+		printk(KERN_ERR "I2O registers mapping failed.\n");
+		return;
+	}
+	if (!(dma_reg1 = ioremap(DMA1_MMAP_BASE, DMA_MMAP_SIZE))) {
+		printk(KERN_ERR "DMA1 registers mapping failed.\n");
+		goto err1;
+	}
+
+	/* Provide memory regions for DMA's FIFOs: I2O, DMA0 and DMA1 share
+	 * the base address of FIFO memory space.
+	 * Actually we need twice more physical memory than programmed in the
+	 * <fsiz> register (because there are two FIFOs foreach DMA: CP and CS)
+	 */
+	fifo_buf = kmalloc(( DMA1_FIFO_SIZE)<<1, GFP_KERNEL);
+	if (!fifo_buf) {
+		printk(KERN_ERR "DMA FIFO buffer allocating failed.\n");
+		goto err2;
+	}
+
+	/*
+	 * Configure h/w
+	 */
+	/* Reset I2O/DMA */
+	SDR_WRITE(SDR0_SRST0, SDR0_SRST_I2ODMA);
+	SDR_WRITE(SDR0_SRST0, 0);
+
+
+	/* Setup the base address of mmaped registers */
+	mtdcr(DCRN_I2O0_IBAH, (u32)(I2O_MMAP_BASE >> 32));
+	mtdcr(DCRN_I2O0_IBAL, (u32)(I2O_MMAP_BASE) | I2O_REG_ENABLE);
+
+	/* SetUp FIFO memory space base address */
+	out_le32(&i2o_reg->ifbah, 0);
+	out_le32(&i2o_reg->ifbal, ((u32)__pa(fifo_buf)));
+
+	/* set zero FIFO size for I2O, so the whole fifo_buf is used by DMAs.
+	 * DMA0_FIFO_SIZE is defined in bytes, <fsiz> - in number of CDB pointers (8byte).
+	 * DMA FIFO Length = CSlength + CPlength, where
+	 * CSlength = CPlength = (fsiz + 1) * 8.
+	 */
+	out_le32(&i2o_reg->ifsiz, 0);
+	out_le32(&dma_reg1->fsiz, DMA_FIFO_ENABLE | ((DMA1_FIFO_SIZE>>3) - 2));
+	/* Configure DMA engine */
+	out_le32(&dma_reg1->cfg, DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN);
+
+	/* Clear Status */
+	out_le32(&dma_reg1->dsts, ~0);
+
+	/*
+	 * Prepare WXOR/RXOR (finally it is being enabled via /proc interface of
+	 * the ppc460ex ADMA driver)
+	 */
+	/* Set HB alias */
+	mtdcr(DCRN_MQ0_BAUH, DMA_CUED_XOR_HB);
+
+	/* Set:
+	 * - LL transaction passing limit to 1;
+	 * - Memory controller cycle limit to 1;
+	 * - Galois Polynomial to 0x14d (default)
+	 */
+	mtdcr(DCRN_MQ0_CFBHL, 0x88a68000 | (1 << MQ0_CFBHL_TPLM) |
+		(1 << MQ0_CFBHL_HBCL) |
+		(PPC460EX_DEFAULT_POLY << MQ0_CFBHL_POLY));
+
+	/* Unmask 'CS FIFO Attention' interrupts and
+	 * enable generating interrupts on errors
+	 */
+	mask = in_le32(&i2o_reg->iopim) & ~(
+		I2O_IOPIM_P0SNE | I2O_IOPIM_P1SNE |
+		I2O_IOPIM_P0EM | I2O_IOPIM_P1EM);
+	out_le32(&i2o_reg->iopim, mask);
+
+	/* enable XOR engine interrupts */
+
+	/*
+	 * Unmap I2O registers
+	 */
+	iounmap(i2o_reg);
+	printk("<%s> line %d\n", __FUNCTION__, __LINE__);
+
+	/* Configure MQ as follows:
+	 * MQ: 0x80001C80. This means
+	 * - AddrAck First Request,
+	 * - Read Passing Limit = 1,
+	 * - Read Passing Enable,
+	 * - Read Flow Through Enable,
+	 * - MCIF Cycle Limit = 1.
+	 */
+#if 1
+	mdelay(1000);
+	mask = (1 << MQ_CF1_AAFR) | ((1 & MQ_CF1_RPLM_MSK) << MQ_CF1_RPLM) |
+		(1 << MQ_CF1_RPEN) | (1 << MQ_CF1_RFTE) |
+		((1 & MQ_CF1_WRCL_MSK) << MQ_CF1_WRCL);
+	mtdcr(DCRN_MQ0_CF1H, mask);
+	mtdcr(DCRN_MQ0_CF1L, mask);
+#endif
+	printk("<%s> line %d\n", __FUNCTION__, __LINE__);
+
+	/* Configure PLB as follows:
+	 * PLB: 0xDF000000. This means
+	 * - Priority level 00 fair priority,
+	 * - Priority level 01 fair priority,
+	 * - Priority level 11 fair priority,
+	 * - High Bus Utilization enabled,
+	 * - 4 Deep read pipe,
+	 * - 2 Deep write pipe.
+	 */
+	mask = (1 << PLB_ACR_PPM0) | (1 << PLB_ACR_PPM1) | (1 << PLB_ACR_PPM3) |
+		(1 << PLB_ACR_HBU) | ((3 & PLB_ACR_RDP_MSK) << PLB_ACR_RDP) |
+		(1 << PLB_ACR_WRP);
+	mtdcr(DCRN_PLB0_ACR, mask);
+	mtdcr(DCRN_PLB1_ACR, mask);
+	printk("<%s> line %d\n", __FUNCTION__, __LINE__);
+
+	/*
+	 * Set resource addresses
+	 */
+	/* NOTE(review): the MEM resource is populated with the ioremapped
+	 * *virtual* register address, not the physical base - the probe
+	 * path apparently consumes it directly; confirm before changing.
+	 */
+	ppc460ex_dma_1_channel.resource[0].start = (resource_size_t)(dma_reg1);
+	/* resource bounds are inclusive: end = start + size - 1 */
+	ppc460ex_dma_1_channel.resource[0].end =
+		ppc460ex_dma_1_channel.resource[0].start + DMA_MMAP_SIZE - 1;
+	/* fixed format specifiers: resource_size_t may be 64-bit, and a
+	 * pointer must be printed with %p, not %lx */
+	printk( " ppc460ex_dma_1_channel.resource[0].start=0x%llx \n",
+		(unsigned long long)ppc460ex_dma_1_channel.resource[0].start);
+	printk("<%s> line %d dma_reg1=0x%p \n", __FUNCTION__, __LINE__,
+		dma_reg1);
+
+
+	printk("<%s> line %d\n", __FUNCTION__, __LINE__);
+	return;
+err2:
+	iounmap(dma_reg1);
+err1:
+	iounmap(i2o_reg);
+	return;
+}
+#if 1
+/* Platform devices registered by this driver.  Only DMA1 is active;
+ * DMA0 and the XOR core entries are kept commented out for reference.
+ */
+static struct platform_device *ppc460ex_devs[] __initdata = {
+/* &ppc460ex_dma_0_channel, */
+	&ppc460ex_dma_1_channel,
+	/*&ppc460ex_xor_channel, */
+};
+#endif
+
+/******************************************************************************
+ * Command (Descriptor) Blocks low-level routines
+ ******************************************************************************/
+/**
+ * ppc460ex_desc_init_interrupt - initialize the descriptor for INTERRUPT
+ * pseudo operation
+ * @desc: descriptor slot whose hardware CDB is initialized
+ * @chan: channel the descriptor belongs to (selects XOR vs DMA layout)
+ *
+ * Builds a NOP descriptor whose only effect is to raise a completion
+ * interrupt: on the XOR core via the CBCE bit in the CB, on DMA0/1 via
+ * the PPC460EX_DESC_INT software flag (consumed when the CDB is pushed
+ * to the FIFO).  Unknown engine ids are logged and ignored.
+ */
+static inline void ppc460ex_desc_init_interrupt (ppc460ex_desc_t *desc,
+		ppc460ex_ch_t *chan)
+{
+	xor_cb_t *p;
+
+	switch (chan->device->id) {
+	case PPC460EX_XOR_ID:
+		p = desc->hw_desc;
+		memset (desc->hw_desc, 0, sizeof(xor_cb_t));
+		/* NOP with Command Block Complete Enable */
+		p->cbc = XOR_CBCR_CBCE_BIT;
+		break;
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		memset (desc->hw_desc, 0, sizeof(dma_cdb_t));
+		/* NOP with interrupt */
+		set_bit(PPC460EX_DESC_INT, &desc->flags);
+		break;
+	default:
+		printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
+				__FUNCTION__);
+		break;
+	}
+}
+
+/**
+ * ppc460ex_desc_init_null_xor - initialize the descriptor for NULL XOR
+ * pseudo operation
+ * @desc: descriptor slot to turn into a no-source, one-destination NULL XOR
+ *
+ * Clears the hardware CB and resets the software bookkeeping fields.
+ */
+static inline void ppc460ex_desc_init_null_xor(ppc460ex_desc_t *desc)
+{
+	desc->hw_next = NULL;
+	desc->dst_cnt = 1;
+	desc->src_cnt = 0;
+	memset(desc->hw_desc, 0, sizeof(xor_cb_t));
+}
+
+/**
+ * ppc460ex_desc_init_pqxor_xor - initialize the descriptor for PQ_XOR
+ * operation in DMA2 controller
+ * @desc: descriptor slot to initialize
+ * @dst_cnt: number of destinations recorded in the slot
+ * @src_cnt: number of sources recorded in the slot
+ * @flags: dmaengine prep flags; DMA_PREP_INTERRUPT enables the completion
+ *         interrupt via the CBCE bit
+ *
+ * Zeroes the XOR CB, resets the software chain/counters and the per-source
+ * reverse flags, and sets the target bit (plus CBCE when requested).
+ */
+static inline void ppc460ex_desc_init_dma2rxor(ppc460ex_desc_t *desc,
+		int dst_cnt, int src_cnt, unsigned long flags)
+{
+	xor_cb_t *hw_desc = desc->hw_desc;
+
+	memset (desc->hw_desc, 0, sizeof(xor_cb_t));
+	desc->hw_next = NULL;
+	desc->src_cnt = src_cnt;
+	desc->dst_cnt = dst_cnt;
+	memset (desc->reverse_flags, 0, sizeof (desc->reverse_flags));
+	desc->descs_per_op = 0;
+
+	hw_desc->cbc = XOR_CBCR_TGT_BIT;
+	if (flags & DMA_PREP_INTERRUPT)
+		/* Enable interrupt on complete */
+		hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
+}
+
+/**
+ * ppc460ex_desc_init_pq - initialize the descriptor for PQ_XOR operation
+ * @desc: head descriptor of the CDB group (iterated via desc->group_list)
+ * @dst_cnt: number of destinations (P and/or Q)
+ * @src_cnt: number of sources
+ * @flags: dmaengine prep flags; DMA_PREP_INTERRUPT marks only the last CDB
+ * @op: driver-private operation bits OR-ed into desc->flags
+ *      (RXOR/WXOR/ZERO_P/ZERO_Q selectors)
+ *
+ * First pass zeroes every CDB and links the slots through hw_next; only
+ * the last slot may carry the interrupt flag.  Second pass assigns the
+ * opcodes: MULTICAST when both P and Q are written, MV_SG1_SG2 otherwise.
+ * WXOR-only chains may start with one/two destination-zeroing slots;
+ * RXOR chains start with one or two RXOR slots followed by optional
+ * WXOR slots.
+ */
+static inline void ppc460ex_desc_init_pq(ppc460ex_desc_t *desc,
+		int dst_cnt, int src_cnt, unsigned long flags,
+		unsigned long op)
+{
+	dma_cdb_t *hw_desc;
+	ppc460ex_desc_t *iter;
+	u8 dopc;
+
+
+	/* Common initialization of a PQ descriptors chain */
+
+	set_bits(op, &desc->flags);
+	desc->src_cnt = src_cnt;
+	desc->dst_cnt = dst_cnt;
+
+	/* both P and Q written -> MULTICAST; single destination -> plain move */
+	dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
+		DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;
+
+	list_for_each_entry(iter, &desc->group_list, chain_node) {
+		hw_desc = iter->hw_desc;
+		memset (iter->hw_desc, 0, sizeof(dma_cdb_t));
+
+		if (likely(!list_is_last(&iter->chain_node,
+				&desc->group_list))) {
+			/* set 'next' pointer */
+			iter->hw_next = list_entry(iter->chain_node.next,
+				ppc460ex_desc_t, chain_node);
+			clear_bit(PPC460EX_DESC_INT, &iter->flags);
+		} else {
+			/* this is the last descriptor.
+			 * this slot will be pasted from ADMA level
+			 * each time it wants to configure parameters
+			 * of the transaction (src, dst, ...)
+			 */
+			iter->hw_next = NULL;
+			if (flags & DMA_PREP_INTERRUPT)
+				set_bit(PPC460EX_DESC_INT, &iter->flags);
+			else
+				clear_bit(PPC460EX_DESC_INT, &iter->flags);
+		}
+	}
+
+	/* Set OPS depending on WXOR/RXOR type of operation */
+	if (!test_bit(PPC460EX_DESC_RXOR, &desc->flags)) {
+		/* This is a WXOR only chain:
+		 * - first descriptors are for zeroing destinations
+		 *   if PPC460EX_ZERO_P/Q set;
+		 * - descriptors remained are for GF-XOR operations.
+		 */
+		iter = list_first_entry(&desc->group_list,
+				ppc460ex_desc_t, chain_node);
+
+		if (test_bit(PPC460EX_ZERO_P, &desc->flags)) {
+			hw_desc = iter->hw_desc;
+			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+			/* list_first_entry(&iter->chain_node, ...) advances
+			 * to the slot following iter */
+			iter = list_first_entry(&iter->chain_node,
+					ppc460ex_desc_t, chain_node);
+		}
+
+		if (test_bit(PPC460EX_ZERO_Q, &desc->flags)) {
+			hw_desc = iter->hw_desc;
+			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+			iter = list_first_entry(&iter->chain_node,
+					ppc460ex_desc_t, chain_node);
+		}
+
+		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
+			hw_desc = iter->hw_desc;
+			hw_desc->opc = dopc;
+		}
+	} else {
+		/* This is either RXOR-only or mixed RXOR/WXOR */
+
+		/* The first 1 or 2 slots in chain are always RXOR,
+		 * if need to calculate P & Q, then there are two
+		 * RXOR slots; if only P or only Q, then there is one
+		 */
+		iter = list_first_entry(&desc->group_list,
+				ppc460ex_desc_t, chain_node);
+		hw_desc = iter->hw_desc;
+		hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+
+		if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
+			iter = list_first_entry(&iter->chain_node,
+					ppc460ex_desc_t, chain_node);
+			hw_desc = iter->hw_desc;
+			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+		}
+
+		/* The remain descs (if any) are WXORs */
+		if (test_bit(PPC460EX_DESC_WXOR, &desc->flags)) {
+			iter = list_first_entry(&iter->chain_node,
+					ppc460ex_desc_t, chain_node);
+			list_for_each_entry_from(iter, &desc->group_list,
+					chain_node) {
+				hw_desc = iter->hw_desc;
+				hw_desc->opc = dopc;
+			}
+		}
+	}
+}
+/**
+ * ppc460ex_desc_init_xor - initialize the descriptor chain for XOR operation
+ * @desc: head descriptor of the CDB group
+ * @dst_cnt: number of destinations
+ * @src_cnt: number of sources
+ * @flags: dmaengine prep flags; DMA_PREP_INTERRUPT marks only the last CDB
+ * @op: driver-private operation bits OR-ed into desc->flags
+ *
+ * NOTE(review): near-duplicate of ppc460ex_desc_init_pq() minus the
+ * PPC460EX_ZERO_Q pass — a candidate for sharing a common helper.
+ */
+void ppc460ex_desc_init_xor(ppc460ex_desc_t *desc,
+		int dst_cnt, int src_cnt, unsigned long flags,
+		unsigned long op)
+{
+	dma_cdb_t *hw_desc;
+	ppc460ex_desc_t *iter;
+	u8 dopc;
+
+
+	/* Common initialization of a PQ descriptors chain */
+
+	set_bits(op, &desc->flags);
+	desc->src_cnt = src_cnt;
+	desc->dst_cnt = dst_cnt;
+
+	/* both destinations written -> MULTICAST; else plain move */
+	dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ?
+		DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;
+
+	list_for_each_entry(iter, &desc->group_list, chain_node) {
+		hw_desc = iter->hw_desc;
+		memset (iter->hw_desc, 0, sizeof(dma_cdb_t));
+
+		if (likely(!list_is_last(&iter->chain_node,
+				&desc->group_list))) {
+			/* set 'next' pointer */
+			iter->hw_next = list_entry(iter->chain_node.next,
+				ppc460ex_desc_t, chain_node);
+			clear_bit(PPC460EX_DESC_INT, &iter->flags);
+		} else {
+			/* this is the last descriptor.
+			 * this slot will be pasted from ADMA level
+			 * each time it wants to configure parameters
+			 * of the transaction (src, dst, ...)
+			 */
+			iter->hw_next = NULL;
+			if (flags & DMA_PREP_INTERRUPT)
+				set_bit(PPC460EX_DESC_INT, &iter->flags);
+			else
+				clear_bit(PPC460EX_DESC_INT, &iter->flags);
+		}
+	}
+
+	/* Set OPS depending on WXOR/RXOR type of operation */
+	if (!test_bit(PPC460EX_DESC_RXOR, &desc->flags)) {
+		/* This is a WXOR only chain:
+		 * - first descriptors are for zeroing destinations
+		 *   if PPC460EX_ZERO_P/Q set;
+		 * - descriptors remained are for GF-XOR operations.
+		 */
+		iter = list_first_entry(&desc->group_list,
+				ppc460ex_desc_t, chain_node);
+
+		if (test_bit(PPC460EX_ZERO_P, &desc->flags)) {
+			hw_desc = iter->hw_desc;
+			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+			/* advance to the slot following iter */
+			iter = list_first_entry(&iter->chain_node,
+					ppc460ex_desc_t, chain_node);
+		}
+
+
+		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
+			hw_desc = iter->hw_desc;
+			hw_desc->opc = dopc;
+		}
+	} else {
+		/* This is either RXOR-only or mixed RXOR/WXOR */
+
+		/* The first 1 or 2 slots in chain are always RXOR,
+		 * if need to calculate P & Q, then there are two
+		 * RXOR slots; if only P or only Q, then there is one
+		 */
+		iter = list_first_entry(&desc->group_list,
+				ppc460ex_desc_t, chain_node);
+		hw_desc = iter->hw_desc;
+		hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+
+		if (desc->dst_cnt == DMA_DEST_MAX_NUM) {
+			iter = list_first_entry(&iter->chain_node,
+					ppc460ex_desc_t, chain_node);
+			hw_desc = iter->hw_desc;
+			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+		}
+
+		/* The remain descs (if any) are WXORs */
+		if (test_bit(PPC460EX_DESC_WXOR, &desc->flags)) {
+			iter = list_first_entry(&iter->chain_node,
+					ppc460ex_desc_t, chain_node);
+			list_for_each_entry_from(iter, &desc->group_list,
+					chain_node) {
+				hw_desc = iter->hw_desc;
+				hw_desc->opc = dopc;
+			}
+		}
+	}
+}
+/**
+ * ppc460ex_desc_init_dma01_xor - initialize the descriptor for P_XOR operation
+ * @desc: head descriptor of the CDB group
+ * @dst_cnt: number of destinations
+ * @src_cnt: number of sources
+ * @flags: dmaengine prep flags; DMA_PREP_INTERRUPT marks only the last CDB
+ * @op: driver-private operation bits OR-ed into desc->flags
+ *
+ * First pass zeroes and links the CDBs; second pass sets the opcodes.
+ *
+ * BUGFIX: the WXOR opcode pass used list_for_each_entry(), which restarts
+ * from the list head and immediately overwrites the zero-P descriptor's
+ * opcode set just above.  Use list_for_each_entry_from() to continue from
+ * the current slot, matching ppc460ex_desc_init_pq()/_init_xor().
+ */
+static inline void ppc460ex_desc_init_dma01_xor(ppc460ex_desc_t *desc,
+		int dst_cnt, int src_cnt, unsigned long flags,
+		unsigned long op)
+{
+	dma_cdb_t *hw_desc;
+	ppc460ex_desc_t *iter;
+
+	/* Common initialization of a XOR descriptors chain */
+
+	set_bits(op, &desc->flags);
+	desc->src_cnt = src_cnt;
+	desc->dst_cnt = dst_cnt;
+
+	list_for_each_entry(iter, &desc->group_list, chain_node) {
+		hw_desc = iter->hw_desc;
+		memset (iter->hw_desc, 0, sizeof(dma_cdb_t));
+
+		if (likely(!list_is_last(&iter->chain_node,
+				&desc->group_list))) {
+			/* set 'next' pointer */
+			iter->hw_next = list_entry(iter->chain_node.next,
+				ppc460ex_desc_t, chain_node);
+			clear_bit(PPC460EX_DESC_INT, &iter->flags);
+		} else {
+			/* this is the last descriptor.
+			 * this slot will be pasted from ADMA level
+			 * each time it wants to configure parameters
+			 * of the transaction (src, dst, ...)
+			 */
+			iter->hw_next = NULL;
+			if (flags & DMA_PREP_INTERRUPT)
+				set_bit(PPC460EX_DESC_INT, &iter->flags);
+			else
+				clear_bit(PPC460EX_DESC_INT, &iter->flags);
+		}
+	}
+
+	/* Set OPS depending on WXOR/RXOR type of operation */
+	if (!test_bit(PPC460EX_DESC_RXOR, &desc->flags)) {
+		/* This is a WXOR only chain:
+		 * - first <dst_cnt> descriptors are for zeroing destinations
+		 *   if PPC460EX_ZERO_P is set;
+		 * - descriptors remained are for GF-XOR operations.
+		 */
+		iter = list_first_entry(&desc->group_list,
+				ppc460ex_desc_t, chain_node);
+
+		if (dst_cnt && test_bit(PPC460EX_ZERO_P,
+				&desc->flags)) {
+			/* MV_SG1_SG2 to zero P or Q if this is
+			 * just PQ_XOR operation and MV_SG1_SG2
+			 * if only Q has to be calculated
+			 */
+			hw_desc = iter->hw_desc;
+			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+			/* advance to the slot following iter */
+			iter = list_first_entry(&iter->chain_node,
+					ppc460ex_desc_t, chain_node);
+		}
+		list_for_each_entry_from(iter, &desc->group_list, chain_node) {
+			hw_desc = iter->hw_desc;
+			if (desc->dst_cnt == DMA_DEST_MAX_NUM)
+				hw_desc->opc = DMA_CDB_OPC_MULTICAST;
+			else
+				hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+		}
+	} else {
+		/* This is either RXOR-only or mixed RXOR/WXOR
+		 * The first slot in chain is always RXOR,
+		 * the slots remained (if there are) are WXOR
+		 */
+		list_for_each_entry(iter, &desc->group_list, chain_node) {
+			hw_desc = iter->hw_desc;
+			/* No DMA_CDB_OPC_MULTICAST option for RXOR */
+			hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+		}
+	}
+}
+
+/**
+ * ppc460ex_desc_init_pqzero_sum - initialize the descriptor for PQ_VAL
+ * operation
+ * @desc: head descriptor of the CDB group
+ * @dst_cnt: number of destinations being verified (P and/or Q)
+ * @src_cnt: number of source CDBs (GF-XOR stage)
+ *
+ * The first src_cnt slots perform the GF-XOR; the remaining slots carry
+ * DCHECK128 to compare the result.  The last slot always interrupts,
+ * since the zero-sum status is collected in the IRQ handler.
+ */
+static inline void ppc460ex_desc_init_pqzero_sum(ppc460ex_desc_t *desc,
+		int dst_cnt, int src_cnt)
+{
+	dma_cdb_t *hw_desc;
+	ppc460ex_desc_t *iter;
+	int i = 0;	/* position of the current slot in the chain */
+
+	/* initialize each descriptor in chain */
+	list_for_each_entry(iter, &desc->group_list, chain_node) {
+		hw_desc = iter->hw_desc;
+		memset (iter->hw_desc, 0, sizeof(dma_cdb_t));
+
+		/* This is a PQ_VAL operation:
+		 * - first <dst_cnt> descriptors are for GF-XOR operations;
+		 * - <dst_cnt> descriptors remained are for checking the result.
+		 */
+		if (i++ < src_cnt)
+			/* MV_SG1_SG2 if only Q is being verified
+			 * MULTICAST if both P and Q are being verified
+			 */
+			hw_desc->opc = (dst_cnt == DMA_DEST_MAX_NUM) ?
+				DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2;
+		else
+			/* DMA_CDB_OPC_DCHECK128 operation */
+			hw_desc->opc = DMA_CDB_OPC_DCHECK128;
+
+		if (likely(!list_is_last(&iter->chain_node,
+				&desc->group_list))) {
+			/* set 'next' pointer */
+			iter->hw_next = list_entry(iter->chain_node.next,
+				ppc460ex_desc_t, chain_node);
+		} else {
+			/* this is the last descriptor.
+			 * this slot will be pasted from ADMA level
+			 * each time it wants to configure parameters
+			 * of the transaction (src, dst, ...)
+			 */
+			iter->hw_next = NULL;
+			/* always enable interrupt generating since we get
+			 * the status of pqzero from the handler
+			 */
+			set_bit(PPC460EX_DESC_INT, &iter->flags);
+		}
+	}
+	desc->src_cnt = src_cnt;
+	desc->dst_cnt = dst_cnt;
+}
+
+/**
+ * ppc460ex_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ * @desc: descriptor slot to initialize
+ * @flags: dmaengine prep flags; DMA_PREP_INTERRUPT requests a completion IRQ
+ *
+ * Single-source, single-destination move (MV_SG1_SG2 opcode).
+ */
+static inline void ppc460ex_desc_init_memcpy(ppc460ex_desc_t *desc,
+		unsigned long flags)
+{
+	dma_cdb_t *hw_desc = desc->hw_desc;
+
+	memset(desc->hw_desc, 0, sizeof(dma_cdb_t));
+	hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
+
+	desc->hw_next = NULL;
+	desc->src_cnt = 1;
+	desc->dst_cnt = 1;
+
+	if (!(flags & DMA_PREP_INTERRUPT))
+		clear_bit(PPC460EX_DESC_INT, &desc->flags);
+	else
+		set_bit(PPC460EX_DESC_INT, &desc->flags);
+}
+
+/**
+ * ppc460ex_desc_init_memset - initialize the descriptor for MEMSET operation
+ * @desc: descriptor slot to initialize
+ * @value: fill value replicated into both 128-bit pattern halves
+ * @flags: dmaengine prep flags; DMA_PREP_INTERRUPT requests a completion IRQ
+ *
+ * Uses the DFILL128 opcode; the pattern is written into sg1 and sg3.
+ */
+static inline void ppc460ex_desc_init_memset(ppc460ex_desc_t *desc, int value,
+		unsigned long flags)
+{
+	dma_cdb_t *hw_desc = desc->hw_desc;
+	u32 pattern = cpu_to_le32((u32)value);
+
+	memset(desc->hw_desc, 0, sizeof(dma_cdb_t));
+
+	desc->hw_next = NULL;
+	desc->src_cnt = 1;
+	desc->dst_cnt = 1;
+
+	if (!(flags & DMA_PREP_INTERRUPT))
+		clear_bit(PPC460EX_DESC_INT, &desc->flags);
+	else
+		set_bit(PPC460EX_DESC_INT, &desc->flags);
+
+	hw_desc->sg1u = pattern;
+	hw_desc->sg1l = pattern;
+	hw_desc->sg3u = pattern;
+	hw_desc->sg3l = pattern;
+	hw_desc->opc = DMA_CDB_OPC_DFILL128;
+}
+
+/**
+ * ppc460ex_desc_set_src_addr - set source address into the descriptor
+ * @desc: descriptor whose CDB source (sg1) is filled in
+ * @chan: channel (only DMA0/1 are handled; other ids are silently ignored)
+ * @src_idx: source index (unused here; kept for interface symmetry)
+ * @addrh: high 32 bits of the source address, or 0 to derive both halves
+ *         from @addrl via fixup_bigphys_addr()
+ * @addrl: low 32 bits of the source address
+ */
+static inline void ppc460ex_desc_set_src_addr( ppc460ex_desc_t *desc,
+			ppc460ex_ch_t *chan, int src_idx,
+			dma_addr_t addrh, dma_addr_t addrl)
+{
+	dma_cdb_t *dma_hw_desc;
+	phys_addr_t addr64, tmplow, tmphi;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		if (!addrh) {
+			/* no explicit high half: expand addrl to a full
+			 * physical address and split it */
+			addr64 = fixup_bigphys_addr(addrl, sizeof(phys_addr_t));
+			tmphi = (addr64 >> 32);
+			tmplow = (addr64 & 0xFFFFFFFF);
+		} else {
+			tmphi = addrh;
+			tmplow = addrl;
+		}
+		dma_hw_desc = desc->hw_desc;
+		dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow);
+		dma_hw_desc->sg1u = cpu_to_le32((u32)tmphi);
+		break;
+	}
+}
+
+/**
+ * ppc460ex_desc_set_src_mult - set source address mult into the descriptor
+ * @desc: descriptor whose cued-address upper word receives the multiplier
+ * @chan: channel (DMA0/1 supported; XOR id accepted but is a no-op here)
+ * @mult_index: bit offset at which the multiplier is OR-ed in
+ * @sg_index: which scatter/gather upper word to modify (SRC for RXOR,
+ *            DST1/DST2 for WXOR)
+ * @mult_value: GF multiplier value
+ *
+ * NOTE(review): after OR-ing mult_value the code also ORs (1 << mult_index)
+ * unconditionally, and the mfdcr(0x60)==0xfee8 guarded printk()s look like
+ * leftover bring-up debugging — confirm intent before cleaning up.
+ */
+static inline void ppc460ex_desc_set_src_mult( ppc460ex_desc_t *desc,
+			ppc460ex_ch_t *chan, u32 mult_index, int sg_index,
+			unsigned char mult_value)
+{
+	dma_cdb_t *dma_hw_desc;
+	xor_cb_t *xor_hw_desc;
+	u32 *psgu;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		dma_hw_desc = desc->hw_desc;
+
+		switch(sg_index){
+		/* for RXOR operations set multiplier
+		 * into source cued address
+		 */
+		case DMA_CDB_SG_SRC:
+			psgu = &dma_hw_desc->sg1u;
+			break;
+		/* for WXOR operations set multiplier
+		 * into destination cued address(es)
+		 */
+		case DMA_CDB_SG_DST1:
+			psgu = &dma_hw_desc->sg2u;
+			break;
+		case DMA_CDB_SG_DST2:
+			psgu = &dma_hw_desc->sg3u;
+			break;
+		default:
+			BUG();
+		}
+
+		*psgu |= cpu_to_le32(mult_value << mult_index);
+		if(mfdcr(0x60) == 0xfee8) {
+			printk("Line--%d mult_value = 0x%x mult_index=0x%x *psgu=0x%x\n",__LINE__, mult_value,mult_index,*psgu);
+		}
+		*psgu |= cpu_to_le32( 1 << mult_index);
+		if(mfdcr(0x60) == 0xfee8) {
+			printk("Line--%d mult_value = 0x%x mult_index=0x%x *psgu=0x%x\n",__LINE__, mult_value,mult_index,*psgu);
+		}
+		break;
+	case PPC460EX_XOR_ID:
+		xor_hw_desc = desc->hw_desc;
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * ppc460ex_desc_set_dest_addr - set destination address into the descriptor
+ * @desc: descriptor whose CDB destination (sg2 or sg3) is filled in
+ * @chan: channel (selects DMA CDB vs XOR CB layout)
+ * @addrh: high 32 bits of the destination, or 0 to derive both halves
+ *         from @addrl via fixup_bigphys_addr()
+ * @addrl: low 32 bits of the destination
+ * @dst_idx: 0 selects sg2, non-zero selects sg3
+ *
+ * NOTE(review): the low word is assigned (=) but the high word is OR-ed
+ * (|=), presumably to preserve cued-address multiplier bits already set
+ * in sg*u — confirm against ppc460ex_desc_set_src_mult().
+ */
+static inline void ppc460ex_desc_set_dest_addr(ppc460ex_desc_t *desc,
+				ppc460ex_ch_t *chan,
+				dma_addr_t addrh, dma_addr_t addrl,
+				u32 dst_idx)
+{
+	dma_cdb_t *dma_hw_desc;
+	xor_cb_t *xor_hw_desc;
+	phys_addr_t addr64, tmphi, tmplow;
+	u32 *psgu, *psgl;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		if (!addrh) {
+			addr64 = fixup_bigphys_addr(addrl, sizeof(phys_addr_t));
+			tmphi = (addr64 >> 32);
+			tmplow = (addr64 & 0xFFFFFFFF);
+		} else {
+			tmphi = addrh;
+			tmplow = addrl;
+		}
+		dma_hw_desc = desc->hw_desc;
+
+		psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u;
+		psgl = dst_idx ? &dma_hw_desc->sg3l : &dma_hw_desc->sg2l;
+
+		*psgl = cpu_to_le32((u32)tmplow);
+		*psgu |= cpu_to_le32((u32)tmphi);
+		break;
+	case PPC460EX_XOR_ID:
+		/* XOR core: single target address, high half always 0 */
+		xor_hw_desc = desc->hw_desc;
+		xor_hw_desc->cbtal = addrl;
+		xor_hw_desc->cbtah = 0;
+		break;
+	}
+}
+
+/**
+ * ppc460ex_desc_set_byte_count - set number of data bytes involved
+ * into the operation
+ * @desc: descriptor whose CDB count field is written
+ * @chan: channel; only DMA0/1 carry a CDB count, other ids are ignored
+ * @byte_count: transfer length in bytes (stored little-endian)
+ */
+static inline void ppc460ex_desc_set_byte_count(ppc460ex_desc_t *desc,
+		ppc460ex_ch_t *chan, u32 byte_count)
+{
+	dma_cdb_t *dma_hw_desc;
+	int id = chan->device->id;
+
+	if (id == PPC460EX_DMA0_ID || id == PPC460EX_DMA1_ID) {
+		dma_hw_desc = desc->hw_desc;
+		dma_hw_desc->cnt = cpu_to_le32(byte_count);
+	}
+}
+
+/**
+ * ppc460ex_desc_set_rxor_block_size - set RXOR block size
+ * @byte_count: RXOR block size; caller must ensure 512-byte alignment
+ *
+ * Global (per-SoC, not per-descriptor) setting written to the MQ0_CF2H DCR.
+ */
+static inline void ppc460ex_desc_set_rxor_block_size(u32 byte_count)
+{
+	/* assume that byte_count is aligned on the 512-boundary;
+	 * thus write it directly to the register (bits 23:31 are
+	 * reserved there).
+	 */
+	mtdcr(DCRN_MQ0_CF2H, byte_count);
+}
+
+/**
+ * ppc460ex_desc_set_dcheck - set CHECK pattern
+ * @desc: descriptor carrying a DCHECK128 operation
+ * @chan: channel; must be DMA0/1 (anything else is a BUG)
+ * @qword: 16-byte buffer; bytes 0, 4, 8 and 12 are written into the
+ *         sg3/sg2 words as the compare pattern
+ *
+ * NOTE(review): only single bytes qword[0/4/8/12] are written into each
+ * 32-bit register — confirm whether full 32-bit loads were intended.
+ */
+static inline void ppc460ex_desc_set_dcheck(ppc460ex_desc_t *desc,
+		ppc460ex_ch_t *chan, u8 *qword)
+{
+	dma_cdb_t *dma_hw_desc;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		dma_hw_desc = desc->hw_desc;
+		out_le32(&dma_hw_desc->sg3l, qword[0]);
+		out_le32(&dma_hw_desc->sg3u, qword[4]);
+		out_le32(&dma_hw_desc->sg2l, qword[8]);
+		out_le32(&dma_hw_desc->sg2u, qword[12]);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * ppc460ex_xor_set_link - set link address in xor CB
+ * @prev_desc: CB to patch; its link registers point at @next_desc
+ * @next_desc: descriptor to link to; must be non-NULL with a valid
+ *             physical address (BUG otherwise)
+ */
+static inline void ppc460ex_xor_set_link (ppc460ex_desc_t *prev_desc,
+	ppc460ex_desc_t *next_desc)
+{
+	xor_cb_t *xor_hw_desc = prev_desc->hw_desc;
+
+	if (unlikely(!next_desc || !(next_desc->phys))) {
+		printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%x\n",
+			__FUNCTION__, next_desc,
+			next_desc ? next_desc->phys : 0);
+		BUG();
+	}
+
+	xor_hw_desc->cbs = 0;			/* clear status */
+	xor_hw_desc->cblal = next_desc->phys;	/* link address, low */
+	xor_hw_desc->cblah = 0;			/* link address, high */
+	xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;	/* mark CB as linked */
+}
+
+/**
+ * ppc460ex_desc_set_link - set the address of descriptor following this
+ * descriptor in chain
+ * @chan: channel owning both descriptors
+ * @prev_desc: descriptor to chain from; BUG if its hw_next already points
+ *             somewhere else (overwriting would corrupt the chain)
+ * @next_desc: descriptor (possibly head of a sub-chain) to append
+ *
+ * Software chaining (hw_next) is done for all engines; hardware link
+ * registers are only written for the XOR core, and never behind the
+ * last submitted CB.  Runs with local IRQs disabled.
+ */
+static inline void ppc460ex_desc_set_link(ppc460ex_ch_t *chan,
+		ppc460ex_desc_t *prev_desc, ppc460ex_desc_t *next_desc)
+{
+	unsigned long flags;
+	ppc460ex_desc_t *tail = next_desc;
+
+	if (unlikely(!prev_desc || !next_desc ||
+		(prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
+		/* If previous next is overwritten something is wrong.
+		 * though we may refetch from append to initiate list
+		 * processing; in this case - it's ok.
+		 */
+		printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
+			"prev->hw_next=0x%p\n", __FUNCTION__, prev_desc,
+			next_desc, prev_desc ? prev_desc->hw_next : 0);
+		BUG();
+	}
+
+	local_irq_save(flags);
+
+	/* do s/w chaining both for DMA and XOR descriptors */
+	prev_desc->hw_next = next_desc;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		break;
+	case PPC460EX_XOR_ID:
+		/* bind descriptor to the chain */
+		while (tail->hw_next)
+			tail = tail->hw_next;
+		xor_last_linked = tail;
+
+		if (prev_desc == xor_last_submit)
+			/* do not link to the last submitted CB */
+			break;
+		ppc460ex_xor_set_link (prev_desc, next_desc);
+		break;
+	}
+
+	local_irq_restore(flags);
+}
+
+/**
+ * ppc460ex_desc_get_src_addr - extract the source address from the descriptor
+ * @desc: descriptor to inspect
+ * @chan: channel (selects DMA CDB vs XOR CB layout)
+ * @src_idx: which source to return (0..2 for DMA, 0..15 for XOR)
+ *
+ * For DMA CDBs the number of sources depends on the opcode; for cued
+ * (RXOR) descriptors sources 1 and 2 are reconstructed from sg1l plus
+ * unmap_len multiples determined by the RXOR region encoded in sg1u.
+ * Returns 0 for no-op/fill descriptors; BUGs on invalid src_idx/opcode.
+ */
+static inline u32 ppc460ex_desc_get_src_addr(ppc460ex_desc_t *desc,
+					ppc460ex_ch_t *chan, int src_idx)
+{
+	dma_cdb_t *dma_hw_desc;
+	xor_cb_t *xor_hw_desc;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		dma_hw_desc = desc->hw_desc;
+		/* May have 0, 1, 2, or 3 sources */
+		switch (dma_hw_desc->opc) {
+		case DMA_CDB_OPC_NO_OP:
+		case DMA_CDB_OPC_DFILL128:
+			return 0;
+		case DMA_CDB_OPC_DCHECK128:
+			if (unlikely(src_idx)) {
+				printk(KERN_ERR "%s: try to get %d source for"
+				    " DCHECK128\n", __FUNCTION__, src_idx);
+				BUG();
+			}
+			return le32_to_cpu(dma_hw_desc->sg1l);
+		case DMA_CDB_OPC_MULTICAST:
+		case DMA_CDB_OPC_MV_SG1_SG2:
+			if (unlikely(src_idx > 2)) {
+				printk(KERN_ERR "%s: try to get %d source from"
+				    " DMA descr\n", __FUNCTION__, src_idx);
+				BUG();
+			}
+			if (src_idx) {
+				/* cued-address window set => RXOR operand;
+				 * derive the address from the base + stride
+				 */
+				if (le32_to_cpu(dma_hw_desc->sg1u) &
+				    DMA_CUED_XOR_WIN_MSK) {
+					u8 region;
+
+					if (src_idx == 1)
+						return le32_to_cpu(
+						    dma_hw_desc->sg1l) +
+							desc->unmap_len;
+
+					region = (le32_to_cpu(
+					    dma_hw_desc->sg1u)) >>
+						DMA_CUED_REGION_OFF;
+
+					region &= DMA_CUED_REGION_MSK;
+					/* src3 offset depends on the RXOR
+					 * region layout (123/124/125) */
+					switch (region) {
+					case DMA_RXOR123:
+						return le32_to_cpu(
+						    dma_hw_desc->sg1l) +
+							(desc->unmap_len << 1);
+					case DMA_RXOR124:
+						return le32_to_cpu(
+						    dma_hw_desc->sg1l) +
+							(desc->unmap_len * 3);
+					case DMA_RXOR125:
+						return le32_to_cpu(
+						    dma_hw_desc->sg1l) +
+							(desc->unmap_len << 2);
+					default:
+						printk (KERN_ERR
+						    "%s: try to"
+						    " get src3 for region %02x"
+						    "PPC460EX_DESC_RXOR12?\n",
+						    __FUNCTION__, region);
+						BUG();
+					}
+				} else {
+					printk(KERN_ERR
+						"%s: try to get %d"
+						" source for non-cued descr\n",
+						__FUNCTION__, src_idx);
+					BUG();
+				}
+			}
+			return le32_to_cpu(dma_hw_desc->sg1l);
+		default:
+			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+				__FUNCTION__, dma_hw_desc->opc);
+			BUG();
+		}
+		return le32_to_cpu(dma_hw_desc->sg1l);
+	case PPC460EX_XOR_ID:
+		/* May have up to 16 sources */
+		xor_hw_desc = desc->hw_desc;
+		return xor_hw_desc->ops[src_idx].l;
+	}
+	return 0;
+}
+
+/**
+ * ppc460ex_desc_get_dest_addr - extract the destination address from the
+ * descriptor
+ * @desc: descriptor to inspect
+ * @chan: channel (selects DMA CDB vs XOR CB layout)
+ * @idx: destination index — 0 selects sg2, non-zero selects sg3
+ *
+ * Returns 0 for unrecognized engine ids.
+ */
+static inline u32 ppc460ex_desc_get_dest_addr(ppc460ex_desc_t *desc,
+				ppc460ex_ch_t *chan, int idx)
+{
+	dma_cdb_t *dma_hw_desc;
+	xor_cb_t *xor_hw_desc;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		dma_hw_desc = desc->hw_desc;
+		return le32_to_cpu(idx ? dma_hw_desc->sg3l :
+					 dma_hw_desc->sg2l);
+	case PPC460EX_XOR_ID:
+		xor_hw_desc = desc->hw_desc;
+		return xor_hw_desc->cbtal;
+	}
+	return 0;
+}
+
+/**
+ * ppc460ex_desc_get_byte_count - extract the byte count from the descriptor
+ * @desc: descriptor to inspect
+ * @chan: channel (selects DMA CDB vs XOR CB layout); BUG on unknown id
+ */
+static inline u32 ppc460ex_desc_get_byte_count(ppc460ex_desc_t *desc,
+		ppc460ex_ch_t *chan)
+{
+	dma_cdb_t *dma_hw_desc;
+	xor_cb_t *xor_hw_desc;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		dma_hw_desc = desc->hw_desc;
+		return le32_to_cpu(dma_hw_desc->cnt);
+	case PPC460EX_XOR_ID:
+		xor_hw_desc = desc->hw_desc;
+		return xor_hw_desc->cbbc;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+/**
+ * ppc460ex_desc_get_src_num - extract the number of source addresses from
+ * the descriptor
+ * @desc: descriptor to inspect
+ * @chan: channel (selects DMA CDB vs XOR CB layout); BUG on unknown id
+ *
+ * For DMA CDBs the count is derived from the opcode; cued (RXOR)
+ * descriptors encode 2 or 3 sources in the sg1u region field.
+ */
+static inline u32 ppc460ex_desc_get_src_num(ppc460ex_desc_t *desc,
+				ppc460ex_ch_t *chan)
+{
+	dma_cdb_t *dma_hw_desc;
+	xor_cb_t *xor_hw_desc;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		dma_hw_desc = desc->hw_desc;
+
+		switch (dma_hw_desc->opc) {
+		case DMA_CDB_OPC_NO_OP:
+		case DMA_CDB_OPC_DFILL128:
+			return 0;
+		case DMA_CDB_OPC_DCHECK128:
+			return 1;
+		case DMA_CDB_OPC_MV_SG1_SG2:
+		case DMA_CDB_OPC_MULTICAST:
+			/*
+			 * Only for RXOR operations we have more than
+			 * one source
+			 */
+			if (le32_to_cpu(dma_hw_desc->sg1u) &
+			    DMA_CUED_XOR_WIN_MSK) {
+				/* RXOR op, there are 2 or 3 sources */
+				if (((le32_to_cpu(dma_hw_desc->sg1u) >>
+				    DMA_CUED_REGION_OFF) &
+				      DMA_CUED_REGION_MSK) == DMA_RXOR12) {
+					/* RXOR 1-2 */
+					return 2;
+				} else {
+					/* RXOR 1-2-3/1-2-4/1-2-5 */
+					return 3;
+				}
+			}
+			return 1;
+		default:
+			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+				__FUNCTION__, dma_hw_desc->opc);
+			BUG();
+		}
+	case PPC460EX_XOR_ID:
+		/* up to 16 sources */
+		xor_hw_desc = desc->hw_desc;
+		return (xor_hw_desc->cbc & XOR_CDCR_OAC_MSK);
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+/**
+ * ppc460ex_desc_get_dst_num - get the number of destination addresses in
+ * this descriptor
+ * @desc: descriptor to inspect
+ * @chan: channel; BUG on unknown id or unknown DMA opcode
+ *
+ * DMA CDBs have 0 (no-op/check), 1 (move/fill) or 2 (multicast)
+ * destinations; the XOR core always has exactly one.
+ */
+static inline u32 ppc460ex_desc_get_dst_num(ppc460ex_desc_t *desc,
+				ppc460ex_ch_t *chan)
+{
+	dma_cdb_t *dma_hw_desc;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		/* May be 1 or 2 destinations */
+		dma_hw_desc = desc->hw_desc;
+		switch (dma_hw_desc->opc) {
+		case DMA_CDB_OPC_NO_OP:
+		case DMA_CDB_OPC_DCHECK128:
+			return 0;
+		case DMA_CDB_OPC_MV_SG1_SG2:
+		case DMA_CDB_OPC_DFILL128:
+			return 1;
+		case DMA_CDB_OPC_MULTICAST:
+			return 2;
+		default:
+			printk(KERN_ERR "%s: unknown OPC 0x%02x\n",
+				__FUNCTION__, dma_hw_desc->opc);
+			BUG();
+		}
+	case PPC460EX_XOR_ID:
+		/* Always only 1 destination */
+		return 1;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+/**
+ * ppc460ex_desc_get_link - get the address of the descriptor that
+ * follows this one
+ * @desc: descriptor to inspect
+ * @chan: channel (unused; kept for interface symmetry)
+ *
+ * Returns the physical address of the next chained descriptor, or 0
+ * when this is the end of the chain.
+ */
+static inline u32 ppc460ex_desc_get_link(ppc460ex_desc_t *desc,
+		ppc460ex_ch_t *chan)
+{
+	return desc->hw_next ? desc->hw_next->phys : 0;
+}
+
+/**
+ * ppc460ex_desc_is_aligned - check alignment
+ * @desc: descriptor slot whose pool index is tested
+ * @num_slots: required alignment (power of two)
+ *
+ * Returns 1 when the slot index is a multiple of @num_slots, else 0.
+ */
+static inline int ppc460ex_desc_is_aligned(ppc460ex_desc_t *desc,
+		int num_slots)
+{
+	return !(desc->idx & (num_slots - 1));
+}
+
+/**
+ * ppc460ex_chan_xor_slot_count - get the number of slots necessary for
+ * XOR operation
+ * @len: transfer length in bytes; must not exceed
+ *       PPC460EX_ADMA_XOR_MAX_BYTE_COUNT (BUG otherwise)
+ * @src_cnt: number of XOR sources
+ * @slots_per_op: out parameter, also receives the computed slot count
+ *
+ * Each XOR CB takes up to XOR_MAX_OPS source operands, so the count is
+ * ceil(src_cnt / XOR_MAX_OPS).
+ *
+ * BUGFIX: @len is size_t, so the printk must use %zu — passing a size_t
+ * for %d is undefined behavior per the printf contract.
+ */
+static inline int ppc460ex_chan_xor_slot_count(size_t len, int src_cnt,
+		int *slots_per_op)
+{
+	int slot_cnt;
+
+	/* each XOR descriptor provides up to 16 source operands */
+	slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
+
+	if (likely(len <= PPC460EX_ADMA_XOR_MAX_BYTE_COUNT))
+		return slot_cnt;
+
+	printk(KERN_ERR "%s: len %zu > max %d !!\n",
+		__FUNCTION__, len, PPC460EX_ADMA_XOR_MAX_BYTE_COUNT);
+	BUG();
+	return slot_cnt;
+}
+
+/**
+ * ppc460ex_chan_pqxor_slot_count - count the XOR CBs needed for a PQ/XOR
+ * source list
+ * @srcs: array of @src_cnt source bus addresses
+ * @src_cnt: number of sources (must be > 1)
+ * @len: length of each source region in bytes
+ *
+ * Walks adjacent source pairs with a small state machine that detects
+ * contiguous (direct or reverse) RXOR runs:
+ *   state 0 - looking for the start of a run,
+ *   state 1 - one contiguous step seen (direct order=1, reverse order=-1),
+ *   state 2 - run complete, count an address and restart,
+ *   state 3 - sources not contiguous at all; abort the scan.
+ * Each counted "address" consumes one operand; the result is
+ * ceil(addr_count / XOR_MAX_OPS).  BUGs when the sources cannot be
+ * expressed as RXOR runs (see FIXME below).
+ *
+ * NOTE(review): casting dma_addr_t through char* for the adjacency
+ * arithmetic assumes dma_addr_t fits a pointer — confirm on 64-bit-
+ * physical configs.
+ */
+static inline int ppc460ex_chan_pqxor_slot_count (dma_addr_t *srcs,
+		int src_cnt, size_t len)
+{
+	int order = 0;
+	int state = 0;
+	int addr_count = 0;
+	int i;
+	for (i=1; i<src_cnt; i++) {
+		char *cur_addr = (char *)srcs[i];
+		char *old_addr = (char *)srcs[i-1];
+		switch (state) {
+		case 0:
+			if (cur_addr == old_addr + len) {
+				/* direct RXOR */
+				order = 1;
+				state = 1;
+				if (i == src_cnt-1) {
+					addr_count++;
+				}
+			} else if (old_addr == cur_addr + len) {
+				/* reverse RXOR */
+				order = -1;
+				state = 1;
+				if (i == src_cnt-1) {
+					addr_count++;
+				}
+			} else {
+				state = 3;
+			}
+			break;
+		case 1:
+			if (i == src_cnt-2 || (order == -1
+				&& cur_addr != old_addr - len)) {
+				order = 0;
+				state = 0;
+				addr_count++;
+			} else if (cur_addr == old_addr + len*order) {
+				state = 2;
+				if (i == src_cnt-1) {
+					addr_count++;
+				}
+			} else if (cur_addr == old_addr + 2*len) {
+				state = 2;
+				if (i == src_cnt-1) {
+					addr_count++;
+				}
+			} else if (cur_addr == old_addr + 3*len) {
+				state = 2;
+				if (i == src_cnt-1) {
+					addr_count++;
+				}
+			} else {
+				order = 0;
+				state = 0;
+				addr_count++;
+			}
+			break;
+		case 2:
+			order = 0;
+			state = 0;
+			addr_count++;
+			break;
+		}
+		if (state == 3) break;
+	}
+	if (src_cnt <= 1 || (state != 1 && state != 2)) {
+		/* FIXME. return 0 here and check for this when called. */
+		BUG ();
+	}
+
+	return (addr_count + XOR_MAX_OPS - 1) / XOR_MAX_OPS;
+}
+
+
+/******************************************************************************
+ * ADMA channel low-level routines
+ ******************************************************************************/
+
+static inline u32 ppc460ex_chan_get_current_descriptor(ppc460ex_ch_t *chan);
+static inline void ppc460ex_chan_append(ppc460ex_ch_t *chan);
+
+/**
+ * ppc460ex_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
+ * @chan: channel whose completion status is drained
+ *
+ * Pops completed CDB addresses from the DMA status FIFO, clears each
+ * CDB's opcode (required to ack ZeroSum ops), records a finished RXOR,
+ * propagates ZeroSum-failure bits into the matching slot's
+ * xor_check_result, and finally write-clears any error status.
+ *
+ * BUGFIX: the FIFO-drain loop condition was an unparenthesized
+ * assignment ("while (rv = in_le32(...))"); parenthesize and compare
+ * explicitly.  The unused local 'hv' is dropped — the csfph read itself
+ * is what clears the upper status bits, so the read is kept.
+ */
+static inline void ppc460ex_adma_device_clear_eot_status (ppc460ex_ch_t *chan)
+{
+	volatile dma_regs_t *dma_reg;
+	u8 *p = chan->device->dma_desc_pool_virt;
+	dma_cdb_t *cdb;
+	u32 rv, i;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		/* read FIFO to ack */
+		dma_reg = (dma_regs_t *)chan->device->res[0].start;
+		while ((rv = in_le32(&dma_reg->csfpl)) != 0) {
+
+			if ( chan->device->desc_memory == ADMA_DESC_MEM_OCM)
+				/* the read clears the upper bits; value unused */
+				(void)in_le32(&dma_reg->csfph);
+
+			/* physical and virtual addresses of CDBs in the pool
+			 * share the same offsets, so map FIFO entry -> CDB */
+			i = rv & DMA_CDB_ADDR_MSK;
+			cdb = (dma_cdb_t *)&p[i -
+			    (u32)chan->device->dma_desc_pool];
+
+			/* Clear opcode to ack. This is necessary for
+			 * ZeroSum operations only
+			 */
+			cdb->opc = 0;
+
+			if (test_bit(PPC460EX_RXOR_RUN,
+			    &ppc460ex_rxor_state)) {
+				/* probably this is a completed RXOR op,
+				 * get pointer to CDB using the fact that
+				 * physical and virtual addresses of CDB
+				 * in pools have the same offsets
+				 */
+				if (le32_to_cpu(cdb->sg1u) &
+				    DMA_CUED_XOR_BASE) {
+					/* this is a RXOR */
+					clear_bit(PPC460EX_RXOR_RUN,
+						  &ppc460ex_rxor_state);
+				}
+			}
+
+			if (rv & DMA_CDB_STATUS_MSK) {
+				/* ZeroSum check failed
+				 */
+				ppc460ex_desc_t *iter;
+				dma_addr_t phys = rv & ~DMA_CDB_MSK;
+
+				/*
+				 * Update the status of corresponding
+				 * descriptor.
+				 */
+				list_for_each_entry(iter, &chan->chain,
+				    chain_node) {
+					if (iter->phys == phys)
+						break;
+				}
+				/*
+				 * if cannot find the corresponding
+				 * slot it's a bug
+				 */
+				BUG_ON (&iter->chain_node == &chan->chain);
+
+				if (iter->xor_check_result)
+					*iter->xor_check_result |=
+						rv & DMA_CDB_STATUS_MSK;
+			}
+		}
+
+		rv = in_le32(&dma_reg->dsts);
+		if (rv) {
+			printk("DMA%d err status: 0x%x\n", chan->device->id,
+				rv);
+			/* write back to clear */
+			out_le32(&dma_reg->dsts, rv);
+		}
+		break;
+	}
+}
+
+
+/**
+ * ppc460ex_chan_is_busy - get the channel status
+ * @chan: channel to query
+ *
+ * Returns 1 when the engine still has work in flight, 0 when idle.
+ *
+ * BUGFIX: in the XOR branch xor_reg was dereferenced without ever being
+ * initialized (undefined behavior).  Map it from the channel's register
+ * resource, mirroring the DMA branch.
+ * TODO confirm res[0] is the XORcore register window on this board.
+ */
+static inline int ppc460ex_chan_is_busy(ppc460ex_ch_t *chan)
+{
+	int busy = 0;
+	volatile xor_regs_t *xor_reg;
+	volatile dma_regs_t *dma_reg;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		dma_reg = (dma_regs_t *)chan->device->res[0].start;
+		/* if command FIFO's head and tail pointers are equal and
+		 * status tail is the same as command, then channel is free
+		 */
+		if (dma_reg->cpfhp != dma_reg->cpftp ||
+		    dma_reg->cpftp != dma_reg->csftp)
+			busy = 1;
+		break;
+	case PPC460EX_XOR_ID:
+		xor_reg = (xor_regs_t *)chan->device->res[0].start;
+		/* use the operational status bit of the XORcore */
+		busy = (xor_reg->sr & XOR_SR_XCP_BIT) ? 1 : 0;
+		break;
+	}
+
+	return busy;
+}
+
+/**
+ * ppc460ex_chan_set_first_xor_descriptor - init XORcore chain
+ * @chan: XOR channel
+ * @next_desc: first CB the XORcore should fetch
+ *
+ * Points the XORcore link registers at @next_desc, records it as both
+ * the last-linked and last-submitted CB, and marks the h/w chain as
+ * initialized.
+ *
+ * BUGFIX: xor_reg was used with its initialization commented out
+ * (uninitialized pointer dereference).  Map it from the channel's
+ * register resource.  TODO confirm res[0] is the XORcore window.
+ */
+static inline void ppc460ex_chan_set_first_xor_descriptor(ppc460ex_ch_t *chan,
+		ppc460ex_desc_t *next_desc)
+{
+	volatile xor_regs_t *xor_reg;
+
+	xor_reg = (xor_regs_t *)chan->device->res[0].start;
+
+	if (xor_reg->sr & XOR_SR_XCP_BIT)
+		printk(KERN_INFO "%s: Warn: XORcore is running "
+			"when try to set the first CDB!\n",
+			__FUNCTION__);
+
+	xor_last_submit = xor_last_linked = next_desc;
+
+	xor_reg->crsr = XOR_CRSR_64BA_BIT;
+
+	xor_reg->cblalr = next_desc->phys;
+	xor_reg->cblahr = 0;
+	xor_reg->cbcr |= XOR_CBCR_LNK_BIT;
+
+	chan->hw_chain_inited = 1;
+}
+
+/**
+ * ppc460ex_dma_put_desc - put DMA0,1 descriptor to FIFO
+ * @chan: channel whose command pointer FIFO receives the descriptor
+ * @desc: descriptor to push
+ *
+ * Writes the CDB physical address — tagged with the no-interrupt bit
+ * and, for OCM-resident descriptors, the 64-bit-address bit — into the
+ * command FIFO, and remembers the descriptor so the append path can
+ * resume from it.
+ *
+ * BUGFIX: the declaration read "dma_reg = dma_reg = ..." (duplicated
+ * assignment inside the initializer); collapsed to a single clean init.
+ */
+static void ppc460ex_dma_put_desc(ppc460ex_ch_t *chan,
+		ppc460ex_desc_t *desc)
+{
+	u32 pcdb;
+	volatile dma_regs_t *dma_reg =
+		(dma_regs_t *)chan->device->res[0].start;
+
+	pcdb = desc->phys;
+	if (!test_bit(PPC460EX_DESC_INT, &desc->flags))
+		pcdb |= DMA_CDB_NO_INT;
+	if ( chan->device->desc_memory == ADMA_DESC_MEM_OCM)
+		pcdb |= DMA_CDB_64B_ADDR; /* 64 bit */
+	chan_last_sub[chan->device->id] = desc;
+	out_le32 (&dma_reg->cpfpl, pcdb);
+
+	if ( chan->device->desc_memory == ADMA_DESC_MEM_OCM)
+		out_le32 (&dma_reg->cpfph, 0x4); /* upper address bits for OCM */
+}
+
+/**
+ * ppc460ex_chan_append - update the h/w chain in the channel
+ * @chan: channel whose pending s/w descriptors are flushed to the FIFO
+ *
+ * If the engine already has a current descriptor, continue from the
+ * last submitted one; otherwise seed the FIFO with the first CDB and
+ * mark the h/w chain initialized.  Then push every descriptor linked
+ * after that point.  Runs with local IRQs disabled.
+ */
+static inline void ppc460ex_chan_append(ppc460ex_ch_t *chan)
+{
+	volatile dma_regs_t *dma_reg;
+	ppc460ex_desc_t *iter;
+	u32 cur_desc;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		//dma_reg = (dma_regs_t *)chan->device->pdev->resource[0].start;
+		//dma_reg = (dma_regs_t *)chan->device->odev->dev.resource[0].start;
+		dma_reg = (dma_regs_t *)chan->device->res[0].start;
+		cur_desc = ppc460ex_chan_get_current_descriptor(chan);
+
+		if (likely(cur_desc)) {
+			/* engine active: resume after the last pushed CDB */
+			iter = chan_last_sub[chan->device->id];
+			BUG_ON(!iter);
+		} else {
+			/* first peer */
+			iter = chan_first_cdb[chan->device->id];
+			BUG_ON(!iter);
+			ppc460ex_dma_put_desc(chan, iter);
+			chan->hw_chain_inited = 1;
+		}
+
+		/* is there something new to append */
+		if (!iter->hw_next)
+			goto out;
+
+		/* flush descriptors from the s/w queue to fifo */
+		list_for_each_entry_continue(iter, &chan->chain, chain_node) {
+			ppc460ex_dma_put_desc(chan, iter);
+			if (!iter->hw_next)
+				break;
+		}
+		break;
+	}
+out:
+	local_irq_restore(flags);
+}
+
+/**
+ * ppc460ex_chan_get_current_descriptor - get the currently executed descriptor
+ */
+/* Returns the physical address of the CDB currently being executed by
+ * the channel's engine, or 0 when the h/w chain was never started. */
+static inline u32 ppc460ex_chan_get_current_descriptor(ppc460ex_ch_t *chan)
+{
+	volatile dma_regs_t *dma_reg;
+	volatile xor_regs_t *xor_reg;
+
+	if (unlikely(!chan->hw_chain_inited))
+		/* h/w descriptor chain is not initialized yet */
+		return 0;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		dma_reg = (dma_regs_t *)chan->device->res[0].start;
+		/* mask off the control bits to get the CDB address */
+		return (le32_to_cpu(dma_reg->acpl)) & (~DMA_CDB_MSK);
+	case PPC460EX_XOR_ID:
+		/* BUGFIX: xor_reg was dereferenced while uninitialized (the
+		 * mapping line was commented out); map the register block
+		 * from the device resource as the DMA case does. */
+		xor_reg = (xor_regs_t *)chan->device->res[0].start;
+		return xor_reg->ccbalr;
+	}
+	return 0;
+}
+
+/**
+ * ppc460ex_chan_run - enable the channel
+ */
+/* Kick the channel's engine. DMA0/1 run continuously; only the XOR
+ * engine needs an explicit start. */
+static inline void ppc460ex_chan_run(ppc460ex_ch_t *chan)
+{
+	volatile xor_regs_t *xor_reg;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		/* DMAs are always enabled, do nothing */
+		break;
+	case PPC460EX_XOR_ID:
+		/* BUGFIX: xor_reg was dereferenced while uninitialized;
+		 * map the register block before touching CRSR. */
+		xor_reg = (xor_regs_t *)chan->device->res[0].start;
+		/* drain write buffer */
+
+		/* fetch descriptor pointed to in <link> */
+		xor_reg->crsr = XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT;
+		break;
+	}
+}
+
+
+/******************************************************************************
+ * ADMA device level
+ ******************************************************************************/
+
+static void ppc460ex_chan_start_null_xor(ppc460ex_ch_t *chan);
+static int ppc460ex_adma_alloc_chan_resources(struct dma_chan *chan);
+static dma_cookie_t ppc460ex_adma_tx_submit(
+ struct dma_async_tx_descriptor *tx);
+
+static void ppc460ex_adma_set_dest(
+ ppc460ex_desc_t *tx,
+ dma_addr_t addr, int index);
+static void ppc460ex_adma_memcpy_xor_set_src(
+ ppc460ex_desc_t *tx,
+ dma_addr_t addr, int index);
+
+static void ppc460ex_adma_dma01_xor_set_dest(
+ ppc460ex_desc_t *tx,
+ dma_addr_t addr, int index);
+static void ppc460ex_adma_dma01_xor_set_src(
+ ppc460ex_desc_t *tx,
+ dma_addr_t addr, int index);
+static void ppc460ex_adma_dma01_xor_set_src_mult(
+ ppc460ex_desc_t *tx,
+ unsigned char mult, int index);
+
+static void ppc460ex_adma_pqxor_set_dest(
+ ppc460ex_desc_t *tx,
+ dma_addr_t *paddr, unsigned long flags);
+static void ppc460ex_adma_pqxor_set_src(
+ ppc460ex_desc_t *tx,
+ dma_addr_t addr, int index);
+static void ppc460ex_adma_pqxor_set_src_mult (
+ ppc460ex_desc_t *sw_desc,
+ unsigned char mult, int index,int dst_pos);
+
+static void ppc460ex_adma_pqzero_sum_set_dest (
+ ppc460ex_desc_t *sw_desc,
+ dma_addr_t paddr, dma_addr_t qaddr);
+static void ppc460ex_adma_mq_zero_sum_set_dest (
+ ppc460ex_desc_t *sw_desc,
+ dma_addr_t addr);
+static void ppc460ex_adma_pqzero_sum_set_src(
+ ppc460ex_desc_t *tx,
+ dma_addr_t addr, int index);
+static void ppc460ex_adma_pqzero_sum_set_src_mult(
+ ppc460ex_desc_t *tx,
+ unsigned char mult, int index, int dst_pos);
+
+static void ppc460ex_adma_dma2rxor_set_dest (
+ ppc460ex_desc_t *tx,
+ dma_addr_t addr, int index);
+
+void ppc460ex_adma_xor_set_dest(ppc460ex_desc_t *sw_desc,
+ dma_addr_t *addrs, unsigned long flags);
+void ppc460ex_adma_xor_set_src(
+ ppc460ex_desc_t *sw_desc,
+ dma_addr_t addr,
+ int index);
+void ppc460ex_adma_xor_set_src_mult (
+ ppc460ex_desc_t *sw_desc,
+ unsigned char mult, int index,int dst_pos);
+/**
+ * ppc460ex_can_rxor - check if the operands may be processed with RXOR
+ */
+/* Decide whether the given sources form a region the XOR engine can
+ * process with RXOR. Returns 1 if RXOR is possible, 0 otherwise.
+ *
+ * State machine over adjacent source pairs:
+ *   0 - looking for the first contiguous pair; 'order' records the
+ *       direction (+1 forward, -1 reverse);
+ *   1 - pair found, test whether the run extends into one of the
+ *       supported regions (R3/R4/R5 spacings);
+ *   2 - region shape confirmed.
+ */
+static int ppc460ex_can_rxor (struct page **srcs, int src_cnt, size_t len)
+{
+ int i, order = 0, state = 0;
+
+ /* RXOR needs at least two sources */
+ if (unlikely(!(src_cnt > 1)))
+ return 0;
+
+ for (i=1; i<src_cnt; i++) {
+ char *cur_addr = page_address (srcs[i]);
+ char *old_addr = page_address (srcs[i-1]);
+ switch (state) {
+ case 0:
+ if (cur_addr == old_addr + len) {
+ /* direct RXOR */
+ order = 1;
+ state = 1;
+ } else
+ if (old_addr == cur_addr + len) {
+ /* reverse RXOR */
+ order = -1;
+ state = 1;
+ } else
+ goto out;
+ break;
+ case 1:
+ if ((i == src_cnt-2) ||
+ (order == -1 && cur_addr != old_addr - len)) {
+ order = 0;
+ state = 0;
+ } else
+ if ((cur_addr == old_addr + len*order) ||
+ (cur_addr == old_addr + 2*len) ||
+ (cur_addr == old_addr + 3*len)) {
+ state = 2;
+ } else {
+ order = 0;
+ state = 0;
+ }
+ break;
+ case 2:
+ /* already confirmed; restart pattern search for the rest */
+ order = 0;
+ state = 0;
+ break;
+ }
+ }
+
+out:
+ if (state == 1 || state == 2)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * ppc460ex_adma_device_estimate - estimate the efficiency of processing
+ * the operation given on this channel. It's assumed that 'chan' is
+ * capable to process 'cap' type of operation.
+ * @chan: channel to use
+ * @cap: type of transaction
+ * @src_lst: array of source pointers
+ * @src_cnt: number of source operands
+ * @src_sz: size of each source operand
+ */
+int ppc460ex_adma_estimate (struct dma_chan *chan,
+	enum dma_transaction_type cap, struct page **src_lst,
+	int src_cnt, size_t src_sz)
+{
+	int rank = 1;
+
+	/* Refuse outright when the required RAID support is disabled. */
+	if ((cap == DMA_PQ || cap == DMA_PQ_VAL) &&
+	    unlikely(!ppc460ex_r6_enabled))
+		return -1;
+	if (cap == DMA_XOR && unlikely(!ppc460ex_r5_enabled))
+		return -1;
+
+	/* PQ is special: it can run either via the Biskup method on the
+	 * XOR engine (DMA2) or on DMA0/1. Favour DMA2 when the sources
+	 * form an RXOR-capable region; otherwise DMA2 cannot process it
+	 * at all.
+	 */
+	if (cap == DMA_PQ && chan->chan_id == PPC460EX_XOR_ID)
+		rank = ppc460ex_can_rxor(src_lst, src_cnt, src_sz) ? 3 : 0;
+
+	/* an idle channel is preferred over a busy one */
+	if (likely(rank) &&
+	    !ppc460ex_chan_is_busy(to_ppc460ex_adma_chan(chan)))
+		rank++;
+
+	return rank;
+}
+
+/**
+ * ppc460ex_get_group_entry - get group entry with index idx
+ * @tdesc: is the last allocated slot in the group.
+ */
+/* Return the entry_idx'th descriptor of the group that tdesc (the last
+ * allocated slot of the group) belongs to. */
+static inline ppc460ex_desc_t *
+ppc460ex_get_group_entry ( ppc460ex_desc_t *tdesc, u32 entry_idx)
+{
+	ppc460ex_desc_t *iter = NULL;
+	u32 i = 0;
+
+	/* entry_idx is unsigned, so only the upper bound can be violated;
+	 * the original "entry_idx < 0" test could never fire. */
+	if (entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
+		printk(KERN_ERR "%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
+			__func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
+		BUG();
+	}
+	list_for_each_entry(iter, &tdesc->group_list, chain_node) {
+		if (i++ == entry_idx)
+			break;
+	}
+	return iter;
+}
+
+/**
+ * ppc460ex_adma_free_slots - flags descriptor slots for reuse
+ * @slot: Slot to free
+ * Caller must hold &ppc460ex_chan->lock while calling this function
+ */
+static void ppc460ex_adma_free_slots(ppc460ex_desc_t *slot,
+		ppc460ex_ch_t *chan)
+{
+	int remaining = slot->slots_per_op;
+
+	/* Walk the contiguous run of slots that made up this operation and
+	 * mark each one reusable by zeroing its slots_per_op field. */
+	while (remaining-- > 0) {
+		slot->slots_per_op = 0;
+		slot = list_entry(slot->slot_node.next,
+				ppc460ex_desc_t,
+				slot_node);
+	}
+}
+
+/* Unmap all source and destination pages referenced by one descriptor,
+ * honouring the DMA_COMPL_SKIP_*_UNMAP flags set by the client.
+ * Assumes every mapping covers desc->unmap_len bytes. */
+static void
+ppc460ex_adma_unmap(ppc460ex_ch_t *chan, ppc460ex_desc_t *desc)
+{
+ u32 src_cnt, dst_cnt;
+ dma_addr_t addr;
+ /*
+ * get the number of sources & destination
+ * included in this descriptor and unmap
+ * them all
+ */
+ src_cnt = ppc460ex_desc_get_src_num(desc, chan);
+ dst_cnt = ppc460ex_desc_get_dst_num(desc, chan);
+
+ /* unmap destinations (device wrote into them: DMA_FROM_DEVICE) */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ while (dst_cnt--) {
+ addr = ppc460ex_desc_get_dest_addr(
+ desc, chan, dst_cnt);
+ dma_unmap_page(&chan->device->odev->dev,
+ addr, desc->unmap_len,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ /* unmap sources (device read from them: DMA_TO_DEVICE) */
+ if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ while (src_cnt--) {
+ addr = ppc460ex_desc_get_src_addr(
+ desc, chan, src_cnt);
+ dma_unmap_page(&chan->device->odev->dev,
+ addr, desc->unmap_len,
+ DMA_TO_DEVICE);
+ }
+ }
+
+}
+/**
+ * ppc460ex_adma_run_tx_complete_actions - call functions to be called
+ * upon complete
+ */
+static dma_cookie_t ppc460ex_adma_run_tx_complete_actions(
+		ppc460ex_desc_t *desc,
+		ppc460ex_ch_t *chan,
+		dma_cookie_t cookie)
+{
+ int i;
+
+ BUG_ON(desc->async_tx.cookie < 0);
+ /* cookie > 0 means this descriptor is a group tail with a pending
+  * completion; cookie == 0 means it was already completed */
+ if (desc->async_tx.cookie > 0) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ /* call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (desc->async_tx.callback)
+ desc->async_tx.callback(
+ desc->async_tx.callback_param);
+
+ /* unmap dma addresses
+ * (unmap_single vs unmap_page?)
+ *
+ * actually, ppc's dma_unmap_page() functions are empty, so
+ * the following code is just for the sake of completeness
+ */
+ if (chan && chan->needs_unmap && desc->group_head &&
+ desc->unmap_len) {
+ ppc460ex_desc_t *unmap = desc->group_head;
+ /* assume 1 slot per op always */
+ u32 slot_count = unmap->slot_cnt;
+
+ /* Run through the group list and unmap addresses */
+ for (i = 0; i < slot_count; i++) {
+ BUG_ON(!unmap);
+ ppc460ex_adma_unmap(chan, unmap);
+ unmap = unmap->hw_next;
+ }
+ //desc->group_head = NULL;
+ }
+ }
+
+ /* run dependent operations */
+ dma_run_dependencies(&desc->async_tx);
+
+ return cookie;
+}
+
+/**
+ * ppc460ex_adma_clean_slot - clean up CDB slot (if ack is set)
+ */
+/* Try to release one descriptor's slots back to the pool. Returns 1 if
+ * the descriptor must be kept (tail of chain, currently executing, or
+ * an unprocessed zero-sum check), 0 once it has been freed. */
+static int ppc460ex_adma_clean_slot(ppc460ex_desc_t *desc,
+		ppc460ex_ch_t *chan)
+{
+ /* the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx))
+ return 0;
+
+ /* leave the last descriptor in the chain
+ * so we can append to it
+ */
+ if (list_is_last(&desc->chain_node, &chan->chain) ||
+ desc->phys == ppc460ex_chan_get_current_descriptor(chan))
+ return 1;
+
+ if (chan->device->id != PPC460EX_XOR_ID) {
+ /* our DMA interrupt handler clears opc field of
+ * each processed descriptor. For all types of
+ * operations except for ZeroSum we do not actually
+ * need ack from the interrupt handler. ZeroSum is a
+ * special case since the result of this operation
+ * is available from the handler only, so if we see
+ * such type of descriptor (which is unprocessed yet)
+ * then leave it in chain.
+ */
+ dma_cdb_t *cdb = desc->hw_desc;
+ if (cdb->opc == DMA_CDB_OPC_DCHECK128)
+ return 1;
+ }
+
+ dev_dbg(chan->device->common.dev, "\tfree slot %x: %d stride: %d\n",
+ desc->phys, desc->idx, desc->slots_per_op);
+
+ list_del(&desc->chain_node);
+ ppc460ex_adma_free_slots(desc, chan);
+ return 0;
+}
+
+/**
+ * __ppc460ex_adma_slot_cleanup - this is the common clean-up routine
+ * which runs through the channel CDBs list until reach the descriptor
+ * currently processed. When routine determines that all CDBs of group
+ * are completed then corresponding callbacks (if any) are called and slots
+ * are freed.
+ * Caller must hold chan->lock (or run from the tasklet with interrupts
+ * implicitly serialized).
+ */
+static void __ppc460ex_adma_slot_cleanup(ppc460ex_ch_t *chan)
+{
+ ppc460ex_desc_t *iter, *_iter, *group_start = NULL;
+ dma_cookie_t cookie = 0;
+ u32 current_desc = ppc460ex_chan_get_current_descriptor(chan);
+ int busy = ppc460ex_chan_is_busy(chan);
+ int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
+
+ dev_dbg(chan->device->common.dev, "ppc460ex adma%d: %s\n",
+ chan->device->id, __FUNCTION__);
+
+ if (!current_desc) {
+ /* There were no transactions yet, so
+ * nothing to clean
+ */
+ return;
+ }
+
+ /* free completed slots from the chain starting with
+ * the oldest descriptor
+ */
+ list_for_each_entry_safe(iter, _iter, &chan->chain,
+ chain_node) {
+ dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
+ "busy: %d this_desc: %#x next_desc: %#x cur: %#x ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy, iter->phys,
+ ppc460ex_desc_get_link(iter, chan), current_desc,
+ async_tx_test_ack(&iter->async_tx));
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+
+ /* do not advance past the current descriptor loaded into the
+ * hardware channel,subsequent descriptors are either in process
+ * or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /* stop the search if we reach the current descriptor and the
+ * channel is busy, or if it appears that the current descriptor
+ * needs to be re-read (i.e. has been appended to)
+ */
+ if (iter->phys == current_desc) {
+ BUG_ON(seen_current++);
+ if (busy || ppc460ex_desc_get_link(iter, chan)) {
+ /* not all descriptors of the group have
+ * been completed; exit.
+ */
+ break;
+ }
+ }
+
+ /* detect the start of a group transaction */
+ if (!slot_cnt && !slots_per_op) {
+ slot_cnt = iter->slot_cnt;
+ slots_per_op = iter->slots_per_op;
+ if (slot_cnt <= slots_per_op) {
+ /* single-slot operation: nothing to accumulate */
+ slot_cnt = 0;
+ slots_per_op = 0;
+ }
+ }
+
+ if (slot_cnt) {
+ if (!group_start)
+ group_start = iter;
+ slot_cnt -= slots_per_op;
+ }
+
+ /* all the members of a group are complete */
+ if (slots_per_op != 0 && slot_cnt == 0) {
+ ppc460ex_desc_t *grp_iter, *_grp_iter;
+ int end_of_chain = 0;
+
+ /* clean up the group */
+ slot_cnt = group_start->slot_cnt;
+ grp_iter = group_start;
+ list_for_each_entry_safe_from(grp_iter, _grp_iter,
+ &chan->chain, chain_node) {
+
+ cookie = ppc460ex_adma_run_tx_complete_actions(
+ grp_iter, chan, cookie);
+
+ slot_cnt -= slots_per_op;
+ end_of_chain = ppc460ex_adma_clean_slot(
+ grp_iter, chan);
+ if (end_of_chain && slot_cnt) {
+ /* Should wait for ZeroSum complete */
+ if (cookie > 0)
+ chan->completed_cookie = cookie;
+ return;
+ }
+
+ if (slot_cnt == 0 || end_of_chain)
+ break;
+ }
+
+ /* the group should be complete at this point */
+ BUG_ON(slot_cnt);
+
+ slots_per_op = 0;
+ group_start = NULL;
+ if (end_of_chain)
+ break;
+ else
+ continue;
+ } else if (slots_per_op) /* wait for group completion */
+ continue;
+
+ cookie = ppc460ex_adma_run_tx_complete_actions(iter, chan,
+ cookie);
+
+ if (ppc460ex_adma_clean_slot(iter, chan))
+ break;
+ }
+
+ BUG_ON(!seen_current);
+
+ /* publish the newest completed cookie for dma_async_is_complete() */
+ if (cookie > 0) {
+ chan->completed_cookie = cookie;
+ pr_debug("\tcompleted cookie %d\n", cookie);
+#ifdef DEBUG_ADMA
+ static int tcnt=0;
+ if(tcnt%100 == 0)
+ printk("\t <%s> completed cookie %d\n",__FUNCTION__, cookie);
+#endif
+ }
+
+}
+
+/**
+ * ppc460ex_adma_tasklet - clean up watch-dog initiator
+ */
+static void ppc460ex_adma_tasklet (unsigned long data)
+{
+	/* tasklet context: the channel pointer travels in 'data' */
+	ppc460ex_ch_t *adma_chan = (ppc460ex_ch_t *)data;
+
+	__ppc460ex_adma_slot_cleanup(adma_chan);
+}
+
+/**
+ * ppc460ex_adma_slot_cleanup - clean up scheduled initiator
+ */
+static void ppc460ex_adma_slot_cleanup (ppc460ex_ch_t *chan)
+{
+	/* serialize against the tasklet and submit paths, then run the
+	 * common clean-up */
+	spin_lock_bh(&chan->lock);
+	__ppc460ex_adma_slot_cleanup(chan);
+	spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * ppc460ex_adma_alloc_slots - allocate free slots (if any)
+ */
+/* Allocate num_slots contiguous free CDB slots from the channel pool.
+ * Returns the group tail (its group_head points at the first slot), or
+ * NULL when no contiguous run is available; in that case the cleanup
+ * tasklet is kicked to recycle completed slots.
+ * Caller must hold chan->lock. */
+ppc460ex_desc_t *ppc460ex_adma_alloc_slots(
+		ppc460ex_ch_t *chan, int num_slots,
+		int slots_per_op)
+{
+	/* BUGFIX: was "static empty_slot_cnt;" declared mid-function after
+	 * statements — implicit int is invalid C99+; declare it properly
+	 * at the top of the block. Throttles the debug message below. */
+	static int empty_slot_cnt;
+	ppc460ex_desc_t *iter = NULL, *_iter, *alloc_start = NULL;
+	struct list_head chain = LIST_HEAD_INIT(chain);
+	int slots_found, retry = 0;
+
+	BUG_ON(!num_slots || !slots_per_op);
+	/* start search from the last allocated descrtiptor
+	 * if a contiguous allocation can not be found start searching
+	 * from the beginning of the list
+	 */
+retry:
+	slots_found = 0;
+	if (retry == 0)
+		iter = chan->last_used;
+	else
+		iter = list_entry(&chan->all_slots, ppc460ex_desc_t,
+				slot_node);
+	list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
+			slot_node) {
+		prefetch(_iter);
+		prefetch(&_iter->async_tx);
+		if (iter->slots_per_op) {
+			/* slot in use: contiguous run broken */
+			slots_found = 0;
+			continue;
+		}
+
+		/* start the allocation if the slot is correctly aligned */
+		if (!slots_found++)
+			alloc_start = iter;
+		if (slots_found == num_slots) {
+			ppc460ex_desc_t *alloc_tail = NULL;
+			ppc460ex_desc_t *last_used = NULL;
+			iter = alloc_start;
+			while (num_slots) {
+				int i;
+
+				/* pre-ack all but the last descriptor */
+				if (num_slots != slots_per_op) {
+					async_tx_ack(&iter->async_tx);
+				}
+
+				list_add_tail(&iter->chain_node, &chain);
+				alloc_tail = iter;
+				iter->async_tx.cookie = 0;
+				iter->hw_next = NULL;
+				iter->flags = 0;
+				iter->slot_cnt = num_slots;
+				iter->xor_check_result = NULL;
+				for (i = 0; i < slots_per_op; i++) {
+					iter->slots_per_op = slots_per_op - i;
+					last_used = iter;
+					iter = list_entry(iter->slot_node.next,
+							ppc460ex_desc_t,
+							slot_node);
+				}
+				num_slots -= slots_per_op;
+			}
+			alloc_tail->group_head = alloc_start;
+			alloc_tail->async_tx.cookie = -EBUSY;
+			list_splice(&chain, &alloc_tail->group_list);
+			chan->last_used = last_used;
+			return alloc_tail;
+		}
+	}
+	if (!retry++)
+		goto retry;
+	if (!(empty_slot_cnt % 100))
+		dev_dbg(chan->device->common.dev,
+			"No empty slots trying to free some\n");
+	empty_slot_cnt++;
+	/* try to free some slots if the allocation fails */
+	tasklet_schedule(&chan->irq_tasklet);
+	return NULL;
+}
+
+/**
+ * ppc460ex_adma_alloc_chan_resources - allocate pools for CDB slots
+ */
+static int ppc460ex_adma_alloc_chan_resources(struct dma_chan *chan)
+{
+ ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_desc_t *slot = NULL;
+ char *hw_desc;
+ int i, db_sz;
+ /* first call on this channel? then we must init the h/w chain below */
+ int init = ppc460ex_chan->slots_allocated ? 0 : 1;
+ ppc460ex_aplat_t *plat_data;
+
+ chan->chan_id = ppc460ex_chan->device->id;
+ plat_data = ppc460ex_chan->device->odev->dev.platform_data;
+
+ /* Allocate descriptor slots; CDB size differs between the DMA
+  * engines (dma_cdb_t) and the XOR engine (xor_cb_t) */
+ i = ppc460ex_chan->slots_allocated;
+ if (ppc460ex_chan->device->id != PPC460EX_XOR_ID)
+ db_sz = sizeof (dma_cdb_t);
+ else
+ db_sz = sizeof (xor_cb_t);
+
+ for (; i < (plat_data->pool_size/db_sz); i++) {
+ slot = kzalloc(sizeof(ppc460ex_desc_t), GFP_KERNEL);
+ if (!slot) {
+ /* NOTE(review): "i--" inside the printk argument decrements the
+  * slot count being reported AND the loop variable before the
+  * break — confirm this is intentional */
+ printk(KERN_INFO "GT ADMA Channel only initialized"
+ " %d descriptor slots", i--);
+ break;
+ }
+
+ /* s/w descriptor points into the pre-allocated h/w CDB pool,
+  * both by virtual and by bus address */
+ hw_desc = (char *) ppc460ex_chan->device->dma_desc_pool_virt;
+ slot->hw_desc = (void *) &hw_desc[i * db_sz];
+ dma_async_tx_descriptor_init(&slot->async_tx, chan);
+ slot->async_tx.tx_submit = ppc460ex_adma_tx_submit;
+ INIT_LIST_HEAD(&slot->chain_node);
+ INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->group_list);
+ hw_desc = (char *) ppc460ex_chan->device->dma_desc_pool;
+ slot->phys = (dma_addr_t) &hw_desc[i * db_sz];
+ slot->idx = i;
+ spin_lock_bh(&ppc460ex_chan->lock);
+ ppc460ex_chan->slots_allocated++;
+ list_add_tail(&slot->slot_node, &ppc460ex_chan->all_slots);
+ spin_unlock_bh(&ppc460ex_chan->lock);
+ }
+
+ if (i && !ppc460ex_chan->last_used) {
+ ppc460ex_chan->last_used =
+ list_entry(ppc460ex_chan->all_slots.next,
+ ppc460ex_desc_t,
+ slot_node);
+ }
+
+ dev_dbg(ppc460ex_chan->device->common.dev,
+ "ppc460ex adma%d: allocated %d descriptor slots\n",
+ ppc460ex_chan->device->id, i);
+
+ /* initialize the channel and the chain with a null operation */
+ if (init) {
+ switch (ppc460ex_chan->device->id)
+ {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+ ppc460ex_chan->hw_chain_inited = 0;
+ /* Use WXOR for self-testing */
+ if (!ppc460ex_r5_tchan)
+ ppc460ex_r5_tchan = ppc460ex_chan;
+ if (!ppc460ex_r6_tchan)
+ ppc460ex_r6_tchan = ppc460ex_chan;
+ break;
+ case PPC460EX_XOR_ID:
+ ppc460ex_chan_start_null_xor(ppc460ex_chan);
+ break;
+ default:
+ BUG();
+ }
+ ppc460ex_chan->needs_unmap = 1;
+ }
+
+ /* return the number of slots available, or -ENOMEM if none */
+ return (i > 0) ? i : -ENOMEM;
+}
+
+/**
+ * ppc460ex_desc_assign_cookie - assign a cookie
+ */
+static dma_cookie_t ppc460ex_desc_assign_cookie(ppc460ex_ch_t *chan,
+		ppc460ex_desc_t *desc)
+{
+	dma_cookie_t next = chan->common.cookie + 1;
+
+	/* cookies are strictly positive; wrap back to 1 on overflow */
+	if (next < 0)
+		next = 1;
+	desc->async_tx.cookie = next;
+	chan->common.cookie = next;
+	return next;
+}
+
+/**
+ * ppc460ex_rxor_set_region_data -
+ */
+static void ppc460ex_rxor_set_region (ppc460ex_desc_t *desc,
+		u8 xor_arg_no, u32 mask)
+{
+	/* OR the region mask into the high word of the XOR operand slot */
+	xor_cb_t *cb = desc->hw_desc;
+
+	cb->ops[xor_arg_no].h |= mask;
+}
+
+/**
+ * ppc460ex_rxor_set_src -
+ */
+static void ppc460ex_rxor_set_src (ppc460ex_desc_t *desc,
+		u8 xor_arg_no, dma_addr_t addr)
+{
+	/* mark the operand slot as a cued base address and store it */
+	xor_cb_t *cb = desc->hw_desc;
+
+	cb->ops[xor_arg_no].h |= DMA_CUED_XOR_BASE;
+	cb->ops[xor_arg_no].l = addr;
+}
+
+/**
+ * ppc460ex_rxor_set_mult -
+ */
+static void ppc460ex_rxor_set_mult (ppc460ex_desc_t *desc,
+		u8 xor_arg_no, u8 idx, u8 mult)
+{
+	/* each multiplier occupies an 8-bit lane starting at MULT1_OFF */
+	xor_cb_t *cb = desc->hw_desc;
+	unsigned int shift = DMA_CUED_MULT1_OFF + idx * 8;
+
+	cb->ops[xor_arg_no].h |= mult << shift;
+}
+
+/**
+ * ppc460ex_wxor_set_base
+ */
+static void ppc460ex_wxor_set_base (ppc460ex_desc_t *desc)
+{
+	xor_cb_t *cb = desc->hw_desc;
+
+	/* cued base address with an implicit multiplier of 1 */
+	cb->cbtah = DMA_CUED_XOR_BASE | (1 << DMA_CUED_MULT1_OFF);
+}
+
+/**
+ * ppc460ex_adma_check_threshold - append CDBs to h/w chain if threshold
+ * has been achieved
+ */
+static void ppc460ex_adma_check_threshold(ppc460ex_ch_t *chan)
+{
+	dev_dbg(chan->device->common.dev, "ppc460ex adma%d: pending: %d\n",
+		chan->device->id, chan->pending);
+
+	/* flush the s/w queue to hardware once enough work accumulated */
+	if (chan->pending < PPC460EX_ADMA_THRESHOLD)
+		return;
+
+	chan->pending = 0;
+	ppc460ex_chan_append(chan);
+}
+
+/**
+ * ppc460ex_adma_tx_submit - submit new descriptor group to the channel
+ * (it's not necessary that descriptors will be submitted to the h/w
+ * chains too right now)
+ */
+static dma_cookie_t ppc460ex_adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ ppc460ex_desc_t *sw_desc = tx_to_ppc460ex_adma_slot(tx);
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(tx->chan);
+ ppc460ex_desc_t *group_start, *old_chain_tail;
+ int slot_cnt;
+ int slots_per_op;
+ dma_cookie_t cookie;
+
+ group_start = sw_desc->group_head;
+ slot_cnt = group_start->slot_cnt;
+ slots_per_op = group_start->slots_per_op;
+
+ spin_lock_bh(&chan->lock);
+
+ cookie = ppc460ex_desc_assign_cookie(chan, sw_desc);
+
+ if (unlikely(list_empty(&chan->chain))) {
+ /* first peer */
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ chan_first_cdb[chan->device->id] = group_start;
+ } else {
+ /* isn't first peer, bind CDBs to chain */
+ old_chain_tail = list_entry(chan->chain.prev,
+ ppc460ex_desc_t, chain_node);
+ list_splice_init(&sw_desc->group_list,
+ &old_chain_tail->chain_node);
+ /* fix up the hardware chain: link old tail to new group head */
+ ppc460ex_desc_set_link(chan, old_chain_tail, group_start);
+ }
+
+ /* increment the pending count by the number of operations */
+ chan->pending += slot_cnt / slots_per_op;
+ /* this may flush the queued CDBs to the hardware FIFO */
+ ppc460ex_adma_check_threshold(chan);
+ spin_unlock_bh(&chan->lock);
+
+ dev_dbg(chan->device->common.dev,
+ "ppc460ex adma%d: %s cookie: %d slot: %d tx %p\n",
+ chan->device->id,__FUNCTION__,
+ sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
+ return cookie;
+}
+
+/**
+ * ppc460ex_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_interrupt(
+		struct dma_chan *chan, unsigned long flags)
+{
+ ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ dev_dbg(ppc460ex_chan->device->common.dev,
+ "ppc460ex adma%d: %s\n", ppc460ex_chan->device->id,
+ __FUNCTION__);
+
+ spin_lock_bh(&ppc460ex_chan->lock);
+ /* an interrupt pseudo-op needs exactly one CDB slot */
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc460ex_adma_alloc_slots(ppc460ex_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc460ex_desc_init_interrupt(group_start, ppc460ex_chan);
+ /* no data transfer, so nothing to unmap on completion */
+ group_start->unmap_len = 0;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&ppc460ex_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc460ex_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_memcpy(
+		struct dma_chan *chan, dma_addr_t dma_dest,
+		dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > PPC460EX_ADMA_DMA_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&ppc460ex_chan->lock);
+
+ dev_dbg(ppc460ex_chan->device->common.dev,
+ "ppc460ex adma%d: %s len: %u int_en %d \n",
+ ppc460ex_chan->device->id, __FUNCTION__, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ /* a single copy fits into one CDB slot */
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc460ex_adma_alloc_slots(ppc460ex_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ ppc460ex_desc_init_memcpy(group_start, flags);
+ ppc460ex_adma_set_dest(group_start, dma_dest, 0);
+ ppc460ex_adma_memcpy_xor_set_src(group_start, dma_src, 0);
+ ppc460ex_desc_set_byte_count(group_start, ppc460ex_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ /* NOTE(review): leftover debug code keyed to a magic DCR value
+  * (mfdcr(0x60) == 0xfee8) — presumably a board bring-up aid;
+  * consider removing */
+ if(mfdcr(0x60) == 0xfee8) {
+ printk("Byte Count = 0x%x\n",len);
+ printk("src= 0x%x\n",dma_src);
+ printk("Dest = 0x%x\n",dma_dest);
+ }
+ }
+ spin_unlock_bh(&ppc460ex_chan->lock);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc460ex_adma_prep_dma_memset - prepare CDB for a MEMSET operation
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_memset(
+		struct dma_chan *chan, dma_addr_t dma_dest, int value,
+		size_t len, unsigned long flags)
+{
+	ppc460ex_ch_t *adma_chan = to_ppc460ex_adma_chan(chan);
+	ppc460ex_desc_t *sw_desc, *group_start;
+
+	if (unlikely(!len))
+		return NULL;
+	BUG_ON(unlikely(len > PPC460EX_ADMA_DMA_MAX_BYTE_COUNT));
+
+	spin_lock_bh(&adma_chan->lock);
+
+	dev_dbg(adma_chan->device->common.dev,
+		"ppc460ex adma%d: %s cal: %u len: %u int_en %d\n",
+		adma_chan->device->id, __FUNCTION__, value, len,
+		flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+	/* a memset always fits into a single CDB slot */
+	sw_desc = ppc460ex_adma_alloc_slots(adma_chan, 1, 1);
+	if (sw_desc) {
+		group_start = sw_desc->group_head;
+		ppc460ex_desc_init_memset(group_start, value, flags);
+		ppc460ex_adma_set_dest(group_start, dma_dest, 0);
+		ppc460ex_desc_set_byte_count(group_start, adma_chan, len);
+		sw_desc->unmap_len = len;
+		sw_desc->async_tx.flags = flags;
+	}
+	spin_unlock_bh(&adma_chan->lock);
+
+	return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+
+static inline void ppc460ex_desc_set_xor_src_cnt (ppc460ex_desc_t *desc,
+ int src_cnt);
+static void ppc460ex_init_rxor_cursor (ppc460ex_rxor_cursor_t *cursor);
+
+/**
+ * ppc460ex_adma_init_dma2rxor_slot -
+ */
+static void ppc460ex_adma_init_dma2rxor_slot (ppc460ex_desc_t *desc,
+		dma_addr_t *src, int src_cnt)
+{
+	int idx;
+
+	/* feed every source address through the RXOR cursor, in order */
+	for (idx = 0; idx < src_cnt; idx++)
+		ppc460ex_adma_dma2rxor_prep_src(desc,
+				&desc->rxor_cursor,
+				idx, desc->src_cnt,
+				(u32)src[idx]);
+}
+#if 1
+/* Build the descriptor chain for a P/Q (RAID-6 parity) operation,
+ * choosing between RXOR (contiguous source regions, XOR engine) and
+ * WXOR. Returns the allocated group tail or NULL. */
+static inline ppc460ex_desc_t *ppc460ex_dma_prep_pq(
+		ppc460ex_ch_t *ppc460ex_chan,
+		dma_addr_t *dst, unsigned int dst_cnt,
+		dma_addr_t *src, unsigned int src_cnt, unsigned char *scf,
+		size_t len, unsigned long flags)
+{
+	int slot_cnt;
+	ppc460ex_desc_t *sw_desc = NULL, *iter;
+	unsigned long op = 0;
+
+	/* select operations WXOR/RXOR depending on the
+	 * source addresses of operators and the number
+	 * of destinations (RXOR support only Q-parity calculations)
+	 */
+	set_bit(PPC460EX_DESC_WXOR, &op);
+	if (!test_and_set_bit(PPC460EX_RXOR_RUN, &ppc460ex_rxor_state)) {
+		/* no active RXOR;
+		 * do RXOR if:
+		 * - destination os only one,
+		 * - there are more than 1 source,
+		 * - len is aligned on 512-byte boundary,
+		 * - source addresses fit to one of 4 possible regions.
+		 */
+		if (dst_cnt == 1 && src_cnt > 1 &&
+		    !(len & ~MQ0_CF2H_RXOR_BS_MASK) &&
+		    (src[0] + len) == src[1]) {
+			/* may do RXOR R1 R2 */
+			set_bit(PPC460EX_DESC_RXOR, &op);
+			if (src_cnt != 2) {
+				/* may try to enhance region of RXOR */
+				if ((src[1] + len) == src[2]) {
+					/* do RXOR R1 R2 R3 */
+					set_bit(PPC460EX_DESC_RXOR123,
+						&op);
+				} else if ((src[1] + len * 2) == src[2]) {
+					/* do RXOR R1 R2 R4 */
+					set_bit(PPC460EX_DESC_RXOR124, &op);
+				} else if ((src[1] + len * 3) == src[2]) {
+					/* do RXOR R1 R2 R5 */
+					set_bit(PPC460EX_DESC_RXOR125,
+						&op);
+				} else {
+					/* do RXOR R1 R2 */
+					set_bit(PPC460EX_DESC_RXOR12,
+						&op);
+				}
+			} else {
+				/* do RXOR R1 R2 */
+				set_bit(PPC460EX_DESC_RXOR12, &op);
+			}
+		}
+
+		if (!test_bit(PPC460EX_DESC_RXOR, &op)) {
+			/* can not do this operation with RXOR */
+			clear_bit(PPC460EX_RXOR_RUN,
+				&ppc460ex_rxor_state);
+		} else {
+			/* can do; set block size right now */
+			ppc460ex_desc_set_rxor_block_size(len);
+		}
+	}
+
+	/* Number of necessary slots depends on operation type selected */
+	if (!test_bit(PPC460EX_DESC_RXOR, &op)) {
+		/* This is a WXOR only chain. Need descriptors for each
+		 * source to GF-XOR them with WXOR, and need descriptors
+		 * for each destination to zero them with WXOR
+		 */
+		slot_cnt = src_cnt;
+
+		if (flags & DMA_PREP_ZERO_P) {
+			slot_cnt++;
+			set_bit(PPC460EX_ZERO_P, &op);
+		}
+		if (flags & DMA_PREP_ZERO_Q) {
+			slot_cnt++;
+			set_bit(PPC460EX_ZERO_Q, &op);
+		}
+	} else {
+		/* Need 1/2 descriptor for RXOR operation, and
+		 * need (src_cnt - (2 or 3)) for WXOR of sources
+		 * remained (if any)
+		 */
+		slot_cnt = dst_cnt;
+
+		if (flags & DMA_PREP_ZERO_P)
+			set_bit(PPC460EX_ZERO_P, &op);
+		if (flags & DMA_PREP_ZERO_Q)
+			set_bit(PPC460EX_ZERO_Q, &op);
+
+		if (test_bit(PPC460EX_DESC_RXOR12, &op))
+			slot_cnt += src_cnt - 2;
+		else
+			slot_cnt += src_cnt - 3;
+
+		/* Thus we have either RXOR only chain or
+		 * mixed RXOR/WXOR
+		 */
+		if (slot_cnt == dst_cnt) {
+			/* RXOR only chain */
+			clear_bit(PPC460EX_DESC_WXOR, &op);
+		}
+	}
+
+	spin_lock_bh(&ppc460ex_chan->lock);
+	/* for both RXOR/WXOR each descriptor occupies one slot */
+	sw_desc = ppc460ex_adma_alloc_slots(ppc460ex_chan, slot_cnt, 1);
+	if (sw_desc) {
+		ppc460ex_desc_init_pq(sw_desc, dst_cnt, src_cnt,
+			flags, op);
+
+		/* setup dst/src/mult */
+		ppc460ex_adma_pqxor_set_dest(sw_desc,
+			dst, flags);
+		while (src_cnt--) {
+			ppc460ex_adma_pqxor_set_src(sw_desc,
+				src[src_cnt], src_cnt);
+			/* BUGFIX: the condition read "&& !scf" and then
+			 * dereferenced scf[src_cnt] — a guaranteed NULL
+			 * dereference whenever it fired. Multipliers only
+			 * apply when a coefficient array IS provided. */
+			if ((flags & DMA_PREP_HAVE_Q) && scf) {
+				ppc460ex_adma_pqxor_set_src_mult(sw_desc,
+					scf[src_cnt], src_cnt, dst_cnt - 1);
+			}
+		}
+
+		/* Setup byte count foreach slot just allocated */
+		sw_desc->async_tx.flags = flags;
+		list_for_each_entry(iter, &sw_desc->group_list,
+				chain_node) {
+			/* NOTE(review): leftover bring-up debug keyed to a
+			 * magic DCR value; kept as-is */
+			if (mfdcr(0x60) == 0xfee8) {
+				printk("Byte Count = 0x%x\n",len);
+				printk("src[0]= 0x%x\n",src[0]);
+				printk("src[1]= 0x%x\n",src[1]);
+				printk("src[2]= 0x%x\n",src[2]);
+				printk("Dest = 0x%x\n",dst);
+			}
+			ppc460ex_desc_set_byte_count(iter,
+				ppc460ex_chan, len);
+			iter->unmap_len = len;
+		}
+	}
+	spin_unlock_bh(&ppc460ex_chan->lock);
+
+	return sw_desc;
+}
+
+#endif
+/* Build the descriptor chain for an XOR operation, choosing between
+ * RXOR (contiguous source regions) and WXOR, mirroring the P/Q path
+ * but with a single parity destination. */
+static inline ppc460ex_desc_t *ppc460ex_dma_prep_xor(
+		ppc460ex_ch_t *ppc460ex_chan,
+		dma_addr_t *dst, unsigned int dst_cnt,
+		dma_addr_t *src, unsigned int src_cnt, unsigned char *scf,
+		size_t len, unsigned long flags)
+{
+ int slot_cnt;
+ ppc460ex_desc_t *sw_desc = NULL, *iter;
+ unsigned long op = 0;
+
+ /* select operations WXOR/RXOR depending on the
+ * source addresses of operators and the number
+ * of destinations (RXOR support only Q-parity calculations)
+ */
+ set_bit(PPC460EX_DESC_WXOR, &op);
+ if (!test_and_set_bit(PPC460EX_RXOR_RUN, &ppc460ex_rxor_state)) {
+ /* no active RXOR;
+ * do RXOR if:
+ * - destination os only one,
+ * - there are more than 1 source,
+ * - len is aligned on 512-byte boundary,
+ * - source addresses fit to one of 4 possible regions.
+ */
+ /* NOTE(review): this tests dst_cnt == 2 where the PQ variant
+  * tests dst_cnt == 1 — confirm this asymmetry is intended */
+ if (dst_cnt == 2 && src_cnt > 1 &&
+ !(len & ~MQ0_CF2H_RXOR_BS_MASK) &&
+ (src[0] + len) == src[1]) {
+ /* may do RXOR R1 R2 */
+ set_bit(PPC460EX_DESC_RXOR, &op);
+ if (src_cnt != 2) {
+ /* may try to enhance region of RXOR */
+ if ((src[1] + len) == src[2]) {
+ /* do RXOR R1 R2 R3 */
+ set_bit(PPC460EX_DESC_RXOR123,
+ &op);
+ } else if ((src[1] + len * 2) == src[2]) {
+ /* do RXOR R1 R2 R4 */
+ set_bit(PPC460EX_DESC_RXOR124, &op);
+ } else if ((src[1] + len * 3) == src[2]) {
+ /* do RXOR R1 R2 R5 */
+ set_bit(PPC460EX_DESC_RXOR125,
+ &op);
+ } else {
+ /* do RXOR R1 R2 */
+ set_bit(PPC460EX_DESC_RXOR12,
+ &op);
+ }
+ } else {
+ /* do RXOR R1 R2 */
+ set_bit(PPC460EX_DESC_RXOR12, &op);
+ }
+ }
+
+ if (!test_bit(PPC460EX_DESC_RXOR, &op)) {
+ /* can not do this operation with RXOR */
+ clear_bit(PPC460EX_RXOR_RUN,
+ &ppc460ex_rxor_state);
+ } else {
+ /* can do; set block size right now */
+ ppc460ex_desc_set_rxor_block_size(len);
+ }
+ }
+
+ /* Number of necessary slots depends on operation type selected */
+ if (!test_bit(PPC460EX_DESC_RXOR, &op)) {
+ /* This is a WXOR only chain. Need descriptors for each
+ * source to GF-XOR them with WXOR, and need descriptors
+ * for each destination to zero them with WXOR
+ */
+ slot_cnt = src_cnt;
+
+ if (flags & DMA_PREP_ZERO_P) {
+ slot_cnt++;
+ set_bit(PPC460EX_ZERO_P, &op);
+ }
+ } else {
+ /* Need 1/2 descriptor for RXOR operation, and
+ * need (src_cnt - (2 or 3)) for WXOR of sources
+ * remained (if any)
+ */
+ slot_cnt = dst_cnt;
+
+ if (flags & DMA_PREP_ZERO_P)
+ set_bit(PPC460EX_ZERO_P, &op);
+
+ if (test_bit(PPC460EX_DESC_RXOR12, &op))
+ slot_cnt += src_cnt - 2;
+ else
+ slot_cnt += src_cnt - 3;
+
+ /* Thus we have either RXOR only chain or
+ * mixed RXOR/WXOR
+ */
+ if (slot_cnt == dst_cnt) {
+ /* RXOR only chain */
+ clear_bit(PPC460EX_DESC_WXOR, &op);
+ }
+ }
+
+ spin_lock_bh(&ppc460ex_chan->lock);
+ /* for both RXOR/WXOR each descriptor occupies one slot */
+ sw_desc = ppc460ex_adma_alloc_slots(ppc460ex_chan, slot_cnt, 1);
+ if (sw_desc) {
+ ppc460ex_desc_init_xor(sw_desc, dst_cnt, src_cnt,
+ flags, op);
+
+ /* setup dst/src/mult */
+ ppc460ex_adma_xor_set_dest(sw_desc,
+ dst, flags);
+ while(src_cnt--) {
+ ppc460ex_adma_xor_set_src(sw_desc,
+ src[src_cnt], src_cnt);
+ }
+
+ /* Setup byte count foreach slot just allocated */
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list,
+ chain_node) {
+ /* NOTE(review): leftover bring-up debug keyed to a magic
+  * DCR value; consider removing */
+ if(mfdcr(0x60) == 0xfee8) {
+ printk("Byte Count = 0x%x\n",len);
+ printk("src[0]= 0x%x\n",src[0]);
+ printk("src[1]= 0x%x\n",src[1]);
+ printk("src[2]= 0x%x\n",src[2]);
+ printk("Dest = 0x%x\n",dst);
+ }
+ ppc460ex_desc_set_byte_count(iter,
+ ppc460ex_chan, len);
+ iter->unmap_len = len;
+ }
+ }
+ spin_unlock_bh(&ppc460ex_chan->lock);
+
+ return sw_desc;
+}
+static inline ppc460ex_desc_t *ppc460ex_dma01_prep_xor (
+ ppc460ex_ch_t *ppc460ex_chan,
+ dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, unsigned long flags)
+{
+ int slot_cnt;
+ ppc460ex_desc_t *sw_desc = NULL, *iter;
+ unsigned long op = 0;
+ unsigned int dst_cnt = 1; /*Marri */
+ /* select operations WXOR/RXOR depending on the
+ * source addresses of operators and the number
+ * of destinations (RXOR support only Q-parity calculations)
+ */
+ set_bit(PPC460EX_DESC_WXOR, &op);
+ if (!test_and_set_bit(PPC460EX_RXOR_RUN, &ppc460ex_rxor_state)) {
+ /* no active RXOR;
+ * do RXOR if:
+ * - destination os only one,
+ * - there are more than 1 source,
+ * - len is aligned on 512-byte boundary,
+ * - source addresses fit to one of 4 possible regions.
+ */
+ if (dst_cnt == 3 && src_cnt > 1 && /* Marri dstcnt == 3 never comes here */
+ !(len & ~MQ0_CF2H_RXOR_BS_MASK) &&
+ (src[0] - len) == src[1]) { /* Marri */
+ /* may do RXOR R1 R2 */
+ set_bit(PPC460EX_DESC_RXOR, &op);
+ if (src_cnt != 2) {
+ /* may try to enhance region of RXOR */
+ if ((src[1] - len) == src[2]) {
+ /* do RXOR R1 R2 R3 */
+ set_bit(PPC460EX_DESC_RXOR123,
+ &op);
+ } else if ((src[1] - len * 2) == src[2]) {
+ /* do RXOR R1 R2 R4 */
+ set_bit(PPC460EX_DESC_RXOR124, &op);
+ } else if ((src[1] - len * 3) == src[2]) {
+ /* do RXOR R1 R2 R5 */
+ set_bit(PPC460EX_DESC_RXOR125,
+ &op);
+ } else {
+ /* do RXOR R1 R2 */
+ set_bit(PPC460EX_DESC_RXOR12,
+ &op);
+ }
+ } else {
+ /* do RXOR R1 R2 */
+ set_bit(PPC460EX_DESC_RXOR12, &op);
+ }
+ }
+
+ if (!test_bit(PPC460EX_DESC_RXOR, &op)) {
+ /* can not do this operation with RXOR */
+ clear_bit(PPC460EX_RXOR_RUN,
+ &ppc460ex_rxor_state);
+ } else {
+ /* can do; set block size right now */
+ ppc460ex_desc_set_rxor_block_size(len);
+ }
+ }
+
+ /* Number of necessary slots depends on operation type selected */
+ if (!test_bit(PPC460EX_DESC_RXOR, &op)) {
+ /* This is a WXOR only chain. Need descriptors for each
+ * source to GF-XOR them with WXOR, and need descriptors
+ * for each destination to zero them with WXOR
+ */
+ slot_cnt = src_cnt;
+
+#if 1
+ if (flags & DMA_PREP_ZERO_P) {
+ slot_cnt += dst_cnt;
+ set_bit(PPC460EX_ZERO_P, &op);
+ }
+#endif /* RAID-6 stuff*/
+ } else {
+ /* Need 1 descriptor for RXOR operation, and
+ * need (src_cnt - (2 or 3)) for WXOR of sources
+ * remained (if any)
+ * Thus we have 1 CDB for RXOR, let the set_dst
+ * function think that this is just a zeroing descriptor
+ * and skip it when walking through the chain.
+ * So set PPC460EX_ZERO_P.
+ */
+ set_bit(PPC460EX_ZERO_P, &op);
+
+ if (test_bit(PPC460EX_DESC_RXOR12, &op)) {
+ slot_cnt = src_cnt - 1;
+ } else {
+ slot_cnt = src_cnt - 2;
+ }
+
+ /* Thus we have either RXOR only chain or
+ * mixed RXOR/WXOR
+ */
+ if (slot_cnt == 1) {
+ /* RXOR only chain */
+ clear_bit(PPC460EX_DESC_WXOR, &op);
+ }
+ }
+
+ spin_lock_bh(&ppc460ex_chan->lock);
+ /* for both RXOR/WXOR each descriptor occupies one slot */
+ sw_desc = ppc460ex_adma_alloc_slots(ppc460ex_chan, slot_cnt, 1);
+ if (sw_desc) {
+ ppc460ex_desc_init_dma01_xor(sw_desc, dst_cnt, src_cnt,
+ flags, op);
+
+ /* setup dst/src/mult */
+ while(dst_cnt--)
+ ppc460ex_adma_dma01_xor_set_dest(sw_desc,
+ dst, dst_cnt);
+ //dst[dst_cnt], dst_cnt); /*marri */
+ while(src_cnt--) {
+ ppc460ex_adma_dma01_xor_set_src(sw_desc,
+ src[src_cnt], src_cnt);
+ ppc460ex_adma_dma01_xor_set_src_mult(sw_desc,
+ 1, src_cnt);/* Marri forcing RAID-5*/
+ /*scf[src_cnt], src_cnt);*/
+ }
+
+ /* Setup byte count foreach slot just allocated */
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list,
+ chain_node) {
+ if(mfdcr(0x60) == 0xfee8) {
+ printk("Byte Count = 0x%x\n",len);
+ printk("src[0]= 0x%x\n",src[0]);
+ printk("src[1]= 0x%x\n",src[1]);
+ printk("src[2]= 0x%x\n",src[2]);
+ printk("Dest = 0x%x\n",dst);
+ }
+ ppc460ex_desc_set_byte_count(iter,
+ ppc460ex_chan, len);
+ iter->unmap_len = len;
+ }
+ }
+
+ spin_unlock_bh(&ppc460ex_chan->lock);
+
+ return sw_desc;
+}
+#if 1
+/**
+ * ppc460ex_adma_prep_dma_pq- prepare CDB (group) for a GF-XOR operation
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_pq(
+ struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
+ unsigned int src_cnt, unsigned char *scf,
+ size_t len, unsigned long flags)
+{
+ ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_desc_t *sw_desc = NULL;
+ int dst_cnt = 0;
+
+ BUG_ON(!len);
+ BUG_ON(unlikely(len > PPC460EX_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(!src_cnt);
+
+ if (flags & DMA_PREP_HAVE_P) {
+ BUG_ON(!dst[0]);
+ dst_cnt++;
+ } else
+ BUG_ON(flags & DMA_PREP_ZERO_P);
+ if (flags & DMA_PREP_HAVE_Q) {
+ BUG_ON(!dst[1]);
+ dst_cnt++;
+ } else
+ BUG_ON(flags & DMA_PREP_ZERO_Q);
+ BUG_ON(!dst_cnt);
+
+ dev_dbg(ppc460ex_chan->device->common.dev,
+ "ppc460ex adma%d: %s src_cnt: %d len: %u int_en: %d\n",
+ ppc460ex_chan->device->id, __FUNCTION__, src_cnt, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ switch (ppc460ex_chan->device->id) {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+ sw_desc = ppc460ex_dma_prep_pq(ppc460ex_chan,
+ dst, dst_cnt, src, src_cnt, scf,
+ len, flags);
+ break;
+
+ }
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+#endif
+/**
+ * ppc460ex_adma_prep_dma_mq_xor - prepare CDB (group) for a GF-XOR operation
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_mq_xor(
+ struct dma_chan *chan, dma_addr_t dst,
+ dma_addr_t *src, unsigned int src_cnt,
+ size_t len, unsigned long flags)
+{
+ ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_desc_t *sw_desc = NULL;
+ int dst_cnt = 1;
+
+ BUG_ON(!len);
+ BUG_ON(unlikely(len > PPC460EX_ADMA_XOR_MAX_BYTE_COUNT));
+ BUG_ON(!src_cnt );
+
+// printk("<%s> line %d\n",__FUNCTION__,__LINE__);
+ dev_dbg(ppc460ex_chan->device->common.dev,
+ "ppc460ex adma%d: %s src_cnt: %d len: %u int_en: %d\n",
+ ppc460ex_chan->device->id, __FUNCTION__, src_cnt, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ switch (ppc460ex_chan->device->id) {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+#if 0
+ sw_desc = ppc460ex_dma01_prep_xor (ppc460ex_chan,
+ dst, src, src_cnt,
+ len, flags);
+#else
+ sw_desc = ppc460ex_dma_prep_xor(ppc460ex_chan,
+ &dst, dst_cnt, src, src_cnt, 0,
+ len, flags);
+#endif
+ break;
+
+ }
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc460ex_adma_prep_dma_pqzero_sum - prepare CDB group for
+ * a PQ_VAL operation
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_pqzero_sum(
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ unsigned char *scf, size_t len,
+ u32 *pqres, unsigned long flags)
+{
+ ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_desc_t *sw_desc, *iter;
+ dma_addr_t pdest, qdest;
+ int slot_cnt, slots_per_op, idst, dst_cnt;
+
+ if (flags & DMA_PREP_HAVE_P)
+ pdest = src[src_cnt];
+ else
+ pdest = 0;
+ if (flags & DMA_PREP_HAVE_Q)
+ qdest = src[src_cnt+1];
+ else
+ qdest = 0;
+
+ BUG_ON(src_cnt < 3 );
+
+ /* Always use WXOR for P/Q calculations (two destinations).
+ * Need two extra slots to verify results are zero. Since src_cnt
+ * is the size of the src[] buffer (which includes destination
+ * pointers at the first and/or second positions) then the number
+ * of actual sources should be reduced by DMA_DEST_MAX_NUM (2).
+ */
+ idst = dst_cnt = (pdest && qdest) ? 2 : 1;
+
+ slot_cnt = src_cnt + dst_cnt;
+ slots_per_op = 1;
+
+ spin_lock_bh(&ppc460ex_chan->lock);
+ sw_desc = ppc460ex_adma_alloc_slots(ppc460ex_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ ppc460ex_desc_init_pqzero_sum(sw_desc, dst_cnt, src_cnt);
+
+ /* Setup byte count foreach slot just allocated */
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc460ex_desc_set_byte_count(iter, ppc460ex_chan,
+ len);
+ iter->unmap_len = len;
+ }
+
+ /* Setup destinations for P/Q ops */
+ ppc460ex_adma_pqzero_sum_set_dest(sw_desc, pdest, qdest);
+
+ /* Setup sources and mults for P/Q ops */
+ while (src_cnt--) {
+ ppc460ex_adma_pqzero_sum_set_src (sw_desc,
+ src[src_cnt], src_cnt);
+ /* Setup mults for Q-check only; in case of P -
+ * keep the default 0 (==1)
+ */
+ if (qdest)
+ ppc460ex_adma_pqzero_sum_set_src_mult (sw_desc,
+ scf[src_cnt], src_cnt,dst_cnt - 1 );
+ }
+
+ /* Setup zero QWORDs into DCHECK CDBs */
+ idst = dst_cnt;
+ list_for_each_entry_reverse(iter, &sw_desc->group_list,
+ chain_node) {
+ /*
+ * The last CDB corresponds to P-parity check
+ * (if any), the one before last CDB corresponds
+ * Q-parity check
+ */
+ if (idst == DMA_DEST_MAX_NUM) {
+ if (idst == dst_cnt) {
+ set_bit(PPC460EX_DESC_QCHECK,
+ &iter->flags);
+ } else {
+ set_bit(PPC460EX_DESC_PCHECK,
+ &iter->flags);
+ }
+ } else {
+ if (qdest) {
+ set_bit(PPC460EX_DESC_QCHECK,
+ &iter->flags);
+ } else {
+ set_bit(PPC460EX_DESC_PCHECK,
+ &iter->flags);
+ }
+ }
+ iter->xor_check_result = pqres;
+ /*
+ * set it to zero, if check fail then result will
+ * be updated
+ */
+ *iter->xor_check_result = 0;
+ ppc460ex_desc_set_dcheck(iter, ppc460ex_chan,
+ ppc460ex_qword);
+ if (!(--dst_cnt))
+ break;
+ }
+ }
+ spin_unlock_bh(&ppc460ex_chan->lock);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+/**
+ * ppc460ex_adma_prep_dma_mq_zero_sum - prepare CDB group for
+ * a PQ_VAL operation
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_mq_zero_sum(
+ struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
+ size_t len, u32 *presult, unsigned long flags)
+{
+ ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_desc_t *sw_desc, *iter;
+ int slot_cnt, slots_per_op, idst, dst_cnt;
+
+ BUG_ON(src_cnt < 3 || !src[0]);
+
+ /* Always use WXOR for P/Q calculations (two destinations).
+ * Need two extra slots to verify results are zero. Since src_cnt
+ * is the size of the src[] buffer (which includes destination
+ * pointers at the first and/or second positions) then the number
+ * of actual sources should be reduced by DMA_DEST_MAX_NUM (2).
+ */
+ idst = dst_cnt = 1;
+ slot_cnt = src_cnt ;
+ src_cnt -= dst_cnt;
+ slots_per_op = 1;
+
+ spin_lock_bh(&ppc460ex_chan->lock);
+ sw_desc = ppc460ex_adma_alloc_slots(ppc460ex_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ ppc460ex_desc_init_pqzero_sum(sw_desc, dst_cnt, src_cnt);
+
+ /* Setup byte count foreach slot just allocated */
+ sw_desc->async_tx.flags = flags;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc460ex_desc_set_byte_count(iter, ppc460ex_chan,
+ len);
+ iter->unmap_len = len;
+ }
+
+ /* Setup destinations for P/Q ops */
+
+ ppc460ex_adma_mq_zero_sum_set_dest(sw_desc, src[0]);
+
+ /* Setup sources and mults for P/Q ops */
+ src = &src[dst_cnt];
+ while (src_cnt-- ) {
+ ppc460ex_adma_pqzero_sum_set_src (sw_desc,
+ src[src_cnt ], src_cnt);
+ }
+
+ /* Setup zero QWORDs into DCHECK CDBs */
+ idst = dst_cnt;
+ list_for_each_entry_reverse(iter, &sw_desc->group_list,
+ chain_node) {
+ /*
+ * The last CDB corresponds to P-parity check
+ * (if any), the one before last CDB corresponds
+ * Q-parity check
+ */
+ iter->xor_check_result = presult;
+ /*
+ * set it to zero, if check fail then result will
+ * be updated
+ */
+ *iter->xor_check_result = 0;
+ ppc460ex_desc_set_dcheck(iter, ppc460ex_chan,
+ ppc460ex_qword);
+ if (!(--dst_cnt))
+ break;
+ }
+ }
+ spin_unlock_bh(&ppc460ex_chan->lock);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+static void ppc460ex_adma_pq_zero_op(ppc460ex_desc_t *iter,
+ ppc460ex_ch_t *chan, dma_addr_t addr)
+{
+ /* To clear destinations update the descriptor
+ * (P or Q depending on index) as follows:
+ * addr is destination (0 corresponds to SG2):
+ */
+ ppc460ex_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE, addr, 0);
+
+ /* ... and the addr is source: */
+ ppc460ex_desc_set_src_addr(iter, chan, 0, DMA_CUED_XOR_HB, addr);
+
+ /* addr is always SG2 then the mult is always DST1 */
+ //ppc460ex_desc_set_src_mult(iter, chan, DMA_CUED_MULT1_OFF,
+ // DMA_CDB_SG_DST1, 1);
+}
+/**
+ * ppc460ex_adma_set_dest - set destination address into descriptor
+ */
+static inline void ppc460ex_adma_set_dest(ppc460ex_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+ BUG_ON(index >= sw_desc->dst_cnt);
+
+ switch (chan->device->id) {
+ case PPC460EX_DMA1_ID:
+ /* to do: support transfers lengths >
+ * PPC460EX_ADMA_DMA/XOR_MAX_BYTE_COUNT
+ */
+ ppc460ex_desc_set_dest_addr(sw_desc->group_head,
+ // chan, 0x8, addr, index); // Enabling HB bus
+ chan, 0, addr, index);
+ break;
+ case PPC460EX_XOR_ID:
+ sw_desc = ppc460ex_get_group_entry(sw_desc, index);
+ ppc460ex_desc_set_dest_addr(sw_desc,
+ chan, 0, addr, index);
+ break;
+ }
+}
+
+
+static void ppc460ex_adma_dma2rxor_set_dest (
+ ppc460ex_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+ ppc460ex_desc_t *iter;
+ int i;
+
+ switch (chan->device->id) {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+ BUG();
+ break;
+ case PPC460EX_XOR_ID:
+ iter = ppc460ex_get_group_entry(sw_desc,
+ sw_desc->descs_per_op*index);
+ for (i=0;i<sw_desc->descs_per_op;i++) {
+ ppc460ex_desc_set_dest_addr(iter,
+ chan, 0, addr, index);
+ if (i) ppc460ex_wxor_set_base (iter);
+ iter = list_entry (iter->chain_node.next,
+ ppc460ex_desc_t, chain_node);
+ }
+ break;
+ }
+}
+
+/**
+ * ppc460ex_adma_pq_xor_set_dest - set destination address into descriptor
+ * for the PQXOR operation
+ */
+static void ppc460ex_adma_pqxor_set_dest(ppc460ex_desc_t *sw_desc,
+ dma_addr_t *addrs, unsigned long flags)
+{
+ ppc460ex_desc_t *iter;
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+ dma_addr_t paddr, qaddr;
+ dma_addr_t addr = 0, ppath, qpath;
+ int index = 0, i;
+
+ if (flags & DMA_PREP_HAVE_P)
+ paddr = addrs[0];
+ else
+ paddr = 0;
+
+ if (flags & DMA_PREP_HAVE_Q)
+ qaddr = addrs[1];
+ else
+ qaddr = 0;
+
+ if (!paddr || !qaddr)
+ addr = paddr ? paddr : qaddr;
+
+ switch (chan->device->id) {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot:
+ */
+ if (!test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+ /* This is WXOR-only chain; may have 1/2 zero descs */
+ if (test_bit(PPC460EX_ZERO_P, &sw_desc->flags))
+ index++;
+ if (test_bit(PPC460EX_ZERO_Q, &sw_desc->flags))
+ index++;
+
+ iter = ppc460ex_get_group_entry(sw_desc, index);
+ if (addr) {
+ /* one destination */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list, chain_node)
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, addr, 0);
+ } else {
+ /* two destinations */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list, chain_node) {
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, paddr, 0);
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, qaddr, 1);
+ }
+ }
+
+ if (index) {
+ /* To clear destinations update the descriptor
+ * (1st,2nd, or both depending on flags)
+ */
+ index = 0;
+ if (test_bit(PPC460EX_ZERO_P,
+ &sw_desc->flags)) {
+ iter = ppc460ex_get_group_entry(
+ sw_desc, index++);
+ ppc460ex_adma_pq_zero_op(iter, chan,
+ paddr);
+ }
+
+ if (test_bit(PPC460EX_ZERO_Q,
+ &sw_desc->flags)) {
+ iter = ppc460ex_get_group_entry(
+ sw_desc, index++);
+ ppc460ex_adma_pq_zero_op(iter, chan,
+ qaddr);
+ }
+
+ return;
+ }
+ } else {
+ /* This is RXOR-only or RXOR/WXOR mixed chain */
+
+ /* If we want to include destination into calculations,
+ * then make dest addresses cued with mult=1 (XOR).
+ */
+ ppath = test_bit(PPC460EX_ZERO_P, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+ qpath = test_bit(PPC460EX_ZERO_Q, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ /* Setup destination(s) in RXOR slot(s) */
+ iter = ppc460ex_get_group_entry (sw_desc, index++);
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ paddr ? ppath : qpath,
+ paddr ? paddr : qaddr, 0);
+ if (!addr) {
+ /* two destinations */
+ iter = ppc460ex_get_group_entry (sw_desc,
+ index++);
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ qpath, qaddr, 0);
+ }
+
+ if (test_bit(PPC460EX_DESC_WXOR, &sw_desc->flags)) {
+ /* Setup destination(s) in remaining WXOR
+ * slots
+ */
+ iter = ppc460ex_get_group_entry(sw_desc,
+ index);
+ if (addr) {
+ /* one destination */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list,
+ chain_node)
+ ppc460ex_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ addr, 0);
+
+ } else {
+ /* two destinations */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list,
+ chain_node) {
+ ppc460ex_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ paddr, 0);
+ ppc460ex_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ qaddr, 1);
+ }
+ }
+ }
+
+ }
+ break;
+
+ case PPC460EX_XOR_ID:
+ /* DMA2 descriptors have only 1 destination, so there are
+ * two chains - one for each dest.
+ * If we want to include destination into calculations,
+ * then make dest addresses cued with mult=1 (XOR).
+ */
+ ppath = test_bit(PPC460EX_ZERO_P, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ qpath = test_bit(PPC460EX_ZERO_Q, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ iter = ppc460ex_get_group_entry (sw_desc, 0);
+ for (i=0; i<sw_desc->descs_per_op; i++) {
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ paddr ? ppath : qpath,
+ paddr ? paddr : qaddr, 0);
+ iter = list_entry (iter->chain_node.next,
+ ppc460ex_desc_t, chain_node);
+ }
+
+ if (!addr) {
+ /* Two destinations; setup Q here */
+ iter = ppc460ex_get_group_entry (sw_desc,
+ sw_desc->descs_per_op);
+ for (i=0; i<sw_desc->descs_per_op; i++) {
+ ppc460ex_desc_set_dest_addr(iter,
+ chan, qpath, qaddr, 0);
+ iter = list_entry (iter->chain_node.next,
+ ppc460ex_desc_t, chain_node);
+ }
+ }
+
+ break;
+ }
+}
+void ppc460ex_adma_xor_set_dest(ppc460ex_desc_t *sw_desc,
+ dma_addr_t *addrs, unsigned long flags)
+{
+ ppc460ex_desc_t *iter;
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+ dma_addr_t paddr, qaddr;
+ dma_addr_t addr = 0, ppath, qpath;
+ int index = 0;
+
+ paddr = addrs[0];
+
+
+
+ switch (chan->device->id) {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot:
+ */
+ if (!test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+ /* This is WXOR-only chain; may have 1/2 zero descs */
+ if (test_bit(PPC460EX_ZERO_P, &sw_desc->flags))
+ index++;
+
+ iter = ppc460ex_get_group_entry(sw_desc, index);
+ if (paddr) {
+ /* one destination */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list, chain_node)
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, paddr, 0);
+ } else {
+ BUG();
+ /* two destinations */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list, chain_node) {
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, paddr, 0);
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, qaddr, 1);
+ }
+ }
+
+ if (index) {
+ /* To clear destinations update the descriptor
+ * (1st,2nd, or both depending on flags)
+ */
+ index = 0;
+ if (test_bit(PPC460EX_ZERO_P,
+ &sw_desc->flags)) {
+ iter = ppc460ex_get_group_entry(
+ sw_desc, index++);
+ ppc460ex_adma_pq_zero_op(iter, chan,
+ paddr);
+ }
+
+ return;
+ }
+ } else {
+ /* This is RXOR-only or RXOR/WXOR mixed chain */
+
+ /* If we want to include destination into calculations,
+ * then make dest addresses cued with mult=1 (XOR).
+ */
+ ppath = test_bit(PPC460EX_ZERO_P, &sw_desc->flags) ?
+ DMA_CUED_XOR_HB :
+ DMA_CUED_XOR_BASE |
+ (1 << DMA_CUED_MULT1_OFF);
+
+ /* Setup destination(s) in RXOR slot(s) */
+ iter = ppc460ex_get_group_entry (sw_desc, index++);
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ paddr ? ppath : qpath,
+ paddr ? paddr : qaddr, 0);
+ if (!addr) {
+ /* two destinations */
+ iter = ppc460ex_get_group_entry (sw_desc,
+ index++);
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ qpath, qaddr, 0);
+ }
+
+ if (test_bit(PPC460EX_DESC_WXOR, &sw_desc->flags)) {
+ /* Setup destination(s) in remaining WXOR
+ * slots
+ */
+ iter = ppc460ex_get_group_entry(sw_desc,
+ index);
+ if (addr) {
+ /* one destination */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list,
+ chain_node)
+ ppc460ex_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ addr, 0);
+
+ } else {
+ /* two destinations */
+ list_for_each_entry_from(iter,
+ &sw_desc->group_list,
+ chain_node) {
+ ppc460ex_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ paddr, 0);
+ ppc460ex_desc_set_dest_addr(
+ iter, chan,
+ DMA_CUED_XOR_BASE,
+ qaddr, 1);
+ }
+ }
+ }
+
+ }
+ break;
+
+ }
+}
+/**
+ * ppc460ex_adma_dma01_xor_set_dest - set destination address into descriptor
+ * for the PQXOR operation
+ */
+static void ppc460ex_adma_dma01_xor_set_dest(ppc460ex_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ ppc460ex_desc_t *iter;
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+
+ BUG_ON(index >= sw_desc->dst_cnt);
+ BUG_ON(test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags) && index);
+
+ switch (chan->device->id) {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot:
+ */
+ if (test_bit(PPC460EX_DESC_WXOR, &sw_desc->flags)) {
+ /* If this is RXOR/WXOR chain then dst_cnt == 1
+ * and first WXOR descriptor is the second in RXOR/WXOR
+ * chain
+ */
+ //if (!test_bit(PPC460EX_ZERO_P, &sw_desc->flags)) {
+ if (!test_bit(PPC460EX_ZERO_P, &sw_desc->flags)) {
+ iter = ppc460ex_get_group_entry(sw_desc, 0);
+ } else {
+ iter = ppc460ex_get_group_entry(sw_desc,
+ sw_desc->dst_cnt);
+ }
+ list_for_each_entry_from(iter, &sw_desc->group_list,
+ chain_node) {
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, addr, index);
+ }
+ if (!test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags) &&
+ test_bit(PPC460EX_ZERO_P, &sw_desc->flags)) {
+ /* In a WXOR-only case we probably has had
+ * a reasonable data at P/Q addresses, so
+ * the first operation in chain will be
+ * zeroing P/Q dest:
+ * WXOR (Q, 1*Q) -> 0.
+ *
+ * To do this (clear) update the descriptor
+ * (P or Q depending on index) as follows:
+ * addr is destination (0 corresponds to SG2):
+ */
+ iter = ppc460ex_get_group_entry (sw_desc,
+ index);
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, addr, 0);
+ /* ... and the addr is source: */
+ ppc460ex_desc_set_src_addr(iter, chan, 0,
+ DMA_CUED_XOR_HB, addr);
+ /* addr is always SG2 then the mult is always
+ DST1 */
+ ppc460ex_desc_set_src_mult(iter, chan,
+ DMA_CUED_MULT1_OFF, DMA_CDB_SG_DST1, 1);
+ }
+ }
+
+ if (test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+ /*
+ * setup Q-destination for RXOR slot (
+ * it shall be a HB address)
+ */
+ iter = ppc460ex_get_group_entry (sw_desc, index);
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_HB, addr, 0);
+ }
+ break;
+ case PPC460EX_XOR_ID:
+ iter = ppc460ex_get_group_entry (sw_desc, index);
+ ppc460ex_desc_set_dest_addr(iter, chan, 0, addr, 0);
+ break;
+ }
+}
+
+/**
+ * ppc460ex_adma_pq_zero_sum_set_dest - set destination address into descriptor
+ * for the PQ_VAL operation
+ */
+static void ppc460ex_adma_mq_zero_sum_set_dest (
+ ppc460ex_desc_t *sw_desc,
+ dma_addr_t addr)
+{
+ ppc460ex_desc_t *iter, *end;
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+
+
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot
+ */
+ end = ppc460ex_get_group_entry(sw_desc, sw_desc->src_cnt);
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ if (unlikely(iter == end))
+ break;
+ ppc460ex_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ addr, 0);
+ }
+ /* The descriptors remain are DATACHECK. These have no need in
+ * destination. Actually, these destination are used there
+ * as a sources for check operation. So, set addr ass source.
+ */
+ end = ppc460ex_get_group_entry(sw_desc, sw_desc->src_cnt );
+ BUG_ON(!end);
+ ppc460ex_desc_set_src_addr(end, chan, 0, 0, addr);
+}
+static void ppc460ex_adma_pqzero_sum_set_dest (
+ ppc460ex_desc_t *sw_desc,
+ dma_addr_t paddr, dma_addr_t qaddr)
+{
+ ppc460ex_desc_t *iter, *end;
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+ dma_addr_t addr = 0;
+
+
+ /* walk through the WXOR source list and set P/Q-destinations
+ * for each slot
+ */
+ end = ppc460ex_get_group_entry(sw_desc, sw_desc->src_cnt);
+ if(paddr && qaddr) {
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ if (unlikely(iter == end))
+ break;
+ ppc460ex_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ paddr, 0);
+ ppc460ex_desc_set_dest_addr(iter, chan, DMA_CUED_XOR_BASE,
+ qaddr, 1);
+ }
+ } else {
+ /* one destination */
+ addr = paddr ? paddr : qaddr;
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ if (unlikely(iter == end))
+ break;
+ ppc460ex_desc_set_dest_addr(iter, chan,
+ DMA_CUED_XOR_BASE, addr, 0);
+ }
+ }
+ /* The descriptors remain are DATACHECK. These have no need in
+ * destination. Actually, these destination are used there
+ * as a sources for check operation. So, set addr ass source.
+ */
+ end = ppc460ex_get_group_entry(sw_desc, sw_desc->src_cnt );
+ ppc460ex_desc_set_src_addr(end, chan, 0, 0, addr ? addr : paddr);
+ if (!addr) {
+ end = ppc460ex_get_group_entry(sw_desc, sw_desc->src_cnt + 1);
+ ppc460ex_desc_set_src_addr(end, chan, 0, 0, qaddr);
+ }
+}
+
+/**
+ * ppc460ex_desc_set_xor_src_cnt (ppc460ex_desc_t *desc, int src_cnt)
+ */
+static inline void ppc460ex_desc_set_xor_src_cnt (ppc460ex_desc_t *desc,
+ int src_cnt)
+{
+ xor_cb_t *hw_desc = desc->hw_desc;
+ hw_desc->cbc &= ~XOR_CDCR_OAC_MSK;
+ hw_desc->cbc |= src_cnt;
+}
+
+/**
+ * ppc460ex_adma_pqxor_set_src - set source address into descriptor
+ */
+static void ppc460ex_adma_pqxor_set_src(
+ ppc460ex_desc_t *sw_desc,
+ dma_addr_t addr,
+ int index)
+{
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+ dma_addr_t haddr = 0;
+ ppc460ex_desc_t *iter;
+
+ switch (chan->device->id) {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+ /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
+ */
+ if (test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+ /* RXOR-only or RXOR/WXOR operation */
+ int iskip = test_bit(PPC460EX_DESC_RXOR12,
+ &sw_desc->flags) ? 2 : 3;
+
+ if (index == 0) {
+ /* 1st slot (RXOR) */
+ /* setup sources region (R1-2-3, R1-2-4,
+ or R1-2-5)*/
+ if (test_bit(PPC460EX_DESC_RXOR12,
+ &sw_desc->flags))
+ haddr = DMA_RXOR12 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC460EX_DESC_RXOR123,
+ &sw_desc->flags))
+ haddr = DMA_RXOR123 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC460EX_DESC_RXOR124,
+ &sw_desc->flags))
+ haddr = DMA_RXOR124 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC460EX_DESC_RXOR125,
+ &sw_desc->flags))
+ haddr = DMA_RXOR125 <<
+ DMA_CUED_REGION_OFF;
+ else
+ BUG();
+ haddr |= DMA_CUED_XOR_BASE;
+ sw_desc = sw_desc->group_head;
+ } else if (index < iskip) {
+ /* 1st slot (RXOR)
+ * shall actually set source address only once
+ * instead of first <iskip>
+ */
+ sw_desc = NULL;
+ } else {
+ /* second and next slots (WXOR);
+ * skip first slot with RXOR
+ */
+ haddr = DMA_CUED_XOR_HB;
+ sw_desc = ppc460ex_get_group_entry(sw_desc,
+ index - iskip + 1);
+ }
+ } else {
+ /* WXOR-only operation;
+ * skip first slots with destinations
+ */
+ haddr = DMA_CUED_XOR_HB;
+ if (!test_bit(PPC460EX_ZERO_P, &sw_desc->flags))
+ sw_desc = ppc460ex_get_group_entry(sw_desc,
+ index);
+ else
+ sw_desc = ppc460ex_get_group_entry(sw_desc,
+ sw_desc->dst_cnt + index);
+ }
+
+ if (likely(sw_desc))
+ ppc460ex_desc_set_src_addr(sw_desc, chan, index, haddr,
+ addr);
+ break;
+ case PPC460EX_XOR_ID:
+ /* DMA2 may do Biskup
+ */
+ iter = sw_desc->group_head;
+ if (iter->dst_cnt == 2) {
+ /* both P & Q calculations required; set Q src here */
+ ppc460ex_adma_dma2rxor_set_src(iter, index, addr);
+ /* this is for P. Actually sw_desc already points
+ * to the second CDB though.
+ */
+ iter = ppc460ex_get_group_entry(sw_desc,
+ sw_desc->descs_per_op);
+ }
+ ppc460ex_adma_dma2rxor_set_src(iter, index, addr);
+ break;
+ }
+}
+void ppc460ex_adma_xor_set_src(
+ ppc460ex_desc_t *sw_desc,
+ dma_addr_t addr,
+ int index)
+{
+ ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+ dma_addr_t haddr = 0;
+
+ switch (chan->device->id) {
+ case PPC460EX_DMA0_ID:
+ case PPC460EX_DMA1_ID:
+ /* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
+ */
+ if (test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+ /* RXOR-only or RXOR/WXOR operation */
+ int iskip = test_bit(PPC460EX_DESC_RXOR12,
+ &sw_desc->flags) ? 2 : 3;
+
+ if (index == 0) {
+ /* 1st slot (RXOR) */
+ /* setup sources region (R1-2-3, R1-2-4,
+ or R1-2-5)*/
+ if (test_bit(PPC460EX_DESC_RXOR12,
+ &sw_desc->flags))
+ haddr = DMA_RXOR12 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC460EX_DESC_RXOR123,
+ &sw_desc->flags))
+ haddr = DMA_RXOR123 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC460EX_DESC_RXOR124,
+ &sw_desc->flags))
+ haddr = DMA_RXOR124 <<
+ DMA_CUED_REGION_OFF;
+ else if (test_bit(PPC460EX_DESC_RXOR125,
+ &sw_desc->flags))
+ haddr = DMA_RXOR125 <<
+ DMA_CUED_REGION_OFF;
+ else
+ BUG();
+ haddr |= DMA_CUED_XOR_BASE;
+ sw_desc = sw_desc->group_head;
+ } else if (index < iskip) {
+ /* 1st slot (RXOR)
+ * shall actually set source address only once
+ * instead of first <iskip>
+ */
+ sw_desc = NULL;
+ } else {
+ /* second and next slots (WXOR);
+ * skip first slot with RXOR
+ */
+ haddr = DMA_CUED_XOR_HB;
+ sw_desc = ppc460ex_get_group_entry(sw_desc,
+ index - iskip + 1);
+ }
+ } else {
+ /* WXOR-only operation;
+ * skip first slots with destinations
+ */
+ haddr = DMA_CUED_XOR_HB;
+ if (!test_bit(PPC460EX_ZERO_P, &sw_desc->flags))
+ sw_desc = ppc460ex_get_group_entry(sw_desc,
+ index);
+ else
+ sw_desc = ppc460ex_get_group_entry(sw_desc,
+ sw_desc->dst_cnt + index);
+ }
+
+ if (likely(sw_desc))
+ ppc460ex_desc_set_src_addr(sw_desc, chan, index, haddr,
+ addr);
+ break;
+ }
+}
+/**
+ * ppc460ex_adma_dma01_xor_set_src - set source address into descriptor
+ * @sw_desc: software descriptor heading the operation group
+ * @addr: source bus address to program
+ * @index: zero-based index of this source within the operation
+ *
+ * On DMA0/DMA1 the group may encode an RXOR-only, RXOR+WXOR chain or a
+ * WXOR-only operation; which slot receives @addr (and which CUED region
+ * bits accompany it) depends on the descriptor flags.  On the XOR core
+ * (DMA2) the address is routed through the Biskup/RXOR helper instead.
+ */
+static void ppc460ex_adma_dma01_xor_set_src(
+		ppc460ex_desc_t *sw_desc,
+		dma_addr_t addr,
+		int index)
+{
+	ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+	dma_addr_t haddr = 0;
+	ppc460ex_desc_t *iter;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		/* DMA0,1 may do: WXOR, RXOR, RXOR+WXORs chain
+		 */
+		if (test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+			/* RXOR-only or RXOR/WXOR operation */
+			/* RXOR12 consumes 2 sources; RXOR123/124/125 take 3 */
+			int iskip = test_bit(PPC460EX_DESC_RXOR12,
+				&sw_desc->flags) ? 2 : 3;
+
+			if (index == 0) {
+				/* 1st slot (RXOR) */
+				/* setup sources region (R1-2-3, R1-2-4,
+				   or R1-2-5)*/
+				if (test_bit(PPC460EX_DESC_RXOR12,
+						&sw_desc->flags))
+					haddr = DMA_RXOR12 <<
+						DMA_CUED_REGION_OFF;
+				else if (test_bit(PPC460EX_DESC_RXOR123,
+				    &sw_desc->flags))
+					haddr = DMA_RXOR123 <<
+						DMA_CUED_REGION_OFF;
+				else if (test_bit(PPC460EX_DESC_RXOR124,
+				    &sw_desc->flags))
+					haddr = DMA_RXOR124 <<
+						DMA_CUED_REGION_OFF;
+				else if (test_bit(PPC460EX_DESC_RXOR125,
+				    &sw_desc->flags))
+					haddr = DMA_RXOR125 <<
+						DMA_CUED_REGION_OFF;
+				else
+					BUG();
+				haddr |= DMA_CUED_XOR_BASE;
+				sw_desc = sw_desc->group_head;
+			} else if (index < iskip) {
+				/* 1st slot (RXOR)
+				 * shall actually set source address only once
+				 * instead of first <iskip>
+				 */
+				sw_desc = NULL;
+			} else {
+				/* second and next slots (WXOR);
+				 * skip first slot with RXOR
+				 */
+				haddr = DMA_CUED_XOR_HB;
+				sw_desc = ppc460ex_get_group_entry(sw_desc,
+					index - iskip + 1);
+			}
+		} else {
+			/* WXOR-only operation;
+			 * skip first slots with destinations
+			 * (when ZERO_P is set the first dst_cnt slots
+			 * zero the destination(s))
+			 */
+			haddr = DMA_CUED_XOR_HB;
+			if (!test_bit(PPC460EX_ZERO_P, &sw_desc->flags)) {
+				sw_desc = ppc460ex_get_group_entry(sw_desc,
+						index);
+			} else {
+				sw_desc = ppc460ex_get_group_entry(sw_desc,
+						sw_desc->dst_cnt + index);
+			}
+		}
+
+		if (likely(sw_desc)) {
+			ppc460ex_desc_set_src_addr(sw_desc, chan, index, haddr,
+				addr);
+		}
+		break;
+	case PPC460EX_XOR_ID:
+		/* DMA2 may do Biskup
+		 */
+		iter = sw_desc->group_head;
+		if (iter->dst_cnt == 2) {
+			/* both P & Q calculations required; set Q src here */
+			ppc460ex_adma_dma2rxor_set_src(iter, index, addr);
+			/* this is for P. Actually sw_desc already points
+			 * to the second CDB though.
+			 */
+			iter = ppc460ex_get_group_entry(sw_desc,
+				sw_desc->descs_per_op);
+		}
+		ppc460ex_adma_dma2rxor_set_src(iter, index, addr);
+		break;
+	}
+}
+
+/**
+ * ppc460ex_adma_pqzero_sum_set_src - set source address into descriptor
+ * @sw_desc: head descriptor of the PQ_VAL operation group
+ * @addr: source bus address
+ * @index: source index; slot @index of the group carries this source
+ *
+ * PQ zero-sum groups map sources 1:1 onto group slots, always on the
+ * high-bandwidth bus (DMA_CUED_XOR_HB).
+ */
+static void ppc460ex_adma_pqzero_sum_set_src(
+		ppc460ex_desc_t *sw_desc,
+		dma_addr_t addr,
+		int index)
+{
+	ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+	dma_addr_t haddr = DMA_CUED_XOR_HB;
+
+	sw_desc = ppc460ex_get_group_entry(sw_desc, index);
+
+	if (likely(sw_desc))
+		ppc460ex_desc_set_src_addr(sw_desc, chan, index, haddr, addr);
+}
+
+/**
+ * ppc460ex_adma_memcpy_xor_set_src - set source address into descriptor
+ * @sw_desc: descriptor whose group head receives the source
+ * @addr: source bus address
+ * @index: source index within the group head's CDB
+ *
+ * MEMCPY/XOR sources all live in the group-head descriptor; no CUED
+ * region bits are applied (haddr == 0).
+ */
+static inline void ppc460ex_adma_memcpy_xor_set_src(
+		ppc460ex_desc_t *sw_desc,
+		dma_addr_t addr,
+		int index)
+{
+	ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+
+	sw_desc = sw_desc->group_head;
+
+	if (likely(sw_desc))
+		//ppc460ex_desc_set_src_addr(sw_desc, chan, index, 0x8, addr); // Enabling HB bus
+		ppc460ex_desc_set_src_addr(sw_desc, chan, index, 0, addr);
+}
+
+/**
+ * ppc460ex_adma_dma2rxor_inc_addr - account one more operand in the RXOR
+ * cursor and finalize the current CDB when it is full or the operand
+ * list ends.
+ * @desc: CDB currently being filled
+ * @cursor: running RXOR build state
+ * @index: index of the operand just consumed
+ * @src_cnt: total number of source operands
+ *
+ * When the last source is reached, or the CDB holds XOR_MAX_OPS
+ * operands, the operand count is written into the CDB; for every CDB
+ * after the first the WXOR base is also programmed.
+ */
+static void ppc460ex_adma_dma2rxor_inc_addr (ppc460ex_desc_t *desc,
+		ppc460ex_rxor_cursor_t *cursor, int index, int src_cnt)
+{
+	cursor->addr_count++;
+	if (index == src_cnt-1) {
+		/* last operand: flush the count into this CDB */
+		ppc460ex_desc_set_xor_src_cnt (desc,
+			cursor->addr_count);
+		if (cursor->desc_count) {
+			ppc460ex_wxor_set_base (desc);
+		}
+	} else if (cursor->addr_count == XOR_MAX_OPS) {
+		/* CDB full: flush and start filling the next one */
+		ppc460ex_desc_set_xor_src_cnt (desc,
+			cursor->addr_count);
+		if (cursor->desc_count) {
+			ppc460ex_wxor_set_base (desc);
+		}
+		cursor->addr_count = 0;
+		cursor->desc_count++;
+	}
+}
+
+/**
+ * ppc460ex_adma_dma2rxor_prep_src - setup RXOR types in DMA2 CDB
+ * @hdesc: head CDB of the chain being built
+ * @cursor: running RXOR build state (state machine position, counts,
+ *          last region base address @addrl and region length @len)
+ * @index: index of the source being classified
+ * @src_cnt: total number of sources
+ * @addr: bus address of this source
+ *
+ * Classifies consecutive source addresses into RXOR regions:
+ *  state 0: first operand of a region seen; decide direct vs reverse
+ *           RXOR from whether @addr extends @addrl forward or backward;
+ *  state 1: second operand seen; upgrade the region to RXOR123/124/125
+ *           if the third address has the matching stride, otherwise
+ *           close it as RXOR12 and restart;
+ *  state 2: region complete; the next address starts a new region.
+ * Always returns 0; a non-classifiable address pattern is a caller bug
+ * and trips BUG().
+ */
+static int ppc460ex_adma_dma2rxor_prep_src (ppc460ex_desc_t *hdesc,
+		ppc460ex_rxor_cursor_t *cursor, int index,
+		int src_cnt, u32 addr)
+{
+	int rval = 0;
+	u32 sign;
+	ppc460ex_desc_t *desc = hdesc;
+	int i;
+
+	/* advance to the CDB the cursor is currently filling.
+	 * NOTE(review): each iteration re-reads hdesc->chain_node.next,
+	 * so desc never advances past the second CDB regardless of
+	 * desc_count — looks like it was meant to walk desc->chain_node;
+	 * confirm against a chain longer than two CDBs.
+	 */
+	for (i=0;i<cursor->desc_count;i++) {
+		desc = list_entry (hdesc->chain_node.next, ppc460ex_desc_t,
+				chain_node);
+	}
+
+	switch (cursor->state) {
+	case 0:
+		if (addr == cursor->addrl + cursor->len ) {
+			/* direct RXOR */
+			cursor->state = 1;
+			cursor->xor_count++;
+			if (index == src_cnt-1) {
+				ppc460ex_rxor_set_region (desc,
+					cursor->addr_count,
+					DMA_RXOR12 <<
+					DMA_CUED_REGION_OFF);
+				ppc460ex_adma_dma2rxor_inc_addr (
+					desc, cursor, index, src_cnt);
+			}
+		} else if (cursor->addrl == addr + cursor->len) {
+			/* reverse RXOR */
+			cursor->state = 1;
+			cursor->xor_count++;
+			set_bit (cursor->addr_count,
+					&desc->reverse_flags[0]);
+			if (index == src_cnt-1) {
+				ppc460ex_rxor_set_region (desc,
+					cursor->addr_count,
+					DMA_RXOR12 <<
+					DMA_CUED_REGION_OFF);
+				ppc460ex_adma_dma2rxor_inc_addr (
+					desc, cursor, index, src_cnt);
+			}
+		} else {
+			printk (KERN_ERR "Cannot build "
+				"DMA2 RXOR command block.\n");
+			BUG ();
+		}
+		break;
+	case 1:
+		/* second operand of the region already accepted; decide
+		 * whether a third operand extends it
+		 */
+		sign = test_bit (cursor->addr_count,
+				desc->reverse_flags)
+			? -1 : 1;
+		if (index == src_cnt-2 || (sign == -1
+		    && addr != cursor->addrl - 2*cursor->len)) {
+			/* close as a two-operand RXOR12 region */
+			cursor->state = 0;
+			cursor->xor_count = 1;
+			cursor->addrl = addr;
+			ppc460ex_rxor_set_region (desc,
+				cursor->addr_count,
+				DMA_RXOR12 << DMA_CUED_REGION_OFF);
+			ppc460ex_adma_dma2rxor_inc_addr (
+				desc, cursor, index, src_cnt);
+		} else if (addr == cursor->addrl + 2*sign*cursor->len) {
+			/* contiguous third operand -> R1-2-3 */
+			cursor->state = 2;
+			cursor->xor_count = 0;
+			ppc460ex_rxor_set_region (desc,
+				cursor->addr_count,
+				DMA_RXOR123 << DMA_CUED_REGION_OFF);
+			if (index == src_cnt-1) {
+				ppc460ex_adma_dma2rxor_inc_addr (
+					desc, cursor, index, src_cnt);
+			}
+		} else if (addr == cursor->addrl + 3*cursor->len) {
+			/* third operand at +3*len -> R1-2-4 */
+			cursor->state = 2;
+			cursor->xor_count = 0;
+			ppc460ex_rxor_set_region (desc,
+				cursor->addr_count,
+				DMA_RXOR124 << DMA_CUED_REGION_OFF);
+			if (index == src_cnt-1) {
+				ppc460ex_adma_dma2rxor_inc_addr (
+					desc, cursor, index, src_cnt);
+			}
+		} else if (addr == cursor->addrl + 4*cursor->len) {
+			/* third operand at +4*len -> R1-2-5 */
+			cursor->state = 2;
+			cursor->xor_count = 0;
+			ppc460ex_rxor_set_region (desc,
+				cursor->addr_count,
+				DMA_RXOR125 << DMA_CUED_REGION_OFF);
+			if (index == src_cnt-1) {
+				ppc460ex_adma_dma2rxor_inc_addr (
+					desc, cursor, index, src_cnt);
+			}
+		} else {
+			/* no usable stride: close as RXOR12, restart */
+			cursor->state = 0;
+			cursor->xor_count = 1;
+			cursor->addrl = addr;
+			ppc460ex_rxor_set_region (desc,
+				cursor->addr_count,
+				DMA_RXOR12 << DMA_CUED_REGION_OFF);
+			ppc460ex_adma_dma2rxor_inc_addr (
+				desc, cursor, index, src_cnt);
+		}
+		break;
+	case 2:
+		/* previous region complete: this address opens a new one */
+		cursor->state = 0;
+		cursor->addrl = addr;
+		cursor->xor_count++;
+		if (index) {
+			ppc460ex_adma_dma2rxor_inc_addr (
+				desc, cursor, index, src_cnt);
+		}
+		break;
+	}
+
+	return rval;
+}
+
+/**
+ * ppc460ex_adma_dma2rxor_set_src - set RXOR source address; it's assumed that
+ * ppc460ex_adma_dma2rxor_prep_src() has already done prior this call
+ * @desc: head CDB of the chain
+ * @index: operand index whose address is being installed
+ * @addr: bus address of that operand
+ *
+ * Walks the CDB operand table to find which RXOR group (2- or
+ * 3-operand region) covers @index, following the chain to the next
+ * CDB each time XOR_MAX_OPS operands are exhausted.  Only the group's
+ * representative operand — last one for a reverse region, first one
+ * for a direct region — actually stores the address.
+ */
+static void ppc460ex_adma_dma2rxor_set_src (ppc460ex_desc_t *desc,
+	int index, dma_addr_t addr)
+{
+	xor_cb_t *xcb = desc->hw_desc;
+	int k = 0, op = 0, lop = 0;
+
+	/* get the RXOR operand which corresponds to index addr */
+	while (op <= index) {
+		lop = op;
+		if (k == XOR_MAX_OPS) {
+			k = 0;
+			desc = list_entry (desc->chain_node.next,
+				ppc460ex_desc_t, chain_node);
+			xcb = desc->hw_desc;
+
+		}
+		/* RXOR12 regions span 2 operands, RXOR123/4/5 span 3 */
+		if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
+		    (DMA_RXOR12 << DMA_CUED_REGION_OFF))
+			op += 2;
+		else
+			op += 3;
+	}
+
+	if (test_bit(/*PPC460EX_DESC_RXOR_REV*/k-1, desc->reverse_flags)) {
+		/* reverse operand order; put last op in RXOR group */
+		if (index == op - 1)
+			ppc460ex_rxor_set_src(desc, k - 1, addr);
+	} else {
+		/* direct operand order; put first op in RXOR group */
+		if (index == lop)
+			ppc460ex_rxor_set_src(desc, k - 1, addr);
+	}
+}
+
+/**
+ * ppc460ex_adma_dma2rxor_set_mult - set RXOR multipliers; it's assumed that
+ * ppc460ex_adma_dma2rxor_prep_src() has already done prior this call
+ * @desc: head CDB of the chain
+ * @index: operand index whose GF multiplier is being installed
+ * @mult: GF(2^8) multiplier value
+ *
+ * Same group lookup as ppc460ex_adma_dma2rxor_set_src(); the
+ * multiplier slot within the group is the operand's offset from the
+ * group end (reverse regions) or from the group start (direct ones).
+ */
+static void ppc460ex_adma_dma2rxor_set_mult (ppc460ex_desc_t *desc,
+	int index, u8 mult)
+{
+	xor_cb_t *xcb = desc->hw_desc;
+	int k = 0, op = 0, lop = 0;
+
+	/* get the RXOR operand which corresponds to index mult */
+	while (op <= index) {
+		lop = op;
+		if (k == XOR_MAX_OPS) {
+			k = 0;
+			desc = list_entry (desc->chain_node.next,
+				ppc460ex_desc_t, chain_node);
+			xcb = desc->hw_desc;
+
+		}
+		/* RXOR12 regions span 2 operands, RXOR123/4/5 span 3 */
+		if ((xcb->ops[k++].h & (DMA_RXOR12 << DMA_CUED_REGION_OFF)) ==
+		    (DMA_RXOR12 << DMA_CUED_REGION_OFF))
+			op += 2;
+		else
+			op += 3;
+	}
+
+	if (test_bit(/*PPC460EX_DESC_RXOR_REV*/k-1, desc->reverse_flags)) {
+		/* reverse order */
+		ppc460ex_rxor_set_mult(desc, k - 1, op - index - 1, mult);
+	} else {
+		/* direct order */
+		ppc460ex_rxor_set_mult(desc, k - 1, index - lop, mult);
+	}
+}
+
+/**
+ * ppc460ex_init_rxor_cursor - reset an RXOR build cursor.
+ * State 2 means "no region open": the first address classified by
+ * ppc460ex_adma_dma2rxor_prep_src() starts a fresh region.
+ */
+static void ppc460ex_init_rxor_cursor (ppc460ex_rxor_cursor_t *cursor)
+{
+	memset (cursor, 0, sizeof (ppc460ex_rxor_cursor_t));
+	cursor->state = 2;
+}
+
+/**
+ * ppc460ex_adma_pqxor_set_src_mult - set multiplication coefficient into
+ * descriptor for the PQXOR operation
+ * @sw_desc: head descriptor of the operation group
+ * @mult: GF multiplier for this source
+ * @index: source index
+ * @dst_pos: which destination the WXOR multiplier applies to (0 -> DST1)
+ *
+ * Fix: the NULL guard used to test @sw_desc, which is never NULL at
+ * that point; the pointer actually dereferenced is @iter, the slot
+ * returned by ppc460ex_get_group_entry(), so guard that instead.
+ */
+static void ppc460ex_adma_pqxor_set_src_mult (
+		ppc460ex_desc_t *sw_desc,
+		unsigned char mult, int index,int dst_pos)
+{
+	ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+	u32 mult_idx, mult_dst;
+	ppc460ex_desc_t *iter=NULL, *iter1=NULL;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		if (test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+			int region = test_bit(PPC460EX_DESC_RXOR12,
+					&sw_desc->flags) ? 2 : 3;
+
+			if (index < region) {
+				/* RXOR multipliers */
+				iter = ppc460ex_get_group_entry(sw_desc,
+					sw_desc->dst_cnt - 1);
+				/* NOTE(review): iter1 is assigned but never
+				 * used below — confirm whether the second
+				 * destination also needs its mult set.
+				 */
+				if (sw_desc->dst_cnt == 2)
+					iter1 = ppc460ex_get_group_entry(sw_desc, 0);
+				mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
+				mult_dst = DMA_CDB_SG_SRC;
+			} else {
+				/* WXOR multiplier */
+				iter = ppc460ex_get_group_entry(sw_desc,
+					index - region + 1);
+				mult_idx = DMA_CUED_MULT1_OFF;
+				mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
+						     DMA_CDB_SG_DST1;
+			}
+		} else {
+			int znum = 0;
+
+			/* WXOR-only;
+			 * skip first slots with destinations (if ZERO_DST has
+			 * place)
+			 */
+			if (test_bit(PPC460EX_ZERO_P, &sw_desc->flags))
+				znum++;
+			if (test_bit(PPC460EX_ZERO_Q, &sw_desc->flags))
+				znum++;
+			iter = ppc460ex_get_group_entry(sw_desc, index + znum);
+			mult_idx = DMA_CUED_MULT1_OFF;
+			mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
+		}
+
+		/* guard the pointer that is actually dereferenced */
+		if (likely(iter))
+			ppc460ex_desc_set_src_mult(iter, chan,
+				mult_idx, mult_dst, mult);
+		break;
+	case PPC460EX_XOR_ID:
+		iter = sw_desc->group_head;
+		if (iter->dst_cnt == 2) {
+			/* both P & Q calculations required; set Q mult here */
+			ppc460ex_adma_dma2rxor_set_mult(iter, index, mult);
+			/* this is for P. Actually sw_desc already points
+			 * to the second CDB though.
+			 */
+			mult = 1;
+			iter = ppc460ex_get_group_entry(sw_desc,
+				sw_desc->descs_per_op);
+		}
+		ppc460ex_adma_dma2rxor_set_mult(iter, index, mult);
+		break;
+	}
+}
+/**
+ * ppc460ex_adma_xor_set_src_mult - set multiplication coefficient into
+ * descriptor for a DMA0/DMA1 XOR operation
+ * @sw_desc: head descriptor of the operation group
+ * @mult: GF multiplier for this source
+ * @index: source index
+ * @dst_pos: which destination the WXOR multiplier applies to (0 -> DST1)
+ *
+ * Fix: the NULL guard used to test @sw_desc, which is never NULL at
+ * that point; the pointer actually dereferenced is @iter, the slot
+ * returned by ppc460ex_get_group_entry(), so guard that instead.
+ */
+void ppc460ex_adma_xor_set_src_mult (
+		ppc460ex_desc_t *sw_desc,
+		unsigned char mult, int index,int dst_pos)
+{
+	ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+	u32 mult_idx, mult_dst;
+	ppc460ex_desc_t *iter=NULL, *iter1=NULL;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		if (test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+			int region = test_bit(PPC460EX_DESC_RXOR12,
+					&sw_desc->flags) ? 2 : 3;
+
+			if (index < region) {
+				/* RXOR multipliers */
+				iter = ppc460ex_get_group_entry(sw_desc,
+					sw_desc->dst_cnt - 1);
+				/* NOTE(review): iter1 is assigned but never
+				 * used below — confirm intent.
+				 */
+				if (sw_desc->dst_cnt == 2)
+					iter1 = ppc460ex_get_group_entry(sw_desc, 0);
+				mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
+				mult_dst = DMA_CDB_SG_SRC;
+			} else {
+				/* WXOR multiplier */
+				iter = ppc460ex_get_group_entry(sw_desc,
+					index - region + 1);
+				mult_idx = DMA_CUED_MULT1_OFF;
+				mult_dst = dst_pos ? DMA_CDB_SG_DST2 :
+						     DMA_CDB_SG_DST1;
+			}
+		} else {
+			int znum = 0;
+
+			/* WXOR-only;
+			 * skip first slots with destinations (if ZERO_DST has
+			 * place)
+			 */
+			if (test_bit(PPC460EX_ZERO_P, &sw_desc->flags))
+				znum++;
+			if (test_bit(PPC460EX_ZERO_Q, &sw_desc->flags))
+				znum++;
+			iter = ppc460ex_get_group_entry(sw_desc, index + znum);
+			mult_idx = DMA_CUED_MULT1_OFF;
+			mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
+		}
+
+		/* guard the pointer that is actually dereferenced */
+		if (likely(iter))
+			ppc460ex_desc_set_src_mult(iter, chan,
+				mult_idx, mult_dst, mult);
+		break;
+	}
+}
+/**
+ * ppc460ex_adma_dma01_xor_set_src_mult - set multiplication coefficient
+ * into descriptor for a DMA0/DMA1 XOR operation
+ * @sw_desc: head descriptor of the operation group
+ * @mult: GF multiplier for this source
+ * @index: source index
+ *
+ * Slot selection mirrors ppc460ex_adma_dma01_xor_set_src(): RXOR
+ * multipliers land in the first slot at a per-index offset; WXOR
+ * multipliers go into the slot that carries the corresponding source.
+ */
+static void ppc460ex_adma_dma01_xor_set_src_mult (
+		ppc460ex_desc_t *sw_desc,
+		unsigned char mult, int index)
+{
+	ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+	u32 mult_idx, mult_dst;
+
+	switch (chan->device->id) {
+	case PPC460EX_DMA0_ID:
+	case PPC460EX_DMA1_ID:
+		if (test_bit(PPC460EX_DESC_RXOR, &sw_desc->flags)) {
+			int region = test_bit(PPC460EX_DESC_RXOR12,
+					&sw_desc->flags) ? 2 : 3;
+
+			if (index < region) {
+				/* RXOR multipliers */
+				sw_desc = ppc460ex_get_group_entry(sw_desc, 0);
+				mult_idx = DMA_CUED_MULT1_OFF + (index << 3);
+				mult_dst = DMA_CDB_SG_SRC;
+			} else {
+				/* WXOR multiplier */
+				sw_desc = ppc460ex_get_group_entry(sw_desc,
+					index - region + 1);
+				mult_idx = DMA_CUED_MULT1_OFF;
+				mult_dst = DMA_CDB_SG_DST1;
+			}
+		} else {
+			/* WXOR-only;
+			 * skip first slots with destinations (if ZERO_DST has
+			 * place)
+			 */
+			if (!test_bit(PPC460EX_ZERO_P, &sw_desc->flags)) {
+				sw_desc = ppc460ex_get_group_entry(sw_desc,
+						index);
+			} else {
+				sw_desc = ppc460ex_get_group_entry(sw_desc,
+						sw_desc->dst_cnt + index);
+			}
+			mult_idx = DMA_CUED_MULT1_OFF;
+			mult_dst = DMA_CDB_SG_DST1;
+		}
+
+		if (likely(sw_desc)) {
+			ppc460ex_desc_set_src_mult(sw_desc, chan,
+				mult_idx, mult_dst, mult);
+		}
+		break;
+	}
+}
+
+/**
+ * ppc460ex_adma_pqzero_sum_set_src_mult - set multiplication coefficient
+ * into descriptor for the PQ_VAL operation
+ * @sw_desc: head descriptor of the PQ_VAL group
+ * @mult: GF multiplier
+ * @index: source index; must be a source slot (checked with BUG_ON)
+ * @dst_pos: selects DST2 (non-zero) or DST1 (zero) as the mult target
+ */
+static void ppc460ex_adma_pqzero_sum_set_src_mult (
+		ppc460ex_desc_t *sw_desc,
+		unsigned char mult, int index, int dst_pos)
+{
+	ppc460ex_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+	u32 mult_idx, mult_dst;
+
+	/* set mult for sources only */
+	BUG_ON(index >= sw_desc->src_cnt);
+
+	/* get pointed slot */
+	sw_desc = ppc460ex_get_group_entry(sw_desc, index);
+
+	mult_idx = DMA_CUED_MULT1_OFF;
+	mult_dst = dst_pos ? DMA_CDB_SG_DST2 : DMA_CDB_SG_DST1;
+
+	if (likely(sw_desc))
+		ppc460ex_desc_set_src_mult(sw_desc, chan, mult_idx, mult_dst,
+			mult);
+}
+#if 0
+/**
+ * ppc460ex_adma_dependency_added - schedule clean-up
+ * (compiled out; kept for reference only)
+ */
+static void ppc460ex_adma_dependency_added(struct dma_chan *chan)
+{
+	ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+	tasklet_schedule(&ppc460ex_chan->irq_tasklet);
+}
+#endif
+
+/**
+ * ppc460ex_adma_free_chan_resources - free the resources allocated
+ * @chan: dmaengine channel being torn down
+ *
+ * Runs a final cleanup pass, then unlinks every descriptor still on
+ * the chain (counting them) and frees all allocated slots.  More than
+ * one descriptor left on the chain indicates callers leaked work.
+ */
+static void ppc460ex_adma_free_chan_resources(struct dma_chan *chan)
+{
+	ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+	ppc460ex_desc_t *iter, *_iter;
+	int in_use_descs = 0;
+
+	ppc460ex_adma_slot_cleanup(ppc460ex_chan);
+
+	spin_lock_bh(&ppc460ex_chan->lock);
+	list_for_each_entry_safe(iter, _iter, &ppc460ex_chan->chain,
+					chain_node) {
+		in_use_descs++;
+		list_del(&iter->chain_node);
+	}
+	list_for_each_entry_safe_reverse(iter, _iter,
+			&ppc460ex_chan->all_slots, slot_node) {
+		list_del(&iter->slot_node);
+		kfree(iter);
+		ppc460ex_chan->slots_allocated--;
+	}
+	ppc460ex_chan->last_used = NULL;
+
+	dev_dbg(ppc460ex_chan->device->common.dev,
+		"ppc460ex adma%d %s slots_allocated %d\n",
+		ppc460ex_chan->device->id,
+		__FUNCTION__, ppc460ex_chan->slots_allocated);
+	spin_unlock_bh(&ppc460ex_chan->lock);
+
+	/* one is ok since we left it on there on purpose */
+	if (in_use_descs > 1)
+		printk(KERN_ERR "GT: Freeing %d in use descriptors!\n",
+			in_use_descs - 1);
+}
+
+/**
+ * ppc460ex_adma_is_complete - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ * @done: out (optional): last completed cookie
+ * @used: out (optional): last issued cookie
+ *
+ * Checks completion from the cached cookies first; if the transaction
+ * is still pending, runs a cleanup pass to harvest finished
+ * descriptors and re-checks.
+ */
+static enum dma_status ppc460ex_adma_is_complete(struct dma_chan *chan,
+	dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+{
+	ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+	dma_cookie_t last_used;
+	dma_cookie_t last_complete;
+	enum dma_status ret;
+
+	//printk( "--------------- %s: %i-------------------------\n",__FUNCTION__,__LINE__);
+	last_used = chan->cookie;
+	last_complete = ppc460ex_chan->completed_cookie;
+
+	if (done)
+		*done= last_complete;
+	if (used)
+		*used = last_used;
+
+	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	if (ret == DMA_SUCCESS)
+		return ret;
+
+	/* not done yet: reap finished descriptors and look again */
+	ppc460ex_adma_slot_cleanup(ppc460ex_chan);
+
+	last_used = chan->cookie;
+	last_complete = ppc460ex_chan->completed_cookie;
+
+	if (done)
+		*done= last_complete;
+	if (used)
+		*used = last_used;
+
+	return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/**
+ * ppc460ex_adma_eot_handler - end of transfer interrupt handler
+ *
+ * Defers descriptor cleanup to the channel tasklet, then acknowledges
+ * the end-of-transfer status in the hardware.
+ */
+static irqreturn_t ppc460ex_adma_eot_handler(int irq, void *data)
+{
+	ppc460ex_ch_t *chan = data;
+
+	dev_dbg(chan->device->common.dev,
+		"ppc460ex adma%d: %s\n", chan->device->id, __FUNCTION__);
+
+	tasklet_schedule(&chan->irq_tasklet);
+	ppc460ex_adma_device_clear_eot_status(chan);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ppc460ex_adma_err_handler - DMA error interrupt handler;
+ * do the same things as a eot handler
+ */
+static irqreturn_t ppc460ex_adma_err_handler(int irq, void *data)
+{
+	ppc460ex_ch_t *chan = data;
+	dev_dbg(chan->device->common.dev,
+		"ppc460ex adma%d: %s\n", chan->device->id, __FUNCTION__);
+	tasklet_schedule(&chan->irq_tasklet);
+	ppc460ex_adma_device_clear_eot_status(chan);
+
+	return IRQ_HANDLED;
+}
+
+/* completion callback for the RAID-6 self-test operation */
+static void ppc460ex_test_rad6_callback (void *unused)
+{
+	complete(&ppc460ex_r6_test_comp);
+}
+/**
+ * ppc460ex_test_callback - called when test operation has been done
+ * (completion callback for the RAID-5 self-test)
+ */
+static void ppc460ex_test_callback (void *unused)
+{
+	complete(&ppc460ex_r5_test_comp);
+}
+
+/**
+ * ppc460ex_adma_issue_pending - flush all pending descriptors to h/w
+ * @chan: channel whose queued descriptors should start executing
+ */
+static void ppc460ex_adma_issue_pending(struct dma_chan *chan)
+{
+	ppc460ex_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+#if 0
+	dev_dbg(ppc460ex_chan->device->common.dev,
+	    "ppc460ex adma%d: %s %d \n", ppc460ex_chan->device->id,
+	    __FUNCTION__, ppc460ex_chan->pending);
+#endif
+
+	if (ppc460ex_chan->pending) {
+		dev_dbg(ppc460ex_chan->device->common.dev,
+		    "ppc460ex adma%d: %s %d \n", ppc460ex_chan->device->id,
+		    __FUNCTION__, ppc460ex_chan->pending);
+		ppc460ex_chan->pending = 0;
+		ppc460ex_chan_append(ppc460ex_chan);
+	}
+}
+
+/**
+ * ppc460ex_adma_remove - remove the asynch device
+ *
+ * Unregisters the dmaengine device, releases the IRQs, frees the
+ * descriptor pool (OCM or coherent memory, per device->desc_memory),
+ * releases the MMIO region and frees the channel/device structures.
+ *
+ * Fix: the pool-location test used to dereference 'ppc460ex_chan'
+ * before the channel loop below ever assigned it (uninitialized
+ * pointer read); the location is tracked in device->desc_memory.
+ */
+static int __devexit ppc460ex_adma_remove(struct platform_device *dev)
+{
+	ppc460ex_dev_t *device = platform_get_drvdata(dev);
+	struct dma_chan *chan, *_chan;
+	struct ppc_dma_chan_ref *ref, *_ref;
+	ppc460ex_ch_t *ppc460ex_chan;
+	int i;
+	ppc460ex_aplat_t *plat_data = dev->dev.platform_data;
+
+	dma_async_device_unregister(&device->common);
+
+	for (i = 0; i < 3; i++) {
+		u32 irq;
+		irq = platform_get_irq(dev, i);
+		free_irq(irq, device);
+	}
+
+	/* device itself records where the descriptor pool lives */
+	if (device->desc_memory == ADMA_DESC_MEM_OCM)
+		ocm_free(device->dma_desc_pool_virt);
+	else
+		dma_free_coherent(&dev->dev, plat_data->pool_size,
+				device->dma_desc_pool_virt, device->dma_desc_pool);
+
+	do {
+		struct resource *res;
+		res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+		release_mem_region(res->start, res->end - res->start);
+	} while (0);
+
+	list_for_each_entry_safe(chan, _chan, &device->common.channels,
+				device_node) {
+		ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+		list_del(&chan->device_node);
+		kfree(ppc460ex_chan);
+	}
+
+	list_for_each_entry_safe(ref, _ref, &ppc_adma_chan_list, node) {
+		list_del(&ref->node);
+		kfree(ref);
+	}
+
+	kfree(device);
+
+	return 0;
+}
+
+/**
+ * ppc460ex_adma_probe - probe the asynch device
+ *
+ * Allocates the device structure, carves out the CDB descriptor pool
+ * (OCM or coherent DMA memory, per the "descriptor-memory" DT
+ * property), advertises the dmaengine capabilities, requests the
+ * completion and error interrupts and registers the single channel.
+ *
+ * Fixes over the previous revision:
+ *  - irq_of_parse_and_map() returns 0 (NO_IRQ) on failure, never a
+ *    negative value, so test the mapping for non-zero rather than
+ *    ">= 0" (which was always true);
+ *  - the err_dma_alloc path dereferenced 'chan' before it was ever
+ *    allocated; the pool location is read from adev->desc_memory and
+ *    the pool is only freed if it was actually allocated;
+ *  - err_irq now passes the same dev_id ('chan') to free_irq() that
+ *    request_irq() registered;
+ *  - removed the release_mem_region() call on an uninitialized
+ *    'res' pointer in the err_adev_alloc path.
+ */
+static int __devinit ppc460ex_adma_probe(struct of_device *ofdev,
+			const struct of_device_id *match)
+{
+	int ret=0, irq;
+	ppc460ex_dev_t *adev;
+	ppc460ex_ch_t *chan;
+	ppc460ex_aplat_t *plat_data;
+	struct ppc_dma_chan_ref *ref;
+	const char *str_prop;
+
+	printk("Improved ADMA - 08312009\n");
+	plat_data = &ppc460ex_dma_1_data;
+	if ((adev = kzalloc(sizeof(*adev), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto err_adev_alloc;
+	}
+	adev->res[0].start = ppc460ex_dma_1_channel.resource[0].start;
+	adev->id = ppc460ex_dma_1_channel.id;
+	printk("adev->res[0].start=0x%x\n",adev->res[0].start);
+
+	printk( " adev->id = 0x%x ppc460ex_dma_1_channel.resource[0].start=0x%x \n",
+		adev->id,ppc460ex_dma_1_channel.resource[0].start);
+
+	/* allocate coherent memory for hardware descriptors
+	 * note: writecombine gives slightly better performance, but
+	 * requires that we explicitly drain the write buffer
+	 */
+	str_prop = of_get_property(ofdev->node, "descriptor-memory", NULL);
+	if (str_prop && (!strcmp(str_prop,"ocm") || !strcmp(str_prop,"OCM"))) {
+		printk(KERN_INFO
+			" descriptor-memory = %s\n", str_prop);
+		adev->dma_desc_pool_virt = ocm_alloc(&adev->dma_desc_pool, DMA1_FIFO_SIZE << 2, 4,
+				OCM_NON_CACHED, "ADMA_descriptors");
+		adev->desc_memory = ADMA_DESC_MEM_OCM;
+	} else {
+
+		if ((adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
+		    DMA1_FIFO_SIZE << 2, &adev->dma_desc_pool, GFP_KERNEL)) == NULL) {
+			ret = -ENOMEM;
+			goto err_dma_alloc;
+		}
+		adev->desc_memory = 0;
+	}
+	if (adev->dma_desc_pool_virt == NULL) {
+		/* covers the OCM allocation failing above */
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+	dev_dbg(&ofdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
+		__FUNCTION__, adev->dma_desc_pool_virt,
+		(void *) adev->dma_desc_pool);
+
+	adev->id = PPC460EX_DMA1_ID;
+	/* create the DMA capability MASK . This used to come from resources structure*/
+	dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
+	dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
+	dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
+	dma_cap_set(DMA_PQ, adev->common.cap_mask);
+	//dma_cap_set(DMA_PQ_VAL, adev->common.cap_mask);
+	dma_cap_set(DMA_XOR, adev->common.cap_mask);/* Marri RAID-5 */
+	dma_cap_set(DMA_XOR_VAL, adev->common.cap_mask);
+	adev->odev = ofdev;
+	dev_set_drvdata(&(ofdev->dev), adev);
+
+	INIT_LIST_HEAD(&adev->common.channels);
+
+	/* set base routines */
+	adev->common.device_alloc_chan_resources =
+	    ppc460ex_adma_alloc_chan_resources;
+	adev->common.device_free_chan_resources =
+	    ppc460ex_adma_free_chan_resources;
+	adev->common.device_is_tx_complete = ppc460ex_adma_is_complete;
+	adev->common.device_issue_pending = ppc460ex_adma_issue_pending;
+	adev->common.dev = &ofdev->dev;
+
+	/* set prep routines based on capability */
+	if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
+		adev->common.device_prep_dma_memcpy =
+		    ppc460ex_adma_prep_dma_memcpy;
+	}
+	if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask)) {
+		adev->common.device_prep_dma_memset =
+		    ppc460ex_adma_prep_dma_memset;
+	}
+	if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
+		adev->common.max_xor = XOR_MAX_OPS;
+		adev->common.device_prep_dma_xor = ppc460ex_adma_prep_dma_mq_xor;
+	}
+	if (dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask)) {
+		adev->common.max_xor = XOR_MAX_OPS;
+		adev->common.device_prep_dma_xor_val = ppc460ex_adma_prep_dma_mq_zero_sum;
+	}
+	if (dma_has_cap(DMA_PQ, adev->common.cap_mask)) {
+		switch (adev->id) {
+		case PPC460EX_DMA1_ID:
+			adev->common.max_pq = DMA1_FIFO_SIZE /
+						sizeof(dma_cdb_t);
+			break;
+		}
+		adev->common.device_prep_dma_pq =
+		    ppc460ex_adma_prep_dma_pq;
+
+	}
+	if (dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask)) {
+		switch (adev->id) {
+		case PPC460EX_DMA1_ID:
+			adev->common.max_pq = DMA1_FIFO_SIZE /
+						sizeof(dma_cdb_t);
+			break;
+		}
+		adev->common.device_prep_dma_pq_val =
+		    ppc460ex_adma_prep_dma_pqzero_sum;
+	}
+
+	if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
+		adev->common.device_prep_dma_interrupt =
+		    ppc460ex_adma_prep_dma_interrupt;
+	}
+
+	/* create a channel */
+	if ((chan = kzalloc(sizeof(*chan), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto err_chan_alloc;
+	}
+
+	tasklet_init(&chan->irq_tasklet, ppc460ex_adma_tasklet,
+	    (unsigned long)chan);
+	irq = irq_of_parse_and_map(ofdev->node, 0);
+	printk("<%s> irq=0x%x\n",__FUNCTION__, irq);
+	if (irq) {
+		ret = request_irq(irq, ppc460ex_adma_eot_handler,
+			IRQF_DISABLED, "adma-compl", chan);
+		if (ret) {
+			printk("Failed to request IRQ %d\n",irq);
+			ret = -EIO;
+			goto err_irq;
+		}
+
+		irq = irq_of_parse_and_map(ofdev->node, 2);
+		printk("<%s> irq=0x%x\n",__FUNCTION__, irq);
+		if (irq) {
+			ret = request_irq(irq, ppc460ex_adma_err_handler,
+				IRQF_DISABLED, "adma-err", chan);
+			if (ret) {
+				printk("Failed to request IRQ %d\n",irq);
+				ret = -EIO;
+				goto err_irq;
+			}
+		}
+	} else
+		ret = -ENXIO;
+
+	chan->device = adev;
+
+	/* pass the platform data */
+	chan->device->odev->dev.platform_data = &ppc460ex_dma_1_data;
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->chain);
+	INIT_LIST_HEAD(&chan->all_slots);
+	chan->common.device = &adev->common;
+	list_add_tail(&chan->common.device_node, &adev->common.channels);
+
+	dev_dbg(&ofdev->dev, "AMCC(R) PPC460 ADMA Engine found [%d]: "
+	  "( %s%s%s%s%s%s%s%s)\n",
+	  adev->id,
+	  dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq_xor " : "",
+	  dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_zero_sum " :
+	    "",
+	  dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
+	  dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_zero_sum " :
+	    "",
+	  dma_has_cap(DMA_MEMSET, adev->common.cap_mask)  ? "memset " : "",
+	  dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
+	  dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "int " : "");
+
+	dma_async_device_register(&adev->common);
+	ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+	printk("<%s> ret=0x%x\n", __FUNCTION__,ret);
+	if (ref) {
+		ref->chan = &chan->common;
+		INIT_LIST_HEAD(&ref->node);
+		list_add_tail(&ref->node, &ppc_adma_chan_list);
+	} else
+		printk(KERN_WARNING "%s: failed to allocate channel reference!\n",
+		       __FUNCTION__);
+	goto out;
+
+err_irq:
+	/* dev_id must match what request_irq() registered */
+	free_irq(irq, chan);
+err_chan_alloc:
+	kfree(chan);
+err_dma_alloc:
+	if (adev->dma_desc_pool_virt) {
+		if (adev->desc_memory == ADMA_DESC_MEM_OCM)
+			ocm_free(adev->dma_desc_pool_virt);
+		else
+			dma_free_coherent(&ofdev->dev, DMA1_FIFO_SIZE << 2,
+					adev->dma_desc_pool_virt,
+					adev->dma_desc_pool);
+	}
+err_adev_alloc:
+	kfree(adev);
+out:
+	return ret;
+}
+
+/**
+ * ppc460ex_chan_start_null_xor - initiate the first XOR operation (DMA engines
+ * use FIFOs (as opposite to chains used in XOR) so this is a XOR
+ * specific operation)
+ */
+static void ppc460ex_chan_start_null_xor(ppc460ex_ch_t *chan)
+{
+	ppc460ex_desc_t *sw_desc, *group_start;
+	dma_cookie_t cookie;
+	int slot_cnt, slots_per_op;
+
+	dev_dbg(chan->device->common.dev,
+		"ppc460ex adma%d: %s\n", chan->device->id, __FUNCTION__);
+
+	spin_lock_bh(&chan->lock);
+	slot_cnt = ppc460ex_chan_xor_slot_count(0, 2, &slots_per_op);
+	sw_desc = ppc460ex_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+	if (sw_desc) {
+		group_start = sw_desc->group_head;
+		list_splice_init(&sw_desc->group_list, &chan->chain);
+		async_tx_ack(&sw_desc->async_tx);
+		ppc460ex_desc_init_null_xor(group_start);
+
+		/* cookies 0 and 1 are reserved; wrap past them */
+		cookie = chan->common.cookie;
+		cookie++;
+		if (cookie <= 1)
+			cookie = 2;
+
+		/* initialize the completed cookie to be less than
+		 * the most recently used cookie
+		 */
+		chan->completed_cookie = cookie - 1;
+		chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+
+		/* channel should not be busy */
+		BUG_ON(ppc460ex_chan_is_busy(chan));
+
+		/* set the descriptor address */
+		ppc460ex_chan_set_first_xor_descriptor(chan, sw_desc);
+
+		/* run the descriptor */
+		ppc460ex_chan_run(chan);
+	} else
+		printk(KERN_ERR "ppc460ex adma%d"
+			" failed to allocate null descriptor\n",
+			chan->device->id);
+	spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * ppc460ex_test_raid6 - test are RAID-6 capabilities enabled successfully.
+ * For this we just perform one WXOR operation with the same source
+ * and destination addresses, the GF-multiplier is 1; so if RAID-6
+ * capabilities are enabled then we'll get src/dst filled with zero.
+ *
+ * Returns 0 when the page ends up zeroed (RAID-6 operational),
+ * -EINVAL when it does not, -ENOMEM/-EFAULT on setup failures.
+ */
+static int ppc460ex_test_raid6 (ppc460ex_ch_t *chan)
+{
+	ppc460ex_desc_t *sw_desc, *iter;
+	struct page *pg;
+	char *a;
+	dma_addr_t dma_addr, addrs[2];
+	unsigned long op = 0;
+	int rval = 0;
+
+	if (!ppc460ex_r6_tchan)
+		return -1;
+	/*FIXME*/
+
+	set_bit(PPC460EX_DESC_WXOR, &op);
+
+	pg = alloc_page(GFP_KERNEL);
+	if (!pg)
+		return -ENOMEM;
+
+	spin_lock_bh(&chan->lock);
+	sw_desc = ppc460ex_adma_alloc_slots(chan, 1, 1);
+	if (sw_desc) {
+		/* 1 src, 1 dst, int_ena, WXOR */
+		ppc460ex_desc_init_pq(sw_desc, 1, 1, 1, op);
+		list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+			ppc460ex_desc_set_byte_count(iter, chan, PAGE_SIZE);
+			iter->unmap_len = PAGE_SIZE;
+		}
+	} else {
+		rval = -EFAULT;
+		spin_unlock_bh(&chan->lock);
+		goto exit;
+	}
+	spin_unlock_bh(&chan->lock);
+
+	/* Fill the test page with ones */
+	memset(page_address(pg), 0xFF, PAGE_SIZE);
+	dma_addr = dma_map_page(&chan->device->odev->dev, pg, 0, PAGE_SIZE,
+				    DMA_BIDIRECTIONAL);
+
+	/* Setup addresses */
+	ppc460ex_adma_pqxor_set_src(sw_desc, dma_addr, 0);
+	ppc460ex_adma_pqxor_set_src_mult(sw_desc, 1, 0,0);
+	addrs[0] = dma_addr;
+	addrs[1] = 0;
+	ppc460ex_adma_pqxor_set_dest(sw_desc, addrs, DMA_PREP_HAVE_P);
+
+	async_tx_ack(&sw_desc->async_tx);
+	sw_desc->async_tx.callback = ppc460ex_test_rad6_callback;
+	sw_desc->async_tx.callback_param = NULL;
+
+	init_completion(&ppc460ex_r6_test_comp);
+
+	ppc460ex_adma_tx_submit(&sw_desc->async_tx);
+	ppc460ex_adma_issue_pending(&chan->common);
+
+	wait_for_completion(&ppc460ex_r6_test_comp);
+
+	/* Now check is the test page zeroed */
+	a = page_address(pg);
+	if ((*(u32*)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4)==0) {
+		/* page is zero - RAID-6 enabled */
+		rval = 0;
+	} else {
+		/* RAID-6 was not enabled */
+		rval = -EINVAL;
+	}
+exit:
+	__free_page(pg);
+	return rval;
+}
+/**
+ * ppc460ex_test_raid5 - test are RAID-5 capabilities enabled successfully.
+ * For this we just perform one WXOR operation with the same source
+ * and destination addresses, the GF-multiplier is 1; so if RAID-5
+ * capabilities are enabled then we'll get src/dst filled with zero.
+ *
+ * Returns 0 when the test page ends up zeroed (RAID-5 operational),
+ * -EINVAL when it does not, -ENOMEM/-EFAULT on setup failures.
+ */
+static int ppc460ex_test_raid5 (ppc460ex_ch_t *chan)
+{
+	ppc460ex_desc_t *sw_desc, *iter;
+	struct page *pg;
+	char *a;
+	dma_addr_t dma_addr;
+	unsigned long op = 0;
+	int rval = 0;
+
+	if (!ppc460ex_r5_tchan)
+		return -1;
+	/*FIXME*/
+
+	set_bit(PPC460EX_DESC_WXOR, &op);
+
+	pg = alloc_page(GFP_KERNEL);
+	if (!pg)
+		return -ENOMEM;
+
+	spin_lock_bh(&chan->lock);
+	sw_desc = ppc460ex_adma_alloc_slots(chan, 1, 1);
+	if (sw_desc) {
+		/* 1 src, 1 dst, int_ena, WXOR */
+		//ppc460ex_desc_init_pqxor(sw_desc, 1, 1, 1, op);
+		ppc460ex_desc_init_dma01_xor(sw_desc, 1, 1, 1, op);
+		list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+			ppc460ex_desc_set_byte_count(iter, chan, PAGE_SIZE);
+			iter->unmap_len = PAGE_SIZE;
+		}
+	} else {
+		rval = -EFAULT;
+		spin_unlock_bh(&chan->lock);
+		goto exit;
+	}
+	spin_unlock_bh(&chan->lock);
+
+	/* Fill the test page with ones */
+	memset(page_address(pg), 0xFF, PAGE_SIZE);
+#if 0
+	int i = 0;
+	char *pg_addr = page_address(pg);
+	for(i=0;i < PAGE_SIZE; i+=64)
+		printk("addr = 0x%x data = 0x%x\n",pg_addr + i,*(pg_addr+i));
+#endif
+	//dma_addr = dma_map_page(&chan->device->common, pg, 0, PAGE_SIZE,
+	dma_addr = dma_map_page(&chan->device->odev->dev, pg, 0, PAGE_SIZE,
+				    DMA_BIDIRECTIONAL);
+
+	/* Setup addresses: same page as single source and destination */
+	ppc460ex_adma_dma01_xor_set_src(sw_desc, dma_addr, 0);
+	ppc460ex_adma_dma01_xor_set_src_mult(sw_desc, 1, 0);
+	ppc460ex_adma_dma01_xor_set_dest(sw_desc, dma_addr, 0);
+
+	async_tx_ack(&sw_desc->async_tx);
+	sw_desc->async_tx.callback = ppc460ex_test_callback;
+	sw_desc->async_tx.callback_param = NULL;
+
+	init_completion(&ppc460ex_r5_test_comp);
+
+	ppc460ex_adma_tx_submit(&sw_desc->async_tx);
+	ppc460ex_adma_issue_pending(&chan->common);
+
+	wait_for_completion(&ppc460ex_r5_test_comp);
+
+	/*Make sure cache is flushed to memory*/
+	dma_addr = dma_map_page(&chan->device->odev->dev, pg, 0, PAGE_SIZE,
+				    DMA_BIDIRECTIONAL);
+	/* Now check is the test page zeroed */
+	a = page_address(pg);
+#if 0
+	i = 0;
+	for(i=0;i < PAGE_SIZE; i+=64)
+		printk("addr = 0x%x data = 0x%x\n",a + i,*(a+i));
+#endif
+	if ((*(u32*)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4)==0) {
+		/* page is zero - RAID-5 enabled */
+		rval = 0;
+	} else {
+		/* RAID-5 was not enabled */
+		rval = -EINVAL;
+	}
+	pr_dma(__LINE__,__FUNCTION__);
+exit:
+	__free_page(pg);
+	return rval;
+}
+#if 1
+/* OF device-tree match table: binds to nodes with compatible = "amcc,adma". */
+static struct of_device_id adma_match[] =
+{
+	{
+		.compatible      = "amcc,adma",
+	},
+	{},
+};
+/* Active driver registration (legacy of_platform_driver API). */
+static struct of_platform_driver ppc460ex_adma_driver = {
+	.name = "adma",
+	.match_table = adma_match,
+
+	.probe = ppc460ex_adma_probe,
+	.remove = ppc460ex_adma_remove,
+};
+#else
+/* Dead branch (see '#if 1' above).
+ * NOTE(review): '.owner = "marri"' would not compile if enabled --
+ * .owner is a 'struct module *', not a string. */
+static struct platform_driver ppc460ex_adma_driver= {
+	.probe = ppc460ex_adma_probe,
+	.remove = ppc460ex_adma_remove,
+	.driver = {
+		.owner = "marri",
+		.name = "PPC460EX-ADMA",
+	},
+};
+#endif
+
+/**
+ * /proc interface
+ */
+/* /proc read handler: report the GF polynomial the RAID engine uses.
+ * Returns the number of bytes written into @page. */
+static int ppc460ex_poly_read (char *page, char **start, off_t off,
+	int count, int *eof, void *data)
+{
+	char *out = page;
+	u32 poly;
+
+#ifdef CONFIG_440SP
+	/* 440SP has fixed polynomial */
+	poly = 0x4d;
+#else
+	/* low 8 bits of the polynomial live in MQ0_CFBHL */
+	poly = (mfdcr(DCRN_MQ0_CFBHL) >> MQ0_CFBHL_POLY) & 0xFF;
+#endif
+
+	out += sprintf (out, "PPC460EX RAID-5 driver uses 0x1%02x polynomial.\n",
+			poly);
+
+	return out - page;
+}
+
+/* /proc write handler: set the GF polynomial (e.g. "0x14D" or "0x11d").
+ * Returns @count on success or a negative errno. */
+static int ppc460ex_poly_write (struct file *file, const char __user *buffer,
+	unsigned long count, void *data)
+{
+	/* up to 6 input chars plus a NUL terminator */
+	char tmp[7];
+	unsigned long val, rval;
+
+#ifdef CONFIG_440SP
+	/* 440SP use default 0x14D polynomial only */
+	return -EINVAL;
+#endif
+
+	/* BUG FIX: the original used 'char tmp[6]' with 'count > 6', so
+	 * count == 6 made 'tmp[count] = 0' write one byte past the buffer. */
+	if (!count || count > sizeof(tmp) - 1)
+		return -EINVAL;
+
+	if (copy_from_user(tmp, buffer, count))
+		return -EFAULT;
+
+	tmp[count] = 0;
+	val = simple_strtoul(tmp, NULL, 16);
+
+	/* polynomial must fit in 9 bits (implicit leading 1 + 8 stored) */
+	if (val & ~0x1FF)
+		return -EINVAL;
+
+	/* read-modify-write the polynomial field of MQ0_CFBHL */
+	val &= 0xFF;
+	rval = mfdcr(DCRN_MQ0_CFBHL);
+	rval &= ~(0xFF << MQ0_CFBHL_POLY);
+	rval |= val << MQ0_CFBHL_POLY;
+	mtdcr(DCRN_MQ0_CFBHL, rval);
+
+	return count;
+}
+
+/* /proc read handler: report whether h/w RAID-6 has been unlocked. */
+static int ppc460ex_r6ena_read (char *page, char **start, off_t off,
+	int count, int *eof, void *data)
+{
+	char *p = page;
+
+	/* BUG FIX: the status strings already end in '\n'; the original
+	 * "%s\n" format printed a spurious blank line. */
+	p += sprintf(p, "%s",
+		ppc460ex_r6_enabled ?
+		"PPC460Ex RAID-6 capabilities are ENABLED.\n" :
+		"PPC460Ex RAID-6 capabilities are DISABLED.\n");
+
+	return p - page;
+}
+
+/* /proc write handler: accept the RAID-6 unlock key (hex string, e.g.
+ * "0xffffffff"), write it to MQ0_XORBA and self-test the engine. */
+static int ppc460ex_r6ena_write (struct file *file, const char __user *buffer,
+	unsigned long count, void *data)
+{
+	/* e.g. 0xffffffff, plus a NUL terminator */
+	char tmp[12];
+	unsigned long val;
+
+	if (!count || count > sizeof(tmp) - 1)
+		return -EINVAL;
+
+	if (!ppc460ex_r6_tchan)
+		return -EFAULT;
+
+	if (copy_from_user(tmp, buffer, count))
+		return -EFAULT;
+	/* BUG FIX: the original never terminated the buffer, so
+	 * simple_strtoul() could read past the copied bytes. */
+	tmp[count] = '\0';
+
+	/* Write a key */
+	val = simple_strtoul(tmp, NULL, 16);
+	mtdcr(DCRN_MQ0_XORBA, val);
+	isync();
+
+	/* Verify does it really work now */
+	if (ppc460ex_test_raid6(ppc460ex_r6_tchan) == 0) {
+		printk(KERN_INFO "PPC460Ex RAID-6 has been activated "
+			"successfully\n");
+		/* BUG FIX: the original set this to 0 on success, so a
+		 * working engine was still reported (and treated) as
+		 * disabled; mirror the RAID-5 handler and set it. */
+		ppc460ex_r6_enabled = 1;
+		ppc460ex_r5_enabled = 0;
+	} else {
+		printk(KERN_INFO "PPC460Ex RAID-6 hasn't been activated!"
+			" Error key ?\n");
+		ppc460ex_r6_enabled = 0;
+	}
+
+	return count;
+}
+/* /proc read handler: report whether h/w RAID-5 has been unlocked. */
+static int ppc460ex_r5ena_read (char *page, char **start, off_t off,
+	int count, int *eof, void *data)
+{
+	char *p = page;
+
+	/* BUG FIX: the status strings already end in '\n'; the original
+	 * "%s\n" format printed a spurious blank line. */
+	p += sprintf(p, "%s",
+		ppc460ex_r5_enabled ?
+		"PPC460Ex RAID-r5 capabilities are ENABLED.\n" :
+		"PPC460Ex RAID-r5 capabilities are DISABLED.\n");
+
+	return p - page;
+}
+
+/* /proc write handler: accept the RAID-5 unlock key (hex string, e.g.
+ * "0xffffffff"), write it to MQ0_XORBA and self-test the engine. */
+static int ppc460ex_r5ena_write (struct file *file, const char __user *buffer,
+	unsigned long count, void *data)
+{
+	/* e.g. 0xffffffff, plus a NUL terminator */
+	char tmp[12];
+	unsigned long val;
+
+	/* BUG FIX: the upper-bound check was commented out, letting an
+	 * oversized write overflow 'tmp' via copy_from_user(). */
+	if (!count || count > sizeof(tmp) - 1)
+		return -EINVAL;
+
+	if (!ppc460ex_r5_tchan)
+		return -EFAULT;
+
+	if (copy_from_user(tmp, buffer, count))
+		return -EFAULT;
+	/* BUG FIX: terminate before parsing; the original handed an
+	 * unterminated buffer to simple_strtoul(). */
+	tmp[count] = '\0';
+
+	/* Write a key */
+	val = simple_strtoul(tmp, NULL, 16);
+	mtdcr(DCRN_MQ0_XORBA, val);
+	isync();
+
+	/* Verify does it really work now */
+	if (ppc460ex_test_raid5(ppc460ex_r5_tchan) == 0) {
+		printk(KERN_INFO "PPC460Ex RAID-5 has been activated "
+			"successfully\n");
+		ppc460ex_r5_enabled = 1;
+		ppc460ex_r6_enabled = 0;
+	} else {
+		printk(KERN_INFO "PPC460Ex RAID-5 hasn't been activated!"
+			" Error key ?\n");
+		ppc460ex_r5_enabled = 0;
+	}
+
+	return count;
+}
+
+/* Module init: register the OF platform driver and create the /proc
+ * controls used to unlock the h/w RAID-5/RAID-6 engines. */
+static int __init ppc460ex_adma_init (void)
+{
+	int rval;
+	struct proc_dir_entry *p;
+
+	ppc460ex_configure_raid_devices();
+	rval = of_register_platform_driver(&ppc460ex_adma_driver);
+	//rval = platform_driver_register(&ppc460ex_adma_driver);
+
+	if (rval == 0) {
+		/* Create /proc entries (RAID-5 root) */
+		ppc460ex_proot = proc_mkdir(PPC460EX_R5_PROC_ROOT, NULL);
+		if (!ppc460ex_proot) {
+			printk(KERN_ERR "%s: failed to create %s proc "
+				"directory\n",__FUNCTION__,PPC460EX_R5_PROC_ROOT);
+			/* User will not be able to enable h/w RAID-6;
+			 * driver registration succeeded, so rval is 0 here */
+			return rval;
+		}
+
+		/* GF polynomial to use */
+		p = create_proc_entry("poly", 0, ppc460ex_proot);
+		if (p) {
+			p->read_proc = ppc460ex_poly_read;
+			p->write_proc = ppc460ex_poly_write;
+		}
+
+		/* RAID-5 h/w enable entry */
+		p = create_proc_entry("enable", 0, ppc460ex_proot);
+		if (p) {
+			p->read_proc = ppc460ex_r5ena_read;
+			p->write_proc = ppc460ex_r5ena_write;
+		}
+		/* Create /proc entries (RAID-6 root) */
+		ppc460ex_pqroot = proc_mkdir(PPC460EX_R6_PROC_ROOT, NULL);
+		if (!ppc460ex_pqroot) {
+			printk(KERN_ERR "%s: failed to create %s proc "
+				"directory\n",__FUNCTION__,PPC460EX_R6_PROC_ROOT);
+			/* User will not be able to enable h/w RAID-6 */
+			return rval;
+		}
+
+		/* GF polynomial to use */
+		p = create_proc_entry("poly", 0, ppc460ex_pqroot);
+		if (p) {
+			p->read_proc = ppc460ex_poly_read;
+			p->write_proc = ppc460ex_poly_write;
+		}
+
+		/* RAID-6 h/w enable entry */
+		p = create_proc_entry("enable", 0, ppc460ex_pqroot);
+		if (p) {
+			p->read_proc = ppc460ex_r6ena_read;
+			p->write_proc = ppc460ex_r6ena_write;
+		}
+	}
+	return rval;
+}
+
+#if 0
+/* Module exit is compiled out: once loaded, the driver never
+ * unregisters (module unload would leak the registration). */
+static void __exit ppc460ex_adma_exit (void)
+{
+	of_unregister_platform_driver(&ppc460ex_adma_driver);
+	return;
+}
+module_exit(ppc460ex_adma_exit);
+#endif
+
+module_init(ppc460ex_adma_init);
+
+MODULE_AUTHOR("Tirumala Marri<tmarri@amcc.com>");
+MODULE_DESCRIPTION("PPC460EX ADMA Engine Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc460ex-plbadma.c b/drivers/dma/ppc460ex-plbadma.c
new file mode 100644
index 00000000000..44467211cba
--- /dev/null
+++ b/drivers/dma/ppc460ex-plbadma.c
@@ -0,0 +1,2009 @@
+/*
+ * Copyright(c) 2006 DENX Engineering. All rights reserved.
+ *
+ * Author: Tirumala Marr <tmarri@amcc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports the asynchronous DMA copy and RAID engines available
+ * on the AMCC PPC460ex processors.
+ * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ * ADMA driver written by D. Williams.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/of_platform.h>
+#include <linux/proc_fs.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/ppc460ex_plb_adma.h>
+#include <asm/ppc460ex_xor.h>
+#define PPC44x_SRAM_ADDR 0x00000000400048000ULL
+//#define PPC44x_SRAM_SIZE 0x10000 /* 64 Kb*/
+#define PPC44x_SRAM_SIZE 0x8000 /* 32 Kb*/
+//#define CONFIG_ADMA_SRAM 1
+
+/* The list of channels exported by ppc460ex ADMA */
+struct list_head
+ppc_adma_p_chan_list = LIST_HEAD_INIT(ppc_adma_p_chan_list);
+
+/* This flag is set when want to refetch the xor chain in the interrupt
+ * handler
+ */
+static u32 do_xor_refetch = 0;
+
+/* Pointers to last submitted to DMA0, DMA1 CDBs */
+static ppc460ex_p_desc_t *chan_last_sub[4];
+static ppc460ex_p_desc_t *chan_first_cdb[4];
+
+/* Pointer to last linked and submitted xor CB */
+static ppc460ex_p_desc_t *xor_last_linked = NULL;
+static ppc460ex_p_desc_t *xor_last_submit = NULL;
+
+
+/* Since RXOR operations use the common register (MQ0_CF2H) for setting-up
+ * the block size in transactions, then we do not allow to activate more than
+ * only one RXOR transactions simultaneously. So use this var to store
+ * the information about is RXOR currently active (PPC460EX_RXOR_RUN bit is
+ * set) or not (PPC460EX_RXOR_RUN is clear).
+ */
+
+/* /proc interface is used here to enable the h/w RAID-6 capabilities
+ */
+static struct proc_dir_entry *ppc460ex_proot;
+
+/* These are used in enable & check routines
+ */
+static u32 ppc460ex_r6_enabled;
+static u32 ppc460ex_r5_enabled;
+static ppc460ex_p_ch_t *ppc460ex_r6_tchan;
+static ppc460ex_p_ch_t *ppc460ex_dma_tchan;
+static struct completion ppc460ex_r6_test_comp;
+static struct completion ppc460ex_r5_test_comp;
+
+#if 1
+/* Debug trace helper: prints "<function> Line:<n>" whenever DCR 0x60
+ * reads non-zero (a runtime debug switch).  The #else branch provides
+ * an empty stub so call sites compile away when tracing is disabled. */
+static inline void pr_dma(int x, char *str)
+{
+	if(mfdcr(0x60)) {
+		printk("<%s> Line:%d\n",str,x);
+	}
+}
+#else
+static inline void pr_dma(int x, char *str)
+{
+}
+#endif
+/* Translate a physical address for >32-bit addressing.  On this
+ * platform no fixup is required, so the address passes through
+ * unchanged; @size is unused. */
+static phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
+{
+	return addr;
+}
+
+
+/******************************************************************************
+ * Command (Descriptor) Blocks low-level routines
+ ******************************************************************************/
+/**
+ * ppc460ex_desc_init_interrupt - initialize the descriptor for INTERRUPT
+ * pseudo operation
+ */
+/* Initialize a CDB as an INTERRUPT pseudo-operation: clear the h/w
+ * descriptor, capture the channel's current CTC value and enable the
+ * channel-interrupt bit. */
+static inline void ppc460ex_desc_init_interrupt (ppc460ex_p_desc_t *desc,
+		ppc460ex_p_ch_t *chan)
+{
+	u32 base = 0;
+	dma_cdb_t *hw_desc;
+
+	hw_desc = desc->hw_desc;
+
+	memset (desc->hw_desc, 0, sizeof(dma_cdb_t));
+	/* map the channel id to its DCR register base */
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+		base = DCR_DMA0_BASE;
+		break;
+	case PPC460EX_PDMA1_ID:
+		base = DCR_DMA1_BASE;
+		break;
+	case PPC460EX_PDMA2_ID:
+		base = DCR_DMA2_BASE;
+		break;
+	case PPC460EX_PDMA3_ID:
+		base = DCR_DMA3_BASE;
+		break;
+	default:
+		printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
+				__FUNCTION__);
+		BUG();
+		break;
+	}
+	hw_desc->ctrl = mfdcr(base + DCR_DMA2P40_CTC0);
+	set_bit(PPC460EX_DESC_INT, &desc->flags);
+	/* BUG FIX: set_bit() takes a bitmap *address*; the original passed
+	 * the value of hw_desc->ctrl as a pointer, corrupting whatever
+	 * memory that value happened to address. */
+	set_bit(DMA_CIE_ENABLE, (unsigned long *)&hw_desc->ctrl);
+}
+
+/**
+ * ppc460ex_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ */
+/* Initialize a CDB for a MEMCPY operation: one source, one destination,
+ * with the completion-interrupt flag latched from @flags. */
+static inline void ppc460ex_desc_init_memcpy(ppc460ex_p_desc_t *desc,
+		unsigned long flags)
+{
+	/* start from a clean hardware descriptor */
+	memset(desc->hw_desc, 0, sizeof(dma_cdb_t));
+	desc->hw_next = NULL;
+	desc->src_cnt = 1;
+	desc->dst_cnt = 1;
+
+	/* record whether the client asked for a completion interrupt */
+	if (flags & DMA_PREP_INTERRUPT)
+		set_bit(PPC460EX_DESC_INT, &desc->flags);
+	else
+		clear_bit(PPC460EX_DESC_INT, &desc->flags);
+}
+
+/**
+ * ppc460ex_desc_init_memset - initialize the descriptor for MEMSET operation
+ */
+/* Initialize a CDB for a MEMSET operation.
+ * NOTE(review): @value (the fill byte) is currently unused here --
+ * presumably the fill pattern is programmed elsewhere; confirm.
+ * The original also declared an unused 'hw_desc' local (removed). */
+static inline void ppc460ex_desc_init_memset(ppc460ex_p_desc_t *desc, int value,
+		unsigned long flags)
+{
+	memset (desc->hw_desc, 0, sizeof(dma_cdb_t));
+	desc->hw_next = NULL;
+	desc->src_cnt = 1;
+	desc->dst_cnt = 1;
+
+	if (flags & DMA_PREP_INTERRUPT)
+		set_bit(PPC460EX_DESC_INT, &desc->flags);
+	else
+		clear_bit(PPC460EX_DESC_INT, &desc->flags);
+}
+
+/**
+ * ppc460ex_desc_set_src_addr - set source address into the descriptor
+ */
+/* Set the source address into a CDB.  When @addrh is zero the 32-bit
+ * @addrl is run through fixup_bigphys_addr() and split into hi/lo. */
+static inline void ppc460ex_desc_set_src_addr( ppc460ex_p_desc_t *desc,
+				ppc460ex_p_ch_t *chan,
+				dma_addr_t addrh, dma_addr_t addrl)
+{
+	dma_cdb_t *dma_hw_desc;
+	phys_addr_t addr64, tmplow, tmphi;
+
+	dma_hw_desc = desc->hw_desc;
+	/* The switch only validates the channel id; the original also
+	 * computed a DCR 'base' per channel that was never used, which is
+	 * dropped here (mirrors ppc460ex_desc_set_dest_addr). */
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+	case PPC460EX_PDMA1_ID:
+	case PPC460EX_PDMA2_ID:
+	case PPC460EX_PDMA3_ID:
+		break;
+	default:
+		BUG();
+	}
+	if (!addrh) {
+		addr64 = fixup_bigphys_addr(addrl, sizeof(phys_addr_t));
+		tmphi = (addr64 >> 32);
+		tmplow = (addr64 & 0xFFFFFFFF);
+	} else {
+		tmphi = addrh;
+		tmplow = addrl;
+	}
+	dma_hw_desc->src_hi = tmphi;
+	dma_hw_desc->src_lo = tmplow;
+}
+
+
+/**
+ * ppc460ex_desc_set_dest_addr - set destination address into the descriptor
+ */
+/* Set the destination address into a CDB.  When @addrh is zero the
+ * 32-bit @addrl is run through fixup_bigphys_addr() and split. */
+static inline void ppc460ex_desc_set_dest_addr(ppc460ex_p_desc_t *desc,
+		ppc460ex_p_ch_t *chan,
+		dma_addr_t addrh, dma_addr_t addrl)
+{
+	dma_cdb_t *cdb = desc->hw_desc;
+	phys_addr_t hi = addrh;
+	phys_addr_t lo = addrl;
+
+	/* only the four PLB DMA channels are legal here */
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+	case PPC460EX_PDMA1_ID:
+	case PPC460EX_PDMA2_ID:
+	case PPC460EX_PDMA3_ID:
+		break;
+	default :
+		BUG();
+	}
+
+	if (!addrh) {
+		/* 32-bit handle: expand through the big-phys fixup */
+		phys_addr_t full = fixup_bigphys_addr(addrl,
+				sizeof(phys_addr_t));
+		hi = (full >> 32);
+		lo = (full & 0xFFFFFFFF);
+	}
+	cdb->dest_hi = hi;
+	cdb->dest_lo = lo;
+}
+
+/**
+ * ppc460ex_desc_set_byte_count - set number of data bytes involved
+ * into the operation
+ */
+/* Program the transfer byte count into a CDB, checking alignment
+ * against the channel's peripheral bus width and scaling by the
+ * channel shift. */
+static inline void ppc460ex_desc_set_byte_count(ppc460ex_p_desc_t *desc,
+		ppc460ex_p_ch_t *chan, u32 byte_count)
+{
+	dma_cdb_t *dma_hw_desc;
+	u32 count;
+	u32 error = 0;
+
+	dma_hw_desc = desc->hw_desc;
+	/* BUG FIX: the original checked and stored a local 'count' that
+	 * was initialized to 0 and never assigned from byte_count, so the
+	 * alignment checks always passed and every descriptor was
+	 * programmed with a zero transfer count.  (It also computed an
+	 * unused per-channel DCR base, dropped here.) */
+	count = byte_count;
+	switch (chan->pwidth) {
+	case PW_8:
+		break;
+	case PW_16:
+		if (count & 0x1)
+			error = 1;
+		break;
+	case PW_32:
+		if (count & 0x3)
+			error = 1;
+		break;
+	case PW_64:
+		if (count & 0x7)
+			error = 1;
+		break;
+
+	case PW_128:
+		if (count & 0xf)
+			error = 1;
+		break;
+	default:
+		printk("set_dma_count: invalid bus width: 0x%x\n",
+			chan->pwidth);
+		return;
+	}
+	if (error)
+		printk
+		    ("Warning: set_dma_count count 0x%x bus width %d\n",
+		     count, chan->pwidth);
+
+	/* the h/w counts in bus-width units, not bytes */
+	count = count >> chan->shift;
+	dma_hw_desc->cnt = count;
+}
+
+/**
+ * ppc460ex_desc_set_link - set the address of descriptor following this
+ * descriptor in chain
+ */
+/* Chain @next_desc after @prev_desc.  Only software chaining is done
+ * here; the h/w fetches the chain from the FIFO at append time.
+ * (The original declared an unused local 'tail', removed.) */
+static inline void ppc460ex_desc_set_link(ppc460ex_p_ch_t *chan,
+		ppc460ex_p_desc_t *prev_desc, ppc460ex_p_desc_t *next_desc)
+{
+	unsigned long flags;
+
+	if (unlikely(!prev_desc || !next_desc ||
+		(prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
+		/* If previous next is overwritten something is wrong.
+		 * though we may refetch from append to initiate list
+		 * processing; in this case - it's ok.
+		 */
+		printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
+			"prev->hw_next=0x%p\n", __FUNCTION__, prev_desc,
+			next_desc, prev_desc ? prev_desc->hw_next : 0);
+		BUG();
+	}
+
+	local_irq_save(flags);
+
+	/* do s/w chaining both for DMA and XOR descriptors */
+	prev_desc->hw_next = next_desc;
+
+	/* validate the channel id; no h/w linking needed for PLB DMA */
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+	case PPC460EX_PDMA1_ID:
+	case PPC460EX_PDMA2_ID:
+	case PPC460EX_PDMA3_ID:
+		break;
+	default:
+		BUG();
+	}
+
+	local_irq_restore(flags);
+}
+
+/**
+ * ppc460ex_desc_get_src_addr - extract the source address from the descriptor
+ */
+/* Extract the (low 32 bits of the) source address from a CDB.
+ * Returns 0 for an unknown channel id.  The original computed a
+ * per-channel DCR 'base' that was never used; removed. */
+static inline u32 ppc460ex_desc_get_src_addr(ppc460ex_p_desc_t *desc,
+		ppc460ex_p_ch_t *chan, int src_idx)
+{
+	dma_cdb_t *dma_hw_desc;
+
+	dma_hw_desc = desc->hw_desc;
+
+	/* switch validates the channel id only */
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+	case PPC460EX_PDMA1_ID:
+	case PPC460EX_PDMA2_ID:
+	case PPC460EX_PDMA3_ID:
+		break;
+	default:
+		return 0;
+	}
+	/* PLB DMA CDBs carry a single source; src_idx is ignored */
+	return (dma_hw_desc->src_lo);
+}
+
+/**
+ * ppc460ex_desc_get_dest_addr - extract the destination address from the
+ * descriptor
+ */
+/* Extract the (low 32 bits of the) destination address from a CDB.
+ * Returns 0 for an unknown channel id.  The original computed a
+ * per-channel DCR 'base' that was never used; removed. */
+static inline u32 ppc460ex_desc_get_dest_addr(ppc460ex_p_desc_t *desc,
+		ppc460ex_p_ch_t *chan, int idx)
+{
+	dma_cdb_t *dma_hw_desc;
+
+	dma_hw_desc = desc->hw_desc;
+
+	/* switch validates the channel id only */
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+	case PPC460EX_PDMA1_ID:
+	case PPC460EX_PDMA2_ID:
+	case PPC460EX_PDMA3_ID:
+		break;
+	default:
+		return 0;
+	}
+
+	/* PLB DMA CDBs carry a single destination; idx is ignored */
+	return (dma_hw_desc->dest_lo);
+}
+
+/**
+ * ppc460ex_desc_get_byte_count - extract the byte count from the descriptor
+ */
+/* Extract the programmed transfer count from a CDB.  Returns 0 for an
+ * unknown channel id.  The original computed a per-channel DCR 'base'
+ * that was never used; removed. */
+static inline u32 ppc460ex_desc_get_byte_count(ppc460ex_p_desc_t *desc,
+		ppc460ex_p_ch_t *chan)
+{
+	dma_cdb_t *dma_hw_desc;
+
+	dma_hw_desc = desc->hw_desc;
+
+	/* switch validates the channel id only */
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+	case PPC460EX_PDMA1_ID:
+	case PPC460EX_PDMA2_ID:
+	case PPC460EX_PDMA3_ID:
+		break;
+	default:
+		return 0;
+	}
+	return (dma_hw_desc->cnt);
+}
+
+
+/**
+ * ppc460ex_desc_get_link - get the address of the descriptor that
+ * follows this one
+ */
+/* Return the bus address of the descriptor chained after @desc, or 0
+ * when @desc is the tail of the chain. */
+static inline u32 ppc460ex_desc_get_link(ppc460ex_p_desc_t *desc,
+	ppc460ex_p_ch_t *chan)
+{
+	return desc->hw_next ? desc->hw_next->phys : 0;
+}
+
+/**
+ * ppc460ex_desc_is_aligned - check alignment
+ */
+/* Return 1 when the slot index is aligned to a @num_slots boundary
+ * (num_slots must be a power of two), 0 otherwise. */
+static inline int ppc460ex_desc_is_aligned(ppc460ex_p_desc_t *desc,
+	int num_slots)
+{
+	return !(desc->idx & (num_slots - 1));
+}
+
+
+
+/******************************************************************************
+ * ADMA channel low-level routines
+ ******************************************************************************/
+
+static inline u32 ppc460ex_chan_get_current_descriptor(ppc460ex_p_ch_t *chan);
+static inline void ppc460ex_chan_append(ppc460ex_p_ch_t *chan);
+
+/*
+ * ppc460ex_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
+ */
+static inline void ppc460ex_adma_device_clear_eot_status (ppc460ex_p_ch_t *chan)
+{
+ u8 *p = chan->dma_desc_pool_virt;
+ dma_cdb_t *cdb;
+ u32 rv ;
+ u32 base;
+
+ switch (chan->chan_id) {
+ case PPC460EX_PDMA0_ID:
+ base = DCR_DMA0_BASE;
+ break;
+ case PPC460EX_PDMA1_ID:
+ base = DCR_DMA1_BASE;
+ break;
+ case PPC460EX_PDMA2_ID:
+ base = DCR_DMA2_BASE;
+ break;
+ case PPC460EX_PDMA3_ID:
+ base = DCR_DMA3_BASE;
+ break;
+
+ rv = mfdcr(base + DCR_DMA2P40_CR0) & ((DMA_CH0_ERR >> chan->chan_id));
+ if (rv) {
+ printk("DMA%d err status: 0x%x\n", chan->device->id,
+ rv);
+ /* write back to clear */
+ mtdcr(base + DCR_DMA2P40_CR0, rv);
+ }
+ break;
+ default:
+ break;
+ }
+
+}
+
+/*
+ * ppc460ex_chan_is_busy - get the channel status
+ */
+
+/* Return 1 when the channel's h/w engine is busy, 0 when idle. */
+static inline int ppc460ex_chan_is_busy(ppc460ex_p_ch_t *chan)
+{
+	u32 base = 0;
+
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+		base = DCR_DMA0_BASE;
+		break;
+	case PPC460EX_PDMA1_ID:
+		base = DCR_DMA1_BASE;
+		break;
+	case PPC460EX_PDMA2_ID:
+		base = DCR_DMA2_BASE;
+		break;
+	case PPC460EX_PDMA3_ID:
+		base = DCR_DMA3_BASE;
+		break;
+	default:
+		BUG();
+	}
+	/* BUG FIX: the original wrote mfdcr((DCR_DMA2P40_SR) & 0x00000800),
+	 * masking the DCR register *number* instead of the value read, and
+	 * ignored 'base' entirely.  Read the status register and test the
+	 * busy bit.  NOTE(review): confirm SR is addressed per-channel via
+	 * 'base' like the other DMA2P40 registers in this file. */
+	return (mfdcr(base + DCR_DMA2P40_SR) & 0x00000800) ? 1 : 0;
+}
+
+/**
+ * ppc460ex_dma_put_desc - put DMA0,1 descriptor to FIFO
+ */
+/* Push one descriptor's bus address into the channel's scatter/gather
+ * FIFO and (re)enable scatter/gather for that channel. */
+static inline void ppc460ex_dma_put_desc(ppc460ex_p_ch_t *chan,
+		ppc460ex_p_desc_t *desc)
+{
+	/* BUG FIX: 'control' was OR-ed into while uninitialized (undefined
+	 * behaviour).  NOTE(review): it is still never written to any
+	 * register -- confirm whether an mtdcr of the control word was
+	 * intended. */
+	unsigned int control = 0;
+	u32 sg_cmd;
+	u32 sg_hi = 0;	/* BUG FIX: was uninitialized under PPC4xx_DMA_64BIT */
+	u32 sg_lo;
+	u32 base = 0;
+
+	sg_lo = desc->phys;
+
+	control |= (chan->mode | DMA_CE_ENABLE);
+	control |= DMA_BEN;
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+		base = DCR_DMA0_BASE;
+		break;
+	case PPC460EX_PDMA1_ID:
+		base = DCR_DMA1_BASE;
+		break;
+	case PPC460EX_PDMA2_ID:
+		base = DCR_DMA2_BASE;
+		break;
+	case PPC460EX_PDMA3_ID:
+		base = DCR_DMA3_BASE;
+		break;
+	default:
+		BUG();
+	}
+	chan->in_use = 1;
+	/* enable scatter/gather for this channel in the common SGC reg */
+	sg_cmd = mfdcr(DCR_DMA2P40_SGC);
+	sg_cmd = sg_cmd | SSG_ENABLE(chan->chan_id);
+	sg_cmd = sg_cmd & 0xF0FFFFFF;
+	mtdcr(base + DCR_DMA2P40_SGL0, sg_lo);
+#ifdef PPC4xx_DMA_64BIT
+	mtdcr(base + DCR_DMA2P40_SGH0, sg_hi);
+#endif
+	mtdcr(DCR_DMA2P40_SGC,sg_cmd);
+}
+
+/**
+ * ppc460ex_chan_append - update the h/w chain in the channel
+ */
+/* Flush newly-linked software descriptors into the channel's h/w FIFO,
+ * starting the engine if it has never been kicked.  (The original
+ * declared an unused local 'flags'; removed.) */
+static inline void ppc460ex_chan_append(ppc460ex_p_ch_t *chan)
+{
+	ppc460ex_p_desc_t *iter;
+	u32 cur_desc;
+
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+	case PPC460EX_PDMA1_ID:
+	case PPC460EX_PDMA2_ID:
+	case PPC460EX_PDMA3_ID:
+		cur_desc = ppc460ex_chan_get_current_descriptor(chan);
+
+		if (likely(cur_desc)) {
+			/* engine already running: continue after the last
+			 * CDB we handed to the h/w */
+			iter = chan_last_sub[chan->device->id];
+			BUG_ON(!iter);
+		} else {
+			/* first peer */
+			iter = chan_first_cdb[chan->device->id];
+			BUG_ON(!iter);
+			ppc460ex_dma_put_desc(chan, iter);
+			chan->hw_chain_inited = 1;
+		}
+
+		/* is there something new to append */
+		if (!iter->hw_next)
+			return;
+
+		/* flush descriptors from the s/w queue to fifo */
+		list_for_each_entry_continue(iter, &chan->chain, chain_node) {
+			ppc460ex_dma_put_desc(chan, iter);
+			if (!iter->hw_next)
+				break;
+		}
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * ppc460ex_chan_get_current_descriptor - get the currently executed descriptor
+ */
+/* Return the bus address of the CDB currently loaded in the channel's
+ * scatter/gather head register, or 0 if the h/w chain was never
+ * initialized. */
+static inline u32 ppc460ex_chan_get_current_descriptor(ppc460ex_p_ch_t *chan)
+{
+	u32 dcr_base;
+
+	if (unlikely(!chan->hw_chain_inited))
+		/* h/w descriptor chain is not initialized yet */
+		return 0;
+
+	switch (chan->chan_id) {
+	case PPC460EX_PDMA0_ID:
+		dcr_base = DCR_DMA0_BASE;
+		break;
+	case PPC460EX_PDMA1_ID:
+		dcr_base = DCR_DMA1_BASE;
+		break;
+	case PPC460EX_PDMA2_ID:
+		dcr_base = DCR_DMA2_BASE;
+		break;
+	case PPC460EX_PDMA3_ID:
+		dcr_base = DCR_DMA3_BASE;
+		break;
+	default:
+		BUG();
+	}
+
+	return mfdcr(dcr_base + DCR_DMA2P40_SGH0);
+}
+
+
+/******************************************************************************
+ * ADMA device level
+ ******************************************************************************/
+
+static void ppc460ex_chan_start_null_xor(ppc460ex_p_ch_t *chan);
+static int ppc460ex_adma_alloc_chan_resources(struct dma_chan *chan);
+static dma_cookie_t ppc460ex_adma_tx_submit(
+ struct dma_async_tx_descriptor *tx);
+
+static void ppc460ex_adma_set_dest(
+ ppc460ex_p_desc_t *tx,
+ dma_addr_t addr, int index);
+
+
+
+/**
+ * ppc460ex_adma_device_estimate - estimate the efficiency of processing
+ * the operation given on this channel. It's assumed that 'chan' is
+ * capable to process 'cap' type of operation.
+ * @chan: channel to use
+ * @cap: type of transaction
+ * @src_lst: array of source pointers
+ * @src_cnt: number of source operands
+ * @src_sz: size of each source operand
+ */
+/* Estimate how well this channel would process an operation of type
+ * @cap: -1 when PQ ops are requested but RAID-6 is not unlocked,
+ * otherwise 1, plus a bonus point when the channel is idle. */
+int ppc460ex_adma_p_estimate (struct dma_chan *chan,
+	enum dma_transaction_type cap, struct page **src_lst,
+	int src_cnt, size_t src_sz)
+{
+	int score = 1;
+
+	/* PQ work requires the RAID-6 engine to have been enabled first */
+	if ((cap == DMA_PQ || cap == DMA_PQ_ZERO_SUM) &&
+			unlikely(!ppc460ex_r6_enabled))
+		return -1;
+
+	/* channel idleness increases the priority */
+	if (!ppc460ex_chan_is_busy(to_ppc460ex_adma_chan(chan)))
+		score++;
+
+	return score;
+}
+
+/**
+ * ppc460ex_get_group_entry - get group entry with index idx
+ * @tdesc: is the last allocated slot in the group.
+ */
+/* Return the group-list member with index @entry_idx.
+ * @tdesc: the last allocated slot in the group. */
+static inline ppc460ex_p_desc_t *
+ppc460ex_get_group_entry ( ppc460ex_p_desc_t *tdesc, u32 entry_idx)
+{
+	ppc460ex_p_desc_t *iter;
+	int i = 0;
+
+	/* BUG FIX: entry_idx is unsigned, so the original 'entry_idx < 0'
+	 * test was always false; only the upper bound is meaningful.
+	 * (The dead 'iter = tdesc->group_head' initializer was dropped;
+	 * list_for_each_entry overwrites it immediately.) */
+	if (entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
+		printk("%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
+			__func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
+		BUG();
+	}
+	list_for_each_entry(iter, &tdesc->group_list, chain_node) {
+		if (i++ == entry_idx)
+			break;
+	}
+	return iter;
+}
+
+/**
+ * ppc460ex_adma_free_slots - flags descriptor slots for reuse
+ * @slot: Slot to free
+ * Caller must hold &ppc460ex_chan->lock while calling this function
+ */
+/* Mark @slot and its following slots-per-op companions reusable.
+ * Caller must hold &chan->lock. */
+static void ppc460ex_adma_free_slots(ppc460ex_p_desc_t *slot,
+		ppc460ex_p_ch_t *chan)
+{
+	int n;
+
+	/* walk the consecutive slots of this op, clearing each one */
+	for (n = slot->slots_per_op; n > 0; n--) {
+		/*async_tx_clear_ack(&slot->async_tx);*/ /* Don't need to clear. It is hack*/
+		slot->slots_per_op = 0;
+		slot = list_entry(slot->slot_node.next,
+				ppc460ex_p_desc_t,
+				slot_node);
+	}
+}
+
+/* Undo the DMA mappings of a completed descriptor.  PLB DMA CDBs
+ * always carry exactly one source and one destination, so each side
+ * is unmapped at index 0 unless the client asked us to skip it. */
+static void
+ppc460ex_adma_unmap(ppc460ex_p_ch_t *chan, ppc460ex_p_desc_t *desc)
+{
+	dma_addr_t addr;
+
+	/* unmap the destination */
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		addr = ppc460ex_desc_get_dest_addr(desc, chan, 0);
+		dma_unmap_page(&chan->device->odev->dev,
+				addr, desc->unmap_len,
+				DMA_FROM_DEVICE);
+	}
+
+	/* unmap the source */
+	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		addr = ppc460ex_desc_get_src_addr(desc, chan, 0);
+		dma_unmap_page(&chan->device->odev->dev,
+				addr, desc->unmap_len,
+				DMA_TO_DEVICE);
+	}
+}
+/**
+ * ppc460ex_adma_run_tx_complete_actions - call functions to be called
+ * upon complete
+ */
+/* Run completion actions for @desc: invoke the client callback, unmap
+ * DMA addresses for the whole group, and kick dependent operations.
+ * Returns the (possibly updated) completed cookie.  (The original
+ * declared an unused 'enum dma_data_direction dir'; removed.) */
+static dma_cookie_t ppc460ex_adma_run_tx_complete_actions(
+		ppc460ex_p_desc_t *desc,
+		ppc460ex_p_ch_t *chan,
+		dma_cookie_t cookie)
+{
+	int i;
+
+	BUG_ON(desc->async_tx.cookie < 0);
+	if (desc->async_tx.cookie > 0) {
+		cookie = desc->async_tx.cookie;
+		desc->async_tx.cookie = 0;
+
+		/* call the callback (must not sleep or submit new
+		 * operations to this channel)
+		 */
+		if (desc->async_tx.callback)
+			desc->async_tx.callback(
+				desc->async_tx.callback_param);
+
+		/* unmap dma addresses
+		 * (unmap_single vs unmap_page?)
+		 *
+		 * actually, ppc's dma_unmap_page() functions are empty, so
+		 * the following code is just for the sake of completeness
+		 */
+		if (chan && chan->needs_unmap && desc->group_head &&
+				desc->unmap_len) {
+			ppc460ex_p_desc_t *unmap = desc->group_head;
+			/* assume 1 slot per op always */
+			u32 slot_count = unmap->slot_cnt;
+
+			/* Run through the group list and unmap addresses */
+			for (i = 0; i < slot_count; i++) {
+				BUG_ON(!unmap);
+				ppc460ex_adma_unmap(chan, unmap);
+				unmap = unmap->hw_next;
+			}
+			desc->group_head = NULL;
+		}
+	}
+
+	/* run dependent operations */
+	dma_run_dependencies(&desc->async_tx);
+
+	return cookie;
+}
+
+/**
+ * ppc460ex_adma_clean_slot - clean up CDB slot (if ack is set)
+ */
+/* Free @desc's slots if the client has acked it.  Returns 1 when the
+ * end of the cleanable chain was reached (last entry or the CDB the
+ * h/w is currently executing), 0 otherwise. */
+static int ppc460ex_adma_clean_slot(ppc460ex_p_desc_t *desc,
+		ppc460ex_p_ch_t *chan)
+{
+	/* the client is allowed to attach dependent operations
+	 * until 'ack' is set
+	 */
+	if (!async_tx_test_ack(&desc->async_tx))
+		return 0;
+
+	/* leave the last descriptor in the chain
+	 * so we can append to it
+	 */
+	if (list_is_last(&desc->chain_node, &chan->chain) ||
+	    desc->phys == ppc460ex_chan_get_current_descriptor(chan))
+		return 1;
+
+	dev_dbg(chan->device->common.dev, "\tfree slot %x: %d stride: %d\n",
+		desc->phys, desc->idx, desc->slots_per_op);
+
+	list_del(&desc->chain_node);
+	ppc460ex_adma_free_slots(desc, chan);
+	return 0;
+}
+
+/**
+ * __ppc460ex_adma_slot_cleanup - this is the common clean-up routine
+ * which runs through the channel CDBs list until reach the descriptor
+ * currently processed. When routine determines that all CDBs of group
+ * are completed then corresponding callbacks (if any) are called and slots
+ * are freed.
+ */
+static void __ppc460ex_adma_slot_cleanup(ppc460ex_p_ch_t *chan)
+{
+	ppc460ex_p_desc_t *iter, *_iter, *group_start = NULL;
+	dma_cookie_t cookie = 0;
+	u32 current_desc = ppc460ex_chan_get_current_descriptor(chan);
+	int busy = ppc460ex_chan_is_busy(chan);
+	int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
+
+	dev_dbg(chan->device->common.dev, "ppc460ex adma%d: %s\n",
+		chan->device->id, __FUNCTION__);
+
+	if (!current_desc) {
+		/* There were no transactions yet, so
+		 * nothing to clean
+		 */
+		return;
+	}
+
+	/* free completed slots from the chain starting with
+	 * the oldest descriptor
+	 */
+	list_for_each_entry_safe(iter, _iter, &chan->chain,
+					chain_node) {
+		dev_dbg(chan->device->common.dev, "\tcookie: %d slot: %d "
+		    "busy: %d this_desc: %#x next_desc: %#x cur: %#x ack: %d\n",
+			iter->async_tx.cookie, iter->idx, busy, iter->phys,
+			ppc460ex_desc_get_link(iter, chan), current_desc,
+			async_tx_test_ack(&iter->async_tx));
+		prefetch(_iter);
+		prefetch(&_iter->async_tx);
+
+		/* do not advance past the current descriptor loaded into the
+		 * hardware channel,subsequent descriptors are either in process
+		 * or have not been submitted
+		 */
+		if (seen_current)
+			break;
+
+		/* stop the search if we reach the current descriptor and the
+		 * channel is busy, or if it appears that the current descriptor
+		 * needs to be re-read (i.e. has been appended to)
+		 */
+		if (iter->phys == current_desc) {
+			BUG_ON(seen_current++);
+			if (busy || ppc460ex_desc_get_link(iter, chan)) {
+				/* not all descriptors of the group have
+				 * been completed; exit.
+				 */
+				break;
+			}
+		}
+
+		/* detect the start of a group transaction */
+		if (!slot_cnt && !slots_per_op) {
+			slot_cnt = iter->slot_cnt;
+			slots_per_op = iter->slots_per_op;
+			/* single-slot op: no group tracking needed */
+			if (slot_cnt <= slots_per_op) {
+				slot_cnt = 0;
+				slots_per_op = 0;
+			}
+		}
+
+		if (slot_cnt) {
+			if (!group_start)
+				group_start = iter;
+			slot_cnt -= slots_per_op;
+		}
+
+		/* all the members of a group are complete */
+		if (slots_per_op != 0 && slot_cnt == 0) {
+			ppc460ex_p_desc_t *grp_iter, *_grp_iter;
+			int end_of_chain = 0;
+
+			/* clean up the group: re-walk it from its head,
+			 * running completion actions per member */
+			slot_cnt = group_start->slot_cnt;
+			grp_iter = group_start;
+			list_for_each_entry_safe_from(grp_iter, _grp_iter,
+				&chan->chain, chain_node) {
+
+				cookie = ppc460ex_adma_run_tx_complete_actions(
+					grp_iter, chan, cookie);
+
+				slot_cnt -= slots_per_op;
+				end_of_chain = ppc460ex_adma_clean_slot(
+					grp_iter, chan);
+				if (end_of_chain && slot_cnt) {
+					/* Should wait for ZeroSum complete */
+					if (cookie > 0)
+						chan->completed_cookie = cookie;
+					return;
+				}
+
+				if (slot_cnt == 0 || end_of_chain)
+					break;
+			}
+
+			/* the group should be complete at this point */
+			BUG_ON(slot_cnt);
+
+			slots_per_op = 0;
+			group_start = NULL;
+			if (end_of_chain)
+				break;
+			else
+				continue;
+		} else if (slots_per_op) /* wait for group completion */
+			continue;
+
+		/* single (non-group) descriptor: complete and free it */
+		cookie = ppc460ex_adma_run_tx_complete_actions(iter, chan,
+			cookie);
+
+		if (ppc460ex_adma_clean_slot(iter, chan))
+			break;
+	}
+
+	/* the walk must have encountered the in-flight CDB */
+	BUG_ON(!seen_current);
+
+	if (cookie > 0) {
+		chan->completed_cookie = cookie;
+		pr_debug("\tcompleted cookie %d\n", cookie);
+	}
+
+}
+
+/**
+ * ppc460ex_adma_tasklet - clean up watch-dog initiator
+ */
+/* Tasklet entry point; @data carries the channel pointer. */
+static void ppc460ex_adma_tasklet (unsigned long data)
+{
+	__ppc460ex_adma_slot_cleanup((ppc460ex_p_ch_t *)data);
+}
+
+/**
+ * ppc460ex_adma_slot_cleanup - clean up scheduled initiator
+ */
+/* Locked wrapper around __ppc460ex_adma_slot_cleanup(): bh-disabling
+ * spinlock serializes against the tasklet, which walks the same chain. */
+static void ppc460ex_adma_slot_cleanup (ppc460ex_p_ch_t *chan)
+{
+	spin_lock_bh(&chan->lock);
+	__ppc460ex_adma_slot_cleanup(chan);
+	spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * ppc460ex_adma_alloc_slots - allocate free slots (if any)
+ * @chan: channel to allocate descriptor slots on
+ * @num_slots: total number of contiguous slots required
+ * @slots_per_op: slots consumed by each hardware operation
+ *
+ * Searches @chan->all_slots for @num_slots contiguous free slots,
+ * starting after the last allocation and retrying once from the head
+ * of the list.  On success returns the group tail descriptor (whose
+ * group_head points at the first slot of the group); on failure
+ * schedules the cleanup tasklet to reclaim slots and returns NULL.
+ * Caller must hold chan->lock.
+ */
+static ppc460ex_p_desc_t *ppc460ex_adma_alloc_slots(
+		ppc460ex_p_ch_t *chan, int num_slots,
+		int slots_per_op)
+{
+	ppc460ex_p_desc_t *iter = NULL, *_iter, *alloc_start = NULL;
+	struct list_head chain = LIST_HEAD_INIT(chain);
+	int slots_found, retry = 0;
+	/* was "static empty_slot_cnt;" declared mid-function after
+	 * statements: implicit int, and illegal placement in C90 */
+	static int empty_slot_cnt;
+
+	BUG_ON(!num_slots || !slots_per_op);
+	/* start search from the last allocated descriptor
+	 * if a contiguous allocation can not be found start searching
+	 * from the beginning of the list
+	 */
+retry:
+	slots_found = 0;
+	if (retry == 0)
+		iter = chan->last_used;
+	else
+		iter = list_entry(&chan->all_slots, ppc460ex_p_desc_t,
+				slot_node);
+	prefetch(iter);
+	list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
+			slot_node) {
+		prefetch(_iter);
+		prefetch(&_iter->async_tx);
+		if (iter->slots_per_op) {
+			/* slot busy - restart the contiguous search */
+			slots_found = 0;
+			continue;
+		}
+
+		/* start the allocation if the slot is correctly aligned */
+		if (!slots_found++)
+			alloc_start = iter;
+		if (slots_found == num_slots) {
+			ppc460ex_p_desc_t *alloc_tail = NULL;
+			ppc460ex_p_desc_t *last_used = NULL;
+			iter = alloc_start;
+			while (num_slots) {
+				int i;
+
+				/* pre-ack all but the last descriptor */
+				if (num_slots != slots_per_op)
+					async_tx_ack(&iter->async_tx);
+
+				list_add_tail(&iter->chain_node, &chain);
+				alloc_tail = iter;
+				iter->async_tx.cookie = 0;
+				iter->hw_next = NULL;
+				iter->flags = 0;
+				iter->slot_cnt = num_slots;
+				for (i = 0; i < slots_per_op; i++) {
+					iter->slots_per_op = slots_per_op - i;
+					last_used = iter;
+					iter = list_entry(iter->slot_node.next,
+							ppc460ex_p_desc_t,
+							slot_node);
+				}
+				num_slots -= slots_per_op;
+			}
+			alloc_tail->group_head = alloc_start;
+			alloc_tail->async_tx.cookie = -EBUSY;
+			list_splice(&chain, &alloc_tail->group_list);
+			chan->last_used = last_used;
+			return alloc_tail;
+		}
+	}
+	if (!retry++)
+		goto retry;
+
+	/* rate-limit the failure message to one in every 100 misses */
+	if (!(empty_slot_cnt % 100))
+		printk(KERN_INFO"No empty slots trying to free some\n");
+	empty_slot_cnt++;
+	/* try to free some slots if the allocation fails */
+	tasklet_schedule(&chan->irq_tasklet);
+	return NULL;
+}
+
+/**
+ * ppc460ex_adma_alloc_chan_resources - allocate pools for CDB slots
+ * @chan: dmaengine channel being opened
+ *
+ * Carves the channel's pre-allocated coherent descriptor pool into
+ * software descriptor slots, appends each to the channel's all_slots
+ * list, and on first allocation performs per-channel one-time init.
+ * Returns the number of slots available, or -ENOMEM if none could be
+ * allocated.
+ */
+static int ppc460ex_adma_alloc_chan_resources(struct dma_chan *chan)
+{
+ ppc460ex_p_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_p_desc_t *slot = NULL;
+ char *hw_desc;
+ int i, db_sz;
+ /* one-time hardware init only if no slots were allocated yet */
+ int init = ppc460ex_chan->slots_allocated ? 0 : 1;
+ int pool_size = DMA_FIFO_SIZE * DMA_CDB_SIZE;
+
+ chan->chan_id = ppc460ex_chan->device->id;
+
+ /* Allocate descriptor slots */
+ i = ppc460ex_chan->slots_allocated;
+ db_sz = sizeof (dma_cdb_t);
+
+ for (; i < (pool_size/db_sz); i++) {
+ slot = kzalloc(sizeof(ppc460ex_p_desc_t), GFP_KERNEL);
+ if (!slot) {
+ /* NOTE(review): "i--" prints the old count but also
+ * decrements the value later used for dev_dbg and the
+ * return - confirm the off-by-one is intended */
+ printk(KERN_INFO "GT ADMA Channel only initialized"
+ " %d descriptor slots", i--);
+ break;
+ }
+
+ /* point the software slot at its CDB in the coherent pool
+ * (virtual address for the CPU, bus address for the engine) */
+ hw_desc = (char *) ppc460ex_chan->dma_desc_pool_virt;
+ slot->hw_desc = (void *) &hw_desc[i * db_sz];
+ dma_async_tx_descriptor_init(&slot->async_tx, chan);
+ slot->async_tx.tx_submit = ppc460ex_adma_tx_submit;
+ INIT_LIST_HEAD(&slot->chain_node);
+ INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->group_list);
+ hw_desc = (char *) ppc460ex_chan->dma_desc_pool;
+ slot->phys = (dma_addr_t) &hw_desc[i * db_sz];
+ slot->idx = i;
+ spin_lock_bh(&ppc460ex_chan->lock);
+ ppc460ex_chan->slots_allocated++;
+ list_add_tail(&slot->slot_node, &ppc460ex_chan->all_slots);
+ spin_unlock_bh(&ppc460ex_chan->lock);
+ }
+
+ /* seed the allocator's search cursor on first success */
+ if (i && !ppc460ex_chan->last_used) {
+ ppc460ex_chan->last_used =
+ list_entry(ppc460ex_chan->all_slots.next,
+ ppc460ex_p_desc_t,
+ slot_node);
+ }
+
+ dev_dbg(ppc460ex_chan->device->common.dev,
+ "ppc460ex adma%d: allocated %d descriptor slots\n",
+ ppc460ex_chan->device->id, i);
+
+ /* initialize the channel and the chain with a null operation */
+ if (init) {
+ switch (ppc460ex_chan->chan_id)
+ {
+ case PPC460EX_PDMA0_ID:
+ case PPC460EX_PDMA1_ID:
+ ppc460ex_chan->hw_chain_inited = 0;
+ /* Use WXOR for self-testing */
+ if (!ppc460ex_dma_tchan)
+ ppc460ex_dma_tchan = ppc460ex_chan;
+ if (!ppc460ex_r6_tchan)
+ ppc460ex_r6_tchan = ppc460ex_chan;
+ break;
+ default:
+ BUG();
+ }
+ ppc460ex_chan->needs_unmap = 1;
+ }
+
+ return (i > 0) ? i : -ENOMEM;
+}
+
+/**
+ * ppc460ex_desc_assign_cookie - assign a cookie
+ * @chan: channel owning the cookie counter
+ * @desc: descriptor receiving the new cookie
+ *
+ * Advances the channel's monotonically increasing cookie (wrapping
+ * back to 1 on signed overflow), stores it on the descriptor and
+ * returns it.
+ */
+static dma_cookie_t ppc460ex_desc_assign_cookie(ppc460ex_p_ch_t *chan,
+		ppc460ex_p_desc_t *desc)
+{
+	dma_cookie_t next = chan->common.cookie + 1;
+
+	if (next < 0)
+		next = 1;
+	chan->common.cookie = next;
+	desc->async_tx.cookie = next;
+	return next;
+}
+
+
+/**
+ * ppc460ex_adma_check_threshold - append CDBs to h/w chain if threshold
+ * has been achieved
+ * @chan: channel to inspect
+ *
+ * Kicks the hardware chain once enough operations have accumulated.
+ */
+static void ppc460ex_adma_check_threshold(ppc460ex_p_ch_t *chan)
+{
+	dev_dbg(chan->device->common.dev, "ppc460ex adma%d: pending: %d\n",
+		chan->device->id, chan->pending);
+
+	if (chan->pending < PPC460EX_ADMA_THRESHOLD)
+		return;
+
+	chan->pending = 0;
+	ppc460ex_chan_append(chan);
+}
+
+/**
+ * ppc460ex_adma_tx_submit - submit new descriptor group to the channel
+ * (it's not necessary that descriptors will be submitted to the h/w
+ * chains too right now)
+ * @tx: group-tail descriptor returned by a prep routine
+ *
+ * Assigns a cookie, splices the descriptor group onto the channel's
+ * software chain (hardware-linking it after the current tail when the
+ * chain is non-empty), bumps the pending count by the number of
+ * operations in the group and kicks the hardware if the append
+ * threshold was reached.  Returns the assigned cookie.
+ */
+static dma_cookie_t ppc460ex_adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ ppc460ex_p_desc_t *sw_desc = tx_to_ppc460ex_adma_slot(tx);
+ ppc460ex_p_ch_t *chan = to_ppc460ex_adma_chan(tx->chan);
+ ppc460ex_p_desc_t *group_start, *old_chain_tail;
+ int slot_cnt;
+ int slots_per_op;
+ dma_cookie_t cookie;
+
+ group_start = sw_desc->group_head;
+ slot_cnt = group_start->slot_cnt;
+ slots_per_op = group_start->slots_per_op;
+
+ spin_lock_bh(&chan->lock);
+
+ cookie = ppc460ex_desc_assign_cookie(chan, sw_desc);
+
+ if (unlikely(list_empty(&chan->chain))) {
+ /* first peer */
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ chan_first_cdb[chan->device->id] = group_start;
+ } else {
+ /* isn't first peer, bind CDBs to chain */
+ old_chain_tail = list_entry(chan->chain.prev,
+ ppc460ex_p_desc_t, chain_node);
+ list_splice_init(&sw_desc->group_list,
+ &old_chain_tail->chain_node);
+ /* fix up the hardware chain */
+ ppc460ex_desc_set_link(chan, old_chain_tail, group_start);
+ }
+
+ /* increment the pending count by the number of operations */
+ chan->pending += slot_cnt / slots_per_op;
+ ppc460ex_adma_check_threshold(chan);
+ spin_unlock_bh(&chan->lock);
+
+ dev_dbg(chan->device->common.dev,
+ "ppc460ex adma%d: %s cookie: %d slot: %d tx %p\n",
+ chan->device->id,__FUNCTION__,
+ sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
+ return cookie;
+}
+
+/**
+ * ppc460ex_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
+ * @chan: channel to prepare the descriptor on
+ * @flags: dmaengine descriptor flags
+ *
+ * Allocates a single slot and initializes it as an interrupt-only
+ * (no data transfer) operation.  Returns NULL if no slot is free.
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_interrupt(
+		struct dma_chan *chan, unsigned long flags)
+{
+	ppc460ex_p_ch_t *adma_chan = to_ppc460ex_adma_chan(chan);
+	ppc460ex_p_desc_t *desc;
+
+	dev_dbg(adma_chan->device->common.dev,
+		"ppc460ex adma%d: %s\n", adma_chan->device->id,
+		__FUNCTION__);
+
+	spin_lock_bh(&adma_chan->lock);
+	desc = ppc460ex_adma_alloc_slots(adma_chan, 1, 1);
+	if (desc) {
+		ppc460ex_p_desc_t *head = desc->group_head;
+
+		ppc460ex_desc_init_interrupt(head, adma_chan);
+		head->unmap_len = 0;
+		desc->async_tx.flags = flags;
+	}
+	spin_unlock_bh(&adma_chan->lock);
+
+	return desc ? &desc->async_tx : NULL;
+}
+
+/**
+ * ppc460ex_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
+ * @chan: channel to prepare the descriptor on
+ * @dma_dest: bus address of the destination
+ * @dma_src: bus address of the source
+ * @len: transfer length in bytes; must be non-zero and within the
+ *       engine's per-CDB byte-count limit
+ * @flags: dmaengine descriptor flags
+ *
+ * Allocates one slot and fills it in as a single memcpy operation.
+ * Returns the prepared descriptor, or NULL if @len is zero or no
+ * slot was available.
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ ppc460ex_p_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_p_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > PPC460EX_ADMA_DMA_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&ppc460ex_chan->lock);
+
+ dev_dbg(ppc460ex_chan->device->common.dev,
+ "ppc460ex adma%d: %s len: %u int_en %d \n",
+ ppc460ex_chan->device->id, __FUNCTION__, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ /* a memcpy fits in one slot / one hardware op */
+ slot_cnt = slots_per_op = 1;
+ sw_desc = ppc460ex_adma_alloc_slots(ppc460ex_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ prefetch(group_start);
+ ppc460ex_desc_init_memcpy(group_start, flags);
+ ppc460ex_desc_set_dest_addr(sw_desc->group_head, chan, dma_dest, 0);
+ ppc460ex_desc_set_src_addr(sw_desc->group_head, chan, dma_src, 0);
+ ppc460ex_desc_set_byte_count(group_start, ppc460ex_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ /* NOTE(review): leftover debug hook - reads DCR 0x60 and
+ * compares it with a magic value to decide whether to dump
+ * the transfer parameters; confirm and remove */
+ if(mfdcr(0x60) == 0xfee8) {
+ printk("Byte Count = 0x%x\n",len);
+ printk("src= 0x%x\n",dma_src);
+ printk("Dest = 0x%x\n",dma_dest);
+ }
+ }
+ spin_unlock_bh(&ppc460ex_chan->lock);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * ppc460ex_adma_prep_dma_memset - prepare CDB for a MEMSET operation
+ * @chan: channel to prepare the descriptor on
+ * @dma_dest: bus address of the destination
+ * @value: byte value to fill with
+ * @len: fill length in bytes; must be non-zero and within the
+ *       engine's per-CDB byte-count limit
+ * @flags: dmaengine descriptor flags
+ *
+ * Allocates one slot and fills it in as a single memset operation.
+ * Returns NULL if @len is zero or no slot was available.
+ */
+static struct dma_async_tx_descriptor *ppc460ex_adma_prep_dma_memset(
+		struct dma_chan *chan, dma_addr_t dma_dest, int value,
+		size_t len, unsigned long flags)
+{
+	ppc460ex_p_ch_t *adma_chan = to_ppc460ex_adma_chan(chan);
+	ppc460ex_p_desc_t *desc;
+
+	if (unlikely(!len))
+		return NULL;
+	BUG_ON(unlikely(len > PPC460EX_ADMA_DMA_MAX_BYTE_COUNT));
+
+	spin_lock_bh(&adma_chan->lock);
+
+	dev_dbg(adma_chan->device->common.dev,
+		"ppc460ex adma%d: %s cal: %u len: %u int_en %d\n",
+		adma_chan->device->id, __FUNCTION__, value, len,
+		flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+	/* a memset fits in one slot / one hardware op */
+	desc = ppc460ex_adma_alloc_slots(adma_chan, 1, 1);
+	if (desc) {
+		ppc460ex_p_desc_t *head = desc->group_head;
+
+		ppc460ex_desc_init_memset(head, value, flags);
+		ppc460ex_adma_set_dest(head, dma_dest, 0);
+		ppc460ex_desc_set_byte_count(head, adma_chan, len);
+		desc->unmap_len = len;
+		desc->async_tx.flags = flags;
+	}
+	spin_unlock_bh(&adma_chan->lock);
+
+	return desc ? &desc->async_tx : NULL;
+}
+
+
+/**
+ * ppc460ex_adma_set_dest - set destination address into descriptor
+ * @sw_desc: software descriptor whose group head is updated
+ * @addr: bus address of the destination
+ * @index: destination index; must be below sw_desc->dst_cnt
+ *
+ * NOTE(review): @index is validated by the BUG_ON but not forwarded
+ * to ppc460ex_desc_set_dest_addr() (a hard-coded 0 is passed) -
+ * confirm single-destination operation is intended.
+ */
+static void ppc460ex_adma_set_dest(ppc460ex_p_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ ppc460ex_p_ch_t *chan = to_ppc460ex_adma_chan(sw_desc->async_tx.chan);
+ BUG_ON(index >= sw_desc->dst_cnt);
+
+ switch (chan->chan_id) {
+ case PPC460EX_PDMA0_ID:
+ case PPC460EX_PDMA1_ID:
+ case PPC460EX_PDMA2_ID:
+ case PPC460EX_PDMA3_ID:
+ /* to do: support transfers lengths >
+ * PPC460EX_ADMA_DMA/XOR_MAX_BYTE_COUNT
+ */
+ ppc460ex_desc_set_dest_addr(sw_desc->group_head,
+ // chan, 0x8, addr, index); // Enabling HB bus
+ chan, 0, addr);
+ break;
+ default:
+ BUG();
+ }
+}
+
+
+
+
+/**
+ * ppc460ex_adma_free_chan_resources - free the resources allocated
+ * @chan: dmaengine channel being torn down
+ *
+ * Runs a final cleanup pass, strips any descriptors still on the
+ * software chain (counting them as "in use"), then frees every slot
+ * on the all_slots list and resets the allocator cursor.
+ */
+static void ppc460ex_adma_free_chan_resources(struct dma_chan *chan)
+{
+ ppc460ex_p_ch_t *ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+ ppc460ex_p_desc_t *iter, *_iter;
+ int in_use_descs = 0;
+
+ ppc460ex_adma_slot_cleanup(ppc460ex_chan);
+
+ spin_lock_bh(&ppc460ex_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &ppc460ex_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe_reverse(iter, _iter,
+ &ppc460ex_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ ppc460ex_chan->slots_allocated--;
+ }
+ ppc460ex_chan->last_used = NULL;
+
+ dev_dbg(ppc460ex_chan->device->common.dev,
+ "ppc460ex adma%d %s slots_allocated %d\n",
+ ppc460ex_chan->device->id,
+ __FUNCTION__, ppc460ex_chan->slots_allocated);
+ spin_unlock_bh(&ppc460ex_chan->lock);
+
+ /* one is ok since we left it on there on purpose */
+ if (in_use_descs > 1)
+ printk(KERN_ERR "GT: Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+}
+
+/**
+ * ppc460ex_adma_is_complete - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ * @done: out (optional): last completed cookie
+ * @used: out (optional): last issued cookie
+ *
+ * Checks completion; if the transaction is not yet done, runs a slot
+ * cleanup pass and checks once more before reporting.
+ */
+static enum dma_status ppc460ex_adma_is_complete(struct dma_chan *chan,
+	dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used)
+{
+	ppc460ex_p_ch_t *adma_chan = to_ppc460ex_adma_chan(chan);
+	enum dma_status status = DMA_IN_PROGRESS;
+	int pass;
+
+	for (pass = 0; pass < 2; pass++) {
+		dma_cookie_t last_used = chan->cookie;
+		dma_cookie_t last_complete = adma_chan->completed_cookie;
+
+		if (done)
+			*done = last_complete;
+		if (used)
+			*used = last_used;
+
+		status = dma_async_is_complete(cookie, last_complete,
+				last_used);
+		if (status == DMA_SUCCESS || pass)
+			break;
+
+		/* not done yet: reclaim finished slots and re-check */
+		ppc460ex_adma_slot_cleanup(adma_chan);
+	}
+
+	return status;
+}
+
+/**
+ * ppc460ex_adma_eot_handler - end of transfer interrupt handler
+ * @irq: interrupt number (unused)
+ * @data: channel pointer registered with request_irq()
+ *
+ * Schedules the cleanup tasklet and acknowledges the end-of-transfer
+ * status in the hardware.
+ */
+static irqreturn_t ppc460ex_adma_eot_handler(int irq, void *data)
+{
+ ppc460ex_p_ch_t *chan = data;
+
+ dev_dbg(chan->device->common.dev,
+ "ppc460ex adma%d: %s\n", chan->device->id, __FUNCTION__);
+
+ tasklet_schedule(&chan->irq_tasklet);
+ ppc460ex_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ppc460ex_adma_err_handler - DMA error interrupt handler;
+ * do the same things as a eot handler
+ * @irq: interrupt number (unused)
+ * @data: channel pointer registered with request_irq()
+ */
+static irqreturn_t ppc460ex_adma_err_handler(int irq, void *data)
+{
+ ppc460ex_p_ch_t *chan = data;
+ dev_dbg(chan->device->common.dev,
+ "ppc460ex adma%d: %s\n", chan->device->id, __FUNCTION__);
+ tasklet_schedule(&chan->irq_tasklet);
+ ppc460ex_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/* completion callback for the RAID-6 self-test descriptor */
+static void ppc460ex_test_rad6_callback (void *unused)
+{
+ complete(&ppc460ex_r6_test_comp);
+}
+/**
+ * ppc460ex_test_callback - called when test operation has been done
+ * @unused: callback parameter, not used
+ *
+ * Signals completion of the RAID-5 self-test transfer.
+ */
+static void ppc460ex_test_callback (void *unused)
+{
+ complete(&ppc460ex_r5_test_comp);
+}
+
+/**
+ * ppc460ex_adma_issue_pending - flush all pending descriptors to h/w
+ * @chan: dmaengine channel to flush
+ */
+static void ppc460ex_adma_issue_pending(struct dma_chan *chan)
+{
+	ppc460ex_p_ch_t *adma_chan = to_ppc460ex_adma_chan(chan);
+
+	if (!adma_chan->pending)
+		return;
+
+	dev_dbg(adma_chan->device->common.dev,
+		"ppc460ex adma%d: %s %d \n", adma_chan->device->id,
+		__FUNCTION__, adma_chan->pending);
+	adma_chan->pending = 0;
+	ppc460ex_chan_append(adma_chan);
+}
+
+/**
+ * ppc460ex_pdma_remove - remove the asynch device
+ * @dev: platform device carrying the ADMA engine
+ *
+ * Unregisters from the dmaengine core, releases the interrupt lines,
+ * the MMIO region, every channel, every channel reference, and the
+ * device structure itself.
+ */
+static int __devexit ppc460ex_pdma_remove(struct platform_device *dev)
+{
+	ppc460ex_p_dev_t *device = platform_get_drvdata(dev);
+	struct dma_chan *chan, *_chan;
+	struct ppc_dma_chan_ref *ref, *_ref;
+	ppc460ex_p_ch_t *ppc460ex_chan;
+	struct resource *res;
+	int i;
+
+	dma_async_device_unregister(&device->common);
+
+	for (i = 0; i < 3; i++) {
+		/* was "u32 irq": platform_get_irq() signals failure with
+		 * a negative value, which an unsigned type cannot detect */
+		int irq = platform_get_irq(dev, i);
+		if (irq >= 0)
+			free_irq(irq, device);
+	}
+
+	/* was dereferenced unconditionally inside a do {} while (0) */
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	if (res)
+		release_mem_region(res->start, res->end - res->start);
+
+	list_for_each_entry_safe(chan, _chan, &device->common.channels,
+			device_node) {
+		ppc460ex_chan = to_ppc460ex_adma_chan(chan);
+		list_del(&chan->device_node);
+		kfree(ppc460ex_chan);
+	}
+
+	list_for_each_entry_safe(ref, _ref, &ppc_adma_p_chan_list, node) {
+		list_del(&ref->node);
+		kfree(ref);
+	}
+
+	kfree(device);
+
+	return 0;
+}
+/*
+ * Per channel probe
+ *
+ * Child-node probe: allocates one channel, maps its register window,
+ * allocates the coherent descriptor FIFO and hooks the channel into
+ * the parent device's channel table (index derived from the register
+ * offset).
+ */
+int __devinit ppc460ex_dma_per_chan_probe(struct of_device *ofdev,
+		const struct of_device_id *match)
+{
+	int ret = 0;
+	ppc460ex_p_dev_t *adev;
+	ppc460ex_p_ch_t *new_chan;
+	int err;
+
+	adev = dev_get_drvdata(ofdev->dev.parent);
+	BUG_ON(!adev);
+	new_chan = kzalloc(sizeof(*new_chan), GFP_KERNEL);
+	if (!new_chan) {
+		printk("ERROR:No Free memory for allocating dma channels\n");
+		return -ENOMEM;
+	}
+	err = of_address_to_resource(ofdev->node, 0, &new_chan->reg);
+	if (err) {
+		printk("ERROR:Can't get %s property reg\n", __FUNCTION__);
+		/* was: "goto err" with ret still 0 - reported success
+		 * to the caller and leaked new_chan */
+		ret = err;
+		goto err_chan_free;
+	}
+	new_chan->device = &ofdev->dev;
+	new_chan->reg_base = ioremap(new_chan->reg.start,
+			new_chan->reg.end - new_chan->reg.start + 1);
+	if (!new_chan->reg_base) {	/* was unchecked */
+		ret = -ENOMEM;
+		goto err_chan_free;
+	}
+	new_chan->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
+			DMA_FIFO_SIZE << 2, &new_chan->dma_desc_pool,
+			GFP_KERNEL);
+	if (!new_chan->dma_desc_pool_virt) {
+		ret = -ENOMEM;
+		goto err_dma_alloc;
+	}
+	/* register block lives at offset 0x200 + 8 * channel number */
+	new_chan->chan_id = ((new_chan->reg.start - 0x200) & 0xfff) >> 3;
+	adev->chan[new_chan->chan_id] = new_chan;
+
+	return 0;
+
+err_dma_alloc:
+	iounmap(new_chan->reg_base);
+err_chan_free:
+	kfree(new_chan);
+	return ret;
+}
+/* OF match table for the parent "amcc,dma" controller node */
+static struct of_device_id dma_4chan_match[] =
+{
+ {
+ .compatible = "amcc,dma",
+ },
+ {},
+};
+/* OF match table for the per-channel "amcc,dma-4channel" child nodes */
+static struct of_device_id dma_per_chan_match[] = {
+ {
+ .compatible = "amcc,dma-4channel",
+ },
+ {},
+};
+/**
+ * ppc460ex_pdma_probe - probe the asynch device
+ * @ofdev: OF device being probed
+ * @match: matched OF device id entry
+ *
+ * Allocates the ADMA device and its channel, wires up the dmaengine
+ * callbacks (memcpy/memset/interrupt), requests the interrupt lines
+ * described in the device tree (line 0 is end-of-transfer, lines 1-3
+ * are error lines), probes the per-channel child nodes and registers
+ * with the dmaengine core.
+ */
+static int __devinit ppc460ex_pdma_probe(struct of_device *ofdev,
+		const struct of_device_id *match)
+{
+	/* IRQ names as originally registered (note the historical
+	 * "adma-chan-1" spelling) */
+	static const char * const irq_names[] = {
+		"adma-chan0", "adma-chan-1", "adma-chan2", "adma-chan3"
+	};
+	int ret = 0, irq, i;
+	ppc460ex_p_dev_t *adev;
+	ppc460ex_p_ch_t *chan;
+	struct ppc_dma_chan_ref *ref;
+
+	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+	if (!adev) {
+		ret = -ENOMEM;
+		goto err_adev_alloc;
+	}
+	adev->dev = &ofdev->dev;
+	adev->id = PPC460EX_PDMA0_ID;
+	/* create the DMA capability MASK . This used to come from resources structure*/
+	dma_cap_set(DMA_MEMCPY, adev->common.cap_mask);
+	dma_cap_set(DMA_INTERRUPT, adev->common.cap_mask);
+	dma_cap_set(DMA_MEMSET, adev->common.cap_mask);
+	adev->odev = ofdev;
+	dev_set_drvdata(&(ofdev->dev), adev);
+
+	INIT_LIST_HEAD(&adev->common.channels);
+
+	/* set base routines */
+	adev->common.device_alloc_chan_resources =
+		ppc460ex_adma_alloc_chan_resources;
+	adev->common.device_free_chan_resources =
+		ppc460ex_adma_free_chan_resources;
+	adev->common.device_is_tx_complete = ppc460ex_adma_is_complete;
+	adev->common.device_issue_pending = ppc460ex_adma_issue_pending;
+	adev->common.dev = &ofdev->dev;
+
+	/* set prep routines based on capability */
+	if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask))
+		adev->common.device_prep_dma_memcpy =
+			ppc460ex_adma_prep_dma_memcpy;
+	if (dma_has_cap(DMA_MEMSET, adev->common.cap_mask))
+		adev->common.device_prep_dma_memset =
+			ppc460ex_adma_prep_dma_memset;
+	if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask))
+		adev->common.device_prep_dma_interrupt =
+			ppc460ex_adma_prep_dma_interrupt;
+
+	/* create a channel */
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan) {
+		ret = -ENOMEM;
+		goto err_chan_alloc;
+	}
+
+	tasklet_init(&chan->irq_tasklet, ppc460ex_adma_tasklet,
+			(unsigned long)chan);
+
+	/* Line 0 gets the EOT handler, lines 1-3 the error handler.
+	 * irq_of_parse_and_map() returns 0 (NO_IRQ) on failure; the
+	 * old "irq >= 0" test could never catch that.  As before, a
+	 * missing line 0 flags -ENXIO but probing still continues;
+	 * missing later lines just stop the requests.
+	 */
+	for (i = 0; i < 4; i++) {
+		irq = irq_of_parse_and_map(ofdev->node, i);
+		printk("<%s> irq=0x%x\n", __FUNCTION__, irq);
+		if (irq <= 0) {
+			if (i == 0)
+				ret = -ENXIO;
+			break;
+		}
+		ret = request_irq(irq, i ? ppc460ex_adma_err_handler :
+				ppc460ex_adma_eot_handler,
+				IRQF_DISABLED, irq_names[i], chan);
+		if (ret) {
+			printk("Failed to request IRQ %d\n", irq);
+			ret = -EIO;
+			goto err_irq;
+		}
+	}
+
+	chan->device = adev;
+	/* pass the platform data */
+	spin_lock_init(&chan->lock);
+	INIT_LIST_HEAD(&chan->chain);
+	INIT_LIST_HEAD(&chan->all_slots);
+	chan->common.device = &adev->common;
+	list_add_tail(&chan->common.device_node, &adev->common.channels);
+
+	dev_dbg(&ofdev->dev, "AMCC(R) PPC440SP(E) ADMA Engine found [%d]: "
+		"( %s%s%s%s%s%s%s%s%s%s)\n",
+		adev->id,
+		dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq_xor " : "",
+		dma_has_cap(DMA_PQ_UPDATE, adev->common.cap_mask) ? "pq_update " : "",
+		dma_has_cap(DMA_PQ_ZERO_SUM, adev->common.cap_mask) ? "pq_zero_sum " :
+		"",
+		dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
+		dma_has_cap(DMA_DUAL_XOR, adev->common.cap_mask) ? "dual_xor " : "",
+		dma_has_cap(DMA_ZERO_SUM, adev->common.cap_mask) ? "xor_zero_sum " :
+		"",
+		dma_has_cap(DMA_MEMSET, adev->common.cap_mask) ? "memset " : "",
+		dma_has_cap(DMA_MEMCPY_CRC32C, adev->common.cap_mask) ? "memcpy+crc "
+		: "",
+		dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
+		dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "int " : "");
+
+	of_platform_bus_probe(ofdev->node, dma_per_chan_match, &ofdev->dev);
+	dma_async_device_register(&adev->common);
+	ref = kmalloc(sizeof(*ref), GFP_KERNEL);
+	printk("<%s> ret=0x%x\n", __FUNCTION__, ret);
+	if (ref) {
+		ref->chan = &chan->common;
+		INIT_LIST_HEAD(&ref->node);
+		list_add_tail(&ref->node, &ppc_adma_p_chan_list);
+	} else
+		printk(KERN_WARNING "%s: failed to allocate channel reference!\n",
+			__FUNCTION__);
+	return ret;
+
+err_irq:
+	kfree(chan);
+err_chan_alloc:
+	kfree(adev);
+err_adev_alloc:
+	/* was: release_mem_region(res->start, ...) on a never-initialized
+	 * "res" pointer - no MMIO region is requested in this probe, so
+	 * there is nothing to release here */
+	return ret;
+}
+
+/**
+ * ppc460ex_test_dma - test are RAID-5 capabilities enabled successfully.
+ * For this we just perform one WXOR operation with the same source
+ * and destination addresses, the GF-multiplier is 1; so if RAID-5
+ * capabilities are enabled then we'll get src/dst filled with zero.
+ *
+ * NOTE(review): the body actually builds a memcpy CDB with identical
+ * source and destination and then checks the page for zero - confirm
+ * this exercises the intended engine path.  (A stray
+ * "o/of_platform_driver_unregister(...)" line garbled into the
+ * original comment was removed.)
+ */
+static int ppc460ex_test_dma (ppc460ex_p_ch_t *chan)
+{
+ ppc460ex_p_desc_t *sw_desc, *iter;
+ struct page *pg;
+ char *a;
+ dma_addr_t dma_addr;
+ unsigned long op = 0;
+ int rval = 0;
+
+ if (!ppc460ex_dma_tchan)
+ return -1;
+ /*FIXME*/
+
+ pg = alloc_page(GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+
+ spin_lock_bh(&chan->lock);
+ sw_desc = ppc460ex_adma_alloc_slots(chan, 1, 1);
+ if (sw_desc) {
+ /* 1 src, 1 dsr, int_ena */
+ ppc460ex_desc_init_memcpy(sw_desc,0);
+ list_for_each_entry(iter, &sw_desc->group_list, chain_node) {
+ ppc460ex_desc_set_byte_count(iter, chan, PAGE_SIZE);
+ iter->unmap_len = PAGE_SIZE;
+ }
+ } else {
+ rval = -EFAULT;
+ spin_unlock_bh(&chan->lock);
+ goto exit;
+ }
+ spin_unlock_bh(&chan->lock);
+
+ /* Fill the test page with ones */
+ memset(page_address(pg), 0xFF, PAGE_SIZE);
+ /* i and pg_addr are only used by the #if 0 debug dumps below */
+ int i = 0;
+ char *pg_addr = page_address(pg);
+#if 0
+ for(i=0;i < PAGE_SIZE; i+=64)
+ printk("addr = 0x%x data = 0x%x\n",pg_addr + i,*(pg_addr+i));
+#endif
+ //dma_addr = dma_map_page(&chan->device->common, pg, 0, PAGE_SIZE,
+ dma_addr = dma_map_page(&chan->device->odev->dev, pg, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ /* Setup adresses */
+ ppc460ex_desc_set_src_addr(sw_desc, chan, dma_addr, 0);
+ ppc460ex_desc_set_dest_addr(sw_desc, chan, dma_addr, 0);
+
+ async_tx_ack(&sw_desc->async_tx);
+ sw_desc->async_tx.callback = ppc460ex_test_callback;
+ sw_desc->async_tx.callback_param = NULL;
+
+ init_completion(&ppc460ex_r5_test_comp);
+
+ ppc460ex_adma_tx_submit(&sw_desc->async_tx);
+ ppc460ex_adma_issue_pending(&chan->common);
+
+ wait_for_completion(&ppc460ex_r5_test_comp);
+
+ /*Make sure cache is flushed to memory*/
+ /* NOTE(review): mapping the same page a second time (and never
+ * calling dma_unmap_page) is presumably meant as a cache flush -
+ * confirm; dma_sync_single_for_cpu() looks like the intended API */
+ dma_addr = dma_map_page(&chan->device->odev->dev, pg, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ /* Now check is the test page zeroed */
+ a = page_address(pg);
+#if 0
+ i = 0;
+ for(i=0;i < PAGE_SIZE; i+=64)
+ printk("addr = 0x%x data = 0x%x\n",a + i,*(a+i));
+#endif
+ if ((*(u32*)a) == 0 && memcmp(a, a+4, PAGE_SIZE-4)==0) {
+ /* page is zero - RAID-5 enabled */
+ rval = 0;
+ } else {
+ /* RAID-5 was not enabled */
+ rval = -EINVAL;
+ }
+ pr_dma(__LINE__,__FUNCTION__);
+exit:
+ __free_page(pg);
+ return rval;
+}
+
+
+/* Parent controller driver, bound to "amcc,dma" nodes */
+static struct of_platform_driver ppc460ex_pdma_driver = {
+ .name = "plb_dma",
+ .match_table = dma_4chan_match,
+
+ .probe = ppc460ex_pdma_probe,
+ .remove = ppc460ex_pdma_remove,
+};
+/* Per-channel child driver, bound to "amcc,dma-4channel" nodes.
+ * NOTE(review): no .remove is provided, so per-channel resources are
+ * never released on unbind - confirm this is intentional.
+ */
+struct of_platform_driver ppc460ex_dma_per_chan_driver = {
+ .name = "dma-4channel",
+ .match_table = dma_per_chan_match,
+ .probe = ppc460ex_dma_per_chan_probe,
+};
+
+/* /proc read handler: reports whether the RAID-5 capability self-test
+ * has succeeded.  Returns the number of bytes written into @page. */
+static int ppc460ex_dma_read (char *page, char **start, off_t off,
+	int count, int *eof, void *data)
+{
+	const char *msg = ppc460ex_r5_enabled ?
+		"PPC460Ex RAID-r5 capabilities are ENABLED.\n" :
+		"PPC460Ex RAID-r5 capabilities are DISABLED.\n";
+
+	return sprintf(page, "%s\n", msg);
+}
+
+/* /proc write handler: accepts a key from user space and re-runs the
+ * DMA self-test, updating the r5/r6 enabled flags on success.
+ * Returns @count on success or a negative errno.
+ */
+static int ppc460ex_dma_write (struct file *file, const char __user *buffer,
+		unsigned long count, void *data)
+{
+	/* e.g. 0xffffffff */
+	char tmp[11];
+	unsigned long val;
+
+	/* leave room for the terminating NUL (was "count > 11", which
+	 * allowed the buffer to be filled completely) */
+	if (!count || count > sizeof(tmp) - 1)
+		return -EINVAL;
+
+	if (!ppc460ex_dma_tchan)
+		return -EFAULT;
+
+	if (copy_from_user(tmp, buffer, count))
+		return -EFAULT;
+	/* was missing: simple_strtoul()/strcmp() read past the copied
+	 * bytes without this terminator */
+	tmp[count] = '\0';
+
+	/* Write a key */
+	val = simple_strtoul(tmp, NULL, 16);
+	/* was: strcmp(val, "copy") - comparing against the converted
+	 * integer instead of the user string */
+	if (!strcmp(tmp, "copy"))
+		printk("Testing copy feature");
+	/* Verify does it really work now */
+	if (ppc460ex_test_dma(ppc460ex_dma_tchan) == 0) {
+		/* PPC440SP(e) RAID-6 has been activated successfully */;
+		printk(KERN_INFO "PPC460Ex RAID-5 has been activated "
+			"successfully\n");
+		ppc460ex_r5_enabled = 1;
+		ppc460ex_r6_enabled = 1;
+	} else {
+		/* PPC440SP(e) RAID-6 hasn't been activated! Error key ? */;
+		printk(KERN_INFO "PPC460Ex RAID-5 hasn't been activated!"
+			" Error key ?\n");
+		ppc460ex_r5_enabled = 0;
+	}
+
+	return count;
+}
+
+/* Module init: registers the OF platform driver and creates the
+ * /proc control directory with its "enable" entry.
+ * NOTE(review): the proc entries are never removed and the exit
+ * routine below is compiled out, so the module cannot be cleanly
+ * unloaded - confirm intentional.
+ */
+static int __init ppc460ex_adma_init (void)
+{
+ int rval;
+ struct proc_dir_entry *p;
+
+ rval = of_register_platform_driver(&ppc460ex_pdma_driver);
+
+ if (rval == 0) {
+ /* Create /proc entries */
+ ppc460ex_proot = proc_mkdir(PPC460EX_DMA_PROC_ROOT, NULL);
+ if (!ppc460ex_proot) {
+ printk(KERN_ERR "%s: failed to create %s proc "
+ "directory\n",__FUNCTION__,PPC460EX_DMA_PROC_ROOT);
+ /* User will not be able to enable h/w RAID-6 */
+ return rval;
+ }
+
+ /* RAID-6 h/w enable entry */
+ p = create_proc_entry("enable", 0, ppc460ex_proot);
+ if (p) {
+ p->read_proc = ppc460ex_dma_read;
+ p->write_proc = ppc460ex_dma_write;
+ }
+ }
+ return rval;
+}
+
+/* The exit path is compiled out: the driver stays registered for the
+ * lifetime of the kernel once loaded. */
+#if 0
+static void __exit ppc460ex_adma_exit (void)
+{
+ of_unregister_platform_driver(&ppc460ex_pdma_driver);
+ return;
+}
+module_exit(ppc460ex_adma_exit);
+#endif
+
+module_init(ppc460ex_adma_init);
+
+MODULE_AUTHOR("Tirumala Marri<tmarri@amcc.com>");
+MODULE_DESCRIPTION("PPC460EX ADMA Engine Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc460ex_4chan_dma.c b/drivers/dma/ppc460ex_4chan_dma.c
new file mode 100755
index 00000000000..cb2888d5213
--- /dev/null
+++ b/drivers/dma/ppc460ex_4chan_dma.c
@@ -0,0 +1,1103 @@
+/*
+ * Copyright(c) 2008 Applied Micro Circuits Corporation(AMCC). All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <asm/dcr-regs.h>
+#include <asm/dcr.h>
+#include "ppc460ex_4chan_dma.h"
+
+
+
+#ifdef DEBUG_TEST
+#define dma_pr printk
+#else
+#define dma_pr
+#endif
+#define TEST_SIZE 12
+
+
+ppc460ex_plb_dma_dev_t *adev;
+
+
+
+/*
+ * Scan the DMA status register for an idle channel.
+ * Returns the first free channel index, or -ENODEV if all are busy.
+ */
+int ppc460ex_get_dma_channel(void)
+{
+	int i;
+	unsigned int status = mfdcr(DCR_DMA2P40_SR);
+
+	for (i = 0; i < MAX_PPC460EX_DMA_CHANNELS; i++) {
+		/*
+		 * The channel-busy flags sit in big-endian bits 20..23 of
+		 * DMASR (mask 0x800 >> i; cf. the 0x00000800 busy test in
+		 * the EOT handler).  The original "1 >> (20 + i)" always
+		 * evaluated to 0, so every channel looked free.
+		 */
+		if ((status & (0x80000000 >> (20 + i))) == 0)
+			return i;
+	}
+	return -ENODEV;
+}
+
+
+/* Return the raw contents of the DMA status register (DMASR). */
+int ppc460ex_get_dma_status(void)
+{
+	return mfdcr(DCR_DMA2P40_SR);
+}
+
+
+/*
+ * Program the source address registers of one DMA channel.
+ * Returns DMA_STATUS_GOOD or DMA_STATUS_BAD_CHANNEL.
+ */
+int ppc460ex_set_src_addr(int ch_id, phys_addr_t src_addr)
+{
+	/* ch_id is signed; reject negative ids too (the original only
+	 * checked the upper bound).  Log level added to match the
+	 * sibling ppc460ex_set_dst_addr(). */
+	if (ch_id < 0 || ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+#ifdef PPC4xx_DMA_64BIT
+	mtdcr(DCR_DMA2P40_SAH0 + ch_id*8, src_addr >> 32);
+#endif
+	mtdcr(DCR_DMA2P40_SAL0 + ch_id*8, (u32)src_addr);
+
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Program the destination address registers of one DMA channel.
+ * Returns DMA_STATUS_GOOD or DMA_STATUS_BAD_CHANNEL.
+ */
+int ppc460ex_set_dst_addr(int ch_id, phys_addr_t dst_addr)
+{
+	unsigned int reg_off;
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	reg_off = ch_id * 8;	/* 8 DCRs per channel */
+#ifdef PPC4xx_DMA_64BIT
+	mtdcr(DCR_DMA2P40_DAH0 + reg_off, dst_addr >> 32);
+#endif
+	mtdcr(DCR_DMA2P40_DAL0 + reg_off, (u32)dst_addr);
+
+	return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Sets the dma mode for single DMA transfers only.
+ * For scatter/gather transfers, the mode is passed to the
+ * alloc_dma_handle() function as one of the parameters.
+ *
+ * The mode is simply saved and used later.  This allows
+ * the driver to call set_dma_mode() and set_dma_addr() in
+ * any order.
+ *
+ * Valid mode values are:
+ *
+ *	DMA_MODE_READ		peripheral to memory
+ *	DMA_MODE_WRITE		memory to peripheral
+ *	DMA_MODE_MM		memory to memory
+ *	DMA_MODE_MM_DEVATSRC	device-paced memory to memory, device at src
+ *	DMA_MODE_MM_DEVATDST	device-paced memory to memory, device at dst
+ */
+int ppc460ex_set_dma_mode(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int mode)
+{
+	ppc460ex_plb_dma_ch_t *dma_chan;
+
+	/* Validate ch_id before indexing adev->chan[]; the original
+	 * dereferenced the array first (out-of-bounds read for a bad id)
+	 * and printed the possibly-wild dma_chan->chan_id instead of the
+	 * offending ch_id. */
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk("%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	dma_chan = adev->chan[ch_id];
+	dma_chan->mode = mode;
+	return DMA_STATUS_GOOD;
+}
+
+
+
+
+/*
+ * Sets the DMA Count register.  Note that 'count' is in bytes.
+ * However, the DMA Count register counts the number of "transfers",
+ * where each transfer is equal to the bus width.  Thus, count
+ * MUST be a multiple of the bus width (only warned about below).
+ */
+void ppc460ex_set_dma_count(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int count)
+{
+	ppc460ex_plb_dma_ch_t *dma_chan;
+	int error = 0;
+
+	/* Validate ch_id before indexing adev->chan[] (the original read
+	 * the array unchecked). */
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return;
+	}
+	dma_chan = adev->chan[ch_id];
+
+	/* Warn when count is not a multiple of the peripheral width. */
+	switch (dma_chan->pwidth) {
+	case PW_8:
+		break;
+	case PW_16:
+		if (count & 0x1)
+			error = 1;
+		break;
+	case PW_32:
+		if (count & 0x3)
+			error = 1;
+		break;
+	case PW_64:
+		if (count & 0x7)
+			error = 1;
+		break;
+	case PW_128:
+		if (count & 0xf)
+			error = 1;
+		break;
+	default:
+		printk("set_dma_count: invalid bus width: 0x%x\n",
+		       dma_chan->pwidth);
+		return;
+	}
+	if (error)
+		printk
+		    ("Warning: set_dma_count count 0x%x bus width %d\n",
+		     count, dma_chan->pwidth);
+
+	/* Convert bytes to transfers; shift equals the PW encoding. */
+	count = count >> dma_chan->shift;
+	mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), count);
+}
+
+
+
+
+/*
+ * Enables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list.  Otherwise, interrupts will not be enabled, if
+ * they were previously disabled.
+ */
+int ppc460ex_enable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+	unsigned int control;
+	ppc460ex_plb_dma_ch_t *dma_chan;
+
+	/* Bounds-check before indexing adev->chan[]. */
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	dma_chan = adev->chan[ch_id];
+	dma_chan->int_enable = 1;
+
+	/* The original accessed CR0/CTC0 with no per-channel offset, so
+	 * enabling interrupts for channels 1-3 modified channel 0
+	 * instead.  Use the same (ch_id * 0x8) stride as every other
+	 * register access in this file. */
+	control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+	control |= DMA_CIE_ENABLE;	/* Channel Interrupt Enable */
+	mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+	/* Also raise the terminal-count / end-of-transfer / error
+	 * interrupt enables in the count register. */
+	control = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8));
+	control |= DMA_CTC_TCIE | DMA_CTC_ETIE | DMA_CTC_EIE;
+	mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list.  Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ */
+int ppc460ex_disable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+	unsigned int control;
+	ppc460ex_plb_dma_ch_t *dma_chan;
+
+	/* Validate ch_id before indexing adev->chan[] (the original read
+	 * the array first). */
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	dma_chan = adev->chan[ch_id];
+
+	dma_chan->int_enable = 0;
+	control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+	control &= ~DMA_CIE_ENABLE;	/* Channel Interrupt Enable */
+	mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * This function returns the channel configuration: the cached channel
+ * descriptor is copied into *p_dma_ch, then the fields that can be read
+ * back from the polarity and control registers are refreshed.
+ */
+int ppc460ex_get_channel_config(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+				ppc460ex_plb_dma_ch_t *p_dma_ch)
+{
+	unsigned int polarity;
+	unsigned int control;
+	ppc460ex_plb_dma_ch_t *dma_chan;
+
+	/* Validate ch_id before indexing adev->chan[] (the original read
+	 * the array first, out of bounds for a bad id). */
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	dma_chan = adev->chan[ch_id];
+
+	memcpy(p_dma_ch, dma_chan, sizeof(ppc460ex_plb_dma_ch_t));
+
+	polarity = mfdcr(DCR_DMA2P40_POL);
+	p_dma_ch->polarity = polarity & GET_DMA_POLARITY(ch_id);
+
+	control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+	p_dma_ch->cp = GET_DMA_PRIORITY(control);
+	p_dma_ch->pwidth = GET_DMA_PW(control);
+	p_dma_ch->psc = GET_DMA_PSC(control);
+	p_dma_ch->pwc = GET_DMA_PWC(control);
+	p_dma_ch->phc = GET_DMA_PHC(control);
+	p_dma_ch->ce = GET_DMA_CE_ENABLE(control);
+	p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
+	p_dma_ch->shift = GET_DMA_PW(control);	/* shift tracks the PW encoding */
+	p_dma_ch->pf = GET_DMA_PREFETCH(control);
+
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Sets the priority for the DMA channel dmanr.
+ * Since this is setup by the hardware init function, this function
+ * can be used to dynamically change the priority of a channel.
+ *
+ * Acceptable priorities:
+ *
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ *
+ * An out-of-range value is only warned about (behavior kept from the
+ * original, which also proceeded after the warning).
+ */
+int ppc460ex_set_channel_priority(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+				  unsigned int priority)
+{
+	unsigned int control;
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	if ((priority != PRIORITY_LOW) &&
+	    (priority != PRIORITY_MID_LOW) &&
+	    (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) {
+		printk("%s:bad priority: 0x%x\n", __FUNCTION__, priority);
+	}
+
+	control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+	/* Clear the old priority field before or-ing in the new one; a
+	 * plain "|=" could only ever raise priority bits, never lower
+	 * them.  NOTE(review): assumes SET_DMA_PRIORITY(PRIORITY_HIGH)
+	 * covers the whole CP field — confirm against the header. */
+	control &= ~SET_DMA_PRIORITY(PRIORITY_HIGH);
+	control |= SET_DMA_PRIORITY(priority);
+	mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Returns the width of the peripheral attached to this channel. This assumes
+ * that someone who knows the hardware configuration, boot code or some other
+ * init code, already set the width.
+ *
+ * The return value is one of:
+ *   PW_8
+ *   PW_16
+ *   PW_32
+ *   PW_64
+ *
+ * The function returns 0 on error.
+ * NOTE(review): the error path actually returns DMA_STATUS_BAD_CHANNEL,
+ * not 0, and PW_8 itself encodes as 0 — confirm which contract callers
+ * rely on.
+ */
+unsigned int ppc460ex_get_peripheral_width(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+	unsigned int control;
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+	/* Extract the PW field (peripheral width encoding). */
+	return (GET_DMA_PW(control));
+}
+
+/*
+ * Enables bursting on the channel (BTEN bit in the control/count
+ * register).
+ * Note: for scatter/gather DMA this MUST be called before
+ * ppc4xx_alloc_dma_handle(), because the count register is copied into
+ * the sgl list as each element is added.
+ */
+int ppc460ex_enable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+	unsigned int ctc_reg;
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	ctc_reg = DCR_DMA2P40_CTC0 + (ch_id * 0x8);
+	mtdcr(ctc_reg, mfdcr(ctc_reg) | DMA_CTC_BTEN);
+	return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Disables bursting on the channel (BTEN bit in the control/count
+ * register).
+ * Note: for scatter/gather DMA this MUST be called before
+ * ppc4xx_alloc_dma_handle(), because the count register is copied into
+ * the sgl list as each element is added.
+ */
+int ppc460ex_disable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+	unsigned int ctc_reg;
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	ctc_reg = DCR_DMA2P40_CTC0 + (ch_id * 0x8);
+	mtdcr(ctc_reg, mfdcr(ctc_reg) & ~DMA_CTC_BTEN);
+	return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Sets the burst size (number of peripheral widths) for the channel
+ * (BSIZ bits in the control/count register).  Must be one of:
+ *   DMA_CTC_BSIZ_2
+ *   DMA_CTC_BSIZ_4
+ *   DMA_CTC_BSIZ_8
+ *   DMA_CTC_BSIZ_16
+ * Note: for scatter/gather DMA this MUST be called before
+ * ppc4xx_alloc_dma_handle(), because the count register is copied into
+ * the sgl list as each element is added.
+ */
+int ppc460ex_set_burst_size(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+			    unsigned int bsize)
+{
+	unsigned int ctc_reg, ctc_val;
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	ctc_reg = DCR_DMA2P40_CTC0 + (ch_id * 0x8);
+	/* Replace the BSIZ field while preserving all other bits. */
+	ctc_val = mfdcr(ctc_reg) & ~DMA_CTC_BSIZ_MSK;
+	ctc_val |= (bsize & DMA_CTC_BSIZ_MSK);
+	mtdcr(ctc_reg, ctc_val);
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Returns the number of bytes left to be transferred.
+ * After a DMA transfer, this should return zero.
+ * Reading this while a DMA transfer is still in progress will return
+ * unpredictable results.
+ */
+int ppc460ex_get_dma_residue(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+	unsigned int count;
+	ppc460ex_plb_dma_ch_t *dma_chan;
+
+	/* Validate ch_id before indexing adev->chan[] (the original read
+	 * the array first). */
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	dma_chan = adev->chan[ch_id];
+
+	count = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8));
+	count &= DMA_CTC_TC_MASK;
+
+	/* Convert remaining transfers back to bytes. */
+	return (count << dma_chan->shift);
+}
+
+
+/*
+ * Configures a DMA channel, including the peripheral bus width, if a
+ * peripheral is attached to the channel, the polarity of the DMAReq and
+ * DMAAck signals, etc. This information should really be setup by the boot
+ * code, since most likely the configuration won't change dynamically.
+ * If the kernel has to call this function, it's recommended that it's
+ * called from platform specific init code. The driver should not need to
+ * call this function.
+ */
+int ppc460ex_init_dma_channel(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+			      ppc460ex_plb_dma_ch_t *p_init)
+{
+	unsigned int polarity;
+	uint32_t control = 0;
+	ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+	/* Global direction flags used by legacy callers: DMA_TD marks a
+	 * peripheral-to-memory transfer. */
+	DMA_MODE_READ = (unsigned long) DMA_TD;	/* Peripheral to Memory */
+	DMA_MODE_WRITE = 0;			/* Memory to Peripheral */
+
+	if (!p_init) {
+		printk("%s: NULL p_init\n", __FUNCTION__);
+		return DMA_STATUS_NULL_POINTER;
+	}
+
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	/* Some configurations have no polarity DCR. */
+#if DCR_DMA2P40_POL > 0
+	polarity = mfdcr(DCR_DMA2P40_POL);
+#else
+	polarity = 0;
+#endif
+
+	/* Force the driver's defaults over whatever the caller passed in;
+	 * only p_init->polarity is honoured below. */
+	p_init->int_enable = 0;
+	p_init->buffer_enable = 1;
+	p_init->etd_output = 1;
+	p_init->tce_enable = 1;
+	p_init->pl = 0;
+	p_init->dai = 1;
+	p_init->sai = 1;
+	/* Duc Dang: make channel priority to 2, original is 3 */
+	p_init->cp = 2;
+	p_init->pwidth = PW_8;
+	p_init->psc = 0;
+	p_init->pwc = 0;
+	p_init->phc = 0;
+	p_init->pf = 1;
+
+	/* Setup the control register based on the values passed to
+	 * us in p_init. Then, over-write the control register with this
+	 * new value.
+	 */
+#if 0
+	control |= SET_DMA_CONTROL;
+#endif
+	control = SET_DMA_CONTROL;
+	/* clear all polarity signals and then "or" in new signal levels */
+
+//PMB - Workaround
+	//control = 0x81A2CD80;
+	//control = 0x81A00180;
+
+	polarity &= ~GET_DMA_POLARITY(ch_id);
+	polarity |= p_init->polarity;
+
+#if DCR_DMA2P40_POL > 0
+	mtdcr(DCR_DMA2P40_POL, polarity);
+#endif
+	mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+	/* save these values in our dma channel structure */
+	//memcpy(dma_chan, p_init, sizeof(ppc460ex_plb_dma_ch_t));
+	/*
+	 * The peripheral width values written in the control register are:
+	 *   PW_8                0
+	 *   PW_16               1
+	 *   PW_32               2
+	 *   PW_64               3
+	 *   PW_128              4
+	 *
+	 * Since the DMA count register takes the number of "transfers",
+	 * we need to divide the count sent to us in certain
+	 * functions by the appropriate number. It so happens that our
+	 * right shift value is equal to the peripheral width value.
+	 */
+	dma_chan->shift = p_init->pwidth;
+	dma_chan->sai = p_init->sai;
+	dma_chan->dai = p_init->dai;
+	dma_chan->tce_enable = p_init->tce_enable;
+	dma_chan->mode = DMA_MODE_MM;
+	/*
+	 * Save the control word for easy access.
+	 */
+	dma_chan->control = control;
+	/* Clear any stale status bits (all channels). */
+	mtdcr(DCR_DMA2P40_SR, 0xffffffff);
+
+	return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Program the channel's mode bits and start the transfer (CE/BEN).
+ * Returns 0 on success, DMA_STATUS_BAD_CHANNEL or
+ * DMA_STATUS_CHANNEL_NOTFREE on error.
+ */
+int ppc460ex_enable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+	unsigned int control;
+	ppc460ex_plb_dma_ch_t *dma_chan;
+
+	/* Bounds-check before any use of ch_id: the original indexed
+	 * adev->chan[] and a two-entry status_bits[] table with an
+	 * unvalidated id (out of bounds for channels 2 and 3). */
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	dma_chan = adev->chan[ch_id];
+	if (dma_chan->in_use) {
+		printk("%s:enable_dma: channel %d in use\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_CHANNEL_NOTFREE;
+	}
+
+	/* for other xfer modes, the addresses are already set */
+	control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+	control &= ~(DMA_TM_MASK | DMA_TD);	/* clear all mode bits */
+	if (dma_chan->mode == DMA_MODE_MM) {
+		/* software initiated memory to memory */
+		control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
+		control |= DMA_MODE_MM;
+		if (dma_chan->dai) {
+			control |= DMA_DAI;
+		}
+		if (dma_chan->sai) {
+			control |= DMA_SAI;
+		}
+	}
+
+	mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+	/*
+	 * Clear the CS, TS, RI bits for the channel from DMASR. This
+	 * has been observed to happen correctly only after the mode and
+	 * ETD/DCE bits in DMACRx are set above. Must do this before
+	 * enabling the channel.  Uses the same per-channel right-shift
+	 * as ppc460ex_clear_dma_status() so all four channels work (the
+	 * old status_bits[] table only covered channels 0 and 1).
+	 */
+	mtdcr(DCR_DMA2P40_SR,
+	      ((u32)DMA_CH0_ERR | (u32)DMA_CS0 | (u32)DMA_TS0) >> ch_id);
+	/*
+	 * For device-paced transfers, Terminal Count Enable apparently
+	 * must be on, and this must be turned on after the mode, etc.
+	 * bits are cleared above (at least on Redwood-6).
+	 */
+	if ((dma_chan->mode == DMA_MODE_MM_DEVATDST) ||
+	    (dma_chan->mode == DMA_MODE_MM_DEVATSRC))
+		control |= DMA_TCE_ENABLE;
+
+	/*
+	 * Now enable the channel.
+	 */
+	control |= (dma_chan->mode | DMA_CE_ENABLE);
+	control |= DMA_BEN;
+
+	mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+	dma_chan->in_use = 1;
+	return 0;
+}
+
+
+/*
+ * Stop a running channel by clearing its CE bit.  Void because legacy
+ * callers ignore errors; problems are only logged.
+ */
+void
+ppc460ex_disable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+	unsigned int control;
+	ppc460ex_plb_dma_ch_t *dma_chan;
+
+	/* Bounds-check before the in_use test: the original indexed
+	 * adev->chan[] with an unvalidated ch_id. */
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		printk("disable_dma: bad channel: %d\n", ch_id);
+		return;
+	}
+
+	dma_chan = adev->chan[ch_id];
+	if (!dma_chan->in_use) {
+		printk("disable_dma: channel %d not in use\n", ch_id);
+		return;
+	}
+
+	control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+	control &= ~DMA_CE_ENABLE;
+	mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+	dma_chan->in_use = 0;
+}
+
+
+
+
+/*
+ * Clears the channel status bits (error/CS/TS) in DMASR for @ch_id.
+ */
+int ppc460ex_clear_dma_status(unsigned int ch_id)
+{
+	if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+		/* The log-level macro was inside the format string
+		 * ("KERN_ERR %s ...") and so was printed literally. */
+		printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	mtdcr(DCR_DMA2P40_SR, ((u32)DMA_CH0_ERR | (u32)DMA_CS0 | (u32)DMA_TS0) >> ch_id);
+	return DMA_STATUS_GOOD;
+}
+
+
+/**
+ * ppc460ex_dma_eot_handler - end of transfer interrupt handler
+ *
+ * Polls DMASR until the terminal-count bit (0x80000000) comes up
+ * (bounded to ~10 reads), logging "test FAIL" if the error bit
+ * (0x00800000) is seen; then waits for the channel-busy bit
+ * (0x00000800) to drop, and finally clears the whole status register.
+ * NOTE(review): all three masks are hard-coded for channel 0 — confirm
+ * behavior when other channels raise the interrupt.
+ */
+irqreturn_t ppc460ex_4chan_dma_eot_handler(int irq, void *data)
+{
+	unsigned int data_read = 0;
+	unsigned int try_cnt = 0;
+
+	//printk("transfer complete\n");
+	data_read = mfdcr(DCR_DMA2P40_SR);
+	//printk("%s: status 0x%08x\n", __FUNCTION__, data_read);
+
+	do{
+		//while bit 3 TC done is 0
+		data_read = mfdcr(DCR_DMA2P40_SR);
+		if (data_read & 0x00800000 ) {printk("test FAIL\n"); } //see if error bit is set
+	}while(((data_read & 0x80000000) != 0x80000000) && ++try_cnt <= 10);// TC is now 0
+
+	data_read = mfdcr(DCR_DMA2P40_SR);
+	while (data_read & 0x00000800){ //while channel is busy
+		data_read = mfdcr(DCR_DMA2P40_SR);
+		printk("%s: status for busy 0x%08x\n", __FUNCTION__, data_read);
+	}
+	/* Acknowledge everything: clear all DMASR status bits. */
+	mtdcr(DCR_DMA2P40_SR, 0xffffffff);
+
+	return IRQ_HANDLED;
+}
+
+
+
+/* OF match table for the per-channel "amcc,dma-4channel" child nodes. */
+static struct of_device_id dma_per_chan_match[] = {
+	{
+		.compatible = "amcc,dma-4channel",
+	},
+	{},
+};
+
+
+
+
+#if 0
+/*** test code ***/
+static int ppc460ex_dma_memcpy_self_test(ppc460ex_plb_dma_dev_t *device, unsigned int dma_ch_id)
+{
+ ppc460ex_plb_dma_ch_t p_init;
+ int res = 0, i;
+ unsigned int control;
+ phys_addr_t *src;
+ phys_addr_t *dest;
+
+ phys_addr_t *gap;
+
+ phys_addr_t dma_dest, dma_src;
+
+ src = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+ gap = kzalloc(200, GFP_KERNEL);
+ if (!gap)
+ return -ENOMEM;
+
+
+
+ dest = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ printk("src = 0x%08x\n", (unsigned int)src);
+ printk("gap = 0x%08x\n", (unsigned int)gap);
+ printk("dest = 0x%08x\n", (unsigned int)dest);
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src)[i] = (u8)i;
+
+ printk("dump src\n");
+ DMA_HEXDUMP(src, TEST_SIZE);
+ DMA_HEXDUMP(dest, TEST_SIZE);
+#if 1
+ dma_src = dma_map_single(p_init.device->dev, src, TEST_SIZE,
+ DMA_TO_DEVICE);
+ dma_dest = dma_map_single(p_init.device->dev, dest, TEST_SIZE,
+ DMA_FROM_DEVICE);
+#endif
+ printk("%s:channel = %d chan 0x%08x\n", __FUNCTION__, device->chan[dma_ch_id]->chan_id,
+ (unsigned int)(device->chan));
+
+ p_init.polarity = 0;
+ p_init.pwidth = PW_32;
+ p_init.in_use = 0;
+ p_init.sai = 1;
+ p_init.dai = 1;
+ res = ppc460ex_init_dma_channel(device, dma_ch_id, &p_init);
+
+ if (res) {
+ printk("%32s: init_dma_channel return %d\n",
+ __FUNCTION__, res);
+ }
+ ppc460ex_clear_dma_status(dma_ch_id);
+
+ ppc460ex_set_src_addr(dma_ch_id, dma_src);
+ ppc460ex_set_dst_addr(dma_ch_id, dma_dest);
+
+ ppc460ex_set_dma_mode(device, dma_ch_id, DMA_MODE_MM);
+ ppc460ex_set_dma_count(device, dma_ch_id, TEST_SIZE);
+
+ res = ppc460ex_enable_dma_interrupt(device, dma_ch_id);
+ if (res) {
+ printk("%32s: en/disable_dma_interrupt\n",
+ __FUNCTION__);
+ }
+
+
+ if (dma_ch_id == 0)
+ control = mfdcr(DCR_DMA2P40_CR0);
+ else if (dma_ch_id == 1)
+ control = mfdcr(DCR_DMA2P40_CR1);
+
+
+ control &= ~(SET_DMA_BEN(1));
+ control &= ~(SET_DMA_PSC(3));
+ control &= ~(SET_DMA_PWC(0x3f));
+ control &= ~(SET_DMA_PHC(0x7));
+ control &= ~(SET_DMA_PL(1));
+
+
+
+ if (dma_ch_id == 0)
+ mtdcr(DCR_DMA2P40_CR0, control);
+ else if (dma_ch_id == 1)
+ mtdcr(DCR_DMA2P40_CR1, control);
+
+
+ ppc460ex_enable_dma(device, dma_ch_id);
+
+
+ if (memcmp(src, dest, TEST_SIZE)) {
+ printk("Self-test copy failed compare, disabling\n");
+ res = -ENODEV;
+ goto out;
+ }
+
+
+ return 0;
+
+ out: kfree(src);
+ kfree(dest);
+ return res;
+
+}
+
+
+
+static int test1(void)
+{
+ void *src, *dest;
+ void *src1, *dest1;
+ int i;
+ unsigned int chan;
+
+ src = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+
+ dest = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ src1 = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src1)
+ return -ENOMEM;
+
+ dest1 = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest1) {
+ kfree(src1);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src)[i] = (u8)i;
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src1)[i] = (u8)0xaa;
+
+#ifdef DEBUG_TEST
+ DMA_HEXDUMP(src, TEST_SIZE);
+ DMA_HEXDUMP(dest, TEST_SIZE);
+ DMA_HEXDUMP(src1, TEST_SIZE);
+ DMA_HEXDUMP(dest1, TEST_SIZE);
+#endif
+ chan = ppc460ex_get_dma_channel();
+
+#ifdef ENABLE_SGL
+ test_sgdma_memcpy(src, dest, src1, dest1, TEST_SIZE, chan);
+#endif
+ test_dma_memcpy(src, dest, TEST_SIZE, chan);
+
+
+ out: kfree(src);
+ kfree(dest);
+ kfree(src1);
+ kfree(dest1);
+
+ return 0;
+
+}
+#endif
+
+
+
+/*******************************************************************************
+ * Module Initialization Routine
+ *******************************************************************************
+ */
+int __devinit ppc460ex_dma_per_chan_probe(struct of_device *ofdev,
+					  const struct of_device_id *match)
+{
+	int ret = 0;
+	ppc460ex_plb_dma_ch_t *new_chan;
+	int err;
+
+	adev = dev_get_drvdata(ofdev->dev.parent);
+	BUG_ON(!adev);
+
+	/* create a per-channel descriptor */
+	new_chan = kzalloc(sizeof(*new_chan), GFP_KERNEL);
+	if (new_chan == NULL) {
+		printk("ERROR:No Free memory for allocating dma channels\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	err = of_address_to_resource(ofdev->node, 0, &new_chan->reg);
+	if (err) {
+		printk("ERROR:Can't get %s property reg\n", __FUNCTION__);
+		/* The original leaked new_chan here and returned the
+		 * stale ret (0 == success) despite the failure. */
+		ret = err;
+		goto err_free;
+	}
+	new_chan->device = adev;
+	new_chan->reg_base = ioremap(new_chan->reg.start,
+				     new_chan->reg.end - new_chan->reg.start + 1);
+	printk("PPC460ex PLB DMA engine @0x%02X_%08X size %d\n",
+	       (u32)(new_chan->reg.start >> 32),
+	       (u32)new_chan->reg.start,
+	       (u32)(new_chan->reg.end - new_chan->reg.start + 1));
+
+	/* Channel id is encoded in the register offset (0x100 + 8*id).
+	 * This replaces the old switch whose result was immediately
+	 * overwritten by the same computation. */
+	new_chan->chan_id = ((new_chan->reg.start - 0x100) & 0xfff) >> 3;
+	printk("new_chan->chan_id 0x%x\n", new_chan->chan_id);
+	adev->chan[new_chan->chan_id] = new_chan;
+	printk("new_chan->chan->chan_id 0x%x\n", adev->chan[new_chan->chan_id]->chan_id);
+
+	return 0;
+
+ err_free:
+	kfree(new_chan);
+ err:
+	return ret;
+}
+
+int __devinit ppc460ex_dma_4chan_probe(struct of_device *ofdev,
+				       const struct of_device_id *match)
+{
+	int ret = 0, irq = 0;
+	ppc460ex_plb_dma_ch_t *chan = NULL;
+
+	/* create the controller device */
+	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+	if (adev == NULL) {
+		ret = -ENOMEM;
+		goto err_adev_alloc;
+	}
+	adev->dev = &ofdev->dev;
+#if !defined(CONFIG_APM82181)
+	{
+		/* 'err' was used undeclared in the original, breaking the
+		 * build for non-APM82181 configurations. */
+		int err = of_address_to_resource(ofdev->node, 0, &adev->reg);
+		if (err)
+			printk(KERN_ERR"Can't get %s property 'reg'\n", ofdev->node->full_name);
+	}
+#endif
+	printk(KERN_INFO"Probing AMCC DMA driver\n");
+#if !defined(CONFIG_APM82181)
+	adev->reg_base = ioremap(adev->reg.start, adev->reg.end - adev->reg.start + 1);
+#endif
+
+	irq = of_irq_to_resource(ofdev->node, 0, NULL);
+	if (irq >= 0) {
+		ret = request_irq(irq, ppc460ex_4chan_dma_eot_handler,
+				  IRQF_DISABLED, "Peripheral DMA0/1", chan);
+		if (ret) {
+			ret = -EIO;
+			goto err_irq;
+		}
+	} else {
+		/* The original computed -ENXIO here and then fell
+		 * through to return 0 anyway. */
+		ret = -ENXIO;
+		goto err_irq;
+	}
+
+#if !defined(CONFIG_APM82181)
+	printk("PPC4xx PLB DMA engine @0x%02X_%08X size %d IRQ %d \n",
+	       (u32)(adev->reg.start >> 32),
+	       (u32)adev->reg.start,
+	       (u32)(adev->reg.end - adev->reg.start + 1),
+	       irq);
+#else
+	printk("PPC4xx PLB DMA engine IRQ %d\n", irq);
+#endif
+	dev_set_drvdata(&(ofdev->dev), adev);
+	of_platform_bus_probe(ofdev->node, dma_per_chan_match, &ofdev->dev);
+
+	return 0;
+
+err_irq:
+	kfree(adev);	/* the original leaked adev on this path */
+err_adev_alloc:
+	return ret;
+}
+
+
+/* OF match table for the top-level "amcc,dma" controller node. */
+static struct of_device_id dma_4chan_match[] = {
+	{
+		.compatible = "amcc,dma",
+	},
+	{},
+};
+
+/* Platform driver for the DMA controller node itself. */
+struct of_platform_driver ppc460ex_dma_4chan_driver = {
+	.name = "plb_dma",
+	.match_table = dma_4chan_match,
+	.probe = ppc460ex_dma_4chan_probe,
+};
+
+/* Platform driver for each per-channel child node. */
+struct of_platform_driver ppc460ex_dma_per_chan_driver = {
+	.name = "dma-4channel",
+	.match_table = dma_per_chan_match,
+	.probe = ppc460ex_dma_per_chan_probe,
+};
+
+
+/* Register the top-level DMA controller platform driver. */
+static int __init mod_init (void)
+{
+	printk("%s:%d\n", __FUNCTION__, __LINE__);
+	/* The printk that followed this return was unreachable; removed. */
+	return of_register_platform_driver(&ppc460ex_dma_4chan_driver);
+}
+
+/* Unregister the controller driver.  Currently unused: the matching
+ * module_exit() call below is commented out. */
+static void __exit mod_exit(void)
+{
+	of_unregister_platform_driver(&ppc460ex_dma_4chan_driver);
+}
+
+/* Register the per-channel platform driver. */
+static int __init ppc460ex_dma_per_chan_init (void)
+{
+	printk("%s:%d\n", __FUNCTION__, __LINE__);
+	/* The printk that followed this return was unreachable; removed. */
+	return of_register_platform_driver(&ppc460ex_dma_per_chan_driver);
+}
+
+/* Unregister the per-channel driver.  Currently unused: the matching
+ * module_exit() call below is commented out. */
+static void __exit ppc460ex_dma_per_chan_exit(void)
+{
+	of_unregister_platform_driver(&ppc460ex_dma_per_chan_driver);
+}
+
+subsys_initcall(ppc460ex_dma_per_chan_init);
+subsys_initcall(mod_init);
+
+//module_exit(mod_exit);
+
+//module_exit(ppc460ex_dma_per_chan_exit);
+
+MODULE_DESCRIPTION("AMCC PPC460EX 4 channel Engine Driver");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(ppc460ex_get_dma_status);
+EXPORT_SYMBOL_GPL(ppc460ex_set_src_addr);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dst_addr);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dma_mode);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dma_count);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_dma_interrupt);
+EXPORT_SYMBOL_GPL(ppc460ex_init_dma_channel);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_dma);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_dma);
+EXPORT_SYMBOL_GPL(ppc460ex_clear_dma_status);
+EXPORT_SYMBOL_GPL(ppc460ex_get_dma_residue);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_dma_interrupt);
+EXPORT_SYMBOL_GPL(ppc460ex_get_channel_config);
+EXPORT_SYMBOL_GPL(ppc460ex_set_channel_priority);
+EXPORT_SYMBOL_GPL(ppc460ex_get_peripheral_width);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_burst);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_burst);
+EXPORT_SYMBOL_GPL(ppc460ex_set_burst_size);
+
+/************************************************************************/
diff --git a/drivers/dma/ppc460ex_4chan_dma.h b/drivers/dma/ppc460ex_4chan_dma.h
new file mode 100755
index 00000000000..c9448f34de4
--- /dev/null
+++ b/drivers/dma/ppc460ex_4chan_dma.h
@@ -0,0 +1,531 @@
+
+
+#include <linux/types.h>
+
+
+
+
+#define DMA_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+
+
+#define MAX_PPC460EX_DMA_CHANNELS 4
+
+
+#define DCR_DMA0_BASE 0x200
+#define DCR_DMA1_BASE 0x208
+#define DCR_DMA2_BASE 0x210
+#define DCR_DMA3_BASE 0x218
+#define DCR_DMASR_BASE 0x220
+
+
+
+
+
+
+/* DMA Registers */
+#define DCR_DMA2P40_CR0 (DCR_DMA0_BASE + 0x0) /* DMA Channel Control 0 */
+#define DCR_DMA2P40_CTC0 (DCR_DMA0_BASE + 0x1) /* DMA Count 0 */
+#define DCR_DMA2P40_SAH0 (DCR_DMA0_BASE + 0x2) /* DMA Src Addr High 0 */
+#define DCR_DMA2P40_SAL0 (DCR_DMA0_BASE + 0x3) /* DMA Src Addr Low 0 */
+#define DCR_DMA2P40_DAH0 (DCR_DMA0_BASE + 0x4) /* DMA Dest Addr High 0 */
+#define DCR_DMA2P40_DAL0 (DCR_DMA0_BASE + 0x5) /* DMA Dest Addr Low 0 */
+#define DCR_DMA2P40_SGH0 (DCR_DMA0_BASE + 0x6) /* DMA SG Desc Addr High 0 */
+#define DCR_DMA2P40_SGL0 (DCR_DMA0_BASE + 0x7) /* DMA SG Desc Addr Low 0 */
+
+#define DCR_DMA2P40_CR1 (DCR_DMA1_BASE + 0x0) /* DMA Channel Control 1 */
+#define DCR_DMA2P40_CTC1 (DCR_DMA1_BASE + 0x1) /* DMA Count 1 */
+#define DCR_DMA2P40_SAH1 (DCR_DMA1_BASE + 0x2) /* DMA Src Addr High 1 */
+#define DCR_DMA2P40_SAL1 (DCR_DMA1_BASE + 0x3) /* DMA Src Addr Low 1 */
+#define DCR_DMA2P40_DAH1 (DCR_DMA1_BASE + 0x4) /* DMA Dest Addr High 1 */
+#define DCR_DMA2P40_DAL1 (DCR_DMA1_BASE + 0x5) /* DMA Dest Addr Low 1 */
+#define DCR_DMA2P40_SGH1 (DCR_DMA1_BASE + 0x6) /* DMA SG Desc Addr High 1 */
+#define DCR_DMA2P40_SGL1 (DCR_DMA1_BASE + 0x7) /* DMA SG Desc Addr Low 1 */
+
+#define DCR_DMA2P40_CR2 (DCR_DMA2_BASE + 0x0) /* DMA Channel Control 2 */
+#define DCR_DMA2P40_CTC2 (DCR_DMA2_BASE + 0x1) /* DMA Count 2 */
+#define DCR_DMA2P40_SAH2 (DCR_DMA2_BASE + 0x2) /* DMA Src Addr High 2 */
+#define DCR_DMA2P40_SAL2 (DCR_DMA2_BASE + 0x3) /* DMA Src Addr Low 2 */
+#define DCR_DMA2P40_DAH2 (DCR_DMA2_BASE + 0x4) /* DMA Dest Addr High 2 */
+#define DCR_DMA2P40_DAL2 (DCR_DMA2_BASE + 0x5) /* DMA Dest Addr Low 2 */
+#define DCR_DMA2P40_SGH2 (DCR_DMA2_BASE + 0x6) /* DMA SG Desc Addr High 2 */
+#define DCR_DMA2P40_SGL2 (DCR_DMA2_BASE + 0x7) /* DMA SG Desc Addr Low 2 */
+
+#define DCR_DMA2P40_CR3 (DCR_DMA3_BASE + 0x0) /* DMA Channel Control 3 */
+#define DCR_DMA2P40_CTC3 (DCR_DMA3_BASE + 0x1) /* DMA Count 3 */
+#define DCR_DMA2P40_SAH3 (DCR_DMA3_BASE + 0x2) /* DMA Src Addr High 3 */
+#define DCR_DMA2P40_SAL3 (DCR_DMA3_BASE + 0x3) /* DMA Src Addr Low 3 */
+#define DCR_DMA2P40_DAH3 (DCR_DMA3_BASE + 0x4) /* DMA Dest Addr High 3 */
+#define DCR_DMA2P40_DAL3 (DCR_DMA3_BASE + 0x5) /* DMA Dest Addr Low 3 */
+#define DCR_DMA2P40_SGH3 (DCR_DMA3_BASE + 0x6) /* DMA SG Desc Addr High 3 */
+#define DCR_DMA2P40_SGL3 (DCR_DMA3_BASE + 0x7) /* DMA SG Desc Addr Low 3 */
+
+#define DCR_DMA2P40_SR (DCR_DMASR_BASE + 0x0) /* DMA Status Register */
+#define DCR_DMA2P40_SGC (DCR_DMASR_BASE + 0x3) /* DMA Scatter/Gather Command */
+#define DCR_DMA2P40_SLP (DCR_DMASR_BASE + 0x5) /* DMA Sleep Register */
+#define DCR_DMA2P40_POL (DCR_DMASR_BASE + 0x6) /* DMA Polarity Register */
+
+
+
+/*
+ * Function return status codes
+ * These values are used to indicate whether or not the function
+ * call was successful, or a bad/invalid parameter was passed.
+ */
+#define DMA_STATUS_GOOD 0
+#define DMA_STATUS_BAD_CHANNEL 1
+#define DMA_STATUS_BAD_HANDLE 2
+#define DMA_STATUS_BAD_MODE 3
+#define DMA_STATUS_NULL_POINTER 4
+#define DMA_STATUS_OUT_OF_MEMORY 5
+#define DMA_STATUS_SGL_LIST_EMPTY 6
+#define DMA_STATUS_GENERAL_ERROR 7
+#define DMA_STATUS_CHANNEL_NOTFREE 8
+
+#define DMA_CHANNEL_BUSY 0x80000000
+
+/*
+ * These indicate status as returned from the DMA Status Register.
+ */
+#define DMA_STATUS_NO_ERROR 0
+#define DMA_STATUS_CS 1 /* Count Status */
+#define DMA_STATUS_TS 2 /* Transfer Status */
+#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
+#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
+
+/*
+ * DMA Channel Control Registers
+ */
+#ifdef CONFIG_44x
+#define PPC4xx_DMA_64BIT
+#define DMA_CR_OFFSET 1
+#else
+#define DMA_CR_OFFSET 0
+#endif
+
+#define DMA_CE_ENABLE (1<<31) /* DMA Channel Enable */
+#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31)
+#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31)
+
+#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
+#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
+#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
+
+#define DMA_TD (1<<29)
+#define SET_DMA_TD(x) (((x)&0x1)<<29)
+#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
+
+#define DMA_PL (1<<28) /* Peripheral Location */
+#define SET_DMA_PL(x) (((x)&0x1)<<28)
+#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
+
+#define EXTERNAL_PERIPHERAL 0
+#define INTERNAL_PERIPHERAL 1
+
+#define SET_DMA_PW(x) (((x)&0x7)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */
+#define DMA_PW_MASK SET_DMA_PW(7)
+#define PW_8 0
+#define PW_16 1
+#define PW_32 2
+#define PW_64 3
+#define PW_128 4
+
+
+#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET))
+
+#define DMA_DAI (1<<(25-DMA_CR_OFFSET)) /* Destination Address Increment */
+#define SET_DMA_DAI(x) (((x)&0x1)<<(25-DMA_CR_OFFSET))
+
+#define DMA_SAI (1<<(24-DMA_CR_OFFSET)) /* Source Address Increment */
+#define SET_DMA_SAI(x) (((x)&0x1)<<(24-DMA_CR_OFFSET))
+
+#define DMA_BEN (1<<(23-DMA_CR_OFFSET)) /* Buffer Enable */
+#define SET_DMA_BEN(x) (((x)&0x1)<<(23-DMA_CR_OFFSET))
+
+#define SET_DMA_TM(x) (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */
+#define DMA_TM_MASK SET_DMA_TM(3)
+#define TM_PERIPHERAL 0 /* Peripheral */
+#define TM_RESERVED 1 /* Reserved */
+#define TM_S_MM 2 /* Memory to Memory */
+#define TM_D_MM 3 /* Device Paced Memory to Memory */
+#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET))
+
+#define SET_DMA_PSC(x) (((x)&0x3)<<(19-DMA_CR_OFFSET)) /* Peripheral Setup Cycles */
+#define DMA_PSC_MASK SET_DMA_PSC(3)
+#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET))
+
+#define SET_DMA_PWC(x) (((x)&0x3F)<<(13-DMA_CR_OFFSET)) /* Peripheral Wait Cycles */
+#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
+#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET))
+
+#define SET_DMA_PHC(x) (((x)&0x7)<<(10-DMA_CR_OFFSET)) /* Peripheral Hold Cycles */
+#define DMA_PHC_MASK SET_DMA_PHC(0x7)
+#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET))
+
+#define DMA_ETD_OUTPUT (1<<(9-DMA_CR_OFFSET)) /* EOT pin is a TC output */
+#define SET_DMA_ETD(x) (((x)&0x1)<<(9-DMA_CR_OFFSET))
+
+#define DMA_TCE_ENABLE (1<<(8-DMA_CR_OFFSET))
+#define SET_DMA_TCE(x) (((x)&0x1)<<(8-DMA_CR_OFFSET))
+
+#define DMA_DEC (1<<(2)) /* Address Decrement */
+#define SET_DMA_DEC(x) (((x)&0x1)<<2)
+#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
+
+
+/*
+ * Transfer Modes
+ * These modes are defined in a way that makes it possible to
+ * simply "or" in the value in the control register.
+ */
+
+#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
+
+ /* Device-paced memory to memory, */
+ /* device is at source address */
+#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
+
+ /* Device-paced memory to memory, */
+ /* device is at destination address */
+#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
+
+#define SGL_LIST_SIZE 16384
+#define DMA_PPC4xx_SIZE SGL_LIST_SIZE
+
+#define SET_DMA_PRIORITY(x) (((x)&0x3)<<(6-DMA_CR_OFFSET)) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
+#define PRIORITY_LOW 0
+#define PRIORITY_MID_LOW 1
+#define PRIORITY_MID_HIGH 2
+#define PRIORITY_HIGH 3
+#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET))
+
+
+#define SET_DMA_PREFETCH(x) (((x)&0x3)<<(4-DMA_CR_OFFSET)) /* Memory Read Prefetch */
+#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
+#define PREFETCH_1 0 /* Prefetch 1 Double Word */
+#define PREFETCH_2 1
+#define PREFETCH_4 2
+#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET))
+
+#define DMA_PCE (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */
+#define SET_DMA_PCE(x) (((x)&0x1)<<(3-DMA_CR_OFFSET))
+#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET))
+
+/*
+ * DMA Polarity Configuration Register
+ */
+#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3)))
+#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3)))
+#define EOT_ActiveLow(chan) (1<<(29-(chan*3))) /* End of Transfer */
+
+/*
+ * DMA Sleep Mode Register
+ */
+#define SLEEP_MODE_ENABLE (1<<21)
+
+/*
+ * DMA Status Register
+ */
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR (1<<23) /* DMA Channel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<18)
+#define DMA_IN_DMA_REQ2 (1<<17)
+#define DMA_IN_DMA_REQ3 (1<<16)
+
+#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<14)
+#define DMA_EXT_DMA_REQ2 (1<<13)
+#define DMA_EXT_DMA_REQ3 (1<<12)
+
+#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<10)
+#define DMA_CH2_BUSY (1<<9)
+#define DMA_CH3_BUSY (1<<8)
+
+#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
+#define DMA_SG1 (1<<6)
+#define DMA_SG2 (1<<5)
+#define DMA_SG3 (1<<4)
+
+/* DMA Channel Count Register */
+#define DMA_CTC_TCIE (1<<29) /* Terminal Count Interrupt Enable */
+#define DMA_CTC_ETIE (1<<28) /* EOT Interrupt Enable */
+#define DMA_CTC_EIE (1<<27) /* Error Interrupt Enable */
+#define DMA_CTC_BTEN (1<<23) /* Burst Enable/Disable bit */
+#define DMA_CTC_BSIZ_MSK (3<<21) /* Mask of the Burst size bits */
+#define DMA_CTC_BSIZ_2 (0)
+#define DMA_CTC_BSIZ_4 (1<<21)
+#define DMA_CTC_BSIZ_8 (2<<21)
+#define DMA_CTC_BSIZ_16 (3<<21)
+#define DMA_CTC_TC_MASK 0xFFFFF
+
+/*
+ * DMA SG Command Register
+ */
+#define SSG_ENABLE(chan) (1<<(31-chan)) /* Start Scatter Gather */
+#define SSG_MASK_ENABLE(chan) (1<<(15-chan)) /* Enable writing to SSG0 bit */
+
+
+/*
+ * DMA Scatter/Gather Descriptor Bit fields
+ */
+#define SG_LINK (1<<31) /* Link */
+#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
+#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
+#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
+#define SG_COUNT_MASK 0xFFFF /* Count Field */
+
+#define SET_DMA_CONTROL \
+ (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
+ SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
+ SET_DMA_PL(p_init->pl) | /* peripheral location */ \
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
+ SET_DMA_PREFETCH(p_init->pf) /* read prefetch */)
+
+#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
+
+
+/**
+ * struct ppc460ex_plb_dma_device - internal representation of a DMA device
+ * @reg_base: mapped base of the controller registers
+ * @dev: generic device backing this controller
+ * @reg: resource describing the register window
+ * @id: HW DMA Device selector
+ * @chan: per-channel state, one entry per hardware channel
+ * @queue: wait queue for clients of the controller
+ */
+typedef struct ppc460ex_plb_dma_device {
+ //struct platform_device *pdev;
+ void __iomem *reg_base;
+ struct device *dev;
+ struct resource reg; /* Resource for register */
+ int id;
+ struct ppc460ex_plb_dma_chan *chan[MAX_PPC460EX_DMA_CHANNELS];
+ wait_queue_head_t queue;
+} ppc460ex_plb_dma_dev_t;
+
+typedef uint32_t sgl_handle_t;
+/**
+ * struct ppc460ex_plb_dma_chan - internal representation of a DMA channel
+ * @reg_base: mapped base of the channel registers
+ * @device: parent DMA controller
+ * @cleanup_watchdog: timer guarding transfer cleanup
+ * @reg: resource describing the register window
+ * @chan_id: hardware channel index
+ * @irq_tasklet: bottom half for channel interrupt handling
+ * @phandle: scatter/gather list handle bound to this channel
+ * @in_use: set while the channel is claimed, clear when available
+ *
+ * The remaining members hold the channel configuration and are
+ * described by the inline comments below.
+ */
+typedef struct ppc460ex_plb_dma_chan {
+ void __iomem *reg_base;
+ struct ppc460ex_plb_dma_device *device;
+ struct timer_list cleanup_watchdog;
+ struct resource reg; /* Resource for register */
+ unsigned int chan_id;
+ struct tasklet_struct irq_tasklet;
+ sgl_handle_t *phandle;
+ unsigned short in_use; /* set when channel is being used, clr when
+ * available.
+ */
+ /*
+ * Valid polarity settings:
+ * DMAReq_ActiveLow(n)
+ * DMAAck_ActiveLow(n)
+ * EOT_ActiveLow(n)
+ *
+ * n is 0 to max dma chans
+ */
+ unsigned int polarity;
+
+ char buffer_enable; /* Boolean: buffer enable */
+ char tce_enable; /* Boolean: terminal count enable */
+ char etd_output; /* Boolean: eot pin is a tc output */
+ char pce; /* Boolean: parity check enable */
+
+ /*
+ * Peripheral location:
+ * INTERNAL_PERIPHERAL (UART0 on the 405GP)
+ * EXTERNAL_PERIPHERAL
+ */
+ char pl; /* internal/external peripheral */
+
+ /*
+ * Valid pwidth settings:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ */
+ unsigned int pwidth;
+
+ char dai; /* Boolean: dst address increment */
+ char sai; /* Boolean: src address increment */
+
+ /*
+ * Valid psc settings: 0-3
+ */
+ unsigned int psc; /* Peripheral Setup Cycles */
+
+ /*
+ * Valid pwc settings:
+ * 0-63
+ */
+ unsigned int pwc; /* Peripheral Wait Cycles */
+
+ /*
+ * Valid phc settings:
+ * 0-7
+ */
+ unsigned int phc; /* Peripheral Hold Cycles */
+
+ /*
+ * Valid cp (channel priority) settings:
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ */
+ unsigned int cp; /* channel priority */
+
+ /*
+ * Valid pf (memory read prefetch) settings:
+ *
+ * PREFETCH_1
+ * PREFETCH_2
+ * PREFETCH_4
+ */
+ unsigned int pf; /* memory read prefetch */
+
+ /*
+ * Boolean: channel interrupt enable
+ * NOTE: for sgl transfers, only the last descriptor will be setup to
+ * interrupt.
+ */
+ char int_enable;
+
+ char shift; /* easy access to byte_count shift, based on */
+ /* the width of the channel */
+
+ uint32_t control; /* channel control word */
+
+	/* These variables are used ONLY in single dma transfers */
+ unsigned int mode; /* transfer mode */
+ phys_addr_t addr;
+ char ce; /* channel enable */
+ char int_on_final_sg;/* for scatter/gather - only interrupt on last sg */
+
+} ppc460ex_plb_dma_ch_t;
+
+/*
+ * PPC44x DMA implementations have a slightly different
+ * descriptor layout. Probably moved about due to the
+ * change to 64-bit addresses and link pointer. I don't
+ * know why they didn't just leave control_count after
+ * the dst_addr.
+ */
+#ifdef PPC4xx_DMA_64BIT
+/* In-memory scatter/gather descriptor, 64-bit address layout. */
+typedef struct {
+	uint32_t control;	/* channel control word */
+	uint32_t control_count;	/* LK/TCI/ETI/ERI bits plus transfer count */
+	phys_addr_t src_addr;
+	phys_addr_t dst_addr;
+	phys_addr_t next;	/* physical address of the next descriptor */
+} ppc_sgl_t;
+#else
+/* In-memory scatter/gather descriptor, 32-bit address layout. */
+typedef struct {
+	uint32_t control;	/* channel control word */
+	phys_addr_t src_addr;
+	phys_addr_t dst_addr;
+	uint32_t control_count;	/* LK/TCI/ETI/ERI bits plus transfer count */
+	uint32_t next;		/* physical address of the next descriptor */
+} ppc_sgl_t;
+#endif
+
+
+
+/* Book-keeping header for one scatter/gather list.  It sits at the start
+ * of the coherent allocation that also holds the ppc_sgl_t descriptors
+ * (phead..ptail grow into the same region). */
+typedef struct {
+	unsigned int ch_id;
+	uint32_t control;	/* channel ctrl word; loaded from each descrptr */
+	uint32_t sgl_control;	/* LK, TCI, ETI, and ERI bits in sgl descriptor */
+	dma_addr_t dma_addr;	/* dma (physical) address of this list */
+	dma_addr_t dummy;	/*Dummy variable to allow quad word alignment*/
+	ppc_sgl_t *phead;	/* first descriptor (CPU address) */
+	dma_addr_t phead_dma;	/* first descriptor (DMA address) */
+	ppc_sgl_t *ptail;	/* last descriptor (CPU address) */
+	dma_addr_t ptail_dma;	/* last descriptor (DMA address) */
+} sgl_list_info_t;
+
+/* Pair of CPU-visible buffer pointers plus their bus addresses, as used
+ * for simple two-buffer DMA tests/allocations. */
+typedef struct {
+	phys_addr_t *src_addr;		/* CPU pointer to the source buffer */
+	phys_addr_t *dst_addr;		/* CPU pointer to the destination buffer */
+	phys_addr_t dma_src_addr;	/* bus address of the source buffer */
+	phys_addr_t dma_dst_addr;	/* bus address of the destination buffer */
+} pci_alloc_desc_t;
+
+/* Helpers over the shared DMA status register (DCR_DMA2P40_SR): the bit
+ * positions follow the DMA_CSx/DMA_CHx_BUSY/DMA_SGx layout above.
+ * NOTE(review): masks built with a signed "1 << (31-id)" shift into the
+ * sign bit for id == 0 — consider 1u to avoid implementation-defined
+ * behavior; verify before changing. */
+#define PPC460EX_DMA_SGXFR_COMPLETE(id)	(!((1 << (11-id)) & mfdcr(DCR_DMA2P40_SR)))
+#define PPC460EX_DMA_CHAN_BUSY(id)	( (1 << (11-id)) & mfdcr(DCR_DMA2P40_SR) )
+#define DMA_STATUS(id)			(mfdcr(DCR_DMA2P40_SR))
+#define CLEAR_DMA_STATUS(id)		(mtdcr(DCR_DMA2P40_SR, 0xFFFFFFFF))
+#define PPC460EX_DMA_SGSTAT_FREE(id)	(!((1 << (7-id)) & mfdcr(DCR_DMA2P40_SR)) )
+#define PPC460EX_DMA_TC_REACHED(id)	( (1 << (31-id)) & mfdcr(DCR_DMA2P40_SR) )
+#define PPC460EX_DMA_CHAN_XFR_COMPLETE(id)	( (!PPC460EX_DMA_CHAN_BUSY(id)) && (PPC460EX_DMA_TC_REACHED(id)) )
+#define PPC460EX_DMA_CHAN_SGXFR_COMPLETE(id)	( (!PPC460EX_DMA_CHAN_BUSY(id)) && PPC460EX_DMA_SGSTAT_FREE(id) )
+#define PPC460EX_DMA_SG_IN_PROGRESS(id)	( (1 << (7-id)) | (1 << (11-id)) )
+#define PPC460EX_DMA_SG_OP_COMPLETE(id)	( (PPC460EX_DMA_SG_IN_PROGRESS(id) & DMA_STATUS(id) ) == 0)
+
+extern ppc460ex_plb_dma_dev_t *adev;
+int ppc460ex_init_dma_channel(ppc460ex_plb_dma_dev_t *adev,
+ unsigned int ch_id,
+ ppc460ex_plb_dma_ch_t *p_init);
+
+int ppc460ex_set_src_addr(int ch_id, phys_addr_t src_addr);
+
+int ppc460ex_set_dst_addr(int ch_id, phys_addr_t dst_addr);
+
+int ppc460ex_set_dma_mode(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int mode);
+
+void ppc460ex_set_dma_count(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int count);
+
+int ppc460ex_enable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_enable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_get_dma_channel(void);
+
+void ppc460ex_disable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_clear_dma_status(unsigned int ch_id);
+
+#if 0
+extern int test_dma_memcpy(void *src, void *dst, unsigned int length, unsigned int dma_ch);
+
+extern int test_sgdma_memcpy(void *src, void *dst, void *src1, void *dst1,
+ unsigned int length, unsigned int dma_ch);
+#endif
diff --git a/drivers/dma/ppc460ex_4chan_sgdma.c b/drivers/dma/ppc460ex_4chan_sgdma.c
new file mode 100755
index 00000000000..fb26dd76ce7
--- /dev/null
+++ b/drivers/dma/ppc460ex_4chan_sgdma.c
@@ -0,0 +1,1003 @@
+/*
+ * Copyright(c) 2008 Applied Micro Circuits Corporation(AMCC). All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <asm/dcr-regs.h>
+#include <asm/dcr.h>
+#include <linux/delay.h>
+#include <asm/cacheflush.h>
+#include "ppc460ex_4chan_dma.h"
+#include <asm/page.h>
+#include <asm/time.h>
+#include <linux/pipe_fs_i.h>
+#include <linux/splice.h>
+
+#define SGDMA_MAX_POLL_COUNT 100000000
+#define SGDMA_POLL_DELAY 5
+
+static phys_addr_t splice_src_dma_addrs[PIPE_BUFFERS];
+static dma_addr_t splice_dst_dma_addrs[PIPE_BUFFERS];
+
+//#define DEBUG_SPLICE_DMA 1
+//#define SPLICE_DMA_COHERENT 1
+//#define DEBUG_SPLICE_DMA_TIMECAL 1
+
+extern int ppc460ex_disable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+extern int ppc460ex_disable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+extern int ppc460ex_enable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+void *dma_mem_page = NULL;
+
+
+
+
+
+/* Fix: the original printk(KERN_DEBUG,x) passed x as a vararg to a
+ * format string with no conversions, so x was never printed. */
+#define dma_pr(x) printk(KERN_DEBUG "%s", (x))
+
+
+/*
+ * Load the scatter/gather descriptor (physical) address into the SG
+ * address register(s) of the given channel.  Returns 0 on success or
+ * DMA_STATUS_BAD_CHANNEL for an out-of-range channel id.
+ */
+int ppc460ex_set_sg_addr(int ch_id, phys_addr_t sg_addr)
+{
+	if (unlikely(ch_id >= MAX_PPC460EX_DMA_CHANNELS)) {
+		printk("%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+#ifdef PPC4xx_DMA_64BIT
+	/* 64-bit addresses: high word to SGH, then low word to SGL. */
+	mtdcr(DCR_DMA2P40_SGH0 + ch_id*8, sg_addr >> 32);
+#endif
+	mtdcr(DCR_DMA2P40_SGL0 + ch_id*8, (u32)sg_addr);
+
+	return 0;
+}
+
+/*
+ * Busy-wait until the scatter/gather transfer on @chan completes.
+ * Returns 0 on completion or -ETIME if SGDMA_MAX_POLL_COUNT polls
+ * elapse without the channel going idle.
+ */
+static int
+poll_for_sgdma_done(int chan)
+{
+	int i;
+	volatile int status = 0;
+#ifdef DEBUG_SPLICE_DMA_TIMECAL
+	u64 time1=0, time2=0, timediff=0;
+#endif
+
+#ifdef DEBUG_SPLICE_DMA_TIMECAL
+	/* Fix: get_tbu() is a 32-bit value, so the original
+	 * (get_tbu() << 32) shifted past the type width (undefined
+	 * behavior); widen to u64 before shifting. */
+	time1 = ((u64)get_tbu() << 32) | get_tbl();
+#endif
+	for(i = 0; i < SGDMA_MAX_POLL_COUNT; i++) {
+#ifdef DEBUG_SPLICE_DMA
+		if(i%16 == 0)
+			printk("%s:%s:%d - waiting %d\n", __FILE__, __FUNCTION__, __LINE__, i);
+#endif
+		status = PPC460EX_DMA_CHAN_SGXFR_COMPLETE(chan);
+		if(status) {
+#ifdef DEBUG_SPLICE_DMA
+			printk("%s:%s:%d - Breaking\n", __FILE__, __FUNCTION__, __LINE__);
+#endif
+			break;
+		}
+
+#ifdef DEBUG_SPLICE_DMA
+		printk("status = %d dma_status = 0x%08x\n", status, DMA_STATUS(chan));
+#endif
+		//udelay(SGDMA_POLL_DELAY);
+	}
+#ifdef DEBUG_SPLICE_DMA_TIMECAL
+	time2 = ((u64)get_tbu() << 32) | get_tbl();
+#endif
+
+#ifdef DEBUG_SPLICE_DMA_TIMECAL
+	printk("%s:%s:%d time taken for transfer is %llu\n",
+	       __FILE__, __FUNCTION__, __LINE__, time2-time1);
+#endif
+	if(unlikely(i >= SGDMA_MAX_POLL_COUNT)) {
+		printk("%s:%s:%d - timeout\n",
+		       __FILE__, __FUNCTION__, __LINE__);
+		return -ETIME;
+	}
+
+	return 0;
+}
+
+/*
+ * Return the widest transfer width (in bits) whose byte granularity
+ * divides @align, where @align is the OR of source address, destination
+ * address and byte count.  Falls back to 8-bit transfers.
+ */
+static int
+get_transfer_width(u64 align)
+{
+	static const struct {
+		u64 mask;
+		int width;
+	} widths[] = {
+		{ 0xF, 128 },
+		{ 0x7,  64 },
+		{ 0x3,  32 },
+		{ 0x1,  16 },
+	};
+	int i;
+
+	for (i = 0; i < 4; i++)
+		if ((align & widths[i].mask) == 0)
+			return widths[i].width;
+
+	return 8;
+}
+
+
+/*
+ * Add a new sgl descriptor to the end of a scatter/gather list
+ * which was created by alloc_dma_handle().
+ *
+ * For a memory to memory transfer, both dma addresses must be
+ * valid. For a peripheral to memory transfer, one of the addresses
+ * must be set to NULL, depending on the direction of the transfer:
+ * memory to peripheral: set dst_addr to NULL,
+ * peripheral to memory: set src_addr to NULL.
+ */
+int ppc460ex_add_dma_sgl(ppc460ex_plb_dma_dev_t *adev,
+			 sgl_handle_t handle,
+			 phys_addr_t src_addr,
+			 phys_addr_t dst_addr,
+			 unsigned int count)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+	ppc460ex_plb_dma_ch_t *p_dma_ch;
+	u64 align;
+	int tr_width = 8; /* initial value 8 bits */
+#ifdef DEBUG_SPLICE_DMA
+	printk("%s:%s:%d - Filling in dma sgl list\n", __func__, __FILE__, __LINE__);
+#endif
+
+	if (unlikely(!handle)) {
+		printk("%s: null handle\n", __FUNCTION__);
+		return DMA_STATUS_BAD_HANDLE;
+	}
+	if (unlikely(psgl->ch_id >= MAX_PPC460EX_DMA_CHANNELS)) {
+		printk("%s: bad channel %d\n", __FUNCTION__, psgl->ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch = adev->chan[psgl->ch_id];
+	/* Widest usable bus width is limited by the least-aligned of
+	 * source address, destination address and byte count. */
+	align = src_addr | dst_addr | count;
+
+	tr_width = get_transfer_width(align);
+	switch(tr_width) {
+	case 128:
+		p_dma_ch->pwidth = PW_128;
+		break;
+	case 64:
+		p_dma_ch->pwidth = PW_64;
+		break;
+	case 32:
+		p_dma_ch->pwidth = PW_32;
+		break;
+	case 16:
+		p_dma_ch->pwidth = PW_16;
+		break;
+	default:
+		p_dma_ch->pwidth = PW_8;
+		break;
+	}
+
+	/* Re-check the alignment against the chosen width; on mismatch we
+	 * only warn and proceed (should not trigger given the selection
+	 * logic above). */
+	{
+		int error = 0;
+		u64 aligned =
+		    src_addr | dst_addr | count;
+		switch (p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if (aligned & 0x1)
+				error = 1;
+			break;
+		case PW_32:
+			if (aligned & 0x3)
+				error = 1;
+			break;
+		case PW_64:
+			if (aligned & 0x7)
+				error = 1;
+			break;
+		case PW_128:
+			if (aligned & 0xf)
+				error = 1;
+			break;
+		default:
+			printk("%s:set_dma_count: invalid bus width: 0x%x\n", __FUNCTION__,
+			       p_dma_ch->pwidth);
+			return DMA_STATUS_GENERAL_ERROR;
+		}
+		if (unlikely(error))
+			printk
+			    ("Warning: set_dma_count count 0x%x bus width %d aligned= 0x%llx\n",
+			     count, p_dma_ch->pwidth, aligned);
+	}
+
+	/* byte count -> transfer count conversion shift */
+	p_dma_ch->shift = p_dma_ch->pwidth;
+
+
+	/* No room for another descriptor in the backing allocation. */
+	if (unlikely((unsigned) (psgl->ptail + 1) >= ((unsigned) psgl + SGL_LIST_SIZE))) {
+		printk("sgl handle out of memory \n");
+		return DMA_STATUS_OUT_OF_MEMORY;
+	}
+
+	if (!psgl->ptail) {
+		/* First descriptor: it lives right after the list header. */
+		psgl->phead = (ppc_sgl_t *)
+		    ((((unsigned) psgl + sizeof (sgl_list_info_t))) );
+		psgl->phead_dma = (psgl->dma_addr + sizeof(sgl_list_info_t)) ;
+		psgl->ptail = psgl->phead;
+		psgl->ptail_dma = psgl->phead_dma;
+	}
+	else {
+		if(p_dma_ch->int_on_final_sg) {
+			/* mask out all dma interrupts, except error, on tail
+			   before adding new tail. */
+			psgl->ptail->control_count &=
+			    ~(SG_TCI_ENABLE | SG_ETI_ENABLE);
+		}
+		/* Link the old tail to the new descriptor, then advance. */
+		psgl->ptail->next = psgl->ptail_dma + sizeof(ppc_sgl_t);
+		psgl->ptail++;
+		psgl->ptail_dma += sizeof(ppc_sgl_t);
+	}
+	psgl->ptail->control = psgl->control | SET_DMA_PW(p_dma_ch->pwidth);
+#if !defined(CONFIG_APM82181)
+	/* Move to Highband segment to expect higher performance */
+	psgl->ptail->src_addr = src_addr | (0x8ULL << 32);
+	psgl->ptail->dst_addr = dst_addr | (0x8ULL << 32);
+#else /* APM821x1 */
+	psgl->ptail->src_addr = src_addr;
+	psgl->ptail->dst_addr = dst_addr;
+#endif
+#ifdef DEBUG_SPLICE_DMA
+	/* NOTE(review): this debug-only assignment is immediately
+	 * overwritten by the unconditional one below — verify it can be
+	 * dropped. */
+	psgl->ptail->control_count = (count >> p_dma_ch->shift);
+	// | psgl->sgl_control;
+#endif
+	psgl->ptail->control_count = (count >> p_dma_ch->shift) | psgl->sgl_control;
+
+	/* Terminate the chain; enable_dma_sgl() relies on this. */
+	psgl->ptail->next = (uint32_t) NULL;
+
+#ifdef DEBUG_SPLICE_DMA
+	printk("count=%d control=0x%08x p_dma_ch->pwidth=%d bits=0x%08x\n",
+	       count, psgl->ptail->control, p_dma_ch->pwidth, SET_DMA_PW(p_dma_ch->pwidth));
+	printk("src_addr=0x%llx\n", psgl->ptail->src_addr);
+	printk("dst_addr=0x%llx\n", psgl->ptail->dst_addr);
+	printk("control_count=0x%08x\n", psgl->ptail->control_count);
+	printk("sgl_control=0x%08x\n", psgl->sgl_control);
+#endif
+
+	return DMA_STATUS_GOOD;
+
+}
+
+
+/*
+ * Enable (start) the DMA described by the sgl handle.
+ *
+ * Terminates the descriptor chain, points the channel's SG address
+ * register at the head of the list, and sets the channel's start bit
+ * in the SG command register.
+ */
+int ppc460ex_enable_dma_sgl(ppc460ex_plb_dma_dev_t *adev, sgl_handle_t handle)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *)handle;
+	ppc460ex_plb_dma_ch_t *p_dma_ch;
+	uint32_t sg_command;
+
+
+	if (unlikely(!handle)) {
+		printk("%s: null handle\n", __FUNCTION__);
+		return DMA_STATUS_BAD_HANDLE;
+	}
+	if (unlikely(psgl->ch_id >= MAX_PPC460EX_DMA_CHANNELS)) {
+		printk("%s: bad channel %d\n", __FUNCTION__, psgl->ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch = adev->chan[psgl->ch_id];
+	psgl->ptail->control_count &= ~SG_LINK; /* make this the last dscrptr */
+	sg_command = mfdcr(DCR_DMA2P40_SGC);
+	ppc460ex_set_sg_addr(psgl->ch_id, psgl->phead_dma);
+	sg_command |= SSG_ENABLE(psgl->ch_id);
+//PMB - work around for PLB
+	/* NOTE(review): clears bits 24-27 of the SG command register as a
+	 * PLB workaround — confirm against the 460EX errata. */
+	sg_command &= 0xF0FFFFFF;
+	mtdcr(DCR_DMA2P40_SGC, sg_command); /* start transfer */
+
+	return 0;
+}
+
+/*
+ * Halt an active scatter/gather DMA operation by clearing the channel's
+ * start bit in the SG command register.  Returns 0 on success, or a
+ * DMA_STATUS_* code for a bad handle/channel.
+ */
+int ppc460ex_disable_dma_sgl(ppc460ex_plb_dma_dev_t *adev, sgl_handle_t handle)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
+
+	if (unlikely(!handle)) {
+		printk("%s: null handle\n", __FUNCTION__);
+		return DMA_STATUS_BAD_HANDLE;
+	}
+	if (unlikely(psgl->ch_id >= MAX_PPC460EX_DMA_CHANNELS)) {
+		printk("%s: bad channel %d\n", __FUNCTION__, psgl->ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	/* Read-modify-write the SG command register to stop the channel. */
+	mtdcr(DCR_DMA2P40_SGC,
+	      mfdcr(DCR_DMA2P40_SGC) & ~SSG_ENABLE(psgl->ch_id));
+	return 0;
+}
+
+
+/*
+ * Returns number of bytes left to be transferred from the entire sgl list.
+ * *src_addr and *dst_addr get set to the source/destination address of
+ * the sgl descriptor where the DMA stopped, or cleared to 0 on error.
+ *
+ * An sgl transfer must NOT be active when this function is called.
+ */
+int ppc460ex_get_dma_sgl_residue(ppc460ex_plb_dma_dev_t *adev, sgl_handle_t handle, phys_addr_t * src_addr,
+				 phys_addr_t * dst_addr)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
+	ppc460ex_plb_dma_ch_t *p_dma_ch;
+	ppc_sgl_t *pnext, *sgl_addr;
+	uint32_t count_left;
+
+	if (unlikely(!handle)) {
+		printk("%s: null handle\n", __FUNCTION__);
+		return DMA_STATUS_BAD_HANDLE;
+	}
+	if (unlikely(psgl->ch_id >= MAX_PPC460EX_DMA_CHANNELS)) {
+		printk("%s: bad channel %d\n", __FUNCTION__, psgl->ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	/* Descriptor the engine stopped at, and its remaining count. */
+	sgl_addr = (ppc_sgl_t *) __va(mfdcr(DCR_DMA2P40_SGL0 + (psgl->ch_id * 0x8)));
+	count_left = mfdcr(DCR_DMA2P40_CTC0 + (psgl->ch_id * 0x8)) & SG_COUNT_MASK;
+	if (unlikely(!sgl_addr)) {
+		printk("%s: sgl addr register is null\n", __FUNCTION__);
+		goto error;
+	}
+	/* Walk the list looking for the descriptor the engine stopped at. */
+	pnext = psgl->phead;
+	while (pnext &&
+	       ((unsigned) pnext < ((unsigned) psgl + SGL_LIST_SIZE) &&
+		(pnext != sgl_addr))
+	       ) {
+		pnext++;
+	}
+	if (pnext == sgl_addr) {	/* found the sgl descriptor */
+
+		*src_addr = pnext->src_addr;
+		*dst_addr = pnext->dst_addr;
+
+		/*
+		 * Now search the remaining descriptors and add their count.
+		 * We already have the remaining count from this descriptor in
+		 * count_left.
+		 */
+		pnext++;
+
+		while ((pnext != psgl->ptail) &&
+		       ((unsigned) pnext < ((unsigned) psgl + SGL_LIST_SIZE))
+		       ) {
+			count_left += pnext->control_count & SG_COUNT_MASK;
+			/* Fix: advance the cursor — the original loop never
+			 * incremented pnext and spun forever here. */
+			pnext++;
+		}
+		if (unlikely(pnext != psgl->ptail)) {	/* should never happen */
+			printk
+			    ("%s:error (1) psgl->ptail 0x%x handle 0x%x\n", __FUNCTION__,
+			     (unsigned int) psgl->ptail, (unsigned int) handle);
+			goto error;
+		}
+		/* success */
+		p_dma_ch = adev->chan[psgl->ch_id];
+		return (count_left << p_dma_ch->shift);	/* count in bytes */
+
+	} else {
+		/* this shouldn't happen */
+		printk
+		    ("get_dma_sgl_residue, unable to match current address 0x%x, handle 0x%x\n",
+		     (unsigned int) sgl_addr, (unsigned int) handle);
+
+	}
+
+ error:
+	/* Fix: write through the out-parameters; the original assigned the
+	 * local pointer variables, which had no effect on the caller. */
+	if (src_addr)
+		*src_addr = 0;
+	if (dst_addr)
+		*dst_addr = 0;
+	return 0;
+
+}
+
+/*
+ * Returns the address(es) of the buffer(s) contained in the head element of
+ * the scatter/gather list. The element is removed from the scatter/gather
+ * list and the next element becomes the head.
+ *
+ * This function should only be called when the DMA is not active.
+ */
+int ppc460ex_delete_dma_sgl_element(sgl_handle_t handle, phys_addr_t * src_dma_addr,
+				    phys_addr_t * dst_dma_addr)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
+
+	if (unlikely(!handle)) {
+		printk("%s: null handle\n", __FUNCTION__);
+		return DMA_STATUS_BAD_HANDLE;
+	}
+	if (unlikely(psgl->ch_id >= MAX_PPC460EX_DMA_CHANNELS)) {
+		printk("%s: bad channel %d\n", __FUNCTION__, psgl->ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	if (unlikely(!psgl->phead)) {
+		printk("%s: sgl list empty\n", __FUNCTION__);
+		/* Fix: write through the out-parameters; the original
+		 * assigned the local pointer variables, leaving the
+		 * caller's values untouched. */
+		if (src_dma_addr)
+			*src_dma_addr = 0;
+		if (dst_dma_addr)
+			*dst_dma_addr = 0;
+		return DMA_STATUS_SGL_LIST_EMPTY;
+	}
+
+	/* Hand back the head element's buffer addresses. */
+	*src_dma_addr = (phys_addr_t) psgl->phead->src_addr;
+	*dst_dma_addr = (phys_addr_t) psgl->phead->dst_addr;
+
+	if (psgl->phead == psgl->ptail) {
+		/* last descriptor on the list */
+		psgl->phead = NULL;
+		psgl->ptail = NULL;
+	} else {
+		/* Advance the head (both CPU and DMA views). */
+		psgl->phead++;
+		psgl->phead_dma += sizeof(ppc_sgl_t);
+	}
+
+	return DMA_STATUS_GOOD;
+
+}
+
+
+/*
+ * Create a scatter/gather list handle. This is simply a structure which
+ * describes a scatter/gather list.
+ *
+ * A handle is returned in "handle" which the driver should save in order to
+ * be able to access this list later. A chunk of memory will be allocated
+ * to be used by the API for internal management purposes, including managing
+ * the sg list and allocating memory for the sgl descriptors. One page should
+ * be more than enough for that purpose. Perhaps it's a bit wasteful to use
+ * a whole page for a single sg list, but most likely there will be only one
+ * sg list per channel.
+ *
+ * Interrupt notes:
+ * Each sgl descriptor has a copy of the DMA control word which the DMA engine
+ * loads in the control register. The control word has a "global" interrupt
+ * enable bit for that channel. Interrupts are further qualified by a few bits
+ * in the sgl descriptor count register. In order to setup an sgl, we have to
+ * know ahead of time whether or not interrupts will be enabled at the completion
+ * of the transfers. Thus, enable_dma_interrupt()/disable_dma_interrupt() MUST
+ * be called before calling alloc_dma_handle(). If the interrupt mode will never
+ * change after powerup, then enable_dma_interrupt()/disable_dma_interrupt()
+ * do not have to be called -- interrupts will be enabled or disabled based
+ * on how the channel was configured after powerup by the hw_init_dma_channel()
+ * function. Each sgl descriptor will be setup to interrupt if an error occurs;
+ * however, only the last descriptor will be setup to interrupt. Thus, an
+ * interrupt will occur (if interrupts are enabled) only after the complete
+ * sgl transfer is done.
+ */
+int ppc460ex_alloc_dma_handle(ppc460ex_plb_dma_dev_t *adev, sgl_handle_t *phandle,
+			      unsigned int mode, unsigned int ch_id)
+{
+	sgl_list_info_t *psgl = NULL;
+	/* NOTE(review): static local -- the bus address of the shared
+	 * descriptor page persists across calls; this is not reentrant. */
+	static dma_addr_t dma_addr;
+	ppc460ex_plb_dma_ch_t *p_dma_ch;
+	uint32_t sg_command;
+
+	/* Validate arguments before touching adev->chan[].  The original
+	 * indexed the channel array -- and dereferenced the result in the
+	 * error printk -- before checking ch_id, an out-of-bounds access
+	 * for a bad channel number. */
+	if (unlikely(ch_id >= MAX_PPC460EX_DMA_CHANNELS)) {
+		printk("%s: bad channel %d\n", __FUNCTION__, ch_id);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	if (unlikely(!phandle)) {
+		printk("%s: null handle pointer\n", __FUNCTION__);
+		return DMA_STATUS_NULL_POINTER;
+	}
+	p_dma_ch = adev->chan[ch_id];
+
+	/* Lazily allocate one coherent page holding the sgl_list_info
+	 * header followed by the descriptor array; it is shared by all
+	 * subsequent handles (see ppc460ex_free_dma_handle()). */
+	if (!dma_mem_page) {
+		dma_mem_page = dma_alloc_coherent(adev->dev, DMA_PPC4xx_SIZE, &dma_addr, GFP_KERNEL);
+		if (unlikely(!dma_mem_page)) {
+			printk("dma_alloc_coherent failed\n");
+			return DMA_STATUS_OUT_OF_MEMORY;
+		}
+	}
+
+	psgl = (sgl_list_info_t *) dma_mem_page;
+
+	psgl->dma_addr = dma_addr;
+	psgl->ch_id = ch_id;
+	/*
+	 * Modify and save the control word.  These words will be
+	 * written to each sgl descriptor.  The DMA engine then
+	 * loads this control word into the control register
+	 * every time it reads a new descriptor.
+	 */
+	psgl->control = p_dma_ch->control;
+	/* Clear all mode bits */
+	psgl->control &= ~(DMA_TM_MASK | DMA_TD);
+	/* Save control word and mode */
+	psgl->control |= (mode | DMA_CE_ENABLE);
+	/* In MM mode, we must set ETD/TCE */
+	if (mode == DMA_MODE_MM) {	/* PMB - Workaround */
+		psgl->control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
+		/* Force bits 16..17 to 0b10 (workaround value). */
+		psgl->control &= 0xFFFCFFFF;
+		psgl->control |= 0x00020000;
+	}
+
+	if (p_dma_ch->int_enable) {
+		/* Enable channel interrupt */
+		psgl->control |= DMA_CIE_ENABLE;
+	} else {
+		psgl->control &= ~DMA_CIE_ENABLE;
+	}
+	/* FIXME: unconditionally clears CIE again, overriding the
+	 * int_enable branch above; kept as-is to preserve behavior. */
+	psgl->control &= ~DMA_CIE_ENABLE;
+
+	sg_command = mfdcr(DCR_DMA2P40_SGC);
+	sg_command |= SSG_MASK_ENABLE(ch_id);
+
+	/* Enable SGL control access */
+	mtdcr(DCR_DMA2P40_SGC, sg_command);
+	psgl->sgl_control = SG_ERI_ENABLE | SG_LINK;
+
+	/* FIXME: int_enable is forced off here, so the block below is dead
+	 * code (and both of its branches are identical anyway); kept as-is
+	 * to preserve behavior. */
+	p_dma_ch->int_enable = 0;
+	if (p_dma_ch->int_enable) {
+		if (p_dma_ch->tce_enable)
+			psgl->sgl_control |= SG_TCI_ENABLE | SG_ETI_ENABLE;
+		else
+			psgl->sgl_control |= SG_ETI_ENABLE | SG_TCI_ENABLE;
+	}
+
+	*phandle = (sgl_handle_t) psgl;
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Destroy a scatter/gather list handle created by ppc460ex_alloc_dma_handle().
+ * The list must contain no elements; a non-empty list is refused.
+ */
+void
+ppc460ex_free_dma_handle(sgl_handle_t handle)
+{
+	sgl_list_info_t *psgl = (sgl_list_info_t *) handle;
+
+	if (!handle) {
+		printk("%s: got NULL\n", __FUNCTION__);
+		return;
+	}
+	if (psgl->phead) {
+		printk("%s: list not empty\n", __FUNCTION__);
+		return;
+	}
+	if (!psgl->dma_addr) {	/* should never happen */
+		printk("%s: no dma address\n", __FUNCTION__);
+		return;
+	}
+
+	/* The descriptor page (dma_mem_page) is shared between handles and
+	 * is intentionally not returned to the coherent pool here. */
+}
+
+#if 0
+
+/*
+ * Compiled-out self-test: DMA-copies two buffers through a two-element
+ * scatter/gather list and memcmp()s the results.
+ * NOTE(review): if this is ever revived, fix the issues flagged below.
+ */
+int test_sgdma_memcpy(void *src, void *dst, void *src1, void *dst1, unsigned int length, unsigned int dma_ch_id)
+{
+	ppc460ex_plb_dma_dev_t *device;
+	ppc460ex_plb_dma_ch_t p_init;
+	phys_addr_t dma_dest, dma_src;
+	phys_addr_t dma_dest1, dma_src1;
+	int res = 0;
+	ppc460ex_plb_dma_ch_t *new_chan;
+	unsigned int control;
+	u32 status = 0;
+	u32 value = 0;
+	sgl_handle_t handle_p;
+
+	/* create a device */
+	/* FIXME: -ENOMEM is recorded but execution continues and later
+	 * dereferences the NULL pointer (both allocations below). */
+	if ((device = kzalloc(sizeof(*device), GFP_KERNEL)) == NULL) {
+		res = -ENOMEM;
+	}
+
+	if ((new_chan = kzalloc(sizeof(ppc460ex_plb_dma_ch_t), GFP_KERNEL)) == NULL) {
+		printk("ERROR:No Free memory for allocating dma channels\n");
+		res = -ENOMEM;
+	}
+
+	/* FIXME: p_init is uninitialized at this point -- p_init.device is
+	 * stack garbage when dereferenced for dma_map_single(). */
+	dma_src = dma_map_single(p_init.device->dev, src, length,
+				 DMA_TO_DEVICE);
+	dma_dest = dma_map_single(p_init.device->dev, dst, length,
+				 DMA_FROM_DEVICE);
+
+	dma_src1 = dma_map_single(p_init.device->dev, src1, length,
+				  DMA_TO_DEVICE);
+	dma_dest1 = dma_map_single(p_init.device->dev, dst1, length,
+				  DMA_FROM_DEVICE);
+
+	memset(new_chan, 0 , sizeof(ppc460ex_plb_dma_ch_t));
+	device->chan[dma_ch_id] = new_chan;
+
+	/* FIXME: memset() arguments are swapped -- this zeroes 0 bytes;
+	 * should be memset(&p_init, 0, sizeof(p_init)). */
+	memset((char *)&p_init, sizeof(p_init), 0);
+	p_init.polarity = 0;
+	p_init.pwidth = PW_32;
+	p_init.in_use = 0;
+	p_init.sai = 1;
+	p_init.dai = 1;
+	p_init.tce_enable = 1;
+	//printk("%s:channel id = %d\n", __FUNCTION__, dma_ch_id);
+
+	res = ppc460ex_init_dma_channel(device, dma_ch_id, &p_init);
+
+	ppc460ex_set_dma_count(device, dma_ch_id, length);
+
+	res = ppc460ex_enable_dma_interrupt(device, dma_ch_id);
+	if (res) {
+		printk("%32s: en/disable_dma_interrupt\n",
+		       __FUNCTION__);
+	}
+
+	res = ppc460ex_alloc_dma_handle(device, &handle_p, DMA_MODE_MM, dma_ch_id);
+
+	ppc460ex_add_dma_sgl(device, handle_p, dma_src, dma_dest, length);
+	ppc460ex_add_dma_sgl(device, handle_p, dma_src1, dma_dest1, length);
+
+	ppc460ex_enable_dma_sgl(device, handle_p);
+
+	/*do {
+	   value = mfdcr(DCR_DMA2P40_SR);
+	   }while ((value & 0x80000000) != 0x80000000);*/
+
+#if DEBUG_TEST
+	printk("%s:out:dump src \n", __FUNCTION__);
+	DMA_HEXDUMP(src, length);
+	printk("%s:out:dump dst\n", __FUNCTION__);
+	DMA_HEXDUMP(dst, length);
+	printk("%s:out:dump src1 \n", __FUNCTION__);
+	DMA_HEXDUMP(src1, length);
+	printk("%s:out:dump dst1\n", __FUNCTION__);
+	DMA_HEXDUMP(dst1, length);
+#endif
+
+	if (memcmp(src, dst, length) || memcmp(src1, dst1, length)) {
+		printk("Self-test copy failed compare, disabling\n");
+		res = -ENODEV;
+		goto out;
+	}
+
+	return 0;
+ out:
+
+	return res;
+}
+#endif
+
+#ifdef SPLICE_DMA_COHERENT
+int
+ppc460ex_sgdma_pipebufs_memcpy(struct pipe_inode_info *pipe, void *dest, dma_addr_t dma_dest, unsigned int length)
+#else
+int
+ppc460ex_sgdma_pipebufs_memcpy(struct pipe_inode_info *pipe, void *dest, unsigned int length)
+#endif
+{
+	sgl_list_info_t *psgl;
+	ppc460ex_plb_dma_dev_t *device;
+	ppc460ex_plb_dma_ch_t p_init;
+#ifndef SPLICE_DMA_COHERENT
+	dma_addr_t dma_dest;
+#endif
+	phys_addr_t dma_src;
+	ppc460ex_plb_dma_ch_t *new_chan;
+	sgl_handle_t handle_p;
+	int dma_ch_id;
+	void *src;
+	int nrbufs = pipe->nrbufs;
+	int res = 0;
+	int len = 0;
+#ifdef DEBUG_SPLICE_DMA
+	char *s_vaddr = NULL, *d_vaddr = NULL;
+	char strbuf[256];
+#endif
+	int curbuf = pipe->curbuf;	/* stash away pipe->curbuf */
+
+#ifdef DEBUG_SPLICE_DMA
+	printk("%s:%s:%d - dest = %p, length = %d len = %d\n",
+	       __FILE__, __FUNCTION__, __LINE__, dest, length, len);
+#endif
+
+	if (unlikely(!nrbufs))
+		return -EFAULT;
+
+	/*
+	 * Allocate per-call bookkeeping.  The original only recorded
+	 * -ENOMEM and carried on, dereferencing the NULL pointer a few
+	 * lines later; fail fast instead.
+	 * NOTE(review): neither allocation is freed on success -- this
+	 * leaks on every call, exactly as the original did; left
+	 * unchanged pending a wider ownership cleanup.
+	 */
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (unlikely(!device))
+		return -ENOMEM;
+
+	new_chan = kzalloc(sizeof(ppc460ex_plb_dma_ch_t), GFP_KERNEL);
+	if (unlikely(!new_chan)) {
+		printk("ERROR:No Free memory for allocating dma channels\n");
+		kfree(device);
+		return -ENOMEM;
+	}
+
+	dma_ch_id = ppc460ex_get_dma_channel();
+	if (unlikely(dma_ch_id == -ENODEV)) {
+		kfree(new_chan);
+		kfree(device);
+		return dma_ch_id;
+	}
+
+	device->chan[dma_ch_id] = new_chan;
+	memset((char *)&p_init, 0, sizeof(ppc460ex_plb_dma_ch_t));
+	p_init.polarity = 0;
+	p_init.pwidth = PW_8;
+	p_init.in_use = 0;
+	p_init.sai = 1;
+	p_init.dai = 1;
+	p_init.tce_enable = 1;
+
+	res = ppc460ex_init_dma_channel(device, dma_ch_id, &p_init);
+	if (unlikely(res != DMA_STATUS_GOOD))
+		goto out;
+
+	init_waitqueue_head(&device->queue);
+
+	res = ppc460ex_alloc_dma_handle(device, &handle_p, DMA_MODE_MM, dma_ch_id);
+	if (unlikely(res != DMA_STATUS_GOOD))
+		goto out;
+
+#ifdef DEBUG_SPLICE_DMA
+	printk("%s:%s:%d - nrbufs = %d pipe->curbuf = %d\n",
+	       __FILE__, __FUNCTION__, __LINE__, nrbufs, pipe->curbuf);
+#endif
+	/*
+	 * Map each pipe buffer and chain it onto the scatter/gather list
+	 * until all buffers are consumed or `length' bytes are mapped.
+	 * The pipe slot is ALWAYS advanced: the original `continue'd on a
+	 * zero-length buffer without advancing curbuf/nrbufs and spun
+	 * forever.
+	 */
+	while (nrbufs && len < length) {
+		struct pipe_buffer *buf = pipe->bufs + curbuf;
+
+		if (buf->len) {
+			src = page_address(buf->page);
+			dma_src = dma_map_single(p_init.device->dev, src + buf->offset, buf->len, DMA_TO_DEVICE);
+#ifndef SPLICE_DMA_COHERENT
+			dma_dest = dma_map_single(p_init.device->dev, dest + len, buf->len, DMA_FROM_DEVICE);
+#endif
+#ifdef DEBUG_SPLICE_DMA
+			printk("maping %d src: %p, dest: %p, buf->len=%d dma_dest = 0x%08x\n",
+			       curbuf, src + buf->offset, dest + len, buf->len, dma_dest);
+#endif
+			ppc460ex_add_dma_sgl(device, handle_p, dma_src, dma_dest, buf->len);
+			len += buf->len;
+		}
+		curbuf = (curbuf + 1) & (PIPE_BUFFERS - 1);
+		--nrbufs;
+	}
+
+	/* Make the destination coherent, start the transfer, and poll for
+	 * completion. */
+	__dma_sync(dest, length, DMA_FROM_DEVICE);
+	ppc460ex_enable_dma_sgl(device, handle_p);
+
+	res = poll_for_sgdma_done(dma_ch_id);
+	if (unlikely(res)) {
+		printk("%s:%s:%d - Timeout while waiting for SG Xfr to complete\n",
+		       __FILE__, __FUNCTION__, __LINE__);
+		printk("dma_status = 0x%08x\n", DMA_STATUS(dma_ch_id));
+	}
+
+	/* Check the per-channel error status bits, then clear all status. */
+	printk("DCR_DMA2P40_SR=0x%x\n", mfdcr(DCR_DMA2P40_SR));
+	if (unlikely(mfdcr(DCR_DMA2P40_SR) & (1 << (23 + dma_ch_id)))) {
+		printk(KERN_ERR "Error happened in the channel %d\n", dma_ch_id);
+		printk("DCR_DMA2P40_SR=0x%x\n", mfdcr(DCR_DMA2P40_SR));
+	}
+	mtdcr(DCR_DMA2P40_SR, 0xFFFFFFFF);
+
+	/* Reset the list head so ppc460ex_free_dma_handle() sees an empty
+	 * list (the descriptors live in the shared coherent page). */
+	psgl = (sgl_list_info_t *) handle_p;
+	psgl->phead = NULL;
+	ppc460ex_free_dma_handle(handle_p);
+	handle_p = 0;
+
+#ifdef DEBUG_SPLICE_DMA
+	printk("%s:%s:%d - returning res = %d\n", __FILE__, __FUNCTION__, __LINE__, res);
+	{
+		struct pipe_buffer *buf = pipe->bufs + pipe->curbuf;
+
+		memset(strbuf, 0, 256);
+		s_vaddr = page_address(buf->page);
+		memcpy(strbuf, s_vaddr + buf->offset, 255);
+		*(strbuf + 255) = '\0';
+		printk("%s:%s:%d - source strbuf is %s\n", __FILE__, __FUNCTION__, __LINE__, strbuf);
+
+		d_vaddr = dest;
+		memset(strbuf, 0, 256);
+		memcpy(strbuf, d_vaddr, 255);
+		*(strbuf + 255) = '\0';
+		printk("%s:%s:%d - dest strbuf is %s\n", __FILE__, __FUNCTION__, __LINE__, strbuf);
+	}
+#endif
+
+out:
+	return res;
+}
+
+/*
+ * DMA memcpy over a caller-built list of chunks (struct splice_dma_desc):
+ * maps every src/dst pair, chains them on one scatter/gather list, starts
+ * the transfer and polls for completion.  Returns 0 or an error code.
+ */
+int splice_dma_memcpy(struct splice_dma_desc *sd_p, unsigned int len)
+{
+	sgl_list_info_t *psgl;
+	ppc460ex_plb_dma_ch_t p_init;
+	dma_addr_t dma_dest;
+	phys_addr_t dma_src;
+	unsigned int size = 0;
+	sgl_handle_t handle_p;
+	int dma_ch_id;
+	void *src = NULL, *dst = NULL;
+	int res = 0;
+	int i = 0;
+	int dma_xfr_size = 0;
+
+	if (unlikely(!sd_p->n_elems))
+		return -EFAULT;
+
+	/* The original ignored a failed channel reservation and went on to
+	 * index adev->chan[] with a negative value; bail out instead. */
+	dma_ch_id = ppc460ex_get_dma_channel();
+	if (unlikely(dma_ch_id == -ENODEV))
+		return dma_ch_id;
+
+	p_init.polarity = 0;
+	p_init.pwidth = PW_8;
+	p_init.in_use = 0;
+	p_init.sai = 1;
+	p_init.dai = 1;
+	p_init.tce_enable = 1;
+
+	res = ppc460ex_init_dma_channel(adev, dma_ch_id, &p_init);
+	if (unlikely(res != DMA_STATUS_GOOD))
+		goto out;
+
+	init_waitqueue_head(&adev->queue);
+
+	ppc460ex_enable_burst(adev, dma_ch_id);
+	res = ppc460ex_alloc_dma_handle(adev, &handle_p, DMA_MODE_MM, dma_ch_id);
+	if (unlikely(res != DMA_STATUS_GOOD))
+		goto out;
+
+	/* Map every chunk and chain it onto the scatter/gather list. */
+	for (i = 0; i < sd_p->n_elems; i++) {
+		src = (void *)(sd_p->src_addrs[i]);
+		dst = (void *)(sd_p->dst_addrs[i]);
+		size = sd_p->xfr_size[i];
+		dma_src = dma_map_single(adev->dev, src, size, DMA_TO_DEVICE);
+		dma_dest = dma_map_single(adev->dev, dst, size, DMA_FROM_DEVICE);
+		ppc460ex_add_dma_sgl(adev, handle_p, dma_src, dma_dest, size);
+		dma_xfr_size += size;
+	}
+
+	ppc460ex_enable_dma_sgl(adev, handle_p);
+
+	res = poll_for_sgdma_done(dma_ch_id);
+	if (unlikely(res)) {
+		printk("%s:%s:%d - Timeout while waiting for SG Xfr to complete\n",
+		       __FILE__, __FUNCTION__, __LINE__);
+		printk("dma_status = 0x%08x\n", DMA_STATUS(dma_ch_id));
+	}
+
+	/* Check the error status bits */
+	if (unlikely(mfdcr(DCR_DMA2P40_SR) & (1 << (23 + dma_ch_id)))) {
+		printk(KERN_ERR "Error happened in the channel %d\n", dma_ch_id);
+		printk("DCR_DMA2P40_SR=0x%x\n", mfdcr(DCR_DMA2P40_SR));
+	}
+	mtdcr(DCR_DMA2P40_SR, 0xFFFFFFFF);
+
+	/*
+	 * Unmap with each element's own length: the original reused `size'
+	 * (the LAST element's length) for every entry.
+	 * NOTE(review): splice_src_dma_addrs[]/splice_dst_dma_addrs[] are
+	 * globals assumed to mirror the mappings made above -- verify
+	 * against the code that fills them.
+	 */
+	for (i = 0; i < sd_p->n_elems; i++) {
+		dma_unmap_single(adev->dev, splice_src_dma_addrs[i], sd_p->xfr_size[i], DMA_TO_DEVICE);
+		dma_unmap_single(adev->dev, splice_dst_dma_addrs[i], sd_p->xfr_size[i], DMA_FROM_DEVICE);
+	}
+
+	/* Reset the list so ppc460ex_free_dma_handle() accepts the handle. */
+	psgl = (sgl_list_info_t *) handle_p;
+	psgl->phead = NULL;
+	psgl->ptail = NULL;
+	ppc460ex_free_dma_handle(handle_p);
+	handle_p = 0;
+
+out:
+	return res;
+}
+
+
+
+/* Scatter/gather DMA API exported for use by other kernel modules. */
+EXPORT_SYMBOL(ppc460ex_alloc_dma_handle);
+EXPORT_SYMBOL(ppc460ex_free_dma_handle);
+EXPORT_SYMBOL(ppc460ex_add_dma_sgl);
+EXPORT_SYMBOL(ppc460ex_delete_dma_sgl_element);
+EXPORT_SYMBOL(ppc460ex_enable_dma_sgl);
+EXPORT_SYMBOL(ppc460ex_disable_dma_sgl);
+EXPORT_SYMBOL(ppc460ex_get_dma_sgl_residue);
+EXPORT_SYMBOL(ppc460ex_sgdma_pipebufs_memcpy);
+EXPORT_SYMBOL(splice_dma_memcpy);
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index a8b84854fb7..2907edec103 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -3,7 +3,7 @@
#
# Each configuration option enables a list of files.
-
+obj-y += a3g_button.o
obj-$(CONFIG_INPUT_APANEL) += apanel.o
obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
diff --git a/drivers/input/misc/a3g_button.c b/drivers/input/misc/a3g_button.c
new file mode 100644
index 00000000000..978ed41af9a
--- /dev/null
+++ b/drivers/input/misc/a3g_button.c
@@ -0,0 +1,643 @@
+/*
+ * Button driver to support Apollo 3G board
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/leds.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/suspend.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/signal.h>
+#include <linux/freezer.h>
+#include <linux/ioport.h>
+
+#include <linux/netlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/major.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+
+
+/* LED register base mapped by the Apollo3G LED driver; also carries the
+ * CS2 enable bit used by enable_cs2()/disable_cs2() below. */
+extern void __iomem *led_port;
+
+#define DRV_NAME "a3g-button"
+
+#define EV_PRESSED 1
+#define EV_RELEASED 0
+#define RESET_BTN 0		/* reset button */
+
+#define _3G_BTN_BIT 0x04	/* button bit in the CS2-gated register */
+#define POL_RATE_MSECS 200	/* polling period of btn_thread() */
+#define MAX_EV_LEN (sizeof(struct input_event))
+
+/* Set by btn_thread() on a state change; consumed by btndev_read(). */
+int btn_ev_flag = 0;
+struct task_struct * btn_threadptr;
+
+/* Most recent button event, copied to user space by btndev_read(). */
+struct input_event btn_event;
+
+/* Physical window (chip-select 2) through which the button is read. */
+static struct resource a3g_res = {
+	.name = "cs2",
+	.start = 0x4e0100000ULL,
+	.end = 0x4e0100300ULL,
+	/*.flags = IORESOURCE_IO,*/
+	.flags = IORESOURCE_MEM,
+	};
+
+/* ioremap()ed cookie for a3g_res; NULL until a3g_button_init() runs. */
+static void __iomem *button_port = NULL;
+
+
+/* Raw hardware snapshot; fields appear unused in this file. */
+struct btndev_hw_data {
+	int abs_event;
+	unsigned long buttons;
+};
+
+/* Per-device state behind one input char-device minor. */
+struct btndev {
+	int exist;		/* cleared by btndev_mark_dead() */
+	int open;		/* open count of the input handle */
+	int minor;
+	struct input_handle handle;
+	wait_queue_head_t wait;	/* readers sleep here; woken by btn_thread() */
+	struct list_head client_list;
+	spinlock_t client_lock; /* protects client_list */
+	struct mutex mutex;
+	struct device dev;
+
+	struct list_head mixdev_node;
+	int mixdev_open;
+
+	struct btndev_hw_data packet;
+	unsigned int pkt_count;
+};
+
+/* The single device instance, created by button_dev_init(). */
+struct btndev * btndev_ptr;
+
+#define BTNDEV_MINOR_BASE 31
+#define BTNDEV_MINORS 31
+#define BTNDEV_ZERO 0
+
+struct btndev_motion {
+	unsigned long buttons;
+};
+
+/* Per-open-file state.  Only packet_lock, btndev, node and fasync are
+ * actually used by the code in this file; the rest appears to be
+ * template leftovers. */
+#define PACKET_QUEUE_LEN 16
+struct btndev_client {
+	struct fasync_struct *fasync;
+	struct btndev *btndev;
+	struct list_head node;
+
+	struct btndev_motion packets[PACKET_QUEUE_LEN];
+	unsigned int head, tail;
+	spinlock_t packet_lock;
+
+	signed char ps2[6];
+	unsigned char ready, buffer, bufsiz;
+	unsigned char imexseq, impsseq;
+	unsigned long last_buttons;
+};
+
+/* Minor-number -> device lookup, guarded by btndev_table_mutex. */
+struct btndev *btndev_table[BTNDEV_MINORS];
+static DEFINE_MUTEX(btndev_table_mutex);
+
+/* Match any input device reporting both EV_KEY and EV_REL event types.
+ * NOTE(review): MATCH_KEYBIT/MATCH_RELBIT flags are set but no keybit/
+ * relbit masks are given -- confirm the intended match criteria. */
+static const struct input_device_id btndev_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+				INPUT_DEVICE_ID_MATCH_KEYBIT |
+				INPUT_DEVICE_ID_MATCH_RELBIT,
+		.evbit = { BIT_MASK(EV_KEY) | BIT_MASK(EV_REL) },
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(input, btndev_ids);
+/* Drop this device's slot from the minor lookup table. */
+static void btndev_remove_chrdev(struct btndev *btndev) {
+	mutex_lock(&btndev_table_mutex);
+	btndev_table[btndev->minor] = NULL;
+	mutex_unlock(&btndev_table_mutex);
+}
+
+
+/*********************************************************/
+/* Flag the device as gone so subsequent operations fail with -ENODEV. */
+static void btndev_mark_dead(struct btndev *btndev) {
+	mutex_lock(&btndev->mutex);
+	btndev->exist = 0;
+	mutex_unlock(&btndev->mutex);
+}
+
+/*********************************************************/
+/* Tear down a btndev: mark it dead, unhook it from the minor table, and
+ * close the input handle if any client still holds it open.
+ * NOTE(review): `open' is read without btndev->mutex here -- racy if a
+ * client can still be opening/closing at this point; confirm callers. */
+static void btndev_cleanup(struct btndev *btndev)
+{
+	struct input_handle *handle = &btndev->handle;
+
+	btndev_mark_dead(btndev);
+	btndev_remove_chrdev(btndev);
+
+	if (btndev->open)
+		input_close_device(handle);
+}
+
+/**********************************************************/
+/* device release() callback: drop the input-device reference taken in
+ * btndev_create() and free the container. */
+static void btndev_free(struct device * dev) {
+	struct btndev *btndev = container_of(dev, struct btndev, dev);
+
+	input_put_device(btndev->handle.dev);
+	kfree(btndev);
+}
+/**********************************************************/
+/* Publish the device in the minor lookup table.  Takes the table mutex
+ * for consistency with btndev_remove_chrdev()/btndev_open(); the
+ * original wrote the slot unlocked. */
+static int btndev_install_chrdev(struct btndev *btndev) {
+	mutex_lock(&btndev_table_mutex);
+	btndev_table[btndev->minor] = btndev;
+	mutex_unlock(&btndev_table_mutex);
+	return 0;
+}
+
+/**********************************************************/
+/*
+ * Allocate, initialize and register a btndev bound to @dev through
+ * @handler, exposed as char device minor BTNDEV_MINOR_BASE + minor.
+ * Returns the new device or an ERR_PTR() on failure.
+ */
+static struct btndev *btndev_create(struct input_dev *dev,
+				    struct input_handler *handler,
+				    int minor)
+{
+	struct btndev *btndev;
+	int error;
+	btndev = kzalloc(sizeof(struct btndev), GFP_KERNEL);
+	if (!btndev) {
+		error = -ENOMEM;
+		goto err_out;
+	}
+
+	INIT_LIST_HEAD(&btndev->client_list);
+	INIT_LIST_HEAD(&btndev->mixdev_node);
+	spin_lock_init(&btndev->client_lock);
+	mutex_init(&btndev->mutex);
+	lockdep_set_subclass(&btndev->mutex, 0);
+	init_waitqueue_head(&btndev->wait);
+
+	dev_set_name(&btndev->dev, "event%d", minor);
+
+	btndev->minor = minor;
+	btndev->exist = 1;
+	btndev->handle.dev = input_get_device(dev);	/* take a ref on @dev */
+	btndev->handle.name = dev_name(&btndev->dev);
+	btndev->handle.handler = handler;
+	btndev->handle.private = btndev;
+
+	btndev->dev.class = &input_class;
+	if (dev)
+		btndev->dev.parent = &dev->dev;
+	btndev->dev.devt = MKDEV(INPUT_MAJOR, BTNDEV_MINOR_BASE + minor);
+	btndev->dev.release = btndev_free;	/* drops the input ref + kfree */
+	device_initialize(&btndev->dev);
+
+	error = input_register_handle(&(btndev->handle));
+	if (error) {
+		goto err_free_btndev;
+	}
+
+	error = btndev_install_chrdev(btndev);
+	if (error)
+		goto err_unregister_handle;
+
+	error = device_add(&btndev->dev);
+	if (error)
+		goto err_cleanup_btndev;
+
+	return btndev;
+
+	/* Unwind in reverse order; labels deliberately fall through. */
+ err_cleanup_btndev:
+	btndev_cleanup(btndev);
+ err_unregister_handle:
+	input_unregister_handle(&btndev->handle);
+ err_free_btndev:
+	put_device(&btndev->dev);	/* release() frees btndev */
+ err_out:
+	return ERR_PTR(error);
+}
+
+
+/**********************************************************/
+/* Reverse of btndev_create(): remove the device node, tear down state,
+ * unregister the input handle and drop the final reference. */
+static void btndev_destroy(struct btndev *btndev) {
+	device_del(&btndev->dev);
+	btndev_cleanup(btndev);
+	input_unregister_handle(&btndev->handle);
+	put_device(&btndev->dev);
+}
+
+
+/**********************************************************/
+/* Input-core event callback.  All event types are ignored: readers are
+ * fed from btn_event, which the polling thread fills directly. */
+static void btndev_event(struct input_handle *handle,
+			 unsigned int type, unsigned int code, int value) {
+	switch( type ) {
+	case EV_KEY:
+	case EV_REL:
+	default:
+		break;
+	}
+}
+
+
+/* Open the underlying input device on first use; the open count is
+ * protected by btndev->mutex. */
+static int btndev_open_device(struct btndev *btndev) {
+	int err = mutex_lock_interruptible(&btndev->mutex);
+
+	if (err)
+		return err;
+
+	if (!btndev->exist) {
+		err = -ENODEV;
+	} else if (btndev->open++ == 0) {
+		err = input_open_device(&btndev->handle);
+		if (err)
+			btndev->open--;	/* roll back on failure */
+	}
+
+	mutex_unlock(&btndev->mutex);
+	return err;
+}
+
+
+/**********************************************************/
+/* Add @client to the device's RCU-protected reader list. */
+static void btndev_attach_client(struct btndev *btndev, struct btndev_client *client) {
+	spin_lock(&btndev->client_lock);
+	list_add_tail_rcu(&client->node, &btndev->client_list);
+	spin_unlock(&btndev->client_lock);
+	synchronize_rcu();	/* let readers observe the new entry */
+}
+
+/**********************************************************/
+/* Remove @client from the reader list and wait out concurrent readers. */
+static void btndev_detach_client(struct btndev *btndev,
+				 struct btndev_client *client)
+{
+	spin_lock(&btndev->client_lock);
+	list_del_rcu(&client->node);
+	spin_unlock(&btndev->client_lock);
+	synchronize_rcu();
+}
+
+/* release(): intentionally a no-op.
+ * NOTE(review): the client allocated in btndev_open() is never detached
+ * or freed here -- looks like a per-open leak; confirm. */
+static int btndev_release(struct inode *inode, struct file *file) {
+	return 0;
+}
+
+/**********************************************************/
+/*
+ * open(): look up the btndev for this minor, allocate per-file client
+ * state and open the underlying input device.  On success the client
+ * is stashed in file->private_data.
+ */
+static int btndev_open(struct inode *inode, struct file *file)
+{
+	struct btndev_client *client;
+	struct btndev *btndev;
+	int error;
+	int i;
+
+	i = iminor(inode) - BTNDEV_MINOR_BASE;
+
+	/* NOTE(review): no lower-bound check -- a minor below
+	 * BTNDEV_MINOR_BASE would index btndev_table negatively. */
+	if (i >= BTNDEV_MINORS) {
+		printk(KERN_ERR "*** error btndev_open()\n");
+		return -ENODEV;
+	}
+
+	error = mutex_lock_interruptible(&btndev_table_mutex);
+	if (error) {
+		return error;
+	}
+	btndev = btndev_table[i];
+	if (btndev)
+		get_device(&btndev->dev);	/* pin the device while open */
+	mutex_unlock(&btndev_table_mutex);
+
+	if (!btndev) {
+		return -ENODEV;
+	}
+
+	client = kzalloc(sizeof(struct btndev_client), GFP_KERNEL);
+	if (!client) {
+		error = -ENOMEM;
+		goto err_put_btndev;
+	}
+
+	spin_lock_init(&client->packet_lock);
+	client->btndev = btndev;
+	btndev_attach_client(btndev, client);
+
+	error = btndev_open_device(btndev);
+	if (error)
+		goto err_free_client;
+
+	file->private_data = client;
+	return 0;
+
+ err_free_client:
+	btndev_detach_client(btndev, client);
+	kfree(client);
+ err_put_btndev:
+	put_device(&btndev->dev);
+	return error;
+}
+
+/* No-op write: always reports 0 bytes written; the device is read-only. */
+static ssize_t btndev_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos) {
+	return 0;
+}
+
+/* Wire up asynchronous (SIGIO) notification for this open file. */
+static int btndev_fasync(int fd, struct file *file, int on) {
+	struct btndev_client *client = file->private_data;
+
+	return fasync_helper(fd, file, on, &client->fasync);
+}
+
+/*
+ * read(): copy the most recent button event (global btn_event) to user
+ * space.  Non-blocking readers receive the current event immediately;
+ * blocking readers sleep until btn_thread() flags a state change.
+ */
+static ssize_t btndev_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) {
+	struct btndev_client *client = file->private_data;
+	struct btndev *btndev = client->btndev;
+	signed char data[MAX_EV_LEN];
+	int retval = 0;
+
+	if (file->f_flags & O_NONBLOCK) {
+		/*
+		 * This read is from VFT, copy data to user buffer anyway.
+		 * NOTE(review): the event IS copied out, but -EAGAIN is
+		 * still returned, so a conforming caller would discard the
+		 * data -- confirm the consumer relies on this behavior.
+		 */
+		spin_lock_irq(&client->packet_lock);
+		if (count > MAX_EV_LEN)
+			count = MAX_EV_LEN;
+		memcpy(data, (char*)&btn_event, count);
+		/* client->buffer -= count; */
+		spin_unlock_irq(&client->packet_lock);
+		if (copy_to_user(buffer, data, count))
+			return -EFAULT;
+
+		return -EAGAIN;
+	}
+	retval = wait_event_interruptible(btndev->wait,
+					  !btndev->exist ||
+					  client->ready ||
+					  btn_ev_flag );
+	if( btn_ev_flag ) {
+		btn_ev_flag = 0;	/* consume the wakeup flag */
+	}
+	if (retval)
+		return retval;
+	if (!btndev->exist)
+		return -ENODEV;
+	spin_lock_irq(&client->packet_lock);
+	if (count > MAX_EV_LEN)
+		count = MAX_EV_LEN;
+	memcpy(data, (char*)&btn_event, count);
+	/* NOTE(review): `buffer' is unsigned char and is never incremented
+	 * in this file; this subtraction can wrap -- verify it is still
+	 * meaningful. */
+	client->buffer -= count;
+	spin_unlock_irq(&client->packet_lock);
+	if (copy_to_user(buffer, data, count))
+		return -EFAULT;
+
+	return MAX_EV_LEN;
+}
+
+
+/* poll(): stubbed out.
+ * NOTE(review): no poll_wait() and an unconditional 0 mask means
+ * select/poll never reports readiness and cannot sleep on our wait
+ * queue -- confirm no userspace relies on polling this device. */
+static unsigned int btndev_poll(struct file *file, poll_table *wait) {
+
+	return 0;
+}
+
+
+
+/* Handler connect callback: a stub.  The single handle is created
+ * explicitly in button_dev_init() via btndev_create(), not on hotplug. */
+static int btndev_connect( struct input_handler *handler,
+			   struct input_dev *dev,
+			   const struct input_device_id *id){
+
+	return 0;
+ }
+
+
+/* Handler disconnect callback: intentionally empty. */
+static void btndev_disconnect(struct input_handle *handle) {
+
+}
+
+
+/* File operations for the button char device. */
+static const struct file_operations btndev_fops = {
+	.owner =	THIS_MODULE,
+	.read =		btndev_read,
+	.write =	btndev_write,
+	.poll =		btndev_poll,
+	.open =		btndev_open,
+	.release =	btndev_release,
+	.fasync =	btndev_fasync,
+};
+
+
+/* Input handler claiming minors from BTNDEV_MINOR_BASE upward. */
+static struct input_handler btndev_handler = {
+	.event =	btndev_event,
+	.connect =	btndev_connect,
+	.disconnect =	btndev_disconnect,
+	.fops =		&btndev_fops,
+	.minor =	BTNDEV_MINOR_BASE,
+	.name =		"btndev",
+	.id_table =	btndev_ids,
+};
+
+
+
+
+
+
+/**************************************************/
+/* Assert the CS2 enable bit (0x02) in the LED register.  Returns the
+ * previous register value so the caller can pass it to disable_cs2(). */
+u8 enable_cs2( void ){
+	u8 prev;
+
+	prev = readb(led_port);
+	writeb(prev | 0x02, led_port);
+	return prev;
+}
+
+/**************************************************/
+/* Write back the saved register value with the CS2 bit cleared.
+ * NOTE(review): this clears 0x02 even if it was set in the saved value,
+ * i.e. CS2 is force-disabled rather than restored -- confirm intent. */
+void disable_cs2( u8 val ){
+	val &= ~0x02;
+	writeb(val, led_port);
+}
+
+/**************************************************/
+/* Sample the button bit behind CS2.  Returns EV_REL when the bit is set
+ * and EV_KEY otherwise (the driver appears to use EV_REL as "released"
+ * and EV_KEY as "pressed").
+ * NOTE(review): when led_port is NULL, val stays EV_REL (2), the bit
+ * test fails and EV_KEY ("pressed") is reported -- confirm a missing
+ * port should not report the released state instead. */
+static int is_button_pressed(void) {
+	u8 saved, val = EV_REL;
+
+	if( led_port ) {
+		saved = enable_cs2();
+		val = readb(button_port);
+		disable_cs2( saved );
+	}
+	return ((val & _3G_BTN_BIT) == _3G_BTN_BIT) ? EV_REL : EV_KEY ;
+}
+
+
+/**************************************************/
+
+static struct input_dev * input_dev;
+
+/*
+ * Allocate and register the reset-button input device, the btndev input
+ * handler and the character-device wrapper.  Returns 0 on success or a
+ * negative errno.  @parent_pdev is currently unused (init passes NULL).
+ */
+static int button_dev_init(struct platform_device *parent_pdev)
+{
+	int error;
+
+	input_dev = input_allocate_device();
+	if( !input_dev ) {
+		printk(KERN_ERR "*** a3g-button: unable to allocate input_dev\n");
+		return -ENOMEM;
+	}
+
+	input_dev->name = "rst_button";
+	input_dev->phys = "a3g/input0";
+	input_dev->id.bustype = BUS_HOST;
+	input_set_capability( input_dev, EV_MSC, MSC_SCAN);
+	__set_bit( EV_KEY, input_dev->evbit);
+	/* EV_REL is an event *type* and belongs in evbit (btndev_ids
+	 * matches on EV_KEY|EV_REL in evbit); the original set it in
+	 * relbit, which denotes REL_* axis codes. */
+	__set_bit( EV_REL, input_dev->evbit);
+
+	error = input_register_device(input_dev);
+	if( error ) {
+		input_free_device( input_dev );
+		printk(KERN_ERR "*** a3g-button: error input_register_device()\n");
+		return error;
+	}
+
+	error = input_register_handler( &btndev_handler);
+	if( error ) {
+		printk(KERN_ERR "*** a3g-button: error input_register_handler()\n");
+		/* The device is registered by now: it must be unregistered
+		 * (which drops its reference); calling input_free_device()
+		 * on a registered device, as the original did, is an input
+		 * API misuse. */
+		input_unregister_device( input_dev );
+		return error;
+	}
+
+	btndev_ptr = btndev_create( input_dev , &btndev_handler, BTNDEV_ZERO);
+	if( IS_ERR(btndev_ptr) ) {
+		printk(KERN_ERR "*** a3g-button: error btndev_create()\n");
+		input_unregister_handler( &btndev_handler);
+		input_unregister_device( input_dev );
+		return PTR_ERR( btndev_ptr );
+	}
+
+	return 0;
+}
+
+
+
+/*
+ * Polling kthread: samples the button every POL_RATE_MSECS and, on a
+ * state change, fills btn_event, raises btn_ev_flag and wakes readers
+ * sleeping in btndev_read().
+ * NOTE(review): if button_port is NULL the loop spins without sleeping;
+ * and set_freezable() is called but try_to_freeze() never is, so the
+ * thread presumably never actually freezes -- confirm both.
+ */
+static int btn_thread( void * data ) {
+
+	struct task_struct * tsk = current;
+	struct sched_param param = { .sched_priority = 1};
+	int btn_last_state, btn_cur_state;
+	btn_cur_state = btn_last_state = EV_REL;
+
+	sched_setscheduler(tsk, SCHED_FIFO, &param);
+	set_freezable();
+
+	if( button_port ) {
+		btn_last_state = is_button_pressed();
+	}
+	while( !kthread_should_stop() ) {
+
+		if( button_port ) {
+			msleep(POL_RATE_MSECS);
+			/* Re-check: the exit path may have unmapped the
+			 * port while we slept. */
+			if( button_port ) {
+				btn_cur_state = is_button_pressed();
+			}
+			if( btn_last_state != btn_cur_state ){
+				btn_last_state = btn_cur_state;
+				do_gettimeofday(&btn_event.time);
+				btn_event.type = RESET_BTN;
+				btn_event.code = btn_cur_state;
+				btn_event.value = btn_cur_state;
+				btn_ev_flag = 1; /* wake up event read */
+				if( btndev_ptr ) {
+					wake_up_interruptible(&(btndev_ptr->wait));
+				}
+
+			}
+		}
+	}
+
+	return 0;
+}
+
+
+/**************************************************/
+/*
+ * Module init: map the CS2 button window, register the input machinery
+ * and start the polling thread.  Returns 0 or a negative errno.
+ */
+static int __init a3g_button_init(void) {
+
+	resource_size_t res_size;
+	struct resource *phys_res = &a3g_res;
+	int retval;
+
+	res_size = resource_size(phys_res);
+
+	if( !request_mem_region(phys_res->start, res_size, phys_res->name) ) {
+		printk(KERN_DEBUG "**** error request_mem_region()\n");
+		return -EBUSY;
+	}
+
+	button_port = ioremap(phys_res->start, res_size);
+	if (button_port == NULL) {
+		release_mem_region(phys_res->start, res_size);
+		printk(KERN_DEBUG "*** Error ioremap()");
+		return -ENOMEM;
+	}
+
+	retval = button_dev_init(NULL);
+	if( retval != 0 ) {
+		printk( KERN_ERR "*** failed button_dev_init() %d\n", retval);
+		goto err_unmap;
+	}
+
+	/* kthread_run() reports failure via ERR_PTR(), never NULL, so the
+	 * original NULL test could not detect it.  Also undo the input
+	 * registration on failure instead of leaking it. */
+	btn_threadptr = kthread_run( btn_thread, NULL, "btn_t");
+	if (IS_ERR(btn_threadptr)) {
+		retval = PTR_ERR(btn_threadptr);
+		btn_threadptr = NULL;
+		btndev_destroy( btndev_ptr );
+		input_unregister_handler( &btndev_handler);
+		input_unregister_device(input_dev);
+		goto err_unmap;
+	}
+
+	return 0;
+
+err_unmap:
+	iounmap(button_port);
+	button_port = NULL;
+	release_mem_region(a3g_res.start, res_size);
+	return retval;
+}
+
+/**************************************************/
+/*
+ * Module exit: unmap the button window first (btn_thread() checks
+ * button_port before touching hardware), then tear down the char
+ * device, stop the polling thread and unregister the input objects.
+ * NOTE(review): btndev_ptr is destroyed while btn_thread() may still
+ * dereference it for the wake-up, and the handler/device are
+ * unregistered after the destroy -- confirm this teardown ordering.
+ */
+static void __exit a3g_button_exit(void) {
+	/*platform_driver_unregister( &a3g_button_driver);*/
+	if( button_port ) {
+		iounmap(button_port);
+		button_port = NULL;
+		release_mem_region(a3g_res.start, (a3g_res.end - a3g_res.start + 1));
+	}
+
+	btndev_destroy( btndev_ptr );
+	if( btn_threadptr ) {
+		kthread_stop(btn_threadptr);
+		btn_threadptr = NULL;
+	}
+
+	input_unregister_handler( &btndev_handler);
+	input_unregister_device(input_dev);
+}
+
+module_init( a3g_button_init );
+module_exit( a3g_button_exit );
+
+MODULE_AUTHOR("Hai Le");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Support for reset button on Apollo3G board");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 46d72704d60..6b2691c49d2 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -30,6 +30,10 @@ obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
+
+obj-y += led-class-3g.o
+obj-y += leds-apollo3g.o
+
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class-3g.c b/drivers/leds/led-class-3g.c
new file mode 100644
index 00000000000..c0c1c11cbdb
--- /dev/null
+++ b/drivers/leds/led-class-3g.c
@@ -0,0 +1,281 @@
+/*
+ * LED Class Core
+ *
+ * Copyright (C) 2005 John Lenz <lenz@cs.wisc.edu>
+ * Copyright (C) 2005-2007 Richard Purdie <rpurdie@openedhand.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/leds.h>
+#include "leds.h"
+
+static struct class *leds_class;
+
+/* Refresh the cached colour from the driver, when a getter exists. */
+static void led_update_color(struct led_classdev *led_cdev)
+{
+ if (!led_cdev->color_get)
+ return;
+
+ led_cdev->color = led_cdev->color_get(led_cdev);
+}
+
+/*
+ * sysfs "color" store: the first character of the input selects the
+ * colour (r=red g=green b=blue y=yellow w=white o=off).
+ *
+ * Fix: unrecognised input is now ignored.  The original passed a
+ * sentinel value of 9 through led_set_color(), caching an
+ * out-of-range colour that led_color_show() later used as an index
+ * into a six-entry name table.
+ */
+static ssize_t led_color_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned long state;
+
+ switch (buf[0]) {
+ case 'r': /* red */
+ state = 1;
+ break;
+ case 'g': /* green */
+ state = 2;
+ break;
+ case 'b': /* blue */
+ state = 3;
+ break;
+ case 'y': /* yellow */
+ state = 4;
+ break;
+ case 'w': /* white */
+ state = 5;
+ break;
+ case 'o': /* off */
+ state = 0;
+ break;
+ default:
+ /* unknown colour: leave the LED and cached state untouched */
+ return (ssize_t)size;
+ }
+
+ led_set_color(led_cdev, state);
+
+ return (ssize_t)size;
+}
+
+/*
+ * sysfs "color" show: print the name of the current colour.
+ *
+ * Fix: bounds-check the colour index before using it.  The cached
+ * colour can be out of range (the a3g getter falls through with a raw
+ * register byte for unnamed bit combinations), which previously read
+ * past the end of the six-entry name table.
+ */
+static ssize_t led_color_show(struct device *dev,
+ struct device_attribute *attr, char *buf) {
+
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ static const char *readbuf[] = {"off", "red", "green", "blue", "yellow", "white"};
+ /* no lock needed for this */
+ led_update_color(led_cdev);
+
+ if ((unsigned int)led_cdev->color >= ARRAY_SIZE(readbuf))
+ return sprintf(buf, "unknown\n");
+
+ return sprintf(buf, "%s\n", readbuf[led_cdev->color]);
+}
+
+/* sysfs "blink" show: report "yes" when the cached blink mode is 1,
+ * "no" otherwise (including the forced-blink mode 2). */
+static ssize_t led_blink_show(struct device *dev, struct device_attribute *attr,
+ char *buf) {
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ const char *state = (led_cdev->blink == 1) ? "yes" : "no";
+
+ return sprintf(buf, "%s\n", state);
+}
+
+/* sysfs "blink" store: 'y' = blink, 'f' = forced blink, anything
+ * else (including 'n') = stop blinking. */
+static ssize_t led_blink_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size) {
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ int mode;
+
+ switch (buf[0]) {
+ case 'y':
+ mode = 1;
+ break;
+ case 'f':
+ mode = 2;
+ break;
+ default:
+ mode = 0;
+ break;
+ }
+
+ led_set_blink( led_cdev, mode );
+
+ return (ssize_t)size;
+}
+
+/* sysfs "max_brightness" show: print the registered ceiling. */
+static ssize_t led_max_brightness_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned int max = led_cdev->max_brightness;
+
+ return sprintf(buf, "%u\n", max);
+}
+
+/*static DEVICE_ATTR(brightness, 0644, led_brightness_show, led_brightness_store);*/
+static DEVICE_ATTR(color, 0644, led_color_show, led_color_store);
+static DEVICE_ATTR(blink, 0644, led_blink_show, led_blink_store);
+static DEVICE_ATTR(max_brightness, 0444, led_max_brightness_show, NULL);
+#ifdef CONFIG_LEDS_TRIGGERS
+static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
+#endif
+
+/**
+ * led_classdev_suspend - suspend an led_classdev.
+ * @led_cdev: the led_classdev to suspend.
+ *
+ * Marks the device suspended and forces the colour to 0 (off).
+ * NOTE(review): ->color_set is called unconditionally; a classdev
+ * registered without a colour setter would oops here — confirm all
+ * users provide one.
+ */
+void led_classdev_suspend(struct led_classdev *led_cdev)
+{
+ led_cdev->flags |= LED_SUSPENDED;
+ led_cdev->color_set(led_cdev, 0);
+}
+EXPORT_SYMBOL_GPL(led_classdev_suspend);
+
+/**
+ * led_classdev_resume - resume an led_classdev.
+ * @led_cdev: the led_classdev to resume.
+ *
+ * Restores the colour cached before suspend, then clears the
+ * suspended flag (cleared last so led_set_color() calls made while
+ * suspended stay cache-only until the restore is done).
+ */
+void led_classdev_resume(struct led_classdev *led_cdev)
+{
+ led_cdev->color_set(led_cdev, led_cdev->color);
+ led_cdev->flags &= ~LED_SUSPENDED;
+}
+EXPORT_SYMBOL_GPL(led_classdev_resume);
+
+/* Class suspend hook: forward to led_classdev_suspend() only for
+ * devices that opted into core-managed suspend/resume. */
+static int led_suspend(struct device *dev, pm_message_t state)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ if (!(led_cdev->flags & LED_CORE_SUSPENDRESUME))
+ return 0;
+
+ led_classdev_suspend(led_cdev);
+ return 0;
+}
+
+/* Class resume hook: mirror of led_suspend(). */
+static int led_resume(struct device *dev)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ if (!(led_cdev->flags & LED_CORE_SUSPENDRESUME))
+ return 0;
+
+ led_classdev_resume(led_cdev);
+ return 0;
+}
+
+/**
+ * led_classdev_register - register a new object of led_classdev class.
+ * @parent: The device to register.
+ * @led_cdev: the led_classdev structure for this device.
+ *
+ * Creates the class device, its sysfs attributes (color, blink,
+ * max_brightness and — with CONFIG_LEDS_TRIGGERS — trigger) and adds
+ * the device to the global leds_list.  Returns 0 or a negative errno.
+ *
+ * Fix: the error path now takes leds_list_lock around list_del(),
+ * matching the locked list_add_tail() above; the original removed the
+ * node without the lock.
+ */
+int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
+{
+ int rc;
+
+ led_cdev->dev = device_create(leds_class, parent, 0, led_cdev,
+ "%s", led_cdev->name);
+ if (IS_ERR(led_cdev->dev))
+ return PTR_ERR(led_cdev->dev);
+
+ /* register the attributes */
+ rc = device_create_file(led_cdev->dev, &dev_attr_color);
+ if (rc)
+ goto err_out;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_blink);
+ if (rc)
+ goto err_out;
+
+#ifdef CONFIG_LEDS_TRIGGERS
+ init_rwsem(&led_cdev->trigger_lock);
+#endif
+ /* add to the list of leds */
+ down_write(&leds_list_lock);
+ list_add_tail(&led_cdev->node, &leds_list);
+ up_write(&leds_list_lock);
+
+ if (!led_cdev->max_brightness)
+ led_cdev->max_brightness = LED_FULL;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_max_brightness);
+ if (rc)
+ goto err_out_attr_max;
+
+ led_update_color(led_cdev);
+
+#ifdef CONFIG_LEDS_TRIGGERS
+ rc = device_create_file(led_cdev->dev, &dev_attr_trigger);
+ if (rc)
+ goto err_out_led_list;
+
+ led_trigger_set_default(led_cdev);
+#endif
+
+ printk(KERN_INFO "Registered led device: %s\n",
+ led_cdev->name);
+
+ return 0;
+
+#ifdef CONFIG_LEDS_TRIGGERS
+err_out_led_list:
+ device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
+#endif
+err_out_attr_max:
+ device_remove_file(led_cdev->dev, &dev_attr_color);
+ /* removal must hold the same lock as the insertion above */
+ down_write(&leds_list_lock);
+ list_del(&led_cdev->node);
+ up_write(&leds_list_lock);
+err_out:
+ /* device_unregister() reaps any attribute files still present */
+ device_unregister(led_cdev->dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(led_classdev_register);
+
+/**
+ * led_classdev_unregister - unregisters a object of led_properties class.
+ * @led_cdev: the led device to unregister
+ *
+ * Unregisters a previously registered via led_classdev_register object.
+ * Removes the attribute files created at registration time; the
+ * original forgot dev_attr_blink (device_unregister() would reap it,
+ * but the explicit removal keeps register/unregister symmetric).
+ */
+void led_classdev_unregister(struct led_classdev *led_cdev)
+{
+ device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
+ device_remove_file(led_cdev->dev, &dev_attr_blink);
+ device_remove_file(led_cdev->dev, &dev_attr_color);
+#ifdef CONFIG_LEDS_TRIGGERS
+ device_remove_file(led_cdev->dev, &dev_attr_trigger);
+ /* detach any active trigger under its lock */
+ down_write(&led_cdev->trigger_lock);
+ if (led_cdev->trigger)
+ led_trigger_set(led_cdev, NULL);
+ up_write(&led_cdev->trigger_lock);
+#endif
+
+ device_unregister(led_cdev->dev);
+
+ down_write(&leds_list_lock);
+ list_del(&led_cdev->node);
+ up_write(&leds_list_lock);
+}
+EXPORT_SYMBOL_GPL(led_classdev_unregister);
+
+/* Create the "leds" class and install the suspend/resume hooks. */
+static int __init leds_init(void)
+{
+ leds_class = class_create(THIS_MODULE, "leds");
+ if (IS_ERR(leds_class))
+ return PTR_ERR(leds_class);
+
+ leds_class->suspend = led_suspend;
+ leds_class->resume = led_resume;
+
+ return 0;
+}
+
+/* Destroy the "leds" class on module unload. */
+static void __exit leds_exit(void)
+{
+ class_destroy(leds_class);
+}
+
+subsys_initcall(leds_init);
+module_exit(leds_exit);
+
+MODULE_AUTHOR("John Lenz, Richard Purdie");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("LED Class Interface");
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index f2cc13d7681..b1e6be1ea63 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -30,17 +30,6 @@ static void led_update_brightness(struct led_classdev *led_cdev)
led_cdev->brightness = led_cdev->brightness_get(led_cdev);
}
-static ssize_t led_brightness_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct led_classdev *led_cdev = dev_get_drvdata(dev);
-
- /* no lock needed for this */
- led_update_brightness(led_cdev);
-
- return sprintf(buf, "%u\n", led_cdev->brightness);
-}
-
static ssize_t led_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
@@ -52,18 +41,29 @@ static ssize_t led_brightness_store(struct device *dev,
if (*after && isspace(*after))
count++;
-
if (count == size) {
ret = count;
if (state == LED_OFF)
led_trigger_remove(led_cdev);
+
led_set_brightness(led_cdev, state);
}
return ret;
}
+static ssize_t led_brightness_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+ /* no lock needed for this */
+ led_update_brightness(led_cdev);
+
+ return sprintf(buf, "%u\n", led_cdev->brightness);
+}
+
static ssize_t led_max_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/leds/leds-apollo3g.c b/drivers/leds/leds-apollo3g.c
new file mode 100644
index 00000000000..6e796dc7c24
--- /dev/null
+++ b/drivers/leds/leds-apollo3g.c
@@ -0,0 +1,365 @@
+/*
+ * LED Platform driver for Apollo3G board.
+ *
+ * © 2010 Western Digital Technologies, Inc. All rights reserved.
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/leds.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/ioport.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <linux/suspend.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/signal.h>
+#include <linux/freezer.h>
+
+
+
+#define _3G_BIT_LED_RED 0x10
+#define _3G_BIT_LED_GREEN 0x20
+#define _3G_BIT_LED_BLUE 0x40
+#define _3G_BIT_LED_YELLOW (_3G_BIT_LED_GREEN | _3G_BIT_LED_RED)
+#define _3G_BIT_LED_ALL 0x70
+#define _3G_BIT_LED_OFF 0x00
+
+#define _3G_LED_OFF 0
+#define _3G_LED_RED 1
+#define _3G_LED_GREEN 2
+#define _3G_LED_BLUE 3
+#define _3G_LED_YELLOW 4
+#define _3G_LED_ALL 5
+#define _3G_LED_WHITE 5 /* same as _ALL */
+
+#define _BLINK_YES 1
+#define _BLINK_NO 0
+
+#define HDD_BLINK_RATE 250
+
+static DEFINE_SPINLOCK(led_lock);
+struct task_struct * threadptr;
+
+wait_queue_head_t ts_wait;
+int blink_flag = 0;
+
+
+void __iomem *led_port = NULL;
+
+typedef struct led_state_s {
+ int cur_color;
+ int cur_action;
+} led_state_t;
+
+led_state_t led_state = { .cur_color = _3G_LED_YELLOW,
+ .cur_action = _BLINK_NO
+ };
+
+EXPORT_SYMBOL(led_port);
+
+/****************************************************/
+/* Read the raw LED register byte from the memory-mapped 3G LED port.
+ * Assumes led_port was ioremap()ed by a3g_led_init(); readb() on an
+ * unmapped pointer would fault. */
+ u8 read_3gled( void ) {
+ return readb(led_port);
+}
+
+/****************************************************/
+/* Read-modify-write of the 3G LED register: clear the bits selected
+ * by @mask, then set them from @value.  Returns the byte written. */
+u8 write_3gled( u8 mask ,u8 value ) {
+ u8 newval;
+
+ newval = (u8)((read_3gled() & ~mask) | (value & mask));
+ writeb( newval, led_port);
+
+ return newval;
+}
+
+/****************************************************/
+/* Translate the LED register bits into a _3G_LED_* colour code.
+ * Bit combinations with no named colour (e.g. red|blue) are handed
+ * back as the raw register byte, exactly as before. */
+static enum led_brightness a3g_led_get( struct led_classdev * led_cdev ) {
+ unsigned char raw = readb(led_port);
+
+ switch (raw & _3G_BIT_LED_ALL) {
+ case _3G_BIT_LED_RED:
+ return _3G_LED_RED;
+ case _3G_BIT_LED_GREEN:
+ return _3G_LED_GREEN;
+ case _3G_BIT_LED_BLUE:
+ return _3G_LED_BLUE;
+ case _3G_BIT_LED_YELLOW:
+ return _3G_LED_YELLOW;
+ case _3G_BIT_LED_ALL:
+ return _3G_LED_ALL;
+ case _3G_BIT_LED_OFF:
+ return _3G_LED_OFF;
+ default:
+ /* unnamed combination: return the raw (unmasked) byte */
+ return raw;
+ }
+}
+
+/****************************************************/
+/* Drive the LED to the requested _3G_LED_* colour under the port
+ * spinlock and cache the colour in led_state.  Codes outside the
+ * known range write nothing but are still cached, as before. */
+static void a3g_led_set(struct led_classdev *led_cdev, enum led_brightness value) {
+
+ static const u8 colour_bits[] = {
+ [_3G_LED_OFF] = _3G_BIT_LED_OFF,
+ [_3G_LED_RED] = _3G_BIT_LED_RED,
+ [_3G_LED_GREEN] = _3G_BIT_LED_GREEN,
+ [_3G_LED_BLUE] = _3G_BIT_LED_BLUE,
+ [_3G_LED_YELLOW] = _3G_BIT_LED_YELLOW,
+ [_3G_LED_ALL] = _3G_BIT_LED_ALL,
+ };
+ unsigned long flags;
+
+ spin_lock_irqsave(&led_lock, flags);
+
+ if ((unsigned int)value < ARRAY_SIZE(colour_bits))
+ write_3gled(_3G_BIT_LED_ALL, colour_bits[value]);
+
+ led_state.cur_color = value;
+
+ spin_unlock_irqrestore(&led_lock, flags);
+
+}
+
+/****************************************************/
+/* Set the blink mode requested via sysfs:
+ *   0 = stop blinking, 1 = blink, 2 = forced blink (sticky: once
+ *   set, further requests are ignored until cleared elsewhere).
+ * Wakes the blink thread when blinking is enabled.
+ * NOTE(review): blink_flag is read and written without a lock (the
+ * spinlock calls are commented out) — racy against signal_hdd_led()
+ * and the blink thread; confirm this is acceptable. */
+static int a3g_led_blink(struct led_classdev *led_cdev, int value) {
+
+ /*
+ * if forced blink, don't set blink_flag
+ */
+ if( blink_flag == 2 ) {
+ return 0;
+ }
+
+ /*spin_lock_irqsave(&led_lock, flags);*/
+ /* user wants to blink led */
+ if( value == 1 ) {
+ wake_up(&ts_wait);
+ blink_flag = 1;
+
+ }
+ else if( value == 0) {
+ blink_flag = 0;
+ }
+ else if( value == 2 ) {
+ wake_up(&ts_wait);
+ blink_flag = 2;
+ }
+ /* spin_unlock_irqrestore(&led_lock, flags);*/
+
+ return 0;
+}
+
+/****************************************************/
+/*
+ * Request HDD-activity blinking.
+ * @flag:  non-zero to blink, 0 to stop
+ * @color: blinking colour; _3G_LED_RED switches the LED to red first
+ *
+ * Blinking is only started while the current colour is green (the
+ * other colour conditions are compiled out below); forced blink
+ * (blink_flag == 2) is never overridden.
+ * NOTE(review): blink_flag is updated without a lock — same race as
+ * in a3g_led_blink(); confirm acceptable.
+ */
+void signal_hdd_led(int flag, int color) {
+
+ /*
+ * if forced blinking was set, keep it blinking forever
+ */
+ if( blink_flag == 2 ) {
+ return;
+ }
+
+ if( flag && /* blink or not */
+ (led_state.cur_color == _3G_LED_GREEN)
+#if 0
+ (led_state.cur_color != _3G_LED_WHITE) && /* don't touch fw update led */
+ (led_state.cur_color != _3G_LED_RED) && /* don't touch system error led */
+ !((led_state.cur_color == _3G_LED_BLUE) && (led_state.cur_action == _BLINK_YES)) && /* leave identity alone */
+ (color != _3G_LED_RED)
+#endif
+ ) {
+ if( color == _3G_LED_RED ) {
+ a3g_led_set( NULL, _3G_LED_RED);
+ }
+ blink_flag = 1;
+ wake_up(&ts_wait);
+ }
+ else {
+ blink_flag = 0;
+ }
+}
+
+/* classdev hooks: colour get/set plus the 3G-specific blink control */
+static struct led_classdev a3g_led_dev = {
+ .name = "a3g_led",
+ .color_set = a3g_led_set,
+ .color_get = a3g_led_get,
+ .blink_set_3g = a3g_led_blink,
+};
+
+/****************************************************/
+/* Platform probe stub: all real setup happens in a3g_led_init(). */
+static int __init a3g_led_probe(struct platform_device *pdev ) {
+
+ /* Not used */
+ return 0;
+}
+
+/****************************************************/
+/* Platform remove: drop the classdev and unmap the LED register.
+ * NOTE(review): the mem region stays requested and the blink thread
+ * keeps running here — both are only torn down in a3g_led_exit();
+ * confirm remove cannot happen without module exit following. */
+static int __devexit a3g_led_remove(struct platform_device *pdev){
+
+ led_classdev_unregister(&a3g_led_dev);
+ if( led_port ){
+ iounmap(led_port);
+ led_port = NULL;
+ }
+ return 0;
+}
+static struct platform_driver a3g_led_driver = {
+ .probe = a3g_led_probe,
+ .remove = __devexit_p(a3g_led_remove),
+ .driver = {
+ .name = "a3g-leds",
+ .owner = THIS_MODULE,
+ },
+};
+
+#if 0
+struct platform_device {
+ const char * name;
+ int id;
+ struct device dev;
+ u32 num_resources;
+ struct resource * resource;
+
+ struct platform_device_id *id_entry;
+
+ /* arch specific additions */
+ struct pdev_archdata archdata;
+};
+#endif
+
+static struct resource a3g_res = {
+ .name = "cs1",
+ .start = 0x4e0000000ULL,
+ .end = 0x4e0000300ULL,
+ /*.flags = IORESOURCE_IO,*/
+ .flags = IORESOURCE_MEM,
+ };
+
+
+/****************************************************/
+/* Blink worker: sleeps on ts_wait until blink_flag is raised (or the
+ * thread is asked to stop), then toggles the currently-lit colour
+ * off/on at HDD_BLINK_RATE.  On every loop iteration the cached
+ * colour is restored first, so the LED ends up solid when blinking
+ * stops.  Runs as SCHED_FIFO prio 1 and is freezable for suspend. */
+static int a3g_led_blink_thread( void * data ) {
+ unsigned char readval, color;
+
+ struct task_struct * tsk = current;
+ struct sched_param param = { .sched_priority = 1};
+
+ init_waitqueue_head(&ts_wait);
+
+ sched_setscheduler(tsk, SCHED_FIFO, &param);
+ set_freezable();
+
+ while( !kthread_should_stop() ) {
+
+ led_state.cur_action = _BLINK_NO;
+ /* always set current color before blinking */
+ a3g_led_set( NULL, led_state.cur_color);
+ wait_event_freezable_timeout(ts_wait, blink_flag || kthread_should_stop(), MAX_SCHEDULE_TIMEOUT);
+ /* one off/on cycle per wakeup while blink_flag stays set */
+ if( led_port ) {
+ readval = readb(led_port);
+ color = readval & _3G_BIT_LED_ALL;
+ write_3gled( _3G_BIT_LED_ALL, _3G_BIT_LED_OFF);
+ msleep(HDD_BLINK_RATE);
+ write_3gled( _3G_BIT_LED_ALL, color);
+ msleep(HDD_BLINK_RATE);
+ led_state.cur_action = _BLINK_YES;
+ }
+ }
+
+ return 0;
+}
+
+
+/****************************************************/
+/* Module init: map the LED register (CS1), register the classdev,
+ * start the blink thread and register the platform driver.
+ *
+ * Fixes over the original:
+ *  - proper negative errno returns instead of -1;
+ *  - no led_classdev_unregister() on a device whose registration just
+ *    FAILED;
+ *  - kthread_run() failure detected with IS_ERR() (it returns an
+ *    ERR_PTR, never NULL);
+ *  - full unwind when platform_driver_register() fails (previously
+ *    everything leaked). */
+static int __init a3g_led_init(void) {
+
+ resource_size_t res_size;
+ struct resource *phys_res = &a3g_res;
+ int retval;
+
+ res_size = resource_size(phys_res);
+
+ if( !request_mem_region(phys_res->start, res_size, phys_res->name) ) {
+ printk(KERN_DEBUG "**** error request_mem_region()\n");
+ return -EBUSY;
+ }
+
+ led_port = ioremap(phys_res->start, res_size);
+ if (led_port == NULL) {
+ printk(KERN_DEBUG "*** Error ioremap()\n");
+ retval = -ENOMEM;
+ goto err_release;
+ }
+
+ retval = led_classdev_register(NULL, &a3g_led_dev);
+ if (retval)
+ goto err_unmap;
+
+ threadptr = kthread_run( a3g_led_blink_thread, NULL, "a3gblink_t");
+ if (IS_ERR(threadptr)) {
+ retval = PTR_ERR(threadptr);
+ threadptr = NULL;
+ goto err_classdev;
+ }
+
+ retval = platform_driver_register(&a3g_led_driver);
+ if (retval)
+ goto err_thread;
+
+ return 0;
+
+err_thread:
+ kthread_stop(threadptr);
+ threadptr = NULL;
+err_classdev:
+ led_classdev_unregister(&a3g_led_dev);
+err_unmap:
+ iounmap(led_port);
+ led_port = NULL;
+err_release:
+ release_mem_region(phys_res->start, res_size);
+ return retval;
+}
+
+/****************************************************/
+/* Module unload: unwind in reverse order of a3g_led_init().
+ *
+ * Fix: the blink thread is stopped FIRST and unconditionally — it
+ * dereferences led_port, so it must be gone before iounmap(); the
+ * original stopped it only when led_port was non-NULL and only after
+ * the classdev had been unregistered. */
+static void __exit a3g_led_exit(void) {
+
+ if( threadptr ){
+ kthread_stop(threadptr);
+ threadptr = NULL;
+ }
+
+ platform_driver_unregister(&a3g_led_driver);
+
+ if( led_port ){
+ led_classdev_unregister(&a3g_led_dev);
+ iounmap(led_port);
+ led_port = NULL;
+ release_mem_region(a3g_res.start, (a3g_res.end - a3g_res.start + 1));
+ }
+}
+
+
+module_init(a3g_led_init);
+module_exit(a3g_led_exit);
+
+MODULE_AUTHOR("Hai Le <hai.le@wdc.com>");
+MODULE_DESCRIPTION("Apollo3G LED driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 0823e2622e8..33927c2a0d6 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -18,6 +18,9 @@
#include <asm/gpio.h>
+
+#define CONFIG_LEDS_GPIO_PLATFORM
+
struct gpio_led_data {
struct led_classdev cdev;
unsigned gpio;
@@ -141,6 +144,7 @@ static int __devinit gpio_led_probe(struct platform_device *pdev)
struct gpio_led_data *leds_data;
int i, ret = 0;
+printk(KERN_INFO ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> 1\n");
if (!pdata)
return -EBUSY;
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index 2dd8ecbfdc3..f4b75500b60 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -17,6 +17,25 @@
#include <linux/rwsem.h>
#include <linux/leds.h>
+/* Cache the colour and push it to the driver unless suspended
+ * (led_classdev_resume() replays the cached value). */
+static inline void led_set_color(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ led_cdev->color = value;
+ if (!(led_cdev->flags & LED_SUSPENDED))
+ led_cdev->color_set(led_cdev, value);
+}
+
+/* Return the last colour cached by led_set_color(). */
+static inline int led_get_color(struct led_classdev *led_cdev)
+{
+ return led_cdev->color;
+}
+/* Cache the blink mode and forward it to the 3G blink hook.
+ * NOTE(review): blink_set_3g is called unconditionally — confirm
+ * every registered classdev provides it. */
+static inline void led_set_blink(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ led_cdev->blink = value;
+ led_cdev->blink_set_3g(led_cdev, value);
+}
+
static inline void led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 2158377a135..bb72359d8dc 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -165,6 +165,19 @@ config MULTICORE_RAID456
If unsure, say N.
+config MD_RAID_SKIP_BIO_COPY
+ bool "Skip intermediate bio->cache copy"
+ depends on MD_RAID456
+ default n
+ ---help---
+ Skip intermediate data copying between the bio requested to write and
+ the disk cache in <sh> if the full-stripe write operation is on the
+ way. This might improve the performance of write operations in some
+ dedicated cases but generally eliminating disk cache slows the
+ performance down.
+
+ If unsure, say N.
+
config MD_RAID6_PQ
tristate
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index e355e7f6a53..7d424aa10fa 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -11,8 +11,8 @@ dm-mirror-y += dm-raid1.o
dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
md-mod-y += md.o bitmap.o
-raid456-y += raid5.o
-raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
+raid456-y += raid5.o
+raid6_pq-y += raid6algos.o raid6recov.o raid6tables.o \
raid6int1.o raid6int2.o raid6int4.o \
raid6int8.o raid6int16.o raid6int32.o \
raid6altivec1.o raid6altivec2.o raid6altivec4.o \
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 08f7471d015..1297c9db0cf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -217,12 +217,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio)
return 0;
}
rcu_read_lock();
- if (mddev->suspended) {
+ if (mddev->suspended || mddev->barrier) {
DEFINE_WAIT(__wait);
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
- if (!mddev->suspended)
+ if (!mddev->suspended && !mddev->barrier)
break;
rcu_read_unlock();
schedule();
@@ -264,11 +264,117 @@ static void mddev_resume(mddev_t *mddev)
int mddev_congested(mddev_t *mddev, int bits)
{
+ if (mddev->barrier)
+ return 1;
return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);
+/*
+ * Generic barrier handling for md
+ */
+
+/* Completion callback for the per-rdev zero-length barrier bios.
+ * mddev->barrier is the pending barrier bio during the pre-flush
+ * phase, or the sentinel (void*)1 during the post-flush phase set by
+ * md_submit_barrier().  The last completion either schedules the
+ * worker (pre-flush) or clears the barrier and wakes waiters
+ * (post-flush). */
+static void md_end_barrier(struct bio *bio, int err)
+{
+ mdk_rdev_t *rdev = bio->bi_private;
+ mddev_t *mddev = rdev->mddev;
+ /* record EOPNOTSUPP on the original bio, but never on the sentinel */
+ if (err == -EOPNOTSUPP && mddev->barrier != (void*)1)
+ set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags);
+
+ rdev_dec_pending(rdev, mddev);
+
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ if (mddev->barrier == (void*)1) {
+ /* post-flush done: barrier sequence complete */
+ mddev->barrier = NULL;
+ wake_up(&mddev->sb_wait);
+ } else
+ /* pre-flush done: hand the payload to the worker */
+ schedule_work(&mddev->barrier_work);
+ }
+ bio_put(bio);
+}
+
+/* Worker scheduled by md_end_barrier() once all pre-flushes finish:
+ * submit the original bio with the barrier flag stripped, switch
+ * mddev->barrier to the (void*)1 sentinel, and issue a second round
+ * of flushes to every in-sync device.  If any pre-flush reported
+ * EOPNOTSUPP, the original bio is failed with that error instead. */
+static void md_submit_barrier(struct work_struct *ws)
+{
+ mddev_t *mddev = container_of(ws, mddev_t, barrier_work);
+ struct bio *bio = mddev->barrier;
+
+ /* hold one reference so the last flush can't complete early */
+ atomic_set(&mddev->flush_pending, 1);
+ if (!test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) {
+ mdk_rdev_t *rdev;
+
+ bio->bi_rw &= ~(1<<BIO_RW_BARRIER);
+ if (mddev->pers->make_request(mddev->queue, bio))
+ generic_make_request(bio);
+ mddev->barrier = (void*)1;
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags)) {
+ /* Take two references, one is dropped
+ * when request finishes, one after
+ * we reclaim rcu_read_lock
+ */
+ struct bio *bi;
+ atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ bi = bio_alloc(GFP_KERNEL, 0);
+ bi->bi_end_io = md_end_barrier;
+ bi->bi_private = rdev;
+ bi->bi_bdev = rdev->bdev;
+ atomic_inc(&mddev->flush_pending);
+ submit_bio(WRITE_BARRIER, bi);
+ rcu_read_lock();
+ rdev_dec_pending(rdev, mddev);
+ }
+ rcu_read_unlock();
+ } else
+ bio_endio(bio, -EOPNOTSUPP);
+ /* drop our reference; if no flushes were issued, finish here */
+ if (atomic_dec_and_test(&mddev->flush_pending)) {
+ mddev->barrier = NULL;
+ wake_up(&mddev->sb_wait);
+ }
+}
+
+/* Entry point for a barrier bio from a personality's make_request:
+ * wait for any in-flight barrier to drain, claim mddev->barrier,
+ * then issue a zero-length WRITE_BARRIER (pre-flush) to every
+ * in-sync device.  When the last pre-flush completes,
+ * md_end_barrier() schedules md_submit_barrier() to submit the
+ * payload and the post-flush round. */
+void md_barrier_request(mddev_t *mddev, struct bio *bio)
+{
+ mdk_rdev_t *rdev;
+
+ /* only one barrier sequence at a time */
+ spin_lock_irq(&mddev->write_lock);
+ wait_event_lock_irq(mddev->sb_wait,
+ !mddev->barrier,
+ mddev->write_lock, /*nothing*/);
+ mddev->barrier = bio;
+ spin_unlock_irq(&mddev->write_lock);
+
+ /* hold one reference while flushes are being issued */
+ atomic_set(&mddev->flush_pending, 1);
+ INIT_WORK(&mddev->barrier_work, md_submit_barrier);
+
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Faulty, &rdev->flags)) {
+ struct bio *bi;
+
+ /* two refs: one for the bio, one across the
+ * rcu_read_unlock() window */
+ atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev->nr_pending);
+ rcu_read_unlock();
+ bi = bio_alloc(GFP_KERNEL, 0);
+ bi->bi_end_io = md_end_barrier;
+ bi->bi_private = rdev;
+ bi->bi_bdev = rdev->bdev;
+ atomic_inc(&mddev->flush_pending);
+ submit_bio(WRITE_BARRIER, bi);
+ rcu_read_lock();
+ rdev_dec_pending(rdev, mddev);
+ }
+ rcu_read_unlock();
+ if (atomic_dec_and_test(&mddev->flush_pending))
+ schedule_work(&mddev->barrier_work);
+}
+EXPORT_SYMBOL(md_barrier_request);
static inline mddev_t *mddev_get(mddev_t *mddev)
{
atomic_inc(&mddev->active);
@@ -377,6 +483,7 @@ static mddev_t * mddev_find(dev_t unit)
atomic_set(&new->openers, 0);
atomic_set(&new->active_io, 0);
spin_lock_init(&new->write_lock);
+ atomic_set(&new->flush_pending, 0);
init_waitqueue_head(&new->sb_wait);
init_waitqueue_head(&new->recovery_wait);
new->reshape_position = MaxSector;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 87430fea287..abe8ba3ab01 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -292,6 +292,17 @@ struct mddev_s
struct mutex bitmap_mutex;
struct list_head all_mddevs;
+
+ /* Generic barrier handling.
+ * If there is a pending barrier request, all other
+ * writes are blocked while the devices are flushed.
+ * The last to finish a flush schedules a worker to
+ * submit the barrier request (without the barrier flag),
+ * then submit more flush requests.
+ */
+ struct bio *barrier;
+ atomic_t flush_pending;
+ struct work_struct barrier_work;
};
@@ -430,8 +441,9 @@ extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
-
extern int mddev_congested(mddev_t *mddev, int bits);
+extern void md_barrier_request(mddev_t *mddev, struct bio *bio);
+
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index ee7646f974a..cbc0a99f379 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -145,7 +145,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index d3a4ce06015..122d07af5b5 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -453,7 +453,7 @@ static int raid0_make_request(struct request_queue *q, struct bio *bio)
int cpu;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c2cb7b87b44..2fbf867f8b3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -804,7 +804,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
mdk_rdev_t *blocked_rdev;
if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
- bio_endio(bio, -EOPNOTSUPP);
+ md_barrier_request(mddev, bio);
return 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 431b9b26ca5..0d403ca12ae 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -639,7 +639,8 @@ static void mark_target_uptodate(struct stripe_head *sh, int target)
return;
tgt = &sh->dev[target];
- set_bit(R5_UPTODATE, &tgt->flags);
+ if(!tgt->dpage)
+ set_bit(R5_UPTODATE, &tgt->flags);
BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
clear_bit(R5_Wantcompute, &tgt->flags);
}
@@ -681,6 +682,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
struct dma_async_tx_descriptor *tx;
struct async_submit_ctl submit;
int i;
+ enum async_tx_flags flags = ASYNC_TX_FENCE | ASYNC_TX_XOR_ZERO_DST;
pr_debug("%s: stripe %llu block: %d\n",
__func__, (unsigned long long)sh->sector, target);
@@ -692,7 +694,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
atomic_inc(&sh->count);
- init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
+ init_async_submit(&submit, flags, NULL,
ops_complete_compute, sh, to_addr_conv(sh, percpu));
if (unlikely(count == 1))
tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
@@ -915,6 +917,7 @@ ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
struct page **xor_srcs = percpu->scribble;
int count = 0, pd_idx = sh->pd_idx, i;
struct async_submit_ctl submit;
+ enum async_tx_flags flags = ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST;
/* existing parity data subtracted */
struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
@@ -929,7 +932,7 @@ ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu,
xor_srcs[count++] = dev->page;
}
- init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
+ init_async_submit(&submit, flags, tx,
ops_complete_prexor, sh, to_addr_conv(sh, percpu));
tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
@@ -942,9 +945,80 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
int disks = sh->disks;
int i;
+#ifdef CONFIG_MD_RAID_SKIP_BIO_COPY
+ int pd_idx = sh->pd_idx;
+ int qd_idx = sh->raid_conf->level == 6 ?
+ raid6_next_disk(pd_idx, disks) : -1;
+ int fswrite = 1;
+#endif
+
pr_debug("%s: stripe %llu\n", __func__,
(unsigned long long)sh->sector);
+#ifdef CONFIG_MD_RAID_SKIP_BIO_COPY
+ /* initially assume that the operation is a full-stripe write*/
+ for (i = disks; i-- ;) {
+ struct r5dev *dev = &sh->dev[i];
+
+ if (unlikely(i == pd_idx || i == qd_idx))
+ continue;
+ if (unlikely(!test_bit(R5_Wantdrain, &dev->flags)))
+ goto do_copy;
+ if ((test_bit(R5_OVERWRITE, &dev->flags)) &&
+ !r5_next_bio(sh->dev[i].towrite, sh->dev[i].sector)) {
+ /* now check if there is only one bio_vec within
+ * the bio covers the sh->dev[i]
+ */
+ struct bio *pbio = sh->dev[i].towrite;
+ struct bio_vec *bvl;
+ int found = 0;
+ int bvec_page = pbio->bi_sector << 9, k;
+ int dev_page = sh->dev[i].sector << 9;
+
+ /* search for the bio_vec that covers dev[i].page */
+ bio_for_each_segment(bvl, pbio, k) {
+ if (bvec_page == dev_page &&
+ bio_iovec_idx(pbio,k)->bv_len ==
+ STRIPE_SIZE) {
+ /* found the vector which covers the
+ * strip fully
+ */
+ found = 1;
+ break;
+ }
+ bvec_page += bio_iovec_idx(pbio,k)->bv_len;
+ }
+ if (found) {
+ /* save the direct pointer to buffer */
+ if(dev->dpage)
+ printk("BIO bugs\n");
+ BUG_ON(dev->dpage);
+ dev->dpage = bio_iovec_idx(pbio,k)->bv_page;
+ clear_bit(R5_Skipped, &dev->flags);
+ continue;
+ }
+ }
+do_copy:
+ /* come here in two cases:
+ * - the dev[i] is not covered fully with the bio;
+ * - there are more than one bios cover the dev[i].
+ * in both cases do copy from bio to dev[i].page
+ */
+ pr_debug("%s: do copy because of disk %d\n", __FUNCTION__, i);
+ do {
+ /* restore dpages set */
+ sh->dev[i].dpage = NULL;
+ } while (++i != disks);
+ fswrite = 0;
+ break;
+ }
+
+ if (fswrite) {
+ /* won't add new txs right now, so run ops currently pending */
+ async_tx_issue_pending_all();
+ }
+#endif
+
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
struct bio *chosen;
@@ -959,6 +1033,13 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
wbi = dev->written = chosen;
spin_unlock(&sh->lock);
+#ifdef CONFIG_MD_RAID_SKIP_BIO_COPY
+ if (fswrite) {
+ /* just update dev bio vec pointer */
+ dev->vec.bv_page = dev->dpage;
+ continue;
+ }
+#endif
while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
tx = async_copy_data(1, wbi, dev->page,
@@ -985,8 +1066,10 @@ static void ops_complete_reconstruct(void *stripe_head_ref)
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
- if (dev->written || i == pd_idx || i == qd_idx)
- set_bit(R5_UPTODATE, &dev->flags);
+ if (dev->dpage)
+ set_bit(R5_Skipped, &dev->flags);
+ else if (dev->written || i == pd_idx || i == qd_idx)
+ set_bit(R5_UPTODATE, &dev->flags);
}
if (sh->reconstruct_state == reconstruct_state_drain_run)
@@ -1026,14 +1109,16 @@ ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu,
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (dev->written)
- xor_srcs[count++] = dev->page;
+ xor_srcs[count++] = dev->dpage ?
+ dev->dpage : dev->page;
}
} else {
xor_dest = sh->dev[pd_idx].page;
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (i != pd_idx)
- xor_srcs[count++] = dev->page;
+ xor_srcs[count++] = dev->dpage ?
+ dev->dpage : dev->page;
}
}
@@ -2437,7 +2522,8 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
if (sh->dev[i].written) {
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) &&
- test_bit(R5_UPTODATE, &dev->flags)) {
+ (test_bit(R5_UPTODATE, &dev->flags) ||
+ test_bit(R5_Skipped, &dev->flags))) {
/* We can return any write requests */
struct bio *wbi, *wbi2;
int bitmap_end = 0;
@@ -2445,6 +2531,17 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
spin_lock_irq(&conf->device_lock);
wbi = dev->written;
dev->written = NULL;
+
+ if (dev->dpage) {
+ /* with direct writes the raid disk
+ * cache actually isn't UPTODATE
+ */
+ clear_bit(R5_Skipped, &dev->flags);
+ clear_bit(R5_OVERWRITE, &dev->flags);
+ dev->vec.bv_page = dev->page;
+ dev->dpage = NULL;
+ }
+
while (wbi && wbi->bi_sector <
dev->sector + STRIPE_SECTORS) {
wbi2 = r5_next_bio(wbi, dev->sector);
@@ -2947,6 +3044,7 @@ static void handle_stripe5(struct stripe_head *sh)
struct r5dev *dev;
mdk_rdev_t *blocked_rdev = NULL;
int prexor;
+ int dec_preread_active = 0;
memset(&s, 0, sizeof(s));
pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
@@ -3096,12 +3194,8 @@ static void handle_stripe5(struct stripe_head *sh)
set_bit(STRIPE_INSYNC, &sh->state);
}
}
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ dec_preread_active = 1;
}
/* Now to consider new write requests and what else, if anything
@@ -3208,6 +3302,16 @@ static void handle_stripe5(struct stripe_head *sh)
ops_run_io(sh, &s);
+ if (dec_preread_active) {
+ /* We delay this until after ops_run_io so that if make_request
+ * is waiting on a barrier, it won't continue until the writes
+ * have actually been submitted.
+ */
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
return_io(return_bi);
}
@@ -3221,6 +3325,7 @@ static void handle_stripe6(struct stripe_head *sh)
struct r6_state r6s;
struct r5dev *dev, *pdev, *qdev;
mdk_rdev_t *blocked_rdev = NULL;
+ int dec_preread_active = 0;
pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
"pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
@@ -3358,7 +3463,6 @@ static void handle_stripe6(struct stripe_head *sh)
* completed
*/
if (sh->reconstruct_state == reconstruct_state_drain_result) {
- int qd_idx = sh->qd_idx;
sh->reconstruct_state = reconstruct_state_idle;
/* All the 'written' buffers and the parity blocks are ready to
@@ -3380,12 +3484,8 @@ static void handle_stripe6(struct stripe_head *sh)
set_bit(STRIPE_INSYNC, &sh->state);
}
}
- if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
- atomic_dec(&conf->preread_active_stripes);
- if (atomic_read(&conf->preread_active_stripes) <
- IO_THRESHOLD)
- md_wakeup_thread(conf->mddev->thread);
- }
+ if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ dec_preread_active = 1;
}
/* Now to consider new write requests and what else, if anything
@@ -3494,6 +3594,18 @@ static void handle_stripe6(struct stripe_head *sh)
ops_run_io(sh, &s);
+
+ if (dec_preread_active) {
+ /* We delay this until after ops_run_io so that if make_request
+ * is waiting on a barrier, it won't continue until the writes
+ * have actually been submitted.
+ */
+ atomic_dec(&conf->preread_active_stripes);
+ if (atomic_read(&conf->preread_active_stripes) <
+ IO_THRESHOLD)
+ md_wakeup_thread(conf->mddev->thread);
+ }
+
return_io(return_bi);
}
@@ -3741,7 +3853,7 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
{
mddev_t *mddev = q->queuedata;
raid5_conf_t *conf = mddev->private;
- unsigned int dd_idx;
+ int dd_idx;
struct bio* align_bi;
mdk_rdev_t *rdev;
@@ -3866,7 +3978,13 @@ static int make_request(struct request_queue *q, struct bio * bi)
int cpu, remaining;
if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) {
- bio_endio(bi, -EOPNOTSUPP);
+ /* Drain all pending writes. We only really need
+ * to ensure they have been submitted, but this is
+ * easier.
+ */
+ mddev->pers->quiesce(mddev, 1);
+ mddev->pers->quiesce(mddev, 0);
+ md_barrier_request(mddev, bi);
return 0;
}
@@ -3990,6 +4108,9 @@ static int make_request(struct request_queue *q, struct bio * bi)
finish_wait(&conf->wait_for_overlap, &w);
set_bit(STRIPE_HANDLE, &sh->state);
clear_bit(STRIPE_DELAYED, &sh->state);
+ if (mddev->barrier &&
+ !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
+ atomic_inc(&conf->preread_active_stripes);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -4009,6 +4130,14 @@ static int make_request(struct request_queue *q, struct bio * bi)
bio_endio(bi, 0);
}
+
+ if (mddev->barrier) {
+ /* We need to wait for the stripes to all be handled.
+ * So: wait for preread_active_stripes to drop to 0.
+ */
+ wait_event(mddev->thread->wqueue,
+ atomic_read(&conf->preread_active_stripes) == 0);
+ }
return 0;
}
@@ -5104,9 +5233,8 @@ static int stop(mddev_t *mddev)
mddev->thread = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
- sysfs_remove_group(&mddev->kobj, &raid5_attrs_group);
free_conf(conf);
- mddev->private = NULL;
+ mddev->private = &raid5_attrs_group;
return 0;
}
@@ -5863,6 +5991,7 @@ static void raid5_exit(void)
module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index dd708359b45..7ffc683d69d 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -233,6 +233,7 @@ struct stripe_head {
struct bio req;
struct bio_vec vec;
struct page *page;
+ struct page *dpage;
struct bio *toread, *read, *towrite, *written;
sector_t sector; /* sector of this page */
unsigned long flags;
@@ -252,7 +253,7 @@ struct stripe_head_state {
/* r6_state - extra state data only relevant to r6 */
struct r6_state {
- int p_failed, q_failed, failed_num[2];
+ int p_failed, q_failed, qd_idx, failed_num[2];
};
/* Flags */
@@ -275,6 +276,7 @@ struct r6_state {
* filling
*/
#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
+#define R5_Skipped 14 /* SKIP_BIO_COPY completed */
/*
* Write method
*/
@@ -314,6 +316,10 @@ struct r6_state {
#define STRIPE_OP_RECONSTRUCT 4
#define STRIPE_OP_CHECK 5
+#define STRIPE_OP_CHECK_PP 6
+#define STRIPE_OP_CHECK_QP 7
+
+
/*
* Plugging:
*
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index b6992b7b0d9..2d123c5ec1e 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1587,10 +1587,10 @@ mpt_mapresources(MPT_ADAPTER *ioc)
{
u8 __iomem *mem;
int ii;
- unsigned long mem_phys;
- unsigned long port;
- u32 msize;
- u32 psize;
+ resource_size_t mem_phys;
+ resource_size_t port;
+ resource_size_t msize;
+ resource_size_t psize;
u8 revision;
int r = -ENODEV;
struct pci_dev *pdev;
@@ -1677,8 +1677,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
return -EINVAL;
}
ioc->memmap = mem;
- dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %lx\n",
- ioc->name, mem, mem_phys));
+ dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "mem = %p, mem_phys = %llx\n",
+ ioc->name, mem, (u64)mem_phys));
ioc->mem_phys = mem_phys;
ioc->chip = (SYSIF_REGS __iomem *)mem;
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 8dd4d219e43..d313a4dcc08 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -605,8 +605,8 @@ typedef struct _MPT_ADAPTER
SYSIF_REGS __iomem *chip; /* == c8817000 (mmap) */
SYSIF_REGS __iomem *pio_chip; /* Programmed IO (downloadboot) */
u8 bus_type;
- u32 mem_phys; /* == f4020000 (mmap) */
- u32 pio_mem_phys; /* Programmed IO (downloadboot) */
+ resource_size_t mem_phys; /* == f4020000 (mmap) */
+ resource_size_t pio_mem_phys; /* Programmed IO (downloadboot) */
int mem_size; /* mmap memory size */
int number_of_buses;
int devices_per_bus;
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index c5a84fda541..c5a84fda541 100755..100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8aca5523a33..8aca5523a33 100755..100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 61e4eb48bb2..efb2d3699e4 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -228,7 +228,6 @@ static int __devinit of_flash_probe(struct of_device *dev,
" tree\n");
goto err_out;
}
-
dev_dbg(&dev->dev, "of_flash device: %.8llx-%.8llx\n",
(unsigned long long)res.start,
(unsigned long long)res.end);
@@ -347,10 +346,6 @@ err_flash_remove:
static struct of_device_id of_flash_match[] = {
{
- .compatible = "cfi-flash",
- .data = (void *)"cfi_probe",
- },
- {
/* FIXME: JEDEC chips can't be safely and reliably
* probed, although the mtd code gets it right in
* practice most of the time. We should use the
@@ -362,6 +357,10 @@ static struct of_device_id of_flash_match[] = {
.data = (void *)"jedec_probe",
},
{
+ .compatible = "cfi-flash",
+ .data = (void *)"cfi_probe",
+ },
+ {
.compatible = "mtd-ram",
.data = (void *)"map_ram",
},
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 2fda0b61524..aa75e66c494 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -11,6 +11,13 @@ menuconfig MTD_NAND
if MTD_NAND
+config MTD_NAND_NDFC
+ tristate "NDFC NAND Flash Controller"
+ depends on MTD_NAND && 4xx
+ select MTD_NAND_ECC_SMC
+ help
+ NDFC NAND Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
+
config MTD_NAND_VERIFY_WRITE
bool "Verify NAND page writes"
help
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index 40b5658bdbe..568608d638c 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -102,9 +102,16 @@ static int ndfc_calculate_ecc(struct mtd_info *mtd,
wmb();
ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
/* The NDFC uses Smart Media (SMC) bytes order */
+#if !defined(CONFIG_APM82181)
ecc_code[0] = p[1];
ecc_code[1] = p[2];
ecc_code[2] = p[3];
+#else
+ /* Change to match with byte order in u-boot */
+ ecc_code[0] = p[2];
+ ecc_code[1] = p[1];
+ ecc_code[2] = p[3];
+#endif
return 0;
}
diff --git a/drivers/net/ibm_newemac/Kconfig b/drivers/net/ibm_newemac/Kconfig
index 78a1628c989..a2490452a80 100644
--- a/drivers/net/ibm_newemac/Kconfig
+++ b/drivers/net/ibm_newemac/Kconfig
@@ -27,6 +27,13 @@ config IBM_NEW_EMAC_RX_COPY_THRESHOLD
depends on IBM_NEW_EMAC
default "256"
+config IBM_EMAC_MAL_QOS_V404
+ bool "VLAN QOS support"
+ depends on IBM_NEW_EMAC && 460SX
+ select VLAN_8021Q
+ help
+ When selected the VLAN QOS support will be enabled.
+
config IBM_NEW_EMAC_RX_SKB_HEADROOM
int "Additional RX skb headroom (bytes)"
depends on IBM_NEW_EMAC
@@ -39,6 +46,17 @@ config IBM_NEW_EMAC_RX_SKB_HEADROOM
If unsure, set to 0.
+config IBM_NEW_EMAC_MASK_CEXT
+ bool "Mask Carrier Extension signals"
+ depends on IBM_NEW_EMAC && APM82181
+ default n
+ help
+ During normal idle TX, continuously send dummy packets to mask
+ the Carrier Extension signals. This creates a separate BD
+ specifically for this purpose.
+
+ If unsure, set to N.
+
config IBM_NEW_EMAC_DEBUG
bool "Debugging"
depends on IBM_NEW_EMAC
@@ -63,6 +81,33 @@ config IBM_NEW_EMAC_EMAC4
bool
default n
+config IBM_NEW_EMAC_INTR_COALESCE
+ bool "Hardware Interrupt coalescing"
+ depends on IBM_NEW_EMAC && (460EX || 460GT || 405EX || 460SX || APM82181)
+ default y
+ help
+ When selected the Ethernet interrupt coalescing is selected.
+
+config IBM_NEW_EMAC_TX_COAL_COUNT
+ int "TX Coalescence frame count (packets)"
+ depends on IBM_NEW_EMAC_INTR_COALESCE
+ default "16"
+
+config IBM_NEW_EMAC_TX_COAL_TIMER
+ int "TX Coalescence timer (clock ticks)"
+ depends on IBM_NEW_EMAC_INTR_COALESCE
+ default "1000000"
+
+config IBM_NEW_EMAC_RX_COAL_COUNT
+ int "RX Coalescence frame count (packets)"
+ depends on IBM_NEW_EMAC_INTR_COALESCE
+ default "1"
+
+config IBM_NEW_EMAC_RX_COAL_TIMER
+ int "RX Coalescence timer (clock ticks)"
+ depends on IBM_NEW_EMAC_INTR_COALESCE
+ default "1000000"
+
config IBM_NEW_EMAC_NO_FLOW_CTRL
bool
default n
@@ -74,3 +119,11 @@ config IBM_NEW_EMAC_MAL_CLR_ICINTSTAT
config IBM_NEW_EMAC_MAL_COMMON_ERR
bool
default n
+
+config IBM_NEW_EMAC_SYSFS
+ bool "sysfs support for IBM NEW EMAC"
+ depends on IBM_NEW_EMAC
+ default y
+ help
+ When selected, IBM NEW EMAC parameters are exported
+ via /sys interface
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3fae8755979..fb9049bcbce 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -39,6 +39,7 @@
#include <linux/bitops.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/sysctl.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -46,8 +47,12 @@
#include <asm/uaccess.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
+#include <asm/time.h>
#include "core.h"
+#define SDR0_PERCLK 0x4201
+#define TX_FIFO_SYNC_USEC 20
+
/*
* Lack of dma_unmap_???? calls is intentional.
@@ -146,18 +151,35 @@ static inline void emac_rx_clk_tx(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+#if defined(CONFIG_460SX)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, 0x80000000 >> dev->cell_index);
+#elif defined(CONFIG_APM82181)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, 0x00000100 >> dev->cell_index);
+#else
dcri_clrset(SDR0, SDR0_MFR,
0, SDR0_MFR_ECS >> dev->cell_index);
#endif
+#endif
}
static inline void emac_rx_clk_default(struct emac_instance *dev)
{
#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_440EP_PHY_CLK_FIX))
+#if defined(CONFIG_460SX)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0x80000000 >> dev->cell_index, 0);
+#elif defined(CONFIG_APM82181)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0x00000100 >> dev->cell_index, 0);
+#else
+
dcri_clrset(SDR0, SDR0_MFR,
SDR0_MFR_ECS >> dev->cell_index, 0);
#endif
+#endif
}
/* PHY polling intervals */
@@ -196,6 +218,7 @@ static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
};
static irqreturn_t emac_irq(int irq, void *dev_instance);
+static irqreturn_t wol_irq(int irq, void *dev_instance);
static void emac_clean_tx_ring(struct emac_instance *dev);
static void __emac_set_multicast_list(struct emac_instance *dev);
@@ -247,6 +270,59 @@ static void emac_tx_disable(struct emac_instance *dev)
}
}
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+static void emac_spin_delay(unsigned long spin_usecs)
+{
+ u64 tick_start, tick_end;
+ u64 spin_ticks = spin_usecs*tb_ticks_per_usec;
+ //printk("spin_ticks = %lld\n", spin_ticks);
+
+ tick_start = get_tb();
+ while(1) {
+ tick_end = get_tb();
+ if((tick_end - tick_start) >= spin_ticks)
+ return;
+ }
+}
+
+/* some code duplication here to avoid function calls */
+static inline void emac_start_idlemode(struct emac_instance *dev)
+{
+ u32 perclk;
+ //printk("ibmnewemac: start_idle\n");
+ DBG(dev, "start_idlemode" NL);
+
+ //emac_spin_delay(TX_FIFO_SYNC_USEC); /* Wait for TX FIFO to Sync */
+
+ /* Disable Ethernet Clock */
+ perclk = mfdcri(SDR0, SDR0_PERCLK);
+ mtdcri(SDR0, SDR0_PERCLK, perclk | 0x88000000);
+ /* Write0 to set rising clock edge next time*/
+ perclk = mfdcri(SDR0, SDR0_PERCLK);
+ mtdcri(SDR0, SDR0_PERCLK, perclk & 0x7fffffff);
+
+ //perclk = mfdcri(SDR0, SDR0_PERCLK);
+ //printk("%s:%d - Ethernet TX Clock Disabled perclk=0x%08lx\n", __FUNCTION__, __LINE__, perclk);
+}
+
+static inline void emac_exit_idlemode(struct emac_instance *dev)
+{
+ u32 perclk;
+ DBG(dev, "exit_idlemode" NL);
+
+ /* Enable Ethernet Clock */
+ perclk = mfdcri(SDR0, SDR0_PERCLK);
+ mtdcri(SDR0, SDR0_PERCLK, (perclk & 0xF7ffffff) | 0x80000000);
+ perclk = mfdcri(SDR0, SDR0_PERCLK);
+ /* Write0 to set rising clock edge next time*/
+ mtdcri(SDR0, SDR0_PERCLK, perclk & 0x7fffffff);
+
+ //perclk = mfdcri(SDR0, SDR0_PERCLK);
+ //printk("%s:%d - Ethernet TX Clock Enabled perclk=0x%08lx\n", __FUNCTION__, __LINE__, perclk);
+
+}
+#endif
+
static void emac_rx_enable(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
@@ -348,12 +424,24 @@ static int emac_reset(struct emac_instance *dev)
DBG(dev, "reset" NL);
if (!dev->reset_failed) {
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ if (atomic_read(&dev->mask_cext_enable))
+ if (atomic_read(&dev->idle_mode)) {
+ emac_exit_idlemode(dev);
+ atomic_set(&dev->idle_mode, 0);
+ }
+#endif
/* 40x erratum suggests stopping RX channel before reset,
* we stop TX as well
*/
emac_rx_disable(dev);
emac_tx_disable(dev);
}
+#if defined(CONFIG_460SX)
+ dcri_clrset(SDR0, SDR0_ETH_CFG,
+ 0, 0x80000000 >> dev->cell_index);
+ out_be32(&p->mr1, in_be32(&p->mr1) | EMAC_MR1_ILE);
+#endif
#ifdef CONFIG_PPC_DCR_NATIVE
/* Enable internal clock source */
@@ -365,6 +453,11 @@ static int emac_reset(struct emac_instance *dev)
out_be32(&p->mr0, EMAC_MR0_SRST);
while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
--n;
+#if defined(CONFIG_460SX)
+ dcri_clrset(SDR0, 0x4103,
+ 0x80000000 >> dev->cell_index, 0);
+ out_be32(&p->mr1, in_be32(&p->mr1) & ~EMAC_MR1_ILE);
+#endif
#ifdef CONFIG_PPC_DCR_NATIVE
/* Enable external clock source */
@@ -383,6 +476,33 @@ static int emac_reset(struct emac_instance *dev)
}
}
+/* spham: backup code
+static void emac_hash_mc(struct emac_instance *dev)
+{
+ struct emac_regs __iomem *p = dev->emacp;
+ u16 gaht[8] = { 0 };
+ struct dev_mc_list *dmi;
+
+ DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
+
+ for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
+ int bit;
+ DBG2(dev, "mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
+ dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
+ dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);
+ bit = 255 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 24);
+ gaht[bit >> 5] |= 0x80000000 >> (bit & 0x1f);
+ }
+ out_be32(&p->gaht1, gaht[0]);
+ out_be32(&p->gaht2, gaht[1]);
+ out_be32(&p->gaht3, gaht[2]);
+ out_be32(&p->gaht4, gaht[3]);
+ out_be32(&p->gaht5, gaht[4]);
+ out_be32(&p->gaht6, gaht[5]);
+ out_be32(&p->gaht7, gaht[6]);
+ out_be32(&p->gaht8, gaht[7]);
+}
+*/
static void emac_hash_mc(struct emac_instance *dev)
{
const int regs = EMAC_XAHT_REGS(dev);
@@ -415,7 +535,7 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
struct emac_instance *dev = netdev_priv(ndev);
u32 r;
- r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE;
+ r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE | EMAC_RMR_RFP;
if (emac_has_feature(dev, EMAC_FTR_EMAC4))
r |= EMAC4_RMR_BASE;
@@ -430,6 +550,18 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
else if (ndev->mc_count > 0)
r |= EMAC_RMR_MAE;
+#if defined(CONFIG_APM82181)
+ /*
+ * When Jumbo Frame is not enabled, MJS field has no effect.
+ * So setting MJS when Jumbo Frame is disabled should not
+ * cause any issue.
+ */
+ DBG(dev, "emac_iff2rmr: Current MTU = %d" NL, ndev->mtu);
+ r &= ~EMAC4_RMR_MJS_MASK;
+ r |= EMAC4_RMR_MJS(ndev->mtu);
+ DBG(dev, "emac_iff2rmr: EMAC_RMR = 0x%08x" NL, r);
+#endif
+
return r;
}
@@ -465,7 +597,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_size)
{
- u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR |
+ u32 ret = EMAC_MR1_VLE | EMAC_MR1_IST | EMAC4_MR1_TR | EMAC_MR1_APP |
EMAC4_MR1_OBCI(dev->opb_bus_freq / 1000000);
DBG2(dev, "__emac4_calc_base_mr1" NL);
@@ -474,6 +606,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_TFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_TFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_TFS_4K;
break;
@@ -489,6 +624,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
case 16384:
ret |= EMAC4_MR1_RFS_16K;
break;
+ case 8192:
+ ret |= EMAC4_MR1_RFS_8K;
+ break;
case 4096:
ret |= EMAC4_MR1_RFS_4K;
break;
@@ -559,7 +697,11 @@ static int emac_configure(struct emac_instance *dev)
/* Check for full duplex */
else if (dev->phy.duplex == DUPLEX_FULL)
+#if !defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
mr1 |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
+#else
+ mr1 |= EMAC_MR1_FDE;
+#endif
/* Adjust fifo sizes, mr1 and timeouts based on link speed */
dev->stop_timeout = STOP_TIMEOUT_10;
@@ -626,7 +768,7 @@ static int emac_configure(struct emac_instance *dev)
ndev->dev_addr[5]);
/* VLAN Tag Protocol ID */
- out_be32(&p->vtpid, 0x8100);
+ out_be32(&p->vtpid, 0x07ff);
/* Receive mode register */
r = emac_iff2rmr(ndev);
@@ -984,32 +1126,103 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
dev->rx_desc[i].data_len = 0;
dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
(i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ dev->rx_desc[i].ctrl |= MAL_RX_CTRL_INTR;
+#endif
}
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ for ( v = 1; v < dev->rx_vchans; v++ ) {
+ struct emac_instance *vdev = dev->vdev[v];
+ if (vdev->rx_sg_skb) {
+ ++vdev->estats.rx_dropped_resize;
+ dev_kfree_skb(vdev->rx_sg_skb);
+ vdev->rx_sg_skb = NULL;
+ }
+
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ if (vdev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
+ ++vdev->estats.rx_dropped_resize;
+
+ vdev->rx_desc[i].data_len = 0;
+ vdev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
+ (i == (NUM_RX_BUFF - 1) ?
+ MAL_RX_CTRL_WRAP : 0);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ vdev->rx_desc[i].ctrl |= MAL_RX_CTRL_INTR;
+#endif
+ }
+ }
+ }
+#endif
+
/* Reallocate RX ring only if bigger skb buffers are required */
+ DBG(dev, "New rx_skb_size = %d" NL, rx_skb_size);
+ DBG(dev, "Current rx_skb_size = %d" NL, dev->rx_skb_size);
if (rx_skb_size <= dev->rx_skb_size)
goto skip;
-
+ DBG(dev, "Alocating new SKB buffers" NL);
/* Second pass, allocate new skbs */
for (i = 0; i < NUM_RX_BUFF; ++i) {
- struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
+ struct sk_buff *skb;
+
+ /* Try to free mem. before doing new mem. allocation */
+ BUG_ON(!dev->rx_skb[i]);
+ dev_kfree_skb(dev->rx_skb[i]);
+
+ skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
if (!skb) {
+ DBG(dev, "Cannot allocate new SKB entry %d" NL, i);
ret = -ENOMEM;
goto oom;
}
- BUG_ON(!dev->rx_skb[i]);
- dev_kfree_skb(dev->rx_skb[i]);
-
- skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM);
dev->rx_desc[i].data_ptr =
dma_map_single(&dev->ofdev->dev, skb->data - 2, rx_sync_size,
DMA_FROM_DEVICE) + 2;
dev->rx_skb[i] = skb;
}
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ for ( v = 1; v < dev->rx_vchans; v++ ) {
+ struct emac_instance *vdev = dev->vdev[v];
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ struct sk_buff *skb =
+ alloc_skb(rx_skb_size, GFP_ATOMIC);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto oom;
+ }
+
+ BUG_ON(!vdev->rx_skb[i]);
+ dev_kfree_skb(vdev->rx_skb[i]);
+
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ vdev->rx_desc[i].data_ptr =
+ dma_map_single(&dev->ofdev->dev, skb->data - 2,
+ rx_sync_size,DMA_FROM_DEVICE) + 2;
+ vdev->rx_skb[i] = skb;
+ }
+ }
+ }
+#endif
+
skip:
/* Check if we need to change "Jumbo" bit in MR1 */
- if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
+#if defined(CONFIG_APM82181)
+ /*
+ * Maui supports setting Max Jumbo Frame size
+ * so we need to update it here
+ */
+ if ((new_mtu > ETH_DATA_LEN) || (dev->ndev->mtu > ETH_DATA_LEN)) {
+#else
+ if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
+#endif
/* This is to prevent starting RX channel in emac_rx_enable() */
set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
@@ -1088,6 +1301,27 @@ static void emac_clean_rx_ring(struct emac_instance *dev)
dev_kfree_skb(dev->rx_sg_skb);
dev->rx_sg_skb = NULL;
}
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ for ( v = 1; v < dev->rx_vchans; v++ ) {
+ struct emac_instance *vdev = dev->vdev[v];
+ for (i = 0; i < NUM_RX_BUFF; ++i)
+ if (vdev->rx_skb[i]) {
+ vdev->rx_desc[i].ctrl = 0;
+ dev_kfree_skb(vdev->rx_skb[i]);
+ vdev->rx_skb[i] = NULL;
+ vdev->rx_desc[i].data_ptr = 0;
+ }
+
+ if (vdev->rx_sg_skb) {
+ dev_kfree_skb(vdev->rx_sg_skb);
+ vdev->rx_sg_skb = NULL;
+ }
+ }
+ }
+#endif
}
static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
@@ -1100,13 +1334,16 @@ static inline int emac_alloc_rx_skb(struct emac_instance *dev, int slot,
dev->rx_skb[slot] = skb;
dev->rx_desc[slot].data_len = 0;
- skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
+ skb_reserve(skb, EMAC_RX_SKB_HEADROOM);
dev->rx_desc[slot].data_ptr =
dma_map_single(&dev->ofdev->dev, skb->data - 2, dev->rx_sync_size,
DMA_FROM_DEVICE) + 2;
wmb();
dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
(slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ dev->rx_desc[slot].ctrl |= MAL_RX_CTRL_INTR;
+#endif
return 0;
}
@@ -1139,6 +1376,15 @@ static int emac_open(struct net_device *ndev)
return err;
}
+ if (dev->wol_irq != NO_IRQ) {
+ /* Setup WOL IRQ handler */
+ err = request_irq(dev->wol_irq, wol_irq, 0, "EMAC WOL", dev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to request IRQ %d\n",
+ ndev->name, dev->wol_irq);
+ return err;
+ }
+ }
/* Allocate RX ring */
for (i = 0; i < NUM_RX_BUFF; ++i)
if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
@@ -1147,6 +1393,25 @@ static int emac_open(struct net_device *ndev)
goto oom;
}
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ /* alloc skb_buff's for the QOS virtual channels */
+ for ( v = 1; v < dev->rx_vchans; v++){
+ for (i = 0; i < NUM_RX_BUFF; ++i) {
+ if (emac_alloc_rx_skb(dev->vdev[v],
+ i,GFP_KERNEL)){
+ printk(KERN_ERR "%s: failed to allocate"
+ " RX virtual ring\n",
+ ndev->name);
+ goto oom;
+ }
+ }
+ dev->vdev[v]->rx_sg_skb = NULL;
+ dev->vdev[v]->rx_slot = 0;
+ }
+ }
+#endif
dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot = 0;
clear_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
dev->rx_sg_skb = NULL;
@@ -1193,6 +1458,9 @@ static int emac_open(struct net_device *ndev)
oom:
emac_clean_rx_ring(dev);
free_irq(dev->emac_irq, dev);
+ if (dev->wol_irq != NO_IRQ) {
+ free_irq(dev->wol_irq, dev);
+ }
return -ENOMEM;
}
@@ -1310,6 +1578,8 @@ static int emac_close(struct net_device *ndev)
free_irq(dev->emac_irq, dev);
netif_carrier_off(ndev);
+ if (dev->wol_irq != NO_IRQ)
+ free_irq(dev->wol_irq, dev);
return 0;
}
@@ -1320,7 +1590,10 @@ static inline u16 emac_tx_csum(struct emac_instance *dev,
if (emac_has_feature(dev, EMAC_FTR_HAS_TAH) &&
(skb->ip_summed == CHECKSUM_PARTIAL)) {
++dev->stats.tx_packets_csum;
- return EMAC_TX_CTRL_TAH_CSUM;
+ if (skb_is_gso(skb))
+ return EMAC_TX_CTRL_TAH_SSR0;
+ else
+ return EMAC_TX_CTRL_TAH_CSUM;
}
return 0;
}
@@ -1360,6 +1633,16 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ if (atomic_read(&dev->mask_cext_enable))
+ if (atomic_read(&dev->idle_mode)) {
+ emac_exit_idlemode(dev);
+ atomic_set(&dev->idle_mode, 0);
+ }
+#endif
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ ctrl |= MAL_TX_CTRL_INTR;
+#endif
slot = dev->tx_slot++;
if (dev->tx_slot == NUM_TX_BUFF) {
@@ -1371,7 +1654,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dev->tx_skb[slot] = skb;
dev->tx_desc[slot].data_ptr = dma_map_single(&dev->ofdev->dev,
- skb->data, len,
+ skb->data, EMAC_DMA_ALIGN(len),
DMA_TO_DEVICE);
dev->tx_desc[slot].data_len = (u16) len;
wmb();
@@ -1394,6 +1677,9 @@ static inline int emac_xmit_split(struct emac_instance *dev, int slot,
ctrl |= MAL_TX_CTRL_LAST;
if (slot == NUM_TX_BUFF - 1)
ctrl |= MAL_TX_CTRL_WRAP;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ ctrl |= MAL_TX_CTRL_INTR;
+#endif
dev->tx_skb[slot] = NULL;
dev->tx_desc[slot].data_ptr = pd;
@@ -1423,6 +1709,14 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
return emac_start_xmit(skb, ndev);
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ if (atomic_read(&dev->mask_cext_enable))
+ if (atomic_read(&dev->idle_mode)) {
+ emac_exit_idlemode(dev);
+ atomic_set(&dev->idle_mode, 0);
+ }
+#endif
+
len -= skb->data_len;
/* Note, this is only an *estimation*, we can still run out of empty
@@ -1434,13 +1728,16 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
emac_tx_csum(dev, skb);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ ctrl |= MAL_TX_CTRL_INTR;
+#endif
slot = dev->tx_slot;
/* skb data */
dev->tx_skb[slot] = NULL;
chunk = min(len, MAL_MAX_TX_SIZE);
dev->tx_desc[slot].data_ptr = pd =
- dma_map_single(&dev->ofdev->dev, skb->data, len, DMA_TO_DEVICE);
+ dma_map_single(&dev->ofdev->dev, skb->data, EMAC_DMA_ALIGN(len), DMA_TO_DEVICE);
dev->tx_desc[slot].data_len = (u16) chunk;
len -= chunk;
if (unlikely(len))
@@ -1481,6 +1778,7 @@ static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
*/
while (slot != dev->tx_slot) {
dev->tx_desc[slot].ctrl = 0;
+ dev->tx_desc[slot].ctrl |= (slot == (NUM_TX_BUFF - 1) ? MAL_TX_CTRL_WRAP : 0);
--dev->tx_cnt;
if (--slot < 0)
slot = NUM_TX_BUFF - 1;
@@ -1554,16 +1852,43 @@ static void emac_poll_tx(void *param)
if (--dev->tx_cnt)
goto again;
- }
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ else {
+ DBG(dev, "Testing for idle... " NL);
+ if (atomic_read(&dev->mask_cext_enable)) {
+ if (!atomic_read(&dev->idle_mode)) {
+ DBG(dev, "Entering idle mode" NL);
+ emac_start_idlemode(dev);
+ atomic_set(&dev->idle_mode, 1);
+ } else
+ DBG(dev, "Already In Idle Mode" NL);
+
+ }
+ }
+#endif
+ }
+
if (n) {
dev->ack_slot = slot;
if (netif_queue_stopped(dev->ndev) &&
dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
netif_wake_queue(dev->ndev);
-
DBG2(dev, "tx %d pkts" NL, n);
}
- }
+ }
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ else {
+ DBG(dev, "Testing for idle... " NL);
+ if (atomic_read(&dev->mask_cext_enable)) {
+ if (!atomic_read(&dev->idle_mode)) {
+ DBG(dev, "Entering idle mode" NL);
+ emac_start_idlemode(dev);
+ atomic_set(&dev->idle_mode, 1);
+ } else
+ DBG(dev, "Already In Idle Mode" NL);
+ }
+ }
+#endif
netif_tx_unlock_bh(dev->ndev);
}
@@ -1575,13 +1900,17 @@ static inline void emac_recycle_rx_skb(struct emac_instance *dev, int slot,
DBG2(dev, "recycle %d %d" NL, slot, len);
if (len)
- dma_map_single(&dev->ofdev->dev, skb->data - 2,
- EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);
+ dev->rx_desc[slot].data_ptr =
+ dma_map_single(&dev->ofdev->dev, skb->data - 2,
+ EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE) + 2;
dev->rx_desc[slot].data_len = 0;
wmb();
dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
(slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ dev->rx_desc[slot].ctrl |= MAL_RX_CTRL_INTR;
+#endif
}
static void emac_parse_rx_error(struct emac_instance *dev, u16 ctrl)
@@ -1685,11 +2014,11 @@ static int emac_poll_rx(void *param, int budget)
if (len && len < EMAC_RX_COPY_THRESH) {
struct sk_buff *copy_skb =
- alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
+ alloc_skb(len + EMAC_RX_SKB_HEADROOM, GFP_ATOMIC);
if (unlikely(!copy_skb))
goto oom;
- skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
+ skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM);
cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
len + 2);
emac_recycle_rx_skb(dev, slot, len);
@@ -1865,6 +2194,11 @@ static irqreturn_t emac_irq(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+static irqreturn_t wol_irq(int irq, void *dev_instance)
+{
+ return IRQ_HANDLED;
+}
+
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
struct emac_instance *dev = netdev_priv(ndev);
@@ -2092,11 +2426,11 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
hdr->index = dev->cell_index;
if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
hdr->version = EMAC4_ETHTOOL_REGS_VER;
- memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
+ memcpy(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
return ((void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev));
} else {
hdr->version = EMAC_ETHTOOL_REGS_VER;
- memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
+ memcpy(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev));
}
}
@@ -2185,6 +2519,52 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
info->n_stats = emac_ethtool_get_stats_count(ndev);
info->regdump_len = emac_ethtool_get_regs_len(ndev);
}
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+/*
+ * ethtool .get_coalesce: report the channel-0 interrupt coalescing
+ * parameters.  Timer values are stored internally in PLB clock ticks;
+ * plb_bus_freq is in MHz, so the division converts them back to
+ * microseconds for ethtool.
+ */
+static int emac_ethtool_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct emac_instance *ei = netdev_priv(dev);
+ /* Zero every field; only the four parameters below are supported */
+ memset(ec, 0, sizeof(*ec));
+
+ /* Update with current status */
+ ec->rx_coalesce_usecs = (ei->mal->coales_param[0].rx_time / ei->plb_bus_freq);
+ ec->rx_max_coalesced_frames = ei->mal->coales_param[0].rx_count;
+
+ ec->tx_coalesce_usecs = (ei->mal->coales_param[0].tx_time / ei->plb_bus_freq);
+ ec->tx_max_coalesced_frames = ei->mal->coales_param[0].tx_count;
+ return 0;
+}
+
+/*
+ * ethtool .set_coalesce: apply the requested frame-count thresholds
+ * (masked to COAL_FRAME_MASK) and timer values (converted from
+ * microseconds to PLB ticks via plb_bus_freq in MHz) to all four MAL
+ * channels, then reprogram the hardware via mal_enable_coal().
+ */
+static int emac_ethtool_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct emac_instance *ei = netdev_priv(dev);
+
+ ei->mal->coales_param[0].tx_count = (ec->tx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[1].tx_count = (ec->tx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[2].tx_count = (ec->tx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[3].tx_count = (ec->tx_max_coalesced_frames & COAL_FRAME_MASK);
+
+ ei->mal->coales_param[0].rx_count = (ec->rx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[1].rx_count = (ec->rx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[2].rx_count = (ec->rx_max_coalesced_frames & COAL_FRAME_MASK);
+ ei->mal->coales_param[3].rx_count = (ec->rx_max_coalesced_frames & COAL_FRAME_MASK);
+
+ ei->mal->coales_param[0].tx_time = (ec->tx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[1].tx_time = (ec->tx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[2].tx_time = (ec->tx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[3].tx_time = (ec->tx_coalesce_usecs * ei->plb_bus_freq);
+
+ ei->mal->coales_param[0].rx_time = (ec->rx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[1].rx_time = (ec->rx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[2].rx_time = (ec->rx_coalesce_usecs * ei->plb_bus_freq);
+ ei->mal->coales_param[3].rx_time = (ec->rx_coalesce_usecs * ei->plb_bus_freq);
+
+ mal_enable_coal(ei->mal);
+ return 0;
+}
+#endif
static const struct ethtool_ops emac_ethtool_ops = {
.get_settings = emac_ethtool_get_settings,
@@ -2208,8 +2588,256 @@ static const struct ethtool_ops emac_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_tx_csum = ethtool_op_get_tx_csum,
.get_sg = ethtool_op_get_sg,
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ .get_coalesce = emac_ethtool_get_coalesce,
+ .set_coalesce = emac_ethtool_set_coalesce,
+#endif
+
};
+/* sysfs support for IBM NEW EMAC */
+#if defined(CONFIG_IBM_NEW_EMAC_SYSFS)
+
+#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
+
+/* sysfs show handlers: display the current interrupt coalescing
+ * parameter values (channel 0 is reported).
+ */
+static ssize_t show_tx_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", dev_ins->mal->coales_param[0].tx_count);
+}
+/* Show the Rx coalescing frame-count threshold (channel 0). */
+static ssize_t show_rx_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", dev_ins->mal->coales_param[0].rx_count);
+}
+/* Show the Tx coalescing timer (channel 0, in PLB clock ticks). */
+static ssize_t show_tx_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", dev_ins->mal->coales_param[0].tx_time);
+}
+/* Show the Rx coalescing timer (channel 0, in PLB clock ticks). */
+static ssize_t show_rx_time(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", dev_ins->mal->coales_param[0].rx_time);
+}
+
+/*
+ * Quiesce and restart the EMAC under link_lock: stop the netif and RX
+ * path, drop any partially assembled scatter/gather skb, perform a
+ * full TX reset, then bring RX and the netif back up.  Always
+ * returns 0.
+ */
+static int core_reset(struct emac_instance * dev_ins)
+{
+ mutex_lock(&dev_ins->link_lock);
+ emac_netif_stop(dev_ins);
+ emac_rx_disable(dev_ins);
+ mal_disable_rx_channel(dev_ins->mal, dev_ins->mal_rx_chan);
+
+ if (dev_ins->rx_sg_skb) {
+ ++dev_ins->estats.rx_dropped_resize;
+ dev_kfree_skb(dev_ins->rx_sg_skb);
+ dev_ins->rx_sg_skb = NULL;
+ }
+
+ /* This is to prevent starting RX channel in emac_rx_enable() */
+ set_bit(MAL_COMMAC_RX_STOPPED, &dev_ins->commac.flags);
+
+ emac_full_tx_reset(dev_ins);
+
+ /* Restart RX */
+ clear_bit(MAL_COMMAC_RX_STOPPED, &dev_ins->commac.flags);
+ dev_ins->rx_slot = 0;
+ mal_enable_rx_channel(dev_ins->mal, dev_ins->mal_rx_chan);
+ emac_rx_enable(dev_ins);
+ emac_netif_start(dev_ins);
+ mutex_unlock(&dev_ins->link_lock);
+
+ return 0;
+}
+
+/* sysfs store handlers: set one interrupt coalescing parameter
+ * (channel 0 only; note the ethtool set_coalesce path updates all
+ * four channels), reprogram the MAL registers, and reset a running
+ * interface.  Input is parsed with simple_strtol() and is not
+ * range-checked.
+ */
+static ssize_t store_tx_count(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ dev_ins->mal->coales_param[0].tx_count = tmp;
+
+ mutex_lock(&dev_ins->link_lock);
+ /* Reconfigure MAL interrupt coalesce parameters */
+ mal_enable_coal(dev_ins->mal);
+ mutex_unlock(&dev_ins->link_lock);
+
+ /*
+ * FIXME: without resetting the interface here it seems to hang
+ * after a short period of time
+ */
+ if (netif_running(dev_ins->ndev)) {
+ core_reset(dev_ins);
+ }
+
+ return count;
+}
+/* Set the Rx coalescing frame-count threshold (channel 0). */
+static ssize_t store_rx_count(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ dev_ins->mal->coales_param[0].rx_count = tmp;
+
+ /* Reconfigure MAL interrupt coalesce parameters */
+ mutex_lock(&dev_ins->link_lock);
+ mal_enable_coal(dev_ins->mal);
+ mutex_unlock(&dev_ins->link_lock);
+
+ /*
+ * FIXME: without resetting the interface here it seems to hang
+ * after a short period of time
+ */
+ if (netif_running(dev_ins->ndev)) {
+ core_reset(dev_ins);
+ }
+
+ return count;
+}
+/* Set the Tx coalescing timer (channel 0, raw PLB clock ticks). */
+static ssize_t store_tx_time(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ dev_ins->mal->coales_param[0].tx_time = tmp;
+
+ /* Reconfigure MAL interrupt coalesce parameters */
+ mutex_lock(&dev_ins->link_lock);
+ mal_enable_coal(dev_ins->mal);
+ mutex_unlock(&dev_ins->link_lock);
+
+ /*
+ * FIXME: without resetting the interface here it seems to hang
+ * after a short period of time
+ */
+ if (netif_running(dev_ins->ndev)) {
+ core_reset(dev_ins);
+ }
+
+ return count;
+}
+/* Set the Rx coalescing timer (channel 0, raw PLB clock ticks). */
+static ssize_t store_rx_time(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ dev_ins->mal->coales_param[0].rx_time = tmp;
+
+ /* Reconfigure MAL interrupt coalesce parameters */
+ mutex_lock(&dev_ins->link_lock);
+ mal_enable_coal(dev_ins->mal);
+ mutex_unlock(&dev_ins->link_lock);
+
+ /*
+ * FIXME: without resetting the interface here it seems to hang
+ * after a short period of time
+ */
+ if (netif_running(dev_ins->ndev)) {
+ core_reset(dev_ins);
+ }
+
+ return count;
+}
+
+#endif
+
+#if defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
+
+/* Report whether the EMAC EMI fix (mask CEXT workaround) is
+ * currently enabled (prints 0 or 1).
+ */
+static ssize_t show_emi_fix_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ return sprintf(buf, "%d\n", atomic_read(&dev_ins->mask_cext_enable));
+}
+
+/* Enable/disable the EMAC EMI fix: any non-zero input enables it.
+ * If the EMAC was left in idle mode, take it out of idle before
+ * returning.
+ */
+static ssize_t store_emi_fix_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct net_device *ndev = to_net_dev(dev);
+ struct emac_instance *dev_ins = netdev_priv(ndev);
+
+ long tmp = simple_strtol(buf, NULL, 10);
+ tmp = (tmp) ? 1 : 0;
+
+ printk(KERN_INFO "%s EMAC EMI Fix\n", (tmp) ? "Enable" : "Disable");
+ atomic_set(&dev_ins->mask_cext_enable, tmp);
+
+ /* Exit idle mode before return */
+ if (atomic_read(&dev_ins->idle_mode)) {
+ emac_exit_idlemode(dev_ins);
+ atomic_set(&dev_ins->idle_mode, 0);
+ }
+
+ return count;
+}
+
+#endif
+
+/* sysfs attributes exposed under the netdev's device directory;
+ * registered as a group in emac_probe() via sysfs_create_group().
+ */
+#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
+static DEVICE_ATTR(coalesce_param_tx_count,
+ S_IRUGO | S_IWUSR, show_tx_count, store_tx_count);
+static DEVICE_ATTR(coalesce_param_rx_count,
+ S_IRUGO | S_IWUSR, show_rx_count, store_rx_count);
+static DEVICE_ATTR(coalesce_param_tx_time,
+ S_IRUGO | S_IWUSR, show_tx_time, store_tx_time);
+static DEVICE_ATTR(coalesce_param_rx_time,
+ S_IRUGO | S_IWUSR, show_rx_time, store_rx_time);
+#endif
+
+#if defined(CONFIG_APM82181)
+ #if defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
+static DEVICE_ATTR(emi_fix_enable, S_IRUGO | S_IWUSR,
+ show_emi_fix_enable, store_emi_fix_enable);
+ #endif
+#endif
+
+/* Attribute table: membership depends on the enabled config options. */
+static struct attribute *ibm_newemac_attr[] = {
+#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
+ &dev_attr_coalesce_param_tx_count.attr,
+ &dev_attr_coalesce_param_rx_count.attr,
+ &dev_attr_coalesce_param_tx_time.attr,
+ &dev_attr_coalesce_param_rx_time.attr,
+#endif
+
+#if defined(CONFIG_APM82181)
+ #if defined(CONFIG_IBM_NEW_EMAC_MASK_CEXT)
+ &dev_attr_emi_fix_enable.attr,
+ #endif
+#endif
+ NULL
+};
+
+static const struct attribute_group ibm_newemac_attr_group = {
+ .attrs = ibm_newemac_attr,
+};
+
+#endif
+
+
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct emac_instance *dev = netdev_priv(ndev);
@@ -2553,6 +3181,12 @@ static int __devinit emac_init_config(struct emac_instance *dev)
dev->gpcs_address = 0xffffffff;
if (emac_read_uint_prop(np->parent, "clock-frequency", &dev->opb_bus_freq, 1))
return -ENXIO;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ if (emac_read_uint_prop(np->parent->parent, "clock-frequency", &dev->plb_bus_freq, 1))
+ return -ENXIO;
+ /* save as MHz */
+ dev->plb_bus_freq /= 1000000;
+#endif
if (emac_read_uint_prop(np, "tah-device", &dev->tah_ph, 0))
dev->tah_ph = 0;
if (emac_read_uint_prop(np, "tah-channel", &dev->tah_port, 0))
@@ -2750,6 +3384,29 @@ static int __devinit emac_probe(struct of_device *ofdev,
dev->blist = blist;
SET_NETDEV_DEV(ndev, &ofdev->dev);
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ dev->vdev_index = 0;
+ dev->vdev[0] = NULL;
+
+ dev->mal_rx_chan = MAX_VCHANS;
+ dev->rx_vchans = dev->mal_rx_chan;
+ for (i = 1; i < dev->rx_vchans; i++) {
+ dev->vdev[i] = (struct emac_instance*)
+ alloc_etherdev(sizeof(struct emac_instance));
+ if (!dev->vdev[i]) {
+ printk(KERN_ERR "emac%s: could not allocate vchannel\n",
+ np->full_name);
+ return -ENOMEM;
+ }
+
+ dev->vdev[i]->vdev_index = i;
+ dev->vdev[i]->rx_vchans = 0; /* we are the virtual channel */
+ dev->vdev[i]->ndev = dev->ndev;
+ dev->vdev[i]->ofdev = dev->ofdev;
+ dev->vdev[i]->mal = dev->mal;
+ }
+#endif
+
/* Initialize some embedded data structures */
mutex_init(&dev->mdio_lock);
mutex_init(&dev->link_lock);
@@ -2813,6 +3470,13 @@ static int __devinit emac_probe(struct of_device *ofdev,
dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ for (i = 1; i < dev->rx_vchans; i++) {
+ dev->vdev[i]->rx_skb_size = emac_rx_skb_size(ndev->mtu);
+ dev->vdev[i]->rx_sync_size = emac_rx_sync_size(ndev->mtu);
+ }
+#endif
+
/* Get pointers to BD rings */
dev->tx_desc =
dev->mal->bd_virt + mal_tx_bd_offset(dev->mal, dev->mal_tx_chan);
@@ -2827,7 +3491,28 @@ static int __devinit emac_probe(struct of_device *ofdev,
memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
memset(dev->tx_skb, 0, NUM_TX_BUFF * sizeof(struct sk_buff *));
memset(dev->rx_skb, 0, NUM_RX_BUFF * sizeof(struct sk_buff *));
-
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ /* By default: DISABLE EMI fix */
+ atomic_set(&dev->mask_cext_enable, 0);
+#endif
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ /*
+ * On the 440GT and 440EX, the MAL RX active channel 0 (emac0) and
+ * active channel 8 (emac1) have 8 virtual RX channels each for QOS.
+ */
+ for (i = 1; i < dev->rx_vchans; i++) {
+ /* Get pointers to BD RX rings */
+ dev->vdev[i]->rx_desc =
+ dev->mal->bd_virt+mal_rx_bd_offset(dev->mal,
+ (i+dev->mal_rx_chan));
+
+ /* Clean rings */
+ memset(dev->vdev[i]->rx_desc, 0,
+ NUM_RX_BUFF * sizeof(struct mal_descriptor));
+ }
+#endif
+
/* Attach to ZMII, if needed */
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII) &&
(err = zmii_attach(dev->zmii_dev, dev->zmii_port, &dev->phy_mode)) != 0)
@@ -2857,7 +3542,7 @@ static int __devinit emac_probe(struct of_device *ofdev,
goto err_detach_tah;
if (dev->tah_dev)
- ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
ndev->watchdog_timeo = 5 * HZ;
if (emac_phy_supports_gige(dev->phy_mode)) {
ndev->netdev_ops = &emac_gige_netdev_ops;
@@ -2885,7 +3570,6 @@ static int __devinit emac_probe(struct of_device *ofdev,
/* There's a new kid in town ! Let's tell everybody */
wake_up_all(&emac_probe_wait);
-
printk(KERN_INFO "%s: EMAC-%d %s, MAC %pM\n",
ndev->name, dev->cell_index, np->full_name, ndev->dev_addr);
@@ -2895,9 +3579,20 @@ static int __devinit emac_probe(struct of_device *ofdev,
if (dev->phy.address >= 0)
printk("%s: found %s PHY (0x%02x)\n", ndev->name,
dev->phy.def->name, dev->phy.address);
+
emac_dbg_register(dev);
+#if defined(CONFIG_IBM_NEW_EMAC_SYSFS)
+ /* Register sys fs hooks */
+ err = sysfs_create_group(&dev->ndev->dev.kobj,
+ &ibm_newemac_attr_group);
+ if (err) {
+ printk("WARN: %s: failed to create sys interfaces for EMAC-%d %s\n",
+ ndev->name, dev->cell_index, np->full_name);
+ goto err_sysfs;
+ }
+#endif
/* Life is good */
return 0;
@@ -2934,6 +3629,9 @@ static int __devinit emac_probe(struct of_device *ofdev,
*blist = NULL;
wake_up_all(&emac_probe_wait);
}
+#if defined(CONFIG_IBM_NEW_EMAC_SYSFS)
+ err_sysfs:
+#endif
return err;
}
@@ -2945,6 +3643,9 @@ static int __devexit emac_remove(struct of_device *ofdev)
dev_set_drvdata(&ofdev->dev, NULL);
+#if defined(CONFIG_IBM_NEW_EMAC_SYSFS)
+ sysfs_remove_group(&dev->ndev->dev.kobj, &ibm_newemac_attr_group);
+#endif
unregister_netdev(dev->ndev);
flush_scheduled_work();
@@ -2967,6 +3668,14 @@ static int __devexit emac_remove(struct of_device *ofdev)
if (dev->emac_irq != NO_IRQ)
irq_dispose_mapping(dev->emac_irq);
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ if (dev->rx_vchans) {
+ int v;
+ for (v = 1; v < dev->rx_vchans; v++) {
+ kfree(dev->vdev[v]);
+ }
+ }
+#endif
kfree(dev->ndev);
return 0;
diff --git a/drivers/net/ibm_newemac/core.h b/drivers/net/ibm_newemac/core.h
index 18d56c6c423..274514bdfa6 100644
--- a/drivers/net/ibm_newemac/core.h
+++ b/drivers/net/ibm_newemac/core.h
@@ -71,7 +71,7 @@ static inline int emac_rx_size(int mtu)
#define EMAC_DMA_ALIGN(x) ALIGN((x), dma_get_cache_alignment())
#define EMAC_RX_SKB_HEADROOM \
- EMAC_DMA_ALIGN(CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM)
+ EMAC_DMA_ALIGN(CONFIG_IBM_NEW_EMAC_RX_SKB_HEADROOM + 2)
/* Size of RX skb for the given MTU */
static inline int emac_rx_skb_size(int mtu)
@@ -161,6 +161,11 @@ struct emac_error_stats {
u64 tx_errors;
};
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+#define MAX_VCHANS 8 /* MAX virtual channels */
+#endif
+
#define EMAC_ETHTOOL_STATS_COUNT ((sizeof(struct emac_stats) + \
sizeof(struct emac_error_stats)) \
/ sizeof(u64))
@@ -220,6 +225,11 @@ struct emac_instance {
/* OPB bus frequency in Mhz */
u32 opb_bus_freq;
+
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ /* PLB bus frequency in Mhz */
+ u32 plb_bus_freq;
+#endif
/* Cell index within an ASIC (for clk mgmnt) */
u32 cell_index;
@@ -266,6 +276,10 @@ struct emac_instance {
/* Misc
*/
+#ifdef CONFIG_IBM_NEW_EMAC_MASK_CEXT
+ atomic_t idle_mode;
+ atomic_t mask_cext_enable;
+#endif
int reset_failed;
int stop_timeout; /* in us */
int no_mcast;
@@ -273,6 +287,12 @@ struct emac_instance {
int opened;
struct work_struct reset_work;
spinlock_t lock;
+
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ int rx_vchans; /* N rx virtual channels */
+ int vdev_index;
+ struct emac_instance *vdev[MAX_VCHANS]; /* virtual channels */
+#endif
};
/*
diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h
index 8a61b597a16..bc54a228556 100644
--- a/drivers/net/ibm_newemac/emac.h
+++ b/drivers/net/ibm_newemac/emac.h
@@ -144,16 +144,18 @@ struct emac_regs {
#define EMAC_MR1_TFS_2K 0x00080000
#define EMAC_MR1_TR0_MULT 0x00008000
#define EMAC_MR1_JPSM 0x00000000
-#define EMAC_MR1_MWSW_001 0x00000000
+#define EMAC_MR1_MWSW_001 0x00001000
#define EMAC_MR1_BASE(opb) (EMAC_MR1_TFS_2K | EMAC_MR1_TR0_MULT)
#define EMAC4_MR1_RFS_2K 0x00100000
#define EMAC4_MR1_RFS_4K 0x00180000
+#define EMAC4_MR1_RFS_8K 0x00200000
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
-#define EMAC4_MR1_TFS_4K 0x00030000
-#define EMAC4_MR1_TFS_16K 0x00050000
+#define EMAC4_MR1_TFS_4K 0x00030000
+#define EMAC4_MR1_TFS_8K 0x00040000
+#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
#define EMAC4_MR1_JPSM 0x00000800
@@ -211,6 +213,10 @@ struct emac_regs {
#define EMAC4_RMR_RFAF_64_1024 0x00000006
#define EMAC4_RMR_RFAF_128_2048 0x00000007
#define EMAC4_RMR_BASE EMAC4_RMR_RFAF_128_2048
+#if defined(CONFIG_APM82181)
+#define EMAC4_RMR_MJS_MASK 0x0001fff8
+#define EMAC4_RMR_MJS(s) ((s << 3) & EMAC4_RMR_MJS_MASK)
+#endif
/* EMACx_ISR & EMACx_ISER */
#define EMAC4_ISR_TXPE 0x20000000
@@ -264,7 +270,7 @@ struct emac_regs {
/* EMACx_TRTR */
#define EMAC_TRTR_SHIFT_EMAC4 24
-#define EMAC_TRTR_SHIFT 27
+#define EMAC_TRTR_SHIFT 27
/* EMAC specific TX descriptor control fields (write access) */
#define EMAC_TX_CTRL_GFCS 0x0200
@@ -308,4 +314,11 @@ struct emac_regs {
EMAC_RX_ST_AE | EMAC_RX_ST_BFCS | \
EMAC_RX_ST_PTL | EMAC_RX_ST_ORE | \
EMAC_RX_ST_IRE )
+#define EMAC_TX_CTRL_TAH_SSR0 0x0002
+#define EMAC_TX_CTRL_TAH_SSR1 0x0004
+#define EMAC_TX_CTRL_TAH_SSR2 0x0006
+#define EMAC_TX_CTRL_TAH_SSR3 0x0008
+#define EMAC_TX_CTRL_TAH_SSR4 0x000a
+#define EMAC_TX_CTRL_TAH_SSR5 0x000c
+#define EMAC_TX_CTRL_TAH_CSUM 0x000e
#endif /* __IBM_NEWEMAC_H */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 2a2fc17b287..1f79a34bedf 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -29,8 +29,23 @@
#include "core.h"
#include <asm/dcr-regs.h>
+#include <asm/ppc4xx_ocm.h>
static int mal_count;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+static char *tx_coal_irqname[] = {
+ "TX0 COAL",
+ "TX1 COAL",
+ "TX2 COAL",
+ "TX3 COAL",
+};
+static char *rx_coal_irqname[] = {
+ "RX0 COAL",
+ "RX1 COAL",
+ "RX2 COAL",
+ "RX3 COAL",
+};
+#endif
int __devinit mal_register_commac(struct mal_instance *mal,
struct mal_commac *commac)
@@ -217,9 +232,176 @@ static inline void mal_disable_eob_irq(struct mal_instance *mal)
MAL_DBG2(mal, "disable_irq" NL);
}
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+
+#if defined(CONFIG_460SX)
+/* Program the Tx frame-count threshold into all four SDR coalescing
+ * control registers.  Each register is written twice: first with
+ * bit 22 set (presumably a counter set/flush strobe -- verify against
+ * the 460SX SDR register spec), then with the threshold alone.  The
+ * last value written with the strobe is cached in enet_coales_iccrtx.
+ */
+static inline void set_ic_txfthr(struct mal_instance *mal)
+{
+ int reg;
+ int val = mal->coales_param[0].tx_count;
+
+ reg = (val<<23) | (1<<22) ;
+
+ SDR_WRITE(DCRN_SDR0_ICCRTX0, reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRTX0,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRTX1, reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRTX1,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRTX2, reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRTX2,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRTX3, reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRTX3,(val<<23)); /* enable counter */
+
+
+ mal->enet_coales_iccrtx = reg;
+}
+/* Program the Rx frame-count threshold into all four SDR coalescing
+ * control registers (same double-write pattern as set_ic_txfthr()).
+ */
+static inline void set_ic_rxfthr(struct mal_instance *mal)
+{
+ int reg;
+ int val = mal->coales_param[0].rx_count;
+
+ reg = (val<<23) | (1<<22) ;
+
+ SDR_WRITE(DCRN_SDR0_ICCRRX0,reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRRX0,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRRX1,reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRRX1,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRRX2,reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRRX2,(val<<23)); /* enable counter */
+
+ SDR_WRITE(DCRN_SDR0_ICCRRX3,reg); /* set counter */
+ SDR_WRITE(DCRN_SDR0_ICCRRX3,(val<<23)); /* enable counter */
+
+ mal->enet_coales_iccrrx = reg;
+}
+#endif
+
+/*
+ * Program the SoC-specific SDR interrupt-coalescing registers from
+ * mal->coales_param[]: flush/clear the hardware counters, set the
+ * Tx/Rx timer values, then set the frame-count thresholds that
+ * trigger the coalesced interrupt.  The 405EX and 460EX/460GT paths
+ * use the compile-time CONFIG_IBM_NEW_EMAC_*_COAL_* defaults, while
+ * APM82181 and 460SX program the runtime coales_param values.  The
+ * final printk reports channel-0 values only.
+ */
+inline void mal_enable_coal(struct mal_instance *mal)
+{
+ unsigned int val;
+#if defined(CONFIG_405EX)
+ /* Clear the counters */
+ val = SDR0_ICC_FLUSH0 | SDR0_ICC_FLUSH1;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX, val);
+
+ /* Set Tx/Rx Timer values */
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX0, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX1, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX0, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX1, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+
+ /* Enable the Tx/Rx Coalescing interrupt */
+ val = ((CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR0_SHIFT) |
+ ((CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR1_SHIFT);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX, val);
+
+ val = ((CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR0_SHIFT) |
+ ((CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR1_SHIFT);
+
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX, val);
+#elif defined(CONFIG_APM82181)
+ /* Clear the counters */
+ val = SDR0_ICC_FLUSH;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX0, val);
+
+ /* Set Tx/Rx Timer values */
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX0, mal->coales_param[0].tx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX0, mal->coales_param[0].rx_time);
+
+ /* Enable the Tx/Rx Coalescing interrupt */
+ val = (mal->coales_param[0].tx_count & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR_SHIFT;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX0, val);
+
+ val = (mal->coales_param[0].rx_count & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR_SHIFT;
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX0, val);
+
+#elif defined(CONFIG_460EX) || defined(CONFIG_460GT)
+ /* Clear the counters */
+ val = SDR0_ICC_FLUSH;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX1, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX1, val);
+#if defined(CONFIG_460GT)
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX2, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX3, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX2, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX3, val);
+#endif
+
+ /* Set Tx/Rx Timer values */
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX0, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX1, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX0, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX1, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+#if defined(CONFIG_460GT)
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX2, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX3, CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX2, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX3, CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER);
+#endif
+
+ /* Enable the Tx/Rx Coalescing interrupt */
+ val = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR_SHIFT;
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX1, val);
+#if defined(CONFIG_460GT)
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX2, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRTX3, val);
+#endif
+
+ val = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK)
+ << SDR0_ICC_FTHR_SHIFT;
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX0, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX1, val);
+#if defined(CONFIG_460GT)
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX2, val);
+ mtdcri(SDR0, DCRN_SDR0_ICCRRX3, val);
+#endif
+
+#elif defined(CONFIG_460SX)
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX0, mal->coales_param[0].tx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX1, mal->coales_param[1].tx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX2, mal->coales_param[2].tx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRTX3, mal->coales_param[3].tx_time);
+
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX0, mal->coales_param[0].rx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX1, mal->coales_param[1].rx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX2, mal->coales_param[2].rx_time);
+ mtdcri(SDR0, DCRN_SDR0_ICCTRRX3, mal->coales_param[3].rx_time);
+
+ set_ic_rxfthr(mal);
+ set_ic_txfthr(mal);
+#endif
+ printk(KERN_INFO "MAL: Enabled Interrupt Coal TxCnt: %d RxCnt: %d\n",
+ mal->coales_param[0].tx_count,
+ mal->coales_param[0].rx_count);
+
+ printk(KERN_INFO " TxTimer: %d RxTimer: %d\n",
+ mal->coales_param[0].tx_time,
+ mal->coales_param[0].rx_time);
+}
+#endif
+
static irqreturn_t mal_serr(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
+ struct list_head *l;
u32 esr = get_mal_dcrn(mal, MAL_ESR);
@@ -256,6 +438,14 @@ static irqreturn_t mal_serr(int irq, void *dev_instance)
"mal%d: system error, OPB (ESR = 0x%08x)\n",
mal->index, esr);
}
+
+
+ list_for_each(l, &mal->poll_list) {
+ struct mal_commac *mc =
+ list_entry(l, struct mal_commac, poll_list);
+ mc->ops->reset(mc->dev);
+ }
+
return IRQ_HANDLED;
}
@@ -309,6 +499,15 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+/* Coalesced Tx/Rx interrupt handler: just schedule the NAPI poll. */
+static irqreturn_t mal_coal(int irq, void *dev_instance)
+{
+ struct mal_instance *mal = dev_instance;
+ mal_schedule_poll(mal);
+ return IRQ_HANDLED;
+}
+#endif
+
static irqreturn_t mal_txde(int irq, void *dev_instance)
{
struct mal_instance *mal = dev_instance;
@@ -393,6 +592,9 @@ void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac)
static int mal_poll(struct napi_struct *napi, int budget)
{
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ int v;
+#endif
struct mal_instance *mal = container_of(napi, struct mal_instance, napi);
struct list_head *l;
int received = 0;
@@ -455,6 +657,32 @@ static int mal_poll(struct napi_struct *napi, int budget)
mc->ops->poll_tx(mc->dev);
}
+#if defined(CONFIG_IBM_EMAC_MAL_QOS_V404)
+ /* Process RX skbs QOS virtual channels.
+ *
+ */
+ for ( v = 1; v < MAX_VCHANS; v++ ) {
+ list_for_each(l, &mal->poll_list) {
+ struct mal_commac *mc =
+ list_entry(l, struct mal_commac, poll_list);
+ struct emac_instance *dev = mc->dev;
+ int n;
+ if ( v >= dev->rx_vchans ) {
+ continue;
+ }
+ n = mc->ops->poll_rx(dev->vdev[v],budget);
+ if (n) {
+ received += n;
+ budget -= n;
+ if (budget <= 0) {
+ goto more_work;
+ }
+ }
+ }
+
+ }
+#endif
+
more_work:
MAL_DBG2(mal, "poll() %d <- %d" NL, budget, received);
return received;
@@ -516,6 +744,7 @@ void *mal_dump_regs(struct mal_instance *mal, void *buf)
return regs + 1;
}
+
static int __devinit mal_probe(struct of_device *ofdev,
const struct of_device_id *match)
{
@@ -524,9 +753,14 @@ static int __devinit mal_probe(struct of_device *ofdev,
int index = mal_count++;
unsigned int dcr_base;
const u32 *prop;
+ const char *str_prop;
u32 cfg;
unsigned long irqflags;
irq_handler_t hdlr_serr, hdlr_txde, hdlr_rxde;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ int num_phys_chans;
+ int coal_intr_index;
+#endif
mal = kzalloc(sizeof(struct mal_instance), GFP_KERNEL);
if (!mal) {
@@ -541,6 +775,13 @@ static int __devinit mal_probe(struct of_device *ofdev,
MAL_DBG(mal, "probe" NL);
+ str_prop = of_get_property(ofdev->node, "descriptor-memory", NULL);
+ if (str_prop && (!strcmp(str_prop,"ocm") || !strcmp(str_prop,"OCM"))) {
+ printk(KERN_INFO
+ "mal%d: descriptor-memory = %s\n", index, str_prop);
+ mal->desc_memory = MAL_DESC_MEM_OCM;
+ }
+
prop = of_get_property(ofdev->node, "num-tx-chans", NULL);
if (prop == NULL) {
printk(KERN_ERR
@@ -609,6 +850,46 @@ static int __devinit mal_probe(struct of_device *ofdev,
goto fail_unmap;
}
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ /* Number of Tx channels is equal to Physical channels */
+ /* Rx channels include Virtual channels so use Tx channels */
+ BUG_ON(mal->num_tx_chans > MAL_MAX_PHYS_CHANNELS);
+ num_phys_chans = mal->num_tx_chans;
+ /* Older revs in 460EX and 460GT have coalesce bug in h/w */
+#if defined(CONFIG_460EX) || defined(CONFIG_460GT)
+ {
+ unsigned int pvr;
+ unsigned short min;
+ pvr = mfspr(SPRN_PVR);
+ min = PVR_MIN(pvr);
+ if (min < 4) {
+ printk(KERN_INFO "PVR %x Intr Coal disabled: H/W bug\n",
+ pvr);
+ mal->coalesce_disabled = 1;
+ }
+ }
+#else
+ mal->coalesce_disabled = 0;
+#endif
+ coal_intr_index = 5;
+
+ /* If device tree doesn't Interrupt coal IRQ, fall back to EOB IRQ */
+ for (i = 0; (i < num_phys_chans) && (mal->coalesce_disabled == 0) ; i++) {
+ mal->txcoal_irq[i] = irq_of_parse_and_map(ofdev->node, coal_intr_index++);
+ if (mal->txcoal_irq[i] == NO_IRQ) {
+ printk(KERN_INFO "MAL: No device tree IRQ for TxCoal%d - disabling coalescing\n", i);
+ mal->coalesce_disabled = 1;
+ }
+ }
+ for (i = 0; (i < num_phys_chans) && (mal->coalesce_disabled == 0); i++) {
+ mal->rxcoal_irq[i] = irq_of_parse_and_map(ofdev->node, coal_intr_index++);
+ if (mal->rxcoal_irq[i] == NO_IRQ) {
+ printk(KERN_INFO "MAL: No device tree IRQ for RxCoal%d - disabling coalescing\n", i);
+ mal->coalesce_disabled = 1;
+ }
+ }
+#endif
+
INIT_LIST_HEAD(&mal->poll_list);
INIT_LIST_HEAD(&mal->list);
spin_lock_init(&mal->lock);
@@ -641,9 +922,25 @@ static int __devinit mal_probe(struct of_device *ofdev,
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt =
- dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL);
+
+ if (mal->desc_memory == MAL_DESC_MEM_OCM) {
+ mal->bd_virt = ocm_alloc(&mal->bd_phys, bd_size, 4,
+ OCM_NON_CACHED, "mal_descriptors");
+ mal->bd_dma = (u32)mal->bd_phys;
+ }
+
+ if (mal->bd_virt == NULL) {
+ /* Allocate BD on SDRAM in case !MAL_DESC_MEM_OCM or failed OCM alloc */
+ if (mal->desc_memory == MAL_DESC_MEM_OCM){
+ printk(KERN_INFO
+ "mal%d: failed OCM alloc, descriptor-memory = SDRAM\n", index);
+ mal->desc_memory = MAL_DESC_MEM_SDRAM;
+ }
+ mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size,
+ &mal->bd_dma, GFP_KERNEL);
+ }
+
+
if (mal->bd_virt == NULL) {
printk(KERN_ERR
"mal%d: out of memory allocating RX/TX descriptors!\n",
@@ -651,17 +948,25 @@ static int __devinit mal_probe(struct of_device *ofdev,
err = -ENOMEM;
goto fail_unmap;
}
+
memset(mal->bd_virt, 0, bd_size);
+ for (i = 0; i < mal->num_tx_chans; ++i) {
+ if (mal->desc_memory == MAL_DESC_MEM_OCM)
+ set_mal_dcrn(mal, MAL_TXBADDR, (mal->bd_phys >> 32));
- for (i = 0; i < mal->num_tx_chans; ++i)
set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
sizeof(struct mal_descriptor) *
mal_tx_bd_offset(mal, i));
+ }
+
+ for (i = 0; i < mal->num_rx_chans; ++i) {
+ if (mal->desc_memory == MAL_DESC_MEM_OCM)
+ set_mal_dcrn(mal, MAL_RXBADDR, (u32)(mal->bd_phys >> 32));
- for (i = 0; i < mal->num_rx_chans; ++i)
set_mal_dcrn(mal, MAL_RXCTPR(i), mal->bd_dma +
sizeof(struct mal_descriptor) *
mal_rx_bd_offset(mal, i));
+ }
if (mal_has_feature(mal, MAL_FTR_COMMON_ERR_INT)) {
irqflags = IRQF_SHARED;
@@ -674,20 +979,65 @@ static int __devinit mal_probe(struct of_device *ofdev,
}
err = request_irq(mal->serr_irq, hdlr_serr, irqflags, "MAL SERR", mal);
- if (err)
- goto fail2;
+ if (err) {
+ mal->serr_irq = NO_IRQ;
+ goto failirq;
+ }
err = request_irq(mal->txde_irq, hdlr_txde, irqflags, "MAL TX DE", mal);
- if (err)
- goto fail3;
- err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
- if (err)
- goto fail4;
+ if (err) {
+ mal->txde_irq = NO_IRQ;
+ goto failirq;
+ }
err = request_irq(mal->rxde_irq, hdlr_rxde, irqflags, "MAL RX DE", mal);
- if (err)
- goto fail5;
- err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
- if (err)
- goto fail6;
+ if (err) {
+ mal->rxde_irq = NO_IRQ;
+ goto failirq;
+ }
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ for (i = 0; (i < num_phys_chans) && (mal->coalesce_disabled == 0); i++) {
+ err = request_irq(mal->txcoal_irq[i],
+ mal_coal, 0, tx_coal_irqname[i], mal);
+ if (err) {
+ printk(KERN_INFO "MAL: TxCoal%d ReqIRQ failed - disabling coalescing\n", i);
+ mal->txcoal_irq[i] = NO_IRQ;
+ mal->coalesce_disabled = 1;
+ break;
+ }
+ }
+ for (i = 0; (i < num_phys_chans) && (mal->coalesce_disabled == 0); i++) {
+ err = request_irq(mal->rxcoal_irq[i],
+ mal_coal, 0, rx_coal_irqname[i], mal);
+ if (err) {
+ printk(KERN_INFO "MAL: RxCoal%d ReqIRQ failed - disabling coalescing\n", i);
+ mal->rxcoal_irq[i] = NO_IRQ;
+ mal->coalesce_disabled = 1;
+ break;
+ }
+ }
+
+ /* Fall back to EOB IRQ if coalesce not supported */
+ if (mal->coalesce_disabled) {
+ /* Clean up any IRQs allocated for Coalescing */
+ for (i = 0; i < num_phys_chans; i++) {
+ if (mal->txcoal_irq[i] != NO_IRQ)
+ free_irq(mal->txcoal_irq[i], mal);
+ if (mal->rxcoal_irq[i] != NO_IRQ)
+ free_irq(mal->rxcoal_irq[i], mal);
+ }
+#endif
+ err = request_irq(mal->txeob_irq, mal_txeob, 0, "MAL TX EOB", mal);
+ if (err) {
+ mal->txeob_irq = NO_IRQ;
+ goto failirq;
+ }
+ err = request_irq(mal->rxeob_irq, mal_rxeob, 0, "MAL RX EOB", mal);
+ if (err) {
+ mal->rxeob_irq = NO_IRQ;
+ goto failirq;
+ }
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ }
+#endif
/* Enable all MAL SERR interrupt sources */
if (mal->version == 2)
@@ -695,6 +1045,31 @@ static int __devinit mal_probe(struct of_device *ofdev,
else
set_mal_dcrn(mal, MAL_IER, MAL1_IER_EVENTS);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ if (mal->coalesce_disabled == 0) {
+ mal->coales_param[0].tx_count = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[1].tx_count = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[2].tx_count = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[3].tx_count = (CONFIG_IBM_NEW_EMAC_TX_COAL_COUNT & COAL_FRAME_MASK);
+
+ mal->coales_param[0].rx_count = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[1].rx_count = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[2].rx_count = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK);
+ mal->coales_param[3].rx_count = (CONFIG_IBM_NEW_EMAC_RX_COAL_COUNT & COAL_FRAME_MASK);
+
+ mal->coales_param[0].tx_time = CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER;
+ mal->coales_param[1].tx_time = CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER;
+ mal->coales_param[2].tx_time = CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER;
+ mal->coales_param[3].tx_time = CONFIG_IBM_NEW_EMAC_TX_COAL_TIMER;
+
+ mal->coales_param[0].rx_time = CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER;
+ mal->coales_param[1].rx_time = CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER;
+ mal->coales_param[2].rx_time = CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER;
+ mal->coales_param[3].rx_time = CONFIG_IBM_NEW_EMAC_RX_COAL_TIMER;
+
+ mal_enable_coal(mal);
+}
+#endif
/* Enable EOB interrupt */
mal_enable_eob_irq(mal);
@@ -711,16 +1086,35 @@ static int __devinit mal_probe(struct of_device *ofdev,
return 0;
- fail6:
- free_irq(mal->rxde_irq, mal);
- fail5:
- free_irq(mal->txeob_irq, mal);
- fail4:
- free_irq(mal->txde_irq, mal);
- fail3:
- free_irq(mal->serr_irq, mal);
- fail2:
- dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
+ failirq:
+ if (mal->serr_irq != NO_IRQ)
+ free_irq(mal->serr_irq, mal);
+ if (mal->txde_irq != NO_IRQ)
+ free_irq(mal->txde_irq, mal);
+ if (mal->rxde_irq != NO_IRQ)
+ free_irq(mal->rxde_irq, mal);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ if (mal->coalesce_disabled == 0) {
+ for (i = 0; i < num_phys_chans; i++) {
+ if (mal->txcoal_irq[i] != NO_IRQ)
+ free_irq(mal->txcoal_irq[i], mal);
+ if (mal->rxcoal_irq[i] != NO_IRQ)
+ free_irq(mal->rxcoal_irq[i], mal);
+ }
+ } else {
+#endif
+ if (mal->txeob_irq != NO_IRQ)
+ free_irq(mal->txeob_irq, mal);
+ if (mal->rxeob_irq != NO_IRQ)
+ free_irq(mal->rxeob_irq, mal);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ }
+#endif
+ if (mal->desc_memory == MAL_DESC_MEM_OCM)
+ ocm_free(mal->bd_virt);
+ else
+ dma_free_coherent(&ofdev->dev, bd_size,
+ mal->bd_virt, mal->bd_dma);
fail_unmap:
dcr_unmap(mal->dcr_host, 0x100);
fail:
@@ -732,6 +1126,10 @@ static int __devinit mal_probe(struct of_device *ofdev,
static int __devexit mal_remove(struct of_device *ofdev)
{
struct mal_instance *mal = dev_get_drvdata(&ofdev->dev);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ int i;
+ int num_phys_chans;
+#endif
MAL_DBG(mal, "remove" NL);
@@ -748,17 +1146,38 @@ static int __devexit mal_remove(struct of_device *ofdev)
dev_set_drvdata(&ofdev->dev, NULL);
- free_irq(mal->serr_irq, mal);
- free_irq(mal->txde_irq, mal);
- free_irq(mal->txeob_irq, mal);
- free_irq(mal->rxde_irq, mal);
- free_irq(mal->rxeob_irq, mal);
-
+ if (mal->serr_irq != NO_IRQ)
+ free_irq(mal->serr_irq, mal);
+ if (mal->txde_irq != NO_IRQ)
+ free_irq(mal->txde_irq, mal);
+ if (mal->rxde_irq != NO_IRQ)
+ free_irq(mal->rxde_irq, mal);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ num_phys_chans = mal->num_tx_chans;
+ if (mal->coalesce_disabled == 0) {
+ for (i = 0; i < num_phys_chans; i++) {
+ if (mal->txcoal_irq[i] != NO_IRQ)
+ free_irq(mal->txcoal_irq[i], mal);
+ if (mal->rxcoal_irq[i] != NO_IRQ)
+ free_irq(mal->rxcoal_irq[i], mal);
+ }
+ } else {
+#endif
+ if (mal->txeob_irq != NO_IRQ)
+ free_irq(mal->txeob_irq, mal);
+ if (mal->rxeob_irq != NO_IRQ)
+ free_irq(mal->rxeob_irq, mal);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ }
+#endif
mal_reset(mal);
mal_dbg_unregister(mal);
- dma_free_coherent(&ofdev->dev,
+ if (mal->desc_memory == MAL_DESC_MEM_OCM)
+ ocm_free(mal->bd_virt);
+ else
+ dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
diff --git a/drivers/net/ibm_newemac/mal.h b/drivers/net/ibm_newemac/mal.h
index 9ededfbf072..a52dd75b1b2 100644
--- a/drivers/net/ibm_newemac/mal.h
+++ b/drivers/net/ibm_newemac/mal.h
@@ -118,10 +118,12 @@
#define MAL_TXCARR 0x05
#define MAL_TXEOBISR 0x06
#define MAL_TXDEIR 0x07
+#define MAL_TXBADDR 0x09
#define MAL_RXCASR 0x10
#define MAL_RXCARR 0x11
#define MAL_RXEOBISR 0x12
#define MAL_RXDEIR 0x13
+#define MAL_RXBADDR 0x15
#define MAL_TXCTPR(n) ((n) + 0x20)
#define MAL_RXCTPR(n) ((n) + 0x40)
#define MAL_RCBS(n) ((n) + 0x60)
@@ -169,7 +171,71 @@ struct mal_descriptor {
#define MAL_TX_CTRL_LAST 0x1000
#define MAL_TX_CTRL_INTR 0x0400
+#define MAL_DESC_MEM_SDRAM 0x0
+#define MAL_DESC_MEM_OCM 0x1
+
+#if defined(CONFIG_405EX)
+#define DCRN_SDR0_ICCRTX 0x430B /* Int coal Tx control register */
+#define DCRN_SDR0_ICCRRX 0x430C /* Int coal Rx control register */
+#define SDR0_ICC_FTHR0_SHIFT 23
+#define SDR0_ICC_FLUSH0 22
+#define SDR0_ICC_FLUWI0 21
+#define SDR0_ICC_FTHR1_SHIFT 12
+#define SDR0_ICC_FLUSH1 11
+#define SDR0_ICC_FLUWI1 10
+#define DCRN_SDR0_ICCTRTX0 0x430D /* Int coal Tx0 count threshold */
+#define DCRN_SDR0_ICCTRTX1 0x430E /* Int coal Tx1 count threshold */
+#define DCRN_SDR0_ICCTRRX0 0x430F /* Int coal Rx0 count threshold */
+#define DCRN_SDR0_ICCTRRX1 0x4310 /* Int coal Rx1 count threshold */
+#define DCRN_SDR0_ICTSRTX0 0x4307 /* Int coal Tx0 timer status*/
+#define DCRN_SDR0_ICTSRTX1 0x4308 /* Int coal Tx1 timer status*/
+#define DCRN_SDR0_ICTSRRX0 0x4309 /* Int coal Rx0 timer status*/
+#define DCRN_SDR0_ICTSRRX1 0x430A /* Int coal Rx1 timer status*/
+#elif defined(CONFIG_APM82181)
+#define DCRN_SDR0_ICCRTX0 0x4410 /* Int coal Tx0 control register */
+#define DCRN_SDR0_ICCRRX0 0x4414 /* Int coal Rx0 control register */
+#define SDR0_ICC_FTHR_SHIFT 23
+#define SDR0_ICC_FLUSH 22
+#define SDR0_ICC_FLUWI 21
+#define DCRN_SDR0_ICCTRTX0 0x4418 /* Int coal Tx0 count threshold */
+#define DCRN_SDR0_ICCTRRX0 0x441C /* Int coal Rx0 count threshold */
+#define DCRN_SDR0_ICTSRTX0 0x4420 /* Int coal Tx0 timer status*/
+#define DCRN_SDR0_ICTSRRX0 0x4424 /* Int coal Rx0 timer status*/
+#elif defined(CONFIG_460EX) || defined(CONFIG_460GT) || defined(CONFIG_460SX)
+#define DCRN_SDR0_ICCRTX0 0x4410 /* Int coal Tx0 control register */
+#define DCRN_SDR0_ICCRTX1 0x4411 /* Int coal Tx1 control register */
+#define DCRN_SDR0_ICCRTX2 0x4412 /* Int coal Tx2 control register */
+#define DCRN_SDR0_ICCRTX3 0x4413 /* Int coal Tx3 control register */
+#define DCRN_SDR0_ICCRRX0 0x4414 /* Int coal Rx0 control register */
+#define DCRN_SDR0_ICCRRX1 0x4415 /* Int coal Rx1 control register */
+#define DCRN_SDR0_ICCRRX2 0x4416 /* Int coal Rx2 control register */
+#define DCRN_SDR0_ICCRRX3 0x4417 /* Int coal Rx3 control register */
+#define SDR0_ICC_FTHR_SHIFT 23
+#define SDR0_ICC_FLUSH 22
+#define SDR0_ICC_FLUWI 21
+#define DCRN_SDR0_ICCTRTX0 0x4418 /* Int coal Tx0 count threshold */
+#define DCRN_SDR0_ICCTRTX1 0x4419 /* Int coal Tx1 count threshold */
+#define DCRN_SDR0_ICCTRTX2 0x441A /* Int coal Tx2 count threshold */
+#define DCRN_SDR0_ICCTRTX3 0x441B /* Int coal Tx3 count threshold */
+#define DCRN_SDR0_ICCTRRX0 0x441C /* Int coal Rx0 count threshold */
+#define DCRN_SDR0_ICCTRRX1 0x441D /* Int coal Rx1 count threshold */
+#define DCRN_SDR0_ICCTRRX2 0x441E /* Int coal Rx2 count threshold */
+#define DCRN_SDR0_ICCTRRX3 0x441F /* Int coal Rx3 count threshold */
+#define DCRN_SDR0_ICTSRTX0 0x4420 /* Int coal Tx0 timer status*/
+#define DCRN_SDR0_ICTSRTX1 0x4421 /* Int coal Tx1 timer status*/
+#define DCRN_SDR0_ICTSRTX2 0x4422 /* Int coal Tx2 timer status*/
+#define DCRN_SDR0_ICTSRTX3 0x4423 /* Int coal Tx3 timer status*/
+#define DCRN_SDR0_ICTSRRX0 0x4424 /* Int coal Rx0 timer status*/
+#define DCRN_SDR0_ICTSRRX1 0x4425 /* Int coal Rx1 timer status*/
+#define DCRN_SDR0_ICTSRRX2 0x4426 /* Int coal Rx2 timer status*/
+#define DCRN_SDR0_ICTSRRX3 0x4427 /* Int coal Rx3 timer status*/
+#endif
+
+#define COAL_FRAME_MASK 0x1FF
+#define MAL_MAX_PHYS_CHANNELS 4
+
struct mal_commac_ops {
+ void (*reset) (void *dev);
void (*poll_tx) (void *dev);
int (*poll_rx) (void *dev, int budget);
int (*peek_rx) (void *dev);
@@ -188,10 +254,22 @@ struct mal_commac {
struct list_head list;
};
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+struct mal_coales_param
+{
+ /* Configuration parameters for the coalescing function */
+ int tx_count;
+ int tx_time;
+ int rx_count;
+ int rx_time;
+};
+#endif
+
struct mal_instance {
int version;
dcr_host_t dcr_host;
+ int desc_memory; /* SDRAM or OCM */
int num_tx_chans; /* Number of TX channels */
int num_rx_chans; /* Number of RX channels */
int txeob_irq; /* TX End Of Buffer IRQ */
@@ -200,6 +278,27 @@ struct mal_instance {
int rxde_irq; /* RX Descriptor Error IRQ */
int serr_irq; /* MAL System Error IRQ */
+#if defined(CONFIG_IBM_NEW_EMAC_INTR_COALESCE)
+
+ int txcoal0_irq; /* COAL */
+ int txcoal1_irq; /* COAL */
+ int txcoal2_irq; /* COAL */
+ int txcoal3_irq; /* COAL */
+ int rxcoal0_irq; /* COAL */
+ int rxcoal1_irq; /* COAL */
+ int rxcoal2_irq; /* COAL */
+ int rxcoal3_irq; /* COAL */
+
+ struct mal_coales_param coales_param[4];
+ /* add copy of iccrtx and iccrrx registers
+ * to bypass the bug on the 440EPX pass1 where these
+ * registers are write only
+ */
+ u32 enet_coales_iccrtx;
+ u32 enet_coales_iccrrx;
+ struct timer_list mal_coal_timer;
+#endif
+
struct list_head poll_list;
struct napi_struct napi;
@@ -208,6 +307,7 @@ struct mal_instance {
u32 rx_chan_mask;
dma_addr_t bd_dma;
+ phys_addr_t bd_phys;
struct mal_descriptor *bd_virt;
struct of_device *ofdev;
@@ -217,6 +317,11 @@ struct mal_instance {
struct net_device dummy_dev;
unsigned int features;
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+ int txcoal_irq[MAL_MAX_PHYS_CHANNELS]; /* MAL TxCoalesce Error IRQ */
+ int rxcoal_irq[MAL_MAX_PHYS_CHANNELS]; /* MAL RxCoalesce IRQ */
+ int coalesce_disabled; /* Coalesce disable flag */
+#endif
};
static inline u32 get_mal_dcrn(struct mal_instance *mal, int reg)
@@ -284,6 +389,9 @@ void mal_disable_rx_channel(struct mal_instance *mal, int channel);
void mal_poll_disable(struct mal_instance *mal, struct mal_commac *commac);
void mal_poll_enable(struct mal_instance *mal, struct mal_commac *commac);
+#ifdef CONFIG_IBM_NEW_EMAC_INTR_COALESCE
+void mal_enable_coal(struct mal_instance *mal);
+#endif
/* Add/remove EMAC to/from MAL polling list */
void mal_poll_add(struct mal_instance *mal, struct mal_commac *commac);
diff --git a/drivers/net/ibm_newemac/phy.c b/drivers/net/ibm_newemac/phy.c
index ac9d964e59e..635cb96ef64 100644
--- a/drivers/net/ibm_newemac/phy.c
+++ b/drivers/net/ibm_newemac/phy.c
@@ -52,7 +52,7 @@ int emac_mii_reset_phy(struct mii_phy *phy)
{
int val;
int limit = 10000;
-
+#ifndef CONFIG_APOLLO3G
val = phy_read(phy, MII_BMCR);
val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
val |= BMCR_RESET;
@@ -68,7 +68,7 @@ int emac_mii_reset_phy(struct mii_phy *phy)
}
if ((val & BMCR_ISOLATE) && limit > 0)
phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
-
+#endif /* CONFIG_APOLLO3G */
return limit <= 0;
}
@@ -359,7 +359,51 @@ static struct mii_phy_def bcm5248_phy_def = {
.name = "BCM5248 10/100 SMII Ethernet",
.ops = &generic_phy_ops
};
+#ifdef CONFIG_APOLLO3G
+static int bcm54610_init(struct mii_phy *phy)
+{
+ int regb, rega;
+
+ phy_write(phy, 0x1C, 0x2C00);
+ regb = phy_read(phy, 0x1C);
+
+ phy_write(phy, 0x1C, 0xAC8C);
+
+ phy_write(phy, 0x1C, 0x2C00);
+ rega = phy_read(phy, 0x1C);
+
+ printk(KERN_INFO "%s: before 0x%04x, after 0x%04x\n",
+ __FUNCTION__, (regb & 0xffff), (rega & 0xffff));
+
+ /* the RGMII interface is not half-duplex capable */
+ rega = phy_read(phy, 0x04);
+ phy_write(phy, 0x04, rega & ~0x00a0);
+
+ regb = phy_read(phy, 0x09);
+ phy_write(phy, 0x09, regb & ~0x0100);
+
+ printk(KERN_INFO "%s: before 0x%04x, 0x%04x; after 0x%04x, 0x%04x\n",
+ __FUNCTION__, (rega & 0xffff), (regb & 0xffff),
+ (phy_read(phy, 0x04) & 0xffff), (phy_read(phy, 0x09) & 0xffff));
+
+ return 0;
+}
+
+static struct mii_phy_ops bcm54610_phy_ops = {
+ .init = bcm54610_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+static struct mii_phy_def bcm54610_phy_def = {
+ .phy_id = 0x0143BD63,
+ .phy_id_mask = 0xffffffff,
+ .name = "BCM54610 Gigabit Ethernet",
+ .ops = &bcm54610_phy_ops
+};
+#endif
static int m88e1111_init(struct mii_phy *phy)
{
pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
@@ -400,6 +444,111 @@ static int m88e1112_init(struct mii_phy *phy)
return 0;
}
+static int m88e1141_init(struct mii_phy *phy)
+{
+ unsigned short data;
+
+ printk(KERN_CRIT "we go to init for %d\n", phy->mode);
+ switch (phy->mode) {
+ case PHY_MODE_GMII:
+#if defined(CONFIG_M88E1141_DEBUG)
+ data = phy_read(phy, 0x00);
+ data |= 0x2000; /* Speed Select 1000Mbps */
+ phy_write(phy, 0x00, data);
+ data = phy_read(phy, 0x14);
+ data |= 0x0010; /* GMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+ data = phy_read(phy, 0x1B);
+ data |= 0x8000; /* Auto Selection = Disable */
+ data |= 0x0400; /* Interrupt Polarity = Active Low */
+ data |= 0x0080; /* DTE Detect Status wait time */
+ data |= 0x000F; /* HWCFG_MODE = GMII */
+ phy_write(phy, 0x1B, data);
+ data = phy_read(phy, 0x04);
+ data |= 0x0C00; /* Async Pause + Pause */
+ data |= 0x01E0; /* 100FDX + 100HDX + 10FDX + 10HDX */
+ phy_write(phy, 0x04, data);
+ data = phy_read(phy, 0x09);
+ //data |= 0x1C00; /* Master/Slave Config */
+ data |= 0x0300; /* 1000FDX + 1000HDX */
+ phy_write(phy, 0x09, data);
+#else
+ data = phy_read(phy, 0x14);
+ data |= 0x0010; /* GMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+ data = phy_read(phy, 0x1B);
+ data |= 0x000F; /* HWCFG_MODE = GMII */
+ phy_write(phy, 0x1B, data);
+#endif
+ break;
+ case PHY_MODE_RGMII:
+#if defined(CONFIG_M88E1141_DEBUG)
+ data = phy_read(phy, 0x00);
+ data |= 0x2000; /* Speed Select 1000Mbps */
+ phy_write(phy, 0x00, data);
+ data = phy_read(phy, 0x14);
+ data |= 0x0080; /* RGMII RX Timing Control */
+ data |= 0x0002; /* RGMII TX Timing Control */
+ data |= 0x0050; /* RGMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+ data = phy_read(phy, 0x1B);
+ data |= 0x8000; /* Auto Selection = Disable */
+ data |= 0x0400; /* Interrupt Polarity = Active Low */
+ data |= 0x0080; /* DTE Detect Status wait time */
+ data |= 0x000B; /* HWCFG_MODE = RGMII */
+ phy_write(phy, 0x1B, data);
+ data = phy_read(phy, 0x04);
+ data |= 0x0C00; /* Async Pause + Pause */
+ data |= 0x01E0; /* 100FDX + 100HDX + 10FDX + 10HDX */
+ phy_write(phy, 0x04, data);
+ data = phy_read(phy, 0x09);
+ //data |= 0x1C00; /* Master/Slave Config */
+ data |= 0x0300; /* 1000FDX + 1000HDX */
+ phy_write(phy, 0x09, data);
+#else
+ data = phy_read(phy, 0x14);
+ data |= 0x0080; /* RGMII RX Timing Control */
+ data |= 0x0002; /* RGMII TX Timing Control */
+ data |= 0x0050; /* RGMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+ data = phy_read(phy, 0x1B);
+ data |= 0x000B; /* HWCFG_MODE = RGMII */
+ phy_write(phy, 0x1B, data);
+#endif
+ break;
+ case PHY_MODE_SGMII:
+ data = phy_read(phy, 0x14);
+ data &= ~0x0080; /* CLEAR - RGMII setting */
+ data &= ~0x0002; /* CLEAR - RGMII setting */
+ data &= ~0x0070; /* CLEAR - Default MAC speed */
+ data |= 0x0070; /* GMII Default MAC interface speed */
+ phy_write(phy, 0x14, data);
+
+ data = phy_read(phy, 0x1B);
+ data |= 0x8000; /* Auto Selection = Disable */
+ data &= ~0x0400; /* Interrupt Polarity = Active Low */
+ data |= 0x0120; /* DTE Detect Status wait time */
+ data &= ~0x000F;/* CLEAR - HWCFG_MODE setting */
+ data |= 0x0000; /* HWCFG_MODE = SGMII */
+ phy_write(phy, 0x1B, data);
+
+ phy_write(phy, 0x10, 0x0068);
+ phy_write(phy, 0x16, 0x0001);
+ phy_write(phy, 0x00, 0x8100);
+ phy_write(phy, 0x16, 0x0000);
+ break;
+ }
+
+#if 0
+ data = phy_read(phy, 0x00);
+ data |= 0x8000; /* Reset PHY */
+ phy_write(phy, 0x00, data);
+ udelay(1000);
+#endif
+
+ return 0;
+}
+
static int et1011c_init(struct mii_phy *phy)
{
u16 reg_short;
@@ -467,12 +616,31 @@ static struct mii_phy_def m88e1112_phy_def = {
.ops = &m88e1112_phy_ops,
};
+static struct mii_phy_ops m88e1141_phy_ops = {
+ .init = m88e1141_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link
+};
+
+static struct mii_phy_def m88e1141_phy_def = {
+ .phy_id = 0x01410CD0,
+ .phy_id_mask = 0x0ffffff0,
+ .name = "Marvell 88E1141 Ethernet",
+ .ops = &m88e1141_phy_ops,
+};
+
static struct mii_phy_def *mii_phy_table[] = {
&et1011c_phy_def,
&cis8201_phy_def,
&bcm5248_phy_def,
+#ifdef CONFIG_APOLLO3G
+ &bcm54610_phy_def,
+#endif
&m88e1111_phy_def,
&m88e1112_phy_def,
+ &m88e1141_phy_def,
&genmii_phy_def,
NULL
};
@@ -487,7 +655,11 @@ int emac_mii_phy_probe(struct mii_phy *phy, int address)
phy->advertising = 0;
phy->address = address;
phy->speed = SPEED_10;
+#ifndef CONFIG_APOLLO3G
phy->duplex = DUPLEX_HALF;
+#else
+ phy->duplex = DUPLEX_FULL;
+#endif
phy->pause = phy->asym_pause = 0;
/* Take PHY out of isolate mode and reset it. */
@@ -511,24 +683,36 @@ int emac_mii_phy_probe(struct mii_phy *phy, int address)
u16 bmsr = phy_read(phy, MII_BMSR);
if (bmsr & BMSR_ANEGCAPABLE)
phy->features |= SUPPORTED_Autoneg;
+#ifndef CONFIG_APOLLO3G
if (bmsr & BMSR_10HALF)
phy->features |= SUPPORTED_10baseT_Half;
+#endif
if (bmsr & BMSR_10FULL)
phy->features |= SUPPORTED_10baseT_Full;
+#ifndef CONFIG_APOLLO3G
if (bmsr & BMSR_100HALF)
phy->features |= SUPPORTED_100baseT_Half;
+#endif
if (bmsr & BMSR_100FULL)
phy->features |= SUPPORTED_100baseT_Full;
if (bmsr & BMSR_ESTATEN) {
u16 esr = phy_read(phy, MII_ESTATUS);
if (esr & ESTATUS_1000_TFULL)
phy->features |= SUPPORTED_1000baseT_Full;
+#ifndef CONFIG_APOLLO3G
if (esr & ESTATUS_1000_THALF)
phy->features |= SUPPORTED_1000baseT_Half;
+#endif
}
phy->features |= SUPPORTED_MII;
}
+#if (defined CONFIG_APM82181) /* RGMII does not support half-duplex */
+ phy->features &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+#endif
+
/* Setup default advertising */
phy->advertising = phy->features;
diff --git a/drivers/net/ibm_newemac/rgmii.c b/drivers/net/ibm_newemac/rgmii.c
index 8d76cb89dbd..98048d8b667 100644
--- a/drivers/net/ibm_newemac/rgmii.c
+++ b/drivers/net/ibm_newemac/rgmii.c
@@ -222,7 +222,7 @@ void *rgmii_dump_regs(struct of_device *ofdev, void *buf)
* rgmii ? if yes, then we'll add a cell_index
* like we do for emac
*/
- memcpy_fromio(regs, dev->base, sizeof(struct rgmii_regs));
+ memcpy(regs, dev->base, sizeof(struct rgmii_regs));
return regs + 1;
}
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 30173a9fb55..8d31b4a2c91 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -61,7 +61,7 @@ void tah_reset(struct of_device *ofdev)
/* 10KB TAH TX FIFO accomodates the max MTU of 9000 */
out_be32(&p->mr,
- TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
+ TAH_MR_CVR | TAH_MR_ST_256 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
TAH_MR_DIG);
}
@@ -82,7 +82,7 @@ void *tah_dump_regs(struct of_device *ofdev, void *buf)
* zmii ? if yes, then we'll add a cell_index
* like we do for emac
*/
- memcpy_fromio(regs, dev->base, sizeof(struct tah_regs));
+ memcpy(regs, dev->base, sizeof(struct tah_regs));
return regs + 1;
}
diff --git a/drivers/net/ibm_newemac/zmii.c b/drivers/net/ibm_newemac/zmii.c
index 17b15412494..edb710c4a35 100644
--- a/drivers/net/ibm_newemac/zmii.c
+++ b/drivers/net/ibm_newemac/zmii.c
@@ -225,7 +225,7 @@ void *zmii_dump_regs(struct of_device *ofdev, void *buf)
* zmii ? if yes, then we'll add a cell_index
* like we do for emac
*/
- memcpy_fromio(regs, dev->base, sizeof(struct zmii_regs));
+ memcpy(regs, dev->base, sizeof(struct zmii_regs));
return regs + 1;
}
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 5a0c6ad53f8..1d28d863285 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -10,6 +10,18 @@ config PCIEPORTBUS
Power Management Event support and Virtual Channel support to run
on PCI Express Ports (Root or Switch).
+choice
+ prompt "PCIE Max Payload Size"
+ depends on PCIEPORTBUS && APM82181
+ default PCIE_MAX_PAYLOAD_SIZE_256
+
+config PCIE_MAX_PAYLOAD_SIZE_128
+ bool "128 Bytes"
+
+config PCIE_MAX_PAYLOAD_SIZE_256
+ bool "256 Bytes"
+endchoice
+
#
# Include service Kconfig here
#
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 3c20dae43ce..005169a6c04 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -414,6 +414,13 @@ config RTC_DRV_CMOS
This driver can also be built as a module. If so, the module
will be called rtc-cmos.
+config RTC_DRV_PPC_DCR
+ tristate "IBM RTC through DCR bus based on MC146818"
+ select RTC_DRV_M48T86
+ help
+ If you say yes here you get support for IBM RTC chips.
+
+
config RTC_DRV_DS1216
tristate "Dallas DS1216"
depends on SNI_RM
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index aa3fbd5517a..a530c13d5ea 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -85,3 +85,4 @@ obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o
obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o
+obj-$(CONFIG_RTC_DRV_IBM) += ibm_rtc.o
diff --git a/drivers/rtc/ibm_rtc.c b/drivers/rtc/ibm_rtc.c
new file mode 100644
index 00000000000..aff94f85428
--- /dev/null
+++ b/drivers/rtc/ibm_rtc.c
@@ -0,0 +1,479 @@
+/*
+ * IBM RTC driver
+ * Copyright (c) 2010 Applied Micro
+ *
+ * Author: Duy Nguyen <dpnguyen@appliedmicro.com>
+ *
+ * Based on the ST M48T86 / Dallas DS12887 RTC driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This drivers only supports the clock running in BCD and 24H mode.
+ * If it will be ever adapted to binary and 12H mode, care must be taken
+ * to not introduce bugs.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+
+#define RTC_REG_SEC 0x00
+#define RTC_REG_SECALRM 0x01
+#define RTC_REG_MIN 0x02
+#define RTC_REG_MINALRM 0x03
+#define RTC_REG_HOUR 0x04
+#define RTC_REG_HOURALRM 0x05
+#define RTC_REG_DOW 0x06 /* 1 = sunday */
+#define RTC_REG_DOM 0x07
+#define RTC_REG_MONTH 0x08 /* 1 - 12 */
+#define RTC_REG_YEAR 0x09 /* 0 - 99 */
+#define RTC_REG_A 0x0A
+#define RTC_REG_B 0x0B
+#define RTC_REG_C 0x0C
+#define RTC_REG_D 0x0D
+
+#define RTC_REG_B_H24 (1 << 1)
+#define RTC_REG_B_DM (1 << 2)
+#define RTC_REG_B_SET (1 << 7)
+#define RTC_REG_D_VRT (1 << 7)
+
+#define RTC_REG_PF 0x80 /* Period */
+#define RTC_REG_AF 0x40 /* Alarm */
+#define RTC_REG_UF 0x10 /* Update */
+
+#define RTC_DCR_BASE_ADDR 0x300
+#define RTC_READ(reg) mfdcr(RTC_DCR_BASE_ADDR + reg)
+#define RTC_WRITE(val, reg) mtdcr(RTC_DCR_BASE_ADDR + reg, val)
+
+/* Debug support */
+#define DBG_LEVEL 0
+
+#define RTC_DBG(dev, name, fmt, arg...) \
+ printk(KERN_DEBUG #name ": " fmt, ## arg)
+
+#if DBG_LEVEL > 0
+# define DBG(d,f,x...) RTC_DBG(d, ibm-rtc, f, ##x)
+# define NL "\n"
+#else
+# define DBG(f,x...) ((void)0)
+#endif
+
+#if DBG_LEVEL > 1
+# define DBG2(d,f,x...) DBG(d,f, ##x)
+#else
+# define DBG2(f,x...) ((void)0)
+#endif
+
+/*
+ * FIXME: Temporarily comment out read/setalarm()
+ * because they make /dev/rtc misbehave.
+ * Enable them later (if needed).
+ */
+#undef CONFIG_IBM_RTC_ALARM
+
+struct ibm_rtc_instance {
+ void __iomem *ioaddr;
+ int irq;
+ struct rtc_device *rtc;
+ struct of_device *ofdev;
+ spinlock_t lock; /* serialize the NVRAM and RTC access */
+};
+
+static int ibm_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char reg;
+ //struct platform_device *pdev = to_platform_device(dev);
+
+ DBG2(dev, "ibm_rtc_read_time" NL);
+
+ reg = RTC_READ(RTC_REG_B);
+
+ if (reg & RTC_REG_B_DM) {
+ /* data (binary) mode */
+ tm->tm_sec = RTC_READ(RTC_REG_SEC);
+ tm->tm_min = RTC_READ(RTC_REG_MIN);
+ tm->tm_hour = RTC_READ(RTC_REG_HOUR) & 0x3F;
+ tm->tm_mday = RTC_READ(RTC_REG_DOM);
+ /* tm_mon is 0-11 */
+ tm->tm_mon = RTC_READ(RTC_REG_MONTH) - 1;
+ tm->tm_year = RTC_READ(RTC_REG_YEAR) + 100;
+ tm->tm_wday = RTC_READ(RTC_REG_DOW);
+ } else {
+ /* bcd mode */
+ tm->tm_sec = bcd2bin(RTC_READ(RTC_REG_SEC));
+ tm->tm_min = bcd2bin(RTC_READ(RTC_REG_MIN));
+ tm->tm_hour = bcd2bin(RTC_READ(RTC_REG_HOUR) & 0x3F);
+ tm->tm_mday = bcd2bin(RTC_READ(RTC_REG_DOM));
+ /* tm_mon is 0-11 */
+ tm->tm_mon = bcd2bin(RTC_READ(RTC_REG_MONTH)) - 1;
+ tm->tm_year = bcd2bin(RTC_READ(RTC_REG_YEAR)) + 100;
+ tm->tm_wday = bcd2bin(RTC_READ(RTC_REG_DOW));
+ }
+
+ /* correct the hour if the clock is in 12h mode */
+ if (!(reg & RTC_REG_B_H24))
+ if (RTC_READ(RTC_REG_HOUR) & 0x80)
+ tm->tm_hour += 12;
+
+ return 0;
+}
+
+static int ibm_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned char reg;
+ //struct platform_device *pdev = to_platform_device(dev);
+
+ DBG2(dev, "ibm_rtc_set_time" NL);
+
+ reg = RTC_READ(RTC_REG_B);
+
+ /* update flag and 24h mode */
+ reg |= RTC_REG_B_SET | RTC_REG_B_H24;
+ RTC_WRITE(reg, RTC_REG_B);
+
+ if (reg & RTC_REG_B_DM) {
+ /* data (binary) mode */
+ RTC_WRITE(tm->tm_sec, RTC_REG_SEC);
+ RTC_WRITE(tm->tm_min, RTC_REG_MIN);
+ RTC_WRITE(tm->tm_hour, RTC_REG_HOUR);
+ RTC_WRITE(tm->tm_mday, RTC_REG_DOM);
+ RTC_WRITE(tm->tm_mon + 1, RTC_REG_MONTH);
+ RTC_WRITE(tm->tm_year % 100, RTC_REG_YEAR);
+ RTC_WRITE(tm->tm_wday, RTC_REG_DOW);
+ } else {
+ /* bcd mode */
+ RTC_WRITE(bin2bcd(tm->tm_sec), RTC_REG_SEC);
+ RTC_WRITE(bin2bcd(tm->tm_min), RTC_REG_MIN);
+ RTC_WRITE(bin2bcd(tm->tm_hour), RTC_REG_HOUR);
+ RTC_WRITE(bin2bcd(tm->tm_mday), RTC_REG_DOM);
+ RTC_WRITE(bin2bcd(tm->tm_mon + 1), RTC_REG_MONTH);
+ RTC_WRITE(bin2bcd(tm->tm_year % 100), RTC_REG_YEAR);
+ RTC_WRITE(bin2bcd(tm->tm_wday), RTC_REG_DOW);
+ }
+
+ /* update ended */
+ reg &= ~RTC_REG_B_SET;
+ RTC_WRITE(reg, RTC_REG_B);
+
+ return 0;
+}
+
+#if defined(CONFIG_IBM_RTC_ALARM)
+/*
+ * Read alarm time and date in RTC
+ */
+static int ibm_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ unsigned char reg;
+ //struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_time *tm = &alrm->time;
+
+ DBG2(dev, "ibm_rtc_read_time" NL);
+
+ reg = RTC_READ(RTC_REG_B);
+
+ if (reg & RTC_REG_B_DM) {
+ /* data (binary) mode */
+ tm->tm_sec = RTC_READ(RTC_REG_SECALRM);
+ tm->tm_min = RTC_READ(RTC_REG_MINALRM);
+ tm->tm_hour = RTC_READ(RTC_REG_HOURALRM) & 0x3F;
+ tm->tm_mday = RTC_READ(RTC_REG_DOM);
+ /* tm_mon is 0-11 */
+ tm->tm_mon = RTC_READ(RTC_REG_MONTH) - 1;
+ tm->tm_year = RTC_READ(RTC_REG_YEAR) + 100;
+ tm->tm_wday = RTC_READ(RTC_REG_DOW);
+ } else {
+ /* bcd mode */
+ tm->tm_sec = bcd2bin(RTC_READ(RTC_REG_SECALRM));
+ tm->tm_min = bcd2bin(RTC_READ(RTC_REG_MINALRM));
+ tm->tm_hour = bcd2bin(RTC_READ(RTC_REG_HOURALRM) & 0x3F);
+ tm->tm_mday = bcd2bin(RTC_READ(RTC_REG_DOM));
+ /* tm_mon is 0-11 */
+ tm->tm_mon = bcd2bin(RTC_READ(RTC_REG_MONTH)) - 1;
+ tm->tm_year = bcd2bin(RTC_READ(RTC_REG_YEAR)) + 100;
+ tm->tm_wday = bcd2bin(RTC_READ(RTC_REG_DOW));
+ }
+
+ /* correct the hour if the clock is in 12h mode */
+ if (!(reg & RTC_REG_B_H24))
+ if (RTC_READ(RTC_REG_HOUR) & 0x80)
+ tm->tm_hour += 12;
+
+ return 0;
+}
+
+/*
+ * Set alarm in RTC
+ */
+static int ibm_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ unsigned char reg;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ibm_rtc_instance *rtc = platform_get_drvdata(pdev);
+ struct rtc_time *tm = &alrm->time;
+
+ /* If no irq, we don't support ALARM */
+ if (rtc->irq == NO_IRQ)
+ return -EIO;
+
+ reg = RTC_READ(RTC_REG_B);
+
+ /* update flag and 24h mode */
+ reg |= RTC_REG_B_SET | RTC_REG_B_H24;
+ RTC_WRITE(reg, RTC_REG_B);
+
+ if (reg & RTC_REG_B_DM) {
+ /* data (binary) mode */
+ RTC_WRITE(tm->tm_sec, RTC_REG_SECALRM);
+ RTC_WRITE(tm->tm_min, RTC_REG_MINALRM);
+ RTC_WRITE(tm->tm_hour, RTC_REG_HOURALRM);
+ RTC_WRITE(tm->tm_mday, RTC_REG_DOM);
+ RTC_WRITE(tm->tm_mon + 1, RTC_REG_MONTH);
+ RTC_WRITE(tm->tm_year % 100, RTC_REG_YEAR);
+ RTC_WRITE(tm->tm_wday, RTC_REG_DOW);
+ } else {
+ /* bcd mode */
+ RTC_WRITE(bin2bcd(tm->tm_sec), RTC_REG_SECALRM);
+ RTC_WRITE(bin2bcd(tm->tm_min), RTC_REG_MINALRM);
+ RTC_WRITE(bin2bcd(tm->tm_hour), RTC_REG_HOURALRM);
+ RTC_WRITE(bin2bcd(tm->tm_mday), RTC_REG_DOM);
+ RTC_WRITE(bin2bcd(tm->tm_mon + 1), RTC_REG_MONTH);
+ RTC_WRITE(bin2bcd(tm->tm_year % 100), RTC_REG_YEAR);
+ RTC_WRITE(bin2bcd(tm->tm_wday), RTC_REG_DOW);
+ }
+
+ /* update ended */
+ reg &= ~RTC_REG_B_SET;
+ RTC_WRITE(reg, RTC_REG_B);
+
+ return 0;
+}
+#endif //CONFIG_IBM_RTC_ALARM
+
+/*
+ * Handle commands from user-space
+ *
+ * Implements the legacy RTC_{A,U,P}IE_{ON,OFF} ioctls by toggling bits
+ * in RTC register B under the per-instance spinlock; any other command
+ * returns -ENOIOCTLCMD so the RTC core can fall back to its own handling.
+ *
+ * NOTE(review): the masks used below (RTC_REG_AF/UF/PF) are named like
+ * the register C *status flag* bits, yet they are applied to register B,
+ * which on MC146818-style RTCs holds the interrupt *enable* bits
+ * (AIE/UIE/PIE). Confirm against the RTC datasheet that these masks are
+ * correct for register B.
+ */
+static int ibm_rtc_ioctl(struct device *dev, unsigned int cmd,
+			 unsigned long arg)
+{
+	unsigned char r;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ibm_rtc_instance *rtc = platform_get_drvdata(pdev);
+	unsigned long flags;
+	int ret = 0;
+
+	/* Read-modify-write of register B must not race the IRQ handler */
+	spin_lock_irqsave(&rtc->lock, flags);
+	r = RTC_READ(RTC_REG_B);
+
+	switch (cmd) {
+	case RTC_AIE_OFF:	/* alarm interrupt off */
+		r &= ~RTC_REG_AF;
+		break;
+	case RTC_AIE_ON:	/* alarm interrupt on */
+		r |= RTC_REG_AF;
+		break;
+	case RTC_UIE_OFF:	/* update interrupt off */
+		r &= ~RTC_REG_UF;
+		break;
+	case RTC_UIE_ON:	/* update interrupt on */
+		r |= RTC_REG_UF;
+		break;
+	case RTC_PIE_OFF:	/* periodic interrupt off */
+		r &= ~RTC_REG_PF;
+		break;
+	case RTC_PIE_ON:	/* periodic interrupt on */
+		r |= RTC_REG_PF;
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+
+	/* Note: register B is written back even for unknown commands */
+	RTC_WRITE(r, RTC_REG_B);
+	spin_unlock_irqrestore(&rtc->lock, flags);
+
+	return ret;
+}
+
+/*
+ * /proc reporting callback for the RTC class core.
+ *
+ * Prints the chip's data mode (binary vs BCD, from register B) and the
+ * backup-battery state (VRT bit in register D).  Always returns 0.
+ */
+static int ibm_rtc_proc(struct device *dev, struct seq_file *seq)
+{
+	unsigned char reg;
+
+	DBG(dev, "ibm_rtc_proc" NL);
+
+	reg = RTC_READ(RTC_REG_B);
+
+	seq_printf(seq, "mode\t\t: %s\n",
+		   (reg & RTC_REG_B_DM) ? "binary" : "bcd");
+
+	reg = RTC_READ(RTC_REG_D);
+
+	seq_printf(seq, "battery\t\t: %s\n",
+		   (reg & RTC_REG_D_VRT) ? "ok" : "exhausted");
+
+	return 0;
+}
+
+/*
+ * IRQ handler for the RTC
+ *
+ * Reading register C acknowledges/clears the pending interrupt causes;
+ * the flag bits read back tell us which events fired.  The events are
+ * forwarded to the RTC core via rtc_update_irq() with RTC_IRQF set.
+ */
+static irqreturn_t ibm_rtc_interrupt(int irq, void *dev_id)
+{
+	unsigned char reg;
+	struct platform_device *pdev = to_platform_device((struct device *)dev_id);
+	struct ibm_rtc_instance *dev = platform_get_drvdata(pdev);
+	unsigned char events = RTC_IRQF;
+
+	/*
+	 * read and clear interrupt
+	 */
+	spin_lock(&dev->lock);
+	reg = RTC_READ(RTC_REG_C);
+	spin_unlock(&dev->lock);
+
+	DBG(dev, "RTC_REG_C = %04x" NL, reg);
+
+	/* translate hardware flag bits into RTC core event bits */
+	if (reg & RTC_REG_PF)
+		events|= RTC_PF;
+	if (reg & RTC_REG_AF)
+		events|= RTC_AF;
+	if (reg & RTC_REG_UF)
+		events|= RTC_UF;
+
+	rtc_update_irq(dev->rtc, 1, events);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * RTC class operations table.  The alarm callbacks are only wired up
+ * when alarm/IRQ support is compiled in (CONFIG_IBM_RTC_ALARM).
+ */
+static const struct rtc_class_ops ibm_rtc_ops = {
+	.ioctl = ibm_rtc_ioctl,
+	.read_time = ibm_rtc_read_time,
+	.set_time = ibm_rtc_set_time,
+#if defined(CONFIG_IBM_RTC_ALARM)
+	.read_alarm = ibm_rtc_read_alarm,
+	.set_alarm = ibm_rtc_set_alarm,
+#endif
+	.proc = ibm_rtc_proc,
+};
+
+/*
+ * Probe one "ibm,rtc" device-tree node.
+ *
+ * Allocates the per-instance state, registers with the RTC class, then
+ * maps and requests the interrupt.  Fixes over the original version:
+ * the instance spinlock (used by the ioctl and IRQ paths) is now
+ * initialized, the instance is freed and the rtc device unregistered on
+ * every failure path (they used to leak), and the dead duplicate
+ * NO_IRQ check has been removed.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int ibm_rtc_probe(struct of_device *ofdev,
+			 const struct of_device_id *match)
+{
+	unsigned char reg;
+	struct device_node *np = ofdev->node;
+	struct ibm_rtc_instance *dev;
+	struct rtc_device *rtc;
+	int err;
+
+	/*
+	 * Skip unused/unwired RTC.
+	 */
+	if (of_get_property(np, "unused", NULL) || !of_device_is_available(np))
+		return -ENODEV;
+
+	/* Allocate (zeroed) RTC instance state */
+	dev = kzalloc(sizeof(struct ibm_rtc_instance), GFP_KERNEL);
+	if (dev == NULL) {
+		dev_err(&ofdev->dev, "kzalloc failed for dev\n");
+		return -ENOMEM;
+	}
+	/* the lock is taken by ibm_rtc_ioctl()/ibm_rtc_interrupt() */
+	spin_lock_init(&dev->lock);
+
+	rtc = rtc_device_register("ibm_rtc",
+			&ofdev->dev, &ibm_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc)) {
+		err = PTR_ERR(rtc);
+		goto err_free;
+	}
+
+	dev->rtc = rtc;
+	dev->ofdev = ofdev;
+
+	/* Map and request the RTC interrupt */
+	dev->irq = irq_of_parse_and_map(np, 0);
+	if (dev->irq == NO_IRQ) {
+		printk(KERN_ERR "%s: Can't map main interrupt\n", np->full_name);
+		err = -ENODEV;
+		goto err_unreg;
+	}
+
+	err = request_irq(dev->irq, ibm_rtc_interrupt,
+			  IRQF_SHARED, "IBM RTC", &ofdev->dev);
+	if (err)
+		goto err_unreg;
+
+	dev_set_drvdata(&ofdev->dev, dev);
+
+	/* read battery status */
+	reg = RTC_READ(RTC_REG_D);
+	dev_info(&ofdev->dev, "battery %s\n",
+		 (reg & RTC_REG_D_VRT) ? "ok" : "exhausted");
+
+	return 0;
+
+err_unreg:
+	rtc_device_unregister(rtc);
+err_free:
+	kfree(dev);
+	return err;
+}
+
+/*
+ * Tear down one RTC instance.
+ *
+ * Releases the IRQ requested in probe, unregisters the RTC class device
+ * and frees the instance (the original leaked both the IRQ and the
+ * kmalloc'd instance).  Drvdata is read via dev_get_drvdata() to match
+ * the dev_set_drvdata() call in probe.
+ */
+static int ibm_rtc_remove(struct of_device *ofdev)
+{
+	struct ibm_rtc_instance *dev = dev_get_drvdata(&ofdev->dev);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+
+	if (dev == NULL)
+		return 0;
+
+	DBG(dev, "ibm_rtc_remove" NL);
+
+	/* release the interrupt requested in ibm_rtc_probe() */
+	if (dev->irq != NO_IRQ)
+		free_irq(dev->irq, &ofdev->dev);
+
+	if (dev->rtc)
+		rtc_device_unregister(dev->rtc);
+
+	kfree(dev);
+
+	return 0;
+}
+
+/* Device-tree compatible strings served by this driver */
+static const struct of_device_id ibm_rtc_match[] = {
+	{ .compatible = "ibm,rtc", },
+	{}
+};
+
+static struct of_platform_driver ibm_rtc_platform_driver = {
+	.name = "ibm-rtc",
+	.match_table = ibm_rtc_match,
+
+	.probe = ibm_rtc_probe,
+	.remove = ibm_rtc_remove,
+};
+
+/* Module entry point: register the OF platform driver */
+static int __init ibm_rtc_init(void)
+{
+	return of_register_platform_driver(&ibm_rtc_platform_driver);
+}
+
+/* Module exit point: unregister the OF platform driver */
+static void __exit ibm_rtc_exit(void)
+{
+	of_unregister_platform_driver(&ibm_rtc_platform_driver);
+}
+
+MODULE_AUTHOR("Duy Nguyen <dpnguyen@appliedmicro.com>");
+MODULE_DESCRIPTION("IBM RTC driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:ibm-rtc");
+
+module_init(ibm_rtc_init);
+module_exit(ibm_rtc_exit);
diff --git a/drivers/scp/Kconfig b/drivers/scp/Kconfig
new file mode 100755
index 00000000000..b44c6d556cb
--- /dev/null
+++ b/drivers/scp/Kconfig
@@ -0,0 +1,12 @@
+#
+# Character device configuration
+#
+
+menu "SCP support"
+config SCP
+ tristate "SCP device interface"
+ depends on (440EPX || 440GRX || 405EX || 405EXr || APM82181)
+ default n
+ help
+ When selected the SCP device driver is provided.
+endmenu
diff --git a/drivers/scp/Makefile b/drivers/scp/Makefile
new file mode 100755
index 00000000000..8fa43d08906
--- /dev/null
+++ b/drivers/scp/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the kernel spi bus driver.
+#
+amcc_scp-objs := scp-dev.o spi_eeprom.o
+obj-$(CONFIG_SCP) += amcc_scp.o
diff --git a/drivers/scp/scp-dev.c b/drivers/scp/scp-dev.c
new file mode 100755
index 00000000000..b8f286c0342
--- /dev/null
+++ b/drivers/scp/scp-dev.c
@@ -0,0 +1,654 @@
+/*
+ **************************************************************************
+ * drivers/spi/scp-dev.c -- Serial Communications Port
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *
+ * (C) Copyright IBM Corp. 2004 Christophe Lombard <christophe_lombard@fr.ibm.com>
+ *
+ *
+ * Usage:
+ * Transmit data to scp
+ * echo -n "..." > /proc/driver/scp_dev
+ * Example: Write and read the value 0x19 at the address 0x1EE
+
+ echo "Status Read "
+
+ echo -n "CSENA" > /proc/driver/scp_dev # Enable FRAM
+ echo -n "RDSR" > /proc/driver/scp_dev # RDSR: 0x05 to read FRAM status
+ echo -n "0" > /proc/driver/scp_dev
+ echo -n "RXDATA" > /proc/driver/scp_dev # read data
+ echo -n "CSDIS" > /proc/driver/scp_dev # Disable FRAM
+
+
+ #echo "write data "
+
+ echo -n "CSENA" > /proc/driver/scp_dev # Enable FRAM
+ echo -n "WREN" > /proc/driver/scp_dev # WREN: 0x06
+ echo -n "CSDIS" > /proc/driver/scp_dev # Disable FRAM
+
+ echo -n "CSENA" > /proc/driver/scp_dev # Enable FRAM
+ echo -n "WRITE" > /proc/driver/scp_dev # WRITE: 0x02
+ echo -n "01" > /proc/driver/scp_dev # write address on 10 bits. Ex: 0x17EE (max : 3FF)
+ echo -n "EE" > /proc/driver/scp_dev
+ echo -n "19" > /proc/driver/scp_dev # write data on 8 bits. Ex: 19
+ echo -n "CSDIS" > /proc/driver/scp_dev # Disable FRAM
+
+
+ #echo "read data "
+
+ echo -n "CSENA" > /proc/driver/scp_dev # Enable FRAM
+ echo -n "READ" > /proc/driver/scp_dev # READ: 0x03
+ echo -n "1" > /proc/driver/scp_dev # read address on 10 bits. Ex: 0x1EE
+ echo -n "EE" > /proc/driver/scp_dev
+ echo -n "0" > /proc/driver/scp_dev # write dummy data: 0 (Mandatory)
+ echo -n "RXDATA" > /proc/driver/scp_dev # read data
+ echo -n "CSDIS" > /proc/driver/scp_dev # Disable FRAM
+ *
+ * It is important to study the example above to understand how the SCP works.
+ * On Beech board the SPI EEPROM is AT25080A. This affects the max address.
+ * ************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+#include <linux/platform_device.h>
+#include <asm/dcr-native.h>
+#include <linux/of_platform.h>
+
+#undef SCP_DEBUG
+#define SCP_DEBUG
+#include "scp-dev.h"
+
+/*
+ * If you want debugging uncomment:
+ */
+#ifdef SCP_DEBUG
+int scp_verbose = 1;
+#endif
+
+#define DRIVER_VERSION "0.1"
+
+MODULE_DESCRIPTION("AMCC scp driver v" DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+static int major; /* default is dynamic major device number */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+#define MASK_SEL_SCP 0xFFFDFFFF
+#define CS_LOW 0xFFFDFFFF
+#define CS_HIGH 0x00020000
+#define GPIO_MASK 0xFFFFFFF3
+
+/*
+ * ioctl handler for the SCP character device.  No device-specific
+ * commands are implemented; everything is accepted and ignored.
+ */
+static int
+scp_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
+	  unsigned long arg)
+{
+	return 0;
+}
+
+/* File operations, using C99 designated initializers instead of the
+ * obsolete GNU "label:" initializer syntax. */
+static struct file_operations scp_device_fops = {
+	.owner	= THIS_MODULE,
+	.ioctl	= scp_ioctl,
+};
+
+static int scpdev_initialized;
+static void *virtual_scp_add; /* Virtual address to access to the scp core */
+static void *gpio_base;
+
+
+#define scp_readb(addr) in_8((volatile u8 *)(virtual_scp_add + (addr)))
+#define scp_writeb(addr,b) out_8((volatile u8 *)(virtual_scp_add + (addr)), (b))
+
+//static DECLARE_WAIT_QUEUE_HEAD(scp_wait);
+
+/* ***********************************************************************
+ * FUNCTIONS
+ *
+ */
+
+/*
+ **************************************************************************
+ * scp_set_configuration
+ *
+ * Program the SCP core: set the clock divisor modulus to 0x18 and
+ * enable the serial port with Serial Clock Phase (SCPD_EN | SCPD_SCP).
+ * A loopback variant (SCPD_EN | SCPD_LOOP | SCPD_SCP) is kept below,
+ * commented out, for bench testing.
+ *
+ * Return nothing.
+ * ************************************************************************/
+void scp_set_configuration(void)
+{
+	/*
+	 * SCP Clock Divisor Modulus register
+	 */
+	scp_writeb(SCPD_CDM, 0x18);
+
+	/*
+	 * Test alternative -- serial port enabled + internal loopback:
+	 * scp_writeb(SCPD_SPMODE, SCPD_EN | SCPD_LOOP | SCPD_SCP);
+	 */
+
+	/*
+	 * SCP Mode register => Serial port Enabled + Serial Clock Phase
+	 */
+	scp_writeb(SCPD_SPMODE, SCPD_EN | SCPD_SCP);
+}
+
+
+/*
+ **************************************************************************
+ * scp_rx_data
+ *
+ * Read the Receive Data Register. Read-only register.
+ *
+ * Return the unsigned char read.
+ * **********************************************************************/
+unsigned char scp_rx_data(void)
+{
+	unsigned char buffer = 0x00;
+
+	/*
+	 * In loopback mode the received byte would come back inverted
+	 * (buffer = ~scp_readb(SCPD_RXDATA)); in normal mode it is read
+	 * back as-is.
+	 */
+	buffer = (unsigned char) scp_readb(SCPD_RXDATA);
+#ifdef SCP_DEBUG
+	if (scp_verbose)
+		printk("scp_rx_data: receive data: %#X \n", buffer);
+#endif
+
+	return buffer;
+}
+
+
+/*
+ **************************************************************************
+ * scp_tx_data
+ *
+ * Write one byte to the Transmit Data Register and start the transfer
+ * by setting the start bit in the command register.
+ *
+ * Return nothing.
+ * ************************************************************************/
+void scp_tx_data(unsigned char buffer)
+{
+	/*
+	 * write data to txdata
+	 */
+#ifdef SCP_DEBUG
+	if (scp_verbose)
+		printk("scp_tx_data: Transmit data: %#X \n", buffer);
+#endif
+	scp_writeb(SCPD_TXDATA, buffer);
+
+	/*
+	 * start the xfer
+	 */
+	scp_writeb(SCPD_CR, SCPD_STR);
+}
+
+/*
+ **************************************************************************
+ * scp_cs
+ *
+ * Drive the SPI chip-select GPIO line low (assert) or high (deassert)
+ * via the GPIO0 output register.
+ *
+ * @chipid: slave id (currently unused -- a single CS line is wired)
+ * @high:   0 = assert CS (drive low), non-zero = deassert CS (drive high)
+ *
+ * Return nothing.
+ * ************************************************************************/
+void scp_cs(int chipid, int high)
+{
+	ulong val;
+
+	val = in_be32((gpio_base + (GPIO0_OR - GPIO_BASE)));
+	if (!high) {
+		/*
+		 * Clear the CS bit with a bitwise AND.  The original code
+		 * used the logical operator (val && CS_LOW), which collapses
+		 * the whole register value to 0 or 1 and stomps every other
+		 * GPIO output line.
+		 */
+		out_be32((gpio_base + (GPIO0_OR - GPIO_BASE)), val & CS_LOW);
+	} else {
+		out_be32((gpio_base + (GPIO0_OR - GPIO_BASE)), val | CS_HIGH);
+	}
+}
+
+/*
+ **************************************************************************
+ * scp_write_proc: proc interface
+ *
+ * This function is called when the user writes in the interface /proc/driver/scp_dev
+ *
+ * Commands: works on chip 0
+ * CSENA: Enable Chip Select in the EPLD
+ * CSDIS: Disable Chip Select in the EPLD
+ * RXDATA: Read Receive Data register
+ *
+ * WREN: Set Write Enable Latch 0x06
+ * WRDI: Write Disable 0x04
+ * RDSR: Read Status 0x05
+ * WRSR: Write Status 0x01
+ * WRITE: Write Memory Data 0x02
+ * READ: Read Memory Data 0x03
+ *
+ * Return the number of data
+ *
+ * ************************************************************************/
+
+/*
+ * Parse one command word written to /proc/driver/scp_dev (see the usage
+ * examples in the file header) and act on it.  Unrecognized words are
+ * interpreted as a hex byte and transmitted.
+ *
+ * Returns @count (the whole write is always consumed) or -EFAULT if the
+ * user buffer cannot be copied.
+ */
+static int
+scp_write_proc(struct file *file, const char *buffer,
+	       unsigned long count, void *data)
+{
+	char str[10];
+	int val;
+#ifdef SCP_DEBUG
+	scp_verbose = 1;
+#endif
+
+	memset(str, 0, sizeof(str));
+	/*
+	 * Copy at most sizeof(str) - 1 bytes so str is always
+	 * NUL-terminated.  The original copied up to sizeof(str) bytes,
+	 * which could leave an unterminated buffer for simple_strtol()
+	 * to run off the end of.
+	 */
+	if (copy_from_user(str, buffer,
+			   min_t(unsigned long, count, sizeof(str) - 1)))
+		return -EFAULT;
+
+	if (!strncmp(str, "CSENA", 5))
+		scp_cs(0, 0);		/* enable (assert) chip select */
+	else if (!strncmp(str, "CSDIS", 5))
+		scp_cs(0, 1);		/* disable (deassert) chip select */
+	else if (!strncmp(str, "RXDATA", 6))
+		scp_rx_data();		/* read receive-data register */
+	else if (!strncmp(str, "WREN", 4))
+		scp_tx_data(0x06);	/* set write enable latch */
+	else if (!strncmp(str, "WRDI", 4))
+		scp_tx_data(0x04);	/* write disable */
+	else if (!strncmp(str, "RDSR", 4))
+		scp_tx_data(0x05);	/* read status */
+	else if (!strncmp(str, "WRSR", 4))
+		scp_tx_data(0x01);	/* write status */
+	else if (!strncmp(str, "WRITE", 5))
+		scp_tx_data(0x02);	/* write memory data */
+	else if (!strncmp(str, "READ", 4))
+		scp_tx_data(0x03);	/* read memory data */
+	else {
+		/* anything else: hex byte (address or data) to transmit */
+		val = simple_strtol(str, NULL, 16);
+		scp_tx_data((unsigned char) val);
+	}
+	return count;
+}
+
+/*
+ **************************************************************************
+ * scp_int_handler: Handle general SCP interrupts.
+ *
+ * Reads the status register and, when a received byte is ready
+ * (SCPD_RBR set), drains it via scp_rx_data().
+ * ************************************************************************/
+static irqreturn_t scp_int_handler(int irq, void *dev_id)
+{
+	unsigned char status_register;
+
+	status_register = scp_readb(SCPD_SR);
+	/* Serial data receive is complete and RxD is available */
+	if (status_register & SCPD_RBR) {
+#ifdef SCP_DEBUG
+		if (scp_verbose)
+			printk("Int-> ");
+#endif
+		scp_rx_data();
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * scp_io - perform one scatter/gather SPI transaction.
+ *
+ * @chipid:    slave to select
+ * @desc:      timing description; tcss is used as the setup/poll delay
+ * @inbufs:    NULL-terminated array of TX buffer pointers (may be NULL)
+ * @incounts:  per-buffer TX byte counts, parallel to @inbufs
+ * @outbufs:   array of RX buffer pointers (may be NULL)
+ * @outcounts: per-buffer RX byte counts, parallel to @outbufs
+ * @cansleep:  unused
+ *
+ * Returns 0 on success, -1 on controller timeout.  Chip select is now
+ * deasserted on *every* error path (the RX-timeout path used to leave
+ * it asserted), and the TX pointers are only dereferenced after the
+ * NULL checks (the original read *inp before checking inp).
+ */
+int scp_io(int chipid, struct spi_dev_desc *desc,
+	   const unsigned char **inbufs, unsigned int *incounts,
+	   unsigned char **outbufs, unsigned int *outcounts, int cansleep)
+{
+	unsigned int incount, outcount, tries;
+	const unsigned char *inp;
+	unsigned char *outp;
+	int extra_clock = 0;
+
+	/* CS 'L' */
+	scp_cs(chipid, 0);
+	udelay(desc->tcss);
+#ifdef SCP_DEBUG
+	scp_verbose = 0;
+#endif
+	/* do scatter TX */
+	inp = inbufs ? *inbufs : NULL;
+	if (inp) {
+		/* RDSR commands need extra clocks after the opcode */
+		extra_clock = (*inp == ATMEL_RDSR) ? 1 : 0;
+		incount = *incounts;
+	} else {
+		incount = 0;
+	}
+	while (incount && inp) {
+		tries = 0;
+		while (scp_readb(SCPD_SR) & SCPD_SR_BUSY) {
+			udelay(desc->tcss);
+			if (tries++ > 20) {
+				scp_cs(chipid, 1);
+				return -1;
+			}
+		}
+		scp_tx_data(*inp++);
+		incount--;
+		if (!incount && *(++inbufs)) {
+			incount = *(++incounts);
+			inp = *(inbufs);
+		}
+	}
+#ifdef SCP_DEBUG
+	scp_verbose = 0;
+#endif
+	if (extra_clock) {
+		scp_tx_data(0);
+		scp_tx_data(0);
+	}
+	/* do scatter RX */
+	outp = outbufs ? *outbufs : NULL;
+	outcount = outp ? *outcounts : 0;
+	while (outcount && outp) {
+		scp_tx_data(0);	/* dummy, for clock */
+		tries = 0;
+		while (!(scp_readb(SCPD_SR) & SCPD_SR_RBR)) {
+			udelay(desc->tcss);
+			if (tries++ > 5) {
+				/* deassert CS on this error path too */
+				scp_cs(chipid, 1);
+				return -1;
+			}
+		}
+		*outp++ = scp_rx_data();
+		outcount--;
+	}
+	udelay(desc->tcss);
+	/* CS 'H' */
+	scp_cs(chipid, 1);
+
+	return 0;
+}
+
+/*
+ * Map the SCP interrupt from the device tree and request it.
+ *
+ * Returns the Linux irq number on success, NO_IRQ on failure.
+ *
+ * NOTE(review): on request_irq() failure the mapping obtained from
+ * irq_of_parse_and_map() is never disposed, and NO_IRQ doubles as the
+ * error return value -- confirm NO_IRQ cannot collide with a valid irq
+ * number on this platform.
+ */
+static int __devinit scp_request_irq(struct of_device *ofdev,
+				  struct platform_device *dev)
+{
+	struct device_node *np = ofdev->node;
+	int irq;
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (irq == NO_IRQ) {
+		dev_err(&ofdev->dev, "irq_of_parse_and_map failed\n");
+		return NO_IRQ;
+	}
+
+	/* request interrupt */
+	if (request_irq(irq, scp_int_handler , 0, SCP_NAME, dev)) {
+		dev_err(&ofdev->dev, "request_irq %d failed\n", irq);
+		return NO_IRQ;
+	}
+
+	return irq;
+}
+
+/*
+ **************************************************************************
+ * scp_remove: undo everything scp_probe() set up, guarded by the
+ * scpdev_initialized counter.
+ *
+ * NOTE(review): the proc entry was created as SCP_PROC_NAME
+ * ("driver/scp_dev") but is removed here as "AMCC-SCP", and the irq
+ * freed is the hardwired SCP_IRQ rather than the number actually
+ * returned by irq_of_parse_and_map() in scp_request_irq().  Both look
+ * wrong -- confirm before relying on clean module unload.
+ *
+ * Return nothing.
+ * ************************************************************************/
+static int __devexit scp_remove(struct of_device *ofdev)
+{
+	if (scpdev_initialized > 0) {
+		free_irq(SCP_IRQ, NULL);
+		iounmap(virtual_scp_add);
+		virtual_scp_add = NULL;
+		iounmap(gpio_base);
+		gpio_base = NULL;
+		release_mem_region(SCP_PHYS_ADD, 24);
+		remove_proc_entry("AMCC-SCP", NULL);
+		unregister_chrdev(major, "AMCC-SCP");
+		scpdev_initialized--;
+	}
+	return 0;
+}
+
+/*
+ * Probe the SCP controller: map its registers and the GPIO block,
+ * configure the pin mux for SPI operation, register the character
+ * device and proc interface, hook the interrupt and initialize the
+ * SPI slave table.
+ *
+ * Fixes over the original version:
+ *  - the create_proc_entry() failure path now unregisters the chrdev
+ *    (it used to fall through and leak the registration);
+ *  - the proc entry is removed under its real name (SCP_PROC_NAME);
+ *  - spi_dev[] is pre-cleared so the error path never kfree()s an
+ *    uninitialized pointer;
+ *  - scp_dev is freed on every error path (it used to leak).
+ *
+ * Returns 0 on success or a negative errno (-1 if already probed).
+ */
+static int __init scp_probe(struct of_device *ofdev,
+			    const struct of_device_id *match)
+{
+	SPI_DEV *spi_dev[NUM_SPI_SLAVES];
+	struct proc_dir_entry *pentry;
+	struct platform_device *scp_dev;
+	int retval = 0, i;
+	ulong val;
+
+	if (scpdev_initialized)
+		return -1;
+
+	/* pre-clear so the unwind path can kfree() unconditionally */
+	for (i = 0; i < NUM_SPI_SLAVES; i++)
+		spi_dev[i] = NULL;
+
+	scp_dev = kzalloc(sizeof(*scp_dev), GFP_KERNEL);
+	if (!scp_dev) {
+		dev_err(&ofdev->dev, "failed to allocate device data\n");
+		return -ENOMEM;
+	}
+	dev_set_drvdata(&ofdev->dev, scp_dev);
+
+	if (request_mem_region(SCP_PHYS_ADD, 24, "AMCC-SCP") == NULL) {
+		retval = -EBUSY;
+		goto err0;
+	}
+	virtual_scp_add = ioremap(SCP_PHYS_ADD, 24);
+	if (virtual_scp_add == NULL) {
+		retval = -ENOMEM;
+		goto err1;
+	}
+	gpio_base = ioremap(GPIO_BASE, 0x100);
+	if (gpio_base == NULL) {
+		retval = -ENOMEM;
+		goto err2;
+	}
+
+	/* select SCP */
+	val = SDR_READ(DCRN_SDR_PFC1);
+	SDR_WRITE(DCRN_SDR_PFC1, val & MASK_SEL_SCP);
+
+	/* enable GPIO output for pin 9, 10, 11 and 14 */
+	val = SDR_READ(DCRN_SDR_PFC0);
+	SDR_WRITE(DCRN_SDR_PFC0, (val & 0xFF8FFFFF) | 0x00700000);
+
+	val = SDR_READ(DCRN_SDR_PFC0);
+	SDR_WRITE(DCRN_SDR_PFC0, (val & 0xFFFDFFFF) | 0x00020000);
+
+	/* configure GPIO 9 and 11 as SPIClkOut and SPIDO */
+	val = in_be32((gpio_base+(GPIO0_OSRL-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_OSRL-GPIO_BASE)), (val & 0xFFFFCCFF) | 0x00001100);
+
+	val = in_be32((gpio_base+(GPIO0_TSRL-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_TSRL-GPIO_BASE)), (val & 0xFFFFCCFF) | 0x00001100);
+
+	val = in_be32((gpio_base+(GPIO0_ISR1L-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_ISR1L-GPIO_BASE)), (val & 0xFFFFF3FF) | 0x00000400);
+
+	/* configure GPIO14 as chip select */
+	val = in_be32((gpio_base+(GPIO0_TCR-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_TCR-GPIO_BASE)), (val & 0xFFFDFFFF) | 0x00020000);
+
+	val = in_be32((gpio_base+(GPIO0_TSRL-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_TSRL-GPIO_BASE)), (val & 0xFFFFFFF3) | 0x00000000);
+
+	val = in_be32((gpio_base+(GPIO0_OSRL-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_OSRL-GPIO_BASE)), (val & 0xFFFFFFF3) | 0x00000000);
+
+	val = in_be32((gpio_base+(GPIO0_ODR-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_ODR-GPIO_BASE)), (val & 0xFFFDFFFF) | 0x00000000);
+
+	val = in_be32((gpio_base+(GPIO0_ISR1L-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_ISR1L-GPIO_BASE)), val & GPIO_MASK);
+
+	val = in_be32((gpio_base+(GPIO0_ISR2L-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_ISR2L-GPIO_BASE)), val & GPIO_MASK);
+
+	val = in_be32((gpio_base+(GPIO0_ISR3L-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_ISR3L-GPIO_BASE)), val & GPIO_MASK);
+
+	/* CS high, b14 <= '1' */
+	val = in_be32((gpio_base+(GPIO0_OR-GPIO_BASE)));
+	out_be32((gpio_base+(GPIO0_OR-GPIO_BASE)), val | CS_HIGH );
+
+	retval = register_chrdev(major, "AMCC-SCP", &scp_device_fops);
+	if (retval < 0) {
+		retval = -ENODEV;
+		goto err3;
+	}
+	/* Init the SCP Core */
+	scp_set_configuration();
+	/* Create proc entry */
+	pentry = create_proc_entry(SCP_PROC_NAME, 0200, NULL);
+	if (pentry == NULL) {
+		retval = -ENODEV;
+		goto err4;
+	}
+	pentry->write_proc = scp_write_proc;
+	/* Request SCP interrupt */
+	printk(KERN_INFO "SCP: Requesting SCP irq %d ...\n", SCP_IRQ);
+	if (scp_request_irq(ofdev, scp_dev) == NO_IRQ) {
+		printk(KERN_INFO "SCP: Requesting SCP irq %d ...fail\n", SCP_IRQ);
+		retval = -EINVAL;
+		goto err5;
+	}
+
+	for (i = 0; i < NUM_SPI_SLAVES; i++) {
+		/* Allocate SPI Flash device */
+		spi_dev[i] = kmalloc(sizeof(SPI_DEV), GFP_KERNEL);
+		if (spi_dev[i] == NULL) {
+			retval = -ENOMEM;
+			goto err6;
+		}
+		spi_dev[i]->slaveid = i;
+		retval = beech_scp_init(spi_dev[i]);
+		if (retval != 0) {
+			printk(KERN_ERR
+			       "beech_scp_init(): %s beech_scp_init err 0x%08x\n",
+			       DEVICE_NAME, retval);
+			goto err6;
+		}
+	}
+	scpdev_initialized++;
+
+	return 0;
+
+err6:
+	for (i = 0; i < NUM_SPI_SLAVES; i++)
+		kfree(spi_dev[i]);
+	free_irq(SCP_IRQ, NULL);
+err5:
+	remove_proc_entry(SCP_PROC_NAME, NULL);
+err4:
+	unregister_chrdev(major, "AMCC-SCP");
+err3:
+	iounmap(gpio_base);
+	gpio_base = NULL;
+err2:
+	iounmap(virtual_scp_add);
+	virtual_scp_add = NULL;
+err1:
+	release_mem_region(SCP_PHYS_ADD, 24);
+err0:
+	dev_set_drvdata(&ofdev->dev, NULL);
+	kfree(scp_dev);
+	return retval;
+}
+
+/* Device-tree compatible strings served by this driver */
+static const struct of_device_id amcc_scp_match[] = {
+	{ .compatible = "amcc,scp-405ex", },
+	{}
+};
+
+static struct of_platform_driver amcc_scp_driver = {
+	.name = "AMCC-SCP",
+	.match_table = amcc_scp_match,
+	.probe = scp_probe,
+	.remove = __devexit_p(scp_remove),
+};
+
+/* Module entry point: register the OF platform driver */
+static int __init scp_init(void)
+{
+	return of_register_platform_driver(&amcc_scp_driver);
+}
+
+/* Module exit point: unregister the OF platform driver */
+static void __exit scp_exit(void)
+{
+	of_unregister_platform_driver(&amcc_scp_driver);
+}
+
+module_init(scp_init);
+module_exit(scp_exit);
+
+MODULE_AUTHOR("christophe Lombard <christophe_lombard@fr.ibm.com>");
+MODULE_DESCRIPTION("SCP /dev entries driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scp/scp-dev.h b/drivers/scp/scp-dev.h
new file mode 100755
index 00000000000..4e7c385a596
--- /dev/null
+++ b/drivers/scp/scp-dev.h
@@ -0,0 +1,196 @@
+/* **************************************************************************
+ * drivers/spi/scp-dev.c -- Serial Communications Port
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ *
+ * (C) Copyright IBM Corp. 2004 Christophe Lombard <christophe_lombard@fr.ibm.com>
+ *
+ *
+ * Usage:
+ * Transmit data to scp
+ * echo "ad" > /proc/driver/scp_dev
+ *
+ * **************************************************************************/
+#include <linux/mtd/mtd.h> /* required for mtd support */
+
+/* SDR read/write helper macros */
+#define DCRN_SDR_CONFIG_ADDR 0x000E
+#define DCRN_SDR_CONFIG_DATA 0x000F
+
+#define SDR_READ(offset) ({ \
+ mtdcr(DCRN_SDR_CONFIG_ADDR, offset); \
+ mfdcr(DCRN_SDR_CONFIG_DATA);})
+#define SDR_WRITE(offset, data) ({ \
+ mtdcr(DCRN_SDR_CONFIG_ADDR, offset); \
+ mtdcr(DCRN_SDR_CONFIG_DATA, data);})
+
+// Physical address
+#define SCP_PHYS_ADD 0x4EF600900ULL
+/* GPIO address */
+#define GPIO_BASE (0x4EF600B00ULL)
+#define GPIO0_OR (GPIO_BASE+0x0)
+#define GPIO0_TCR (GPIO_BASE+0x4)
+#define GPIO0_OSRL (GPIO_BASE+0x8)
+#define GPIO0_OSRH (GPIO_BASE+0xC)
+#define GPIO0_TSRL (GPIO_BASE+0x10)
+#define GPIO0_TSRH (GPIO_BASE+0x14)
+#define GPIO0_ODR (GPIO_BASE+0x18)
+#define GPIO0_ISR1L (GPIO_BASE+0x30)
+#define GPIO0_ISR1H (GPIO_BASE+0x34)
+#define GPIO0_ISR2L (GPIO_BASE+0x38)
+#define GPIO0_ISR2H (GPIO_BASE+0x3C)
+#define GPIO0_ISR3L (GPIO_BASE+0x40)
+#define GPIO0_ISR3H (GPIO_BASE+0x44)
+/* Function Selection register */
+#define DCRN_SDR_PFC0 0x4100
+#define DCRN_SDR_PFC1 0x4101
+
+#define SCP_IRQ 34
+#define SCP_NAME "Serial Communications Port"
+#define SCP_PROC_NAME "driver/scp_dev"
+
+/*-------------------------------------------------------------------------*/
+/* Device Driver Control Flags (combinable flags parameters) */
+/*-------------------------------------------------------------------------*/
+/* Flags used in SCPD_Set_Configuration API */
+#define SCPD_SCP 0x10 /* Serial Clock Phase */
+#define SCPD_EN 0x08 /* Serial Port Enabled */
+#define SCPD_RD 0x04 /* Data bit 7 transfer first */
+#define SCPD_CI 0x02 /* Clock inverse */
+#define SCPD_LOOP 0x01 /* Internal Loopback */
+
+/*-------------------------------------------------------------------------*/
+/* Device Driver Control values */
+/*-------------------------------------------------------------------------*/
+/* SCPD_Tx_Char values */
+#define SCPD_STR 0x01 /* start of xfer */
+
+/* SCPD_RBR values */
+#define SCPD_RBR 0x01 /* Rx Byte Ready */
+
+/*----------------------------------------------------------------------*/
+/* Core Register Address Map */
+/*----------------------------------------------------------------------*/
+#define SCPD_SPMODE 0x0000
+#define SCPD_RXDATA 0x0001
+#define SCPD_TXDATA 0x0002
+#define SCPD_CR 0x0003
+#define SCPD_SR 0x0004
+#define SCPD_CDM 0x0006
+
+#define SCPD_SR_BUSY 0x2
+#define SCPD_SR_RBR 0x1
+
+#define MAF_NAME_STR_LEN 256
+#define MEM_TYPE_STR_LEN 256
+
+#define DEVICE_NAME "scp"
+#define DEVICE_REGS "ssi_regs"
+#define NUM_SPI_SLAVES 1
+
+/* ATMEL 250x0 instructions */
+#define ATMEL_WREN 0x06
+#define ATMEL_WRDI 0x04
+#define ATMEL_RDSR 0x05
+#define ATMEL_WRSR 0x01
+#define ATMEL_READ 0x03
+#define ATMEL_WRITE 0x02
+#define ATMEL_WRITE_SIZE 32
+
+#define ATMEL_SR_BSY 0x01
+#define ATMEL_SR_WEN 0x02
+#define ATMEL_SR_BP0 0x04
+#define ATMEL_SR_BP1 0x08
+
+
+
+/*******************************************************************************
+ * struct spi_chip
+ * Data structure for SPI Flash chip
+ @read_buf : read data from the chip into the buffer
+ @write_buf : write data from the buffer to the chip
+ @select_chip : select chip number
+ @hwcontrol : hw specific function for accessing control-lines
+ @dev_ready : hw specific function for device ready/busy ?
+ @chip_lock : lock to protect access
+ @wq : wait queue to sleep if SPI operation in progress
+ @state : current state of SPI device
+ @options : various chip options
+ @numchips : number of physical chips
+ @maf_name : Manufacturer name, obtained from READ_ID call
+ @mem_type : Memory Type, obtained from READ_ID call
+ @chipsize : Memory Capacity, obtained from READ_ID call
+ @numsectors : number of sectors on flash chip
+ @sectorsize : size of each sector
+ @numpages : number of pages on flash chip
+ @pagesize : size of each page
+ @slaveid : slave/flash chip number
+******************************************************************************/
+typedef struct spi_chip
+{
+ /* SPI devices list */
+ struct list_head list;
+
+ void (*select_chip)(int chip, int on_off);
+
+ int (*single_command)(struct mtd_info *mtd,
+ const int command);
+
+ int (*read_buf)(struct mtd_info *mtd, ulong addr,
+ u_char *data, int data_len);
+
+ int (*write_buf)(struct mtd_info *mtd, ulong addr,
+ const u_char *data, int data_len);
+
+ spinlock_t chip_lock;
+ wait_queue_head_t wq;
+ uint options;
+ int manuf_id;
+ int id_mem_type;
+ int chipsize;
+ int numsectors;
+ int sectorsize;
+ int numpages;
+ int chip_delay;
+ int slaveid;
+ void *priv;
+} SPI_DEV;
+
+/* SPI */
+struct spi_dev_desc {
+ unsigned int baud;
+ unsigned int tcss, tcsh, tcsr; /* CS setup/hold/recovery time */
+ unsigned int byteorder:1; /* 0:LSB-First, 1:MSB-First */
+ unsigned int polarity:1; /* 0:High-Active */
+ unsigned int phase:1; /* 0:Sample-Then-Shift */
+};
+
+
+extern unsigned char scp_rx_data(void);
+extern void scp_tx_data(unsigned char buffer);
+extern int scp_io(int chipid, struct spi_dev_desc *desc,
+ const unsigned char **inbufs, unsigned int *incounts,
+ unsigned char **outbufs, unsigned int *outcounts,
+ int cansleep);
+extern int beech_scp_init(SPI_DEV * s);
+extern void scp_cs(int chipid, int on);
+
+#ifdef SCP_DEBUG
+extern int scp_verbose;
+#endif
+
diff --git a/drivers/scp/spi_eeprom.c b/drivers/scp/spi_eeprom.c
new file mode 100755
index 00000000000..47c83a4679d
--- /dev/null
+++ b/drivers/scp/spi_eeprom.c
@@ -0,0 +1,350 @@
+/*
+ * spi_eeprom.c
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/spinlock.h>
+#include <linux/mtd/partitions.h> /* for mtd partitions */
+
+//#undef SCP_DEBUG
+#define SCP_DEBUG
+#include "scp-dev.h"
+DEFINE_SPINLOCK(spi_eeprom_lock);
+
+static struct spi_dev_desc eeprom_dev_desc = {
+ .baud = 1500000, /* 1.5Mbps */
+ .tcss = 10,
+ .tcsh = 10,
+ .tcsr = 1,
+ .byteorder = 1, /* MSB-First */
+ .polarity = 0, /* High-Active */
+ .phase = 0, /* Sample-Then-Shift */
+
+};
+
+static struct mtd_info beech_spi_mtd[NUM_SPI_SLAVES];
+
+static struct mtd_partition spi_partition_allmem[] = {
+ {
+ .name = "scp",
+ .offset = 0x0,
+ .size = 0x400,
+ },
+};
+
+static int
+spi_eeprom_io(int chipid,
+ const unsigned char **inbufs, unsigned int *incounts,
+ unsigned char **outbufs, unsigned int *outcounts)
+{
+ return scp_io(chipid, &eeprom_dev_desc,
+ inbufs, incounts, outbufs, outcounts, 0);
+}
+
+int spi_eeprom_write_enable(int chipid, int enable)
+{
+ unsigned char inbuf[1];
+ const unsigned char *inbufs[2];
+ unsigned int incounts[2], outcounts[1];
+ unsigned long flags;
+ int stat;
+
+ inbuf[0] = enable ? ATMEL_WREN : ATMEL_WRDI;
+ inbufs[0] = inbuf;
+ incounts[0] = sizeof(inbuf);
+ inbufs[1] = NULL;
+ incounts[1] = 0;
+ outcounts[0] = 0;
+ spin_lock_irqsave(&spi_eeprom_lock, flags);
+ stat = spi_eeprom_io(chipid, inbufs, incounts, NULL, outcounts);
+ spin_unlock_irqrestore(&spi_eeprom_lock, flags);
+ return stat;
+}
+
+static int spi_eeprom_read_status_nolock(int chipid)
+{
+ unsigned char inbuf[1], outbuf[1];
+ const unsigned char *inbufs[2];
+ unsigned char *outbufs[1];
+ unsigned int incounts[2], outcounts[1];
+ int stat;
+
+ inbuf[0] = ATMEL_RDSR;
+ inbufs[0] = inbuf;
+ incounts[0] = sizeof(inbuf);
+ inbufs[1] = NULL; incounts[1] = 0; /* NULL-terminate like the sibling callers; was left uninitialized */
+ outbufs[0] = outbuf;
+ outcounts[0] = sizeof(outbuf);
+ stat = spi_eeprom_io(chipid, inbufs, incounts, outbufs, outcounts);
+ if (stat < 0) {
+ printk(KERN_DEBUG "%s bail\n", __FUNCTION__);
+ return stat;
+ }
+ return outbuf[0];
+}
+
+int spi_eeprom_read_status(int chipid)
+{
+ unsigned long flags;
+ int stat;
+
+ spin_lock_irqsave(&spi_eeprom_lock, flags);
+ stat = spi_eeprom_read_status_nolock(chipid);
+ spin_unlock_irqrestore(&spi_eeprom_lock, flags);
+ return stat;
+}
+
+
+int spi_eeprom_read(struct mtd_info *mtd, ulong address, u_char * buf,
+ int len)
+{
+ unsigned char inbuf[3];
+ const unsigned char *inbufs[2];
+ unsigned char *outbufs[1];
+ unsigned int incounts[2], outcounts[1];
+ unsigned long flags;
+ int stat;
+
+ SPI_DEV *spi_dev = mtd->priv;
+
+ address &= 0xffff;
+ inbuf[0] = ATMEL_READ;
+ inbuf[1] = address >> 8;
+ inbuf[2] = address & 0xff;
+
+ inbufs[0] = inbuf;
+ incounts[0] = sizeof(inbuf);
+ inbufs[1] = NULL;
+ incounts[1] = 0;
+ outbufs[0] = buf;
+ outcounts[0] = len;
+ spin_lock_irqsave(&spi_eeprom_lock, flags);
+ stat = spi_eeprom_io(spi_dev->slaveid, inbufs, incounts,
+ outbufs, outcounts);
+ spin_unlock_irqrestore(&spi_eeprom_lock, flags);
+ return stat;
+}
+
+int spi_eeprom_write(struct mtd_info *mtd, ulong address,
+ const u_char * buf, int len)
+{
+ unsigned char inbuf[3];
+ const unsigned char *inbufs[3];
+ unsigned int incounts[3], outcounts[1];
+ unsigned long flags;
+ int i, stat, w_len, remain;
+
+ SPI_DEV *spi_dev = mtd->priv;
+
+ remain = len;
+ while (remain > 0) {
+ stat = spi_eeprom_write_enable(spi_dev->slaveid, 1);
+ if (stat < 0)
+ return stat;
+ stat = spi_eeprom_read_status(spi_dev->slaveid);
+ if (stat < 0)
+ return stat;
+ if (!(stat & ATMEL_SR_WEN))
+ return -EPERM;
+
+ inbuf[0] = ATMEL_WRITE;
+ address &= 0xffff;
+ inbuf[1] = address >> 8;
+ inbuf[2] = address & 0xff;
+
+ inbufs[0] = inbuf;
+ inbufs[1] = buf;
+ inbufs[2] = NULL;
+
+ incounts[0] = sizeof(inbuf);
+ if (address & (ATMEL_WRITE_SIZE-1)) {
+ w_len = ATMEL_WRITE_SIZE - (address & (ATMEL_WRITE_SIZE-1));
+ if (remain <= w_len) {
+ w_len = remain;
+ }
+ }
+ else {
+ w_len = (remain>= ATMEL_WRITE_SIZE) ? ATMEL_WRITE_SIZE:remain;
+ }
+ remain -= w_len;
+ address += w_len;
+ buf += w_len;
+
+ incounts[1] = w_len;
+ incounts[2] = 0;
+
+ outcounts[0] = 0;
+ spin_lock_irqsave(&spi_eeprom_lock, flags);
+ stat = spi_eeprom_io(spi_dev->slaveid, inbufs, incounts,
+ NULL, outcounts);
+ if (stat < 0)
+ goto unlock_return;
+ /* write start. max 10ms */
+ for (i = 5; i > 0; i--) {
+ mdelay(2);
+ stat = spi_eeprom_read_status_nolock(spi_dev->slaveid);
+#ifdef SCP_DEBUG
+ printk("w%d 0x%x\n",i, stat);
+#endif
+ if (stat < 0)
+ goto unlock_return;
+ if (!(stat & ATMEL_SR_BSY)) {
+ /* Should bail out here */
+ //printk("bail ATMEL_SR_BSY\n");
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&spi_eeprom_lock, flags);
+ if (i == 0) {
+ printk(KERN_DEBUG "bail -EIO\n");
+ break;
+ //return -EIO;
+ }
+ }
+ return (len-remain);
+unlock_return:
+ printk("bail unlock_return\n");
+ spin_unlock_irqrestore(&spi_eeprom_lock, flags);
+ return stat;
+}
+
+
+/*******************************************************************************
+* Function spi_read
+* This function provides the mtd interface to read the buffer from the SPI flash
+* at the offset
+*******************************************************************************/
+int
+spi_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t * retlen, u_char * buf)
+{
+ int err = 0;
+ SPI_DEV *this = mtd->priv;
+#ifdef SCP_DEBUG
+ printk(KERN_DEBUG "spi_read: from= 0x%08x, len= %zu, buf= %p\n",
+ (uint) from, len, buf);
+#endif
+ err = this->read_buf(mtd, from, buf, len);
+ if (err) {
+ printk(KERN_DEBUG "spi_read: read_data failed with err %d\n", err);
+ goto out;
+ }
+
+ /* Tell the MTD device how many bytes have been read */
+ *retlen = len;
+
+out:
+ return err;
+}
+
+/*******************************************************************************
+* Function spi_write
+* This function provides the mtd interface to write the buffer to the SPI flash
+* at the offset
+*******************************************************************************/
+int
+spi_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t * retlen, const u_char * buf)
+{
+ int err = 0;
+ SPI_DEV *this = mtd->priv;
+
+ printk(KERN_DEBUG "spi_write: to= 0x%08x, len= %zu, buf= %p\n",
+ (uint) to, len, buf);
+ err = this->write_buf(mtd, to, buf, len);
+#ifdef SCP_DEBUG
+ printk(KERN_DEBUG "spi_write: write_buf returned %d\n", err);
+#endif
+ return err;
+}
+
+
+/*******************************************************************************
+* Function spi_scan
+* This function scans for the existence of the slave SPI Flash device
+* by reading the Manufacturer ID, Memory Type and Chip Size.
+*******************************************************************************/
+int spi_scan(struct mtd_info *mtd)
+{
+#ifdef SCP_DEBUG
+ printk(KERN_DEBUG "spi_scan()\n");
+#endif
+ /* Fill in MTD driver data */
+ mtd->type = MTD_DATAFLASH;
+ mtd->read = spi_read;
+ mtd->write = spi_write;
+ mtd->lock = NULL;
+ mtd->unlock = NULL;
+ mtd->writesize=0x00100000;
+ mtd->size = 0x00100000;
+ mtd->name = "AMCC-SCP";
+ mtd->flags |= MTD_WRITEABLE;
+ mtd->owner = THIS_MODULE;
+ mtd->erasesize = 32;
+ return 0;
+}
+
+
+/*******************************************************************************
+ * Function: beech_scp_init
+ * This function is called by the LiveOak SSI device layer
+ * to initialize the device.
+*******************************************************************************/
+int beech_scp_init(SPI_DEV * this)
+{
+ int id = this->slaveid;
+ int err = 0;
+
+#ifdef SCP_DEBUG
+ printk(KERN_DEBUG "beech_spi_init() : %s slave id : %d\n",
+ DEVICE_NAME, id);
+#endif
+ memset(&(beech_spi_mtd[id]), 0, sizeof(beech_spi_mtd[id]));
+
+ /* Link the private data with the MTD structure */
+ beech_spi_mtd[id].priv = (void *) this;
+
+ this->chip_delay = 100;
+
+ this->read_buf = spi_eeprom_read;
+ this->write_buf = spi_eeprom_write;
+
+ /* Init wait queue */
+ //init_waitqueue_head(&this->wq);
+
+ /* Scan to find existence of SPI Flash device */
+ if ( (err = spi_scan(&beech_spi_mtd[id])) != 0) {
+ goto err;
+ }
+
+ /* Add MTD partitions onto the SPI device */
+ add_mtd_partitions(&beech_spi_mtd[id], spi_partition_allmem,
+ sizeof(spi_partition_allmem) /
+ sizeof(spi_partition_allmem[0]));
+err:
+ return err;
+}
+
+/****************************************************************************
+ * Function: beech_spi_cleanup
+ * This function is called by the Pine SCP device layer
+ * on exit.
+*****************************************************************************/
+void beech_spi_cleanup(void)
+{
+ int i;
+#ifdef SCP_DEBUG
+ printk(KERN_DEBUG "beech_spi_cleanup()\n");
+#endif
+ for(i = 0; i < NUM_SPI_SLAVES; i++) {
+ /* Unregister partitions */
+ del_mtd_device(&beech_spi_mtd[i]);
+ }
+}
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 5ed1b828a7c..6b5711e7250 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -91,7 +91,11 @@ static unsigned int skip_txen_test; /* force skip of txen test at init time */
* machine types want others as well - they're free
* to redefine this in their header file.
*/
+#ifdef CONFIG_4xx
+#define is_real_interrupt(irq) (1)
+#else
#define is_real_interrupt(irq) ((irq) != 0)
+#endif
#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
#define CONFIG_SERIAL_DETECT_IRQ 1
diff --git a/drivers/test/Kconfig b/drivers/test/Kconfig
new file mode 100644
index 00000000000..0a5ee717b08
--- /dev/null
+++ b/drivers/test/Kconfig
@@ -0,0 +1,20 @@
+menu "PPC4xx Self Test support"
+
+config PPC4xx_TEST
+ tristate "PowerPC 4xx Self Test support"
+ help
+ PPC4xx self tests are kernel drivers which can be built into the
+ kernel, or can be built as separate driver modules. If these
+ drivers are built into the kernel, then they are run when the
+ kernel loads the test drivers. If these test drivers are built
+ as modules, then use insmod to load and execute the test drivers.
+
+config PPC4xx_L2CACHE_TEST
+ tristate "PowerPC 4xx L2 Cache Self Test support"
+ depends on PPC4xx_TEST
+ help
+ If you say Y here, you will get support for
+ the L2 Cache self test interface. See driver for
+ more information.
+
+endmenu
diff --git a/drivers/test/Makefile b/drivers/test/Makefile
new file mode 100644
index 00000000000..65f83eb9323
--- /dev/null
+++ b/drivers/test/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the PowerPC 4xx Test interface
+#
+
+
+obj-$(CONFIG_PPC4xx_L2CACHE_TEST) += l2cache/
diff --git a/drivers/test/l2cache/Makefile b/drivers/test/l2cache/Makefile
new file mode 100644
index 00000000000..38394dd8c7d
--- /dev/null
+++ b/drivers/test/l2cache/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the PowerPC 4xx Test interface
+#
+
+obj-$(CONFIG_PPC4xx_L2CACHE_TEST) += ppc4xx_l2cache_test_module.o
+
diff --git a/drivers/test/l2cache/ppc4xx_l2cache_test_module.c b/drivers/test/l2cache/ppc4xx_l2cache_test_module.c
new file mode 100644
index 00000000000..71402cd442a
--- /dev/null
+++ b/drivers/test/l2cache/ppc4xx_l2cache_test_module.c
@@ -0,0 +1,389 @@
+/****************************************************************************
+ *
+ * Test driver for the PPC44x L2 Cache core.
+ * This driver will print out the L2 Cache statistics to the /proc/l2cache
+ *
+ * Author: Adam Graham <agraham@amcc.com>
+ * February 26, 2008
+ *
+ * Copyright 2008 Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ ****************************************************************************
+ *
+ * Beware! The kernel PPC44x L2 Cache must be activated for this test.
+ * The example should work on any PPC440EX/GT based board,
+ * maybe also on PPC44x (not yet tested).
+ * To use this module:
+ * 1. Build it as a kernel module
+ * 2. insmod <module>
+ * cat /proc/l2cache/start to start the l2 cache statistics
+ * cat /proc/l2cache/stop to stop the l2 cache statistics
+ * cat /proc/l2cache/stats to read the statistics
+ * 3. rmmod <module>
+ * Everything you want to know will be printed out
+ *
+ */
+
+#include <linux/module.h> /* needed by all modules */
+#include <linux/init.h> /* needed for the module_xxx_macros */
+#include <linux/fs.h> /* needed for registering a device */
+#include <linux/proc_fs.h> /* needed for registering a device */
+//#include "ppc4xx_l2cache_core.h" /* Driver Interface */
+//#include "ppc4xx_l2cache_proc.h" /* ProcFS Interface */
+#include <asm/dcr-regs.h> /* PPC4xx L2 Cache DCR defines */
+#include <asm/dcr.h>
+#include <asm/reg.h>
+
+
+
+
+#define DRV_NAME "ppc4xx_l2cache_test_module"
+#define DRV_VERSION "0.1"
+
+MODULE_DESCRIPTION("PPC4xx L2 Cache test driver");
+MODULE_AUTHOR("agraham@amcc.com");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+uint32_t ppc4xx_l2cache_base; /* Set by ioremap function */
+
+#define DCR_L2C_BASE 0x30
+
+/* #define PPC4xx_L2CACHE_BASEADDR -- disabled: "0xefxxxxx;" is not a valid literal (stray 'x's and ';'); macro is unused */
+
+/* PPC4xx L2 Cache core registers */
+#define PPC4xx_L2CACHE_CONFIG 0x0
+#define PPC4xx_L2CACHE_COMMAND 0x1
+#define PPC4xx_L2CACHE_ADDRESS 0x2
+#define PPC4xx_L2CACHE_DATA 0x3
+#define PPC4xx_L2CACHE_STATUS 0x4
+#define PPC4xx_L2CACHE_CORE_VERSION 0x5
+#define PPC4xx_L2CACHE_SNOOP0 0x6
+#define PPC4xx_L2CACHE_SNOOP1 0x7
+
+/* Function Prototypes */
+static void ppc4xx_l2cache_cleanup(void);
+static int ppc4xx_l2cache_proc_start(char *, char **, off_t, int, int *, void *);
+static int ppc4xx_l2cache_proc_stop(char *, char **, off_t, int, int *, void *);
+static int ppc4xx_l2cache_proc_perf(char *, char **, off_t, int, int *, void *);
+static int ppc4xx_l2cache_proc_query(char *, char **, off_t, int, int *, void *);
+static int ppc4xx_l2cache_proc_regs(char *, char **, off_t, int, int *, void *);
+int ppc4xx_l2cache_createProcEntry(void);
+void ppc4xx_l2cache_removeProcEntry(void);
+static u32 ppc4xx_l2cache_read_reg(u32);
+static u32 ppc4xx_l2cache_read_config_reg(void);
+static u32 ppc4xx_l2cache_read_status_reg(void);
+static u32 ppc4xx_l2cache_diag(u32);
+
+/*
+ * Kernel Logging
+ */
+#define LOG(string, args...) \
+ printk(KERN_INFO DRV_NAME ": " string "\n",##args)
+
+#define ALERT(string, args...) \
+ printk(KERN_ALERT DRV_NAME ": " string "\n",##args)
+
+#define WARNING(string, args...) \
+ printk(KERN_WARNING DRV_NAME ": WARNING, " string "\n",##args)
+
+#define ERR(string, args...) \
+ printk(KERN_ALERT DRV_NAME ": ERROR, " string "\n",##args)
+
+#if defined(CONFIG_PPC4XX_L2CACHE_DEBUG)
+#define DBG(string, args...) \
+ printk(KERN_INFO DRV_NAME ": " string "\n",##args)
+#else
+#define DBG(string, args...) do { } while (0)
+#endif
+
+static inline u32 ppc4xx_l2cache_read_reg(u32 l2_reg)
+{
+ u32 reg = 0; /* initialize: mfdcr is disabled below, returning uninitialized 'reg' is UB */
+
+ //reg = mfdcr(l2_reg);
+
+ return reg;
+}
+
+static inline u32 ppc4xx_l2cache_read_config_reg(void)
+{
+ u32 reg;
+ reg = mfdcr(DCR_L2C_BASE + DCRN_L2C0_CFG);
+ printk("base 0x%08x\n", ppc4xx_l2cache_base);
+ return reg;
+}
+
+static inline u32 ppc4xx_l2cache_read_status_reg(void)
+{
+ u32 reg;
+ reg = mfdcr(DCR_L2C_BASE + DCRN_L2C0_SR);
+ return reg;
+}
+
+/*******************************************************************************
+ * Static Variables:
+ ******************************************************************************/
+static struct proc_dir_entry *proc_driver = NULL; /* /proc/ */
+static struct proc_dir_entry *proc_regs = NULL; /* /proc/driver/l2cache/ */
+static struct proc_dir_entry *proc_start = NULL; /* /proc/driver/l2cache/ */
+static struct proc_dir_entry *proc_stop = NULL; /* /proc/driver/l2cache/ */
+static struct proc_dir_entry *proc_perf = NULL; /* /proc/driver/l2cache/ */
+static struct proc_dir_entry *proc_query = NULL; /* /proc/driver/l2cache/ */
+
+/* Issue L2C diagnostic command */
+static inline u32 ppc4xx_l2cache_diag(u32 addr)
+{
+ asm volatile ("sync" ::: "memory");
+ mtdcr(DCR_L2C_BASE + DCRN_L2C0_ADDR, addr);
+ mtdcr(DCR_L2C_BASE + DCRN_L2C0_CMD, L2C_CMD_DIAG);
+
+ while (!(mfdcr(DCR_L2C_BASE + DCRN_L2C0_SR) & L2C_SR_CC)) {
+ ;
+ }
+ asm volatile ("sync; isync" ::: "memory");
+
+ return mfdcr(DCR_L2C_BASE + DCRN_L2C0_DATA);
+}
+
+static int ppc4xx_l2cache_proc_start(char *buffer, char **start_off,
+ off_t offset, int buff_len, int *eof, void *data)
+{
+ int len = 0;
+ u32 sr = mfdcr(DCR_L2C_BASE + DCRN_L2C0_SR);
+
+#if defined(L2_TEST_DEBUG)
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Start L2C Performance counter.\n");
+#endif
+
+ if (sr & L2C_SR_PCS) {
+ /* Start Performance monitor counters */
+ mtdcr(DCR_L2C_BASE + DCRN_L2C0_CMD, L2C_CMD_STRC);
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance counter started.\n");
+ } else {
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance counter is already started.\n");
+ }
+
+ return len;
+}
+
+static int ppc4xx_l2cache_proc_query(char *buffer, char **start_off,
+ off_t offset, int buff_len, int *eof, void *data)
+{
+ int len = 0;
+ u32 sr = mfdcr(DCR_L2C_BASE + DCRN_L2C0_SR);
+
+#if defined(L2_TEST_DEBUG)
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Query L2C Performance monitor.\n");
+#endif
+
+ if (sr & L2C_SR_PCS) {
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance counter is stopped.\n");
+ } else {
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance counter is running.\n");
+ }
+
+ return len;
+}
+
+static int ppc4xx_l2cache_proc_stop(char *buffer, char **start_off,
+ off_t offset, int buff_len, int *eof, void *data)
+{
+ int len = 0;
+ u32 sr = mfdcr(DCR_L2C_BASE + DCRN_L2C0_SR);
+
+#if defined(L2_TEST_DEBUG)
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Stop L2C Performance counter.\n");
+#endif
+
+ if (sr & L2C_SR_PCS) {
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance counter is already stopped.\n");
+ } else {
+ /* Start Performance monitor counters */
+ mtdcr(DCR_L2C_BASE + DCRN_L2C0_CMD, L2C_CMD_STPC);
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance counter stopped.\n");
+ }
+
+ return len;
+}
+
+static int ppc4xx_l2cache_proc_perf(char *buffer, char **start_off,
+ off_t offset, int buff_len, int *eof, void *data)
+{
+ u32 l2_data;
+ int len = 0;
+ u32 sr = mfdcr(DCR_L2C_BASE + DCRN_L2C0_SR);
+
+#if defined(L2_TEST_DEBUG)
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache perf L2C read Performance Counter registers.\n");
+#endif
+
+ if (sr & L2C_SR_PCS) {
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance counter is not running.\n");
+ } else {
+ /* read diagnostics stats */
+ l2_data = ppc4xx_l2cache_diag(0x22000000);
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance Hit Counter Read 0x%08x\n", l2_data);
+
+ l2_data = ppc4xx_l2cache_diag(0x24000000);
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance Request Counter Read 0x%08x\n", l2_data);
+
+ l2_data = ppc4xx_l2cache_diag(0x28000000);
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Performance Cycle Counter Read 0x%08x\n", l2_data);
+ }
+
+ return len;
+}
+
+static int ppc4xx_l2cache_proc_regs(char *buffer, char **start_off,
+ off_t offset, int buff_len, int *eof, void *data)
+{
+ int len = 0;
+
+#if defined(L2_TEST_DEBUG)
+ len += sprintf(buffer + len,
+ "PPC4xx L2 Cache Registers.\n");
+#endif
+
+ len += sprintf(buffer + len,
+ "PPC4xx L2_CONFIG Register 0x%08x\n",
+ ppc4xx_l2cache_read_config_reg());
+
+ len += sprintf(buffer + len,
+ "PPC4xx L2_STATUS Register 0x%08x\n",
+ ppc4xx_l2cache_read_status_reg());
+
+ return len;
+}
+
+int ppc4xx_l2cache_createProcEntry(void)
+{
+ proc_driver = proc_mkdir("driver/l2cache", NULL);
+
+
+ if (proc_driver == NULL) {
+ ERR("while creating proc directory");
+ return -ENOMEM;
+ }
+
+ proc_regs = create_proc_entry("registers", 0, proc_driver);
+
+ if (proc_regs == NULL) {
+ ERR("while creating proc register file");
+ return -ENOMEM;
+ }else {
+ proc_regs->read_proc = ppc4xx_l2cache_proc_regs;
+ }
+
+ proc_start = create_proc_entry("start", 0, proc_driver);
+
+
+ if (proc_start == NULL) {
+ ERR("while creating proc start file");
+ return -ENOMEM;
+ }else {
+ proc_start->read_proc = ppc4xx_l2cache_proc_start;
+ }
+
+
+ proc_stop = create_proc_entry("stop", 0, proc_driver);
+
+ if (proc_stop == NULL) {
+ ERR("while creating proc stop file");
+ return -ENOMEM;
+ }else {
+ proc_stop->read_proc = ppc4xx_l2cache_proc_stop;
+ }
+
+ proc_perf = create_proc_entry("performance", 0, proc_driver);
+
+ if (proc_perf == NULL) {
+ ERR("while creating proc perf file");
+ return -ENOMEM;
+ }else {
+ proc_perf->read_proc = ppc4xx_l2cache_proc_perf;
+ }
+
+ proc_query = create_proc_entry("query", 0, proc_driver);
+
+ if (proc_query == NULL) {
+ ERR("while creating proc query file");
+ return -ENOMEM;
+ }else {
+ proc_query->read_proc = ppc4xx_l2cache_proc_query;
+ }
+
+ return 0;
+}
+
+static int __init ppc4xx_l2cache_init_module(void)
+{
+ int retval = 0;
+
+ LOG("initializing");
+ LOG("version %s", DRV_VERSION);
+
+ retval = ppc4xx_l2cache_createProcEntry();
+
+ if (retval < 0) {
+ ERR("while registering l2cache test driver.");
+ }
+
+ return retval;
+}
+
+module_init(ppc4xx_l2cache_init_module);
+
+static void __exit ppc4xx_l2cache_cleanup_module(void)
+{
+ LOG("cleaning up");
+
+ ppc4xx_l2cache_cleanup();
+}
+
+module_exit(ppc4xx_l2cache_cleanup_module);
+
+static void
+ppc4xx_l2cache_cleanup(void)
+{
+ ppc4xx_l2cache_removeProcEntry();
+}
+
+void
+ppc4xx_l2cache_removeProcEntry(void)
+{
+ if (proc_regs) remove_proc_entry("registers", proc_driver);
+ if (proc_start) remove_proc_entry("start", proc_driver);
+ if (proc_stop) remove_proc_entry("stop", proc_driver);
+ if (proc_perf) remove_proc_entry("performance", proc_driver);
+ if (proc_query) remove_proc_entry("query", proc_driver);
+ if (proc_driver) remove_proc_entry("driver/l2cache", NULL); /* must match proc_mkdir("driver/l2cache", NULL) */
+}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index a18e3c5dd82..3d83b8d0b31 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -109,8 +109,8 @@ config USB_GADGET_SELECTED
# - discrete ones (including all PCI-only controllers)
# - debug/dummy gadget+hcd is last.
#
-choice
- prompt "USB Peripheral Controller"
+menuconfig USB_PERIPHERAL_CONTROLLER
+ bool "USB Peripheral Controller"
depends on USB_GADGET
help
A USB device uses a controller to talk to its host.
@@ -122,6 +122,8 @@ choice
# Integrated controllers
#
+if USB_PERIPHERAL_CONTROLLER
+
config USB_GADGET_AT91
boolean "Atmel AT91 USB Device Port"
depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9 && !ARCH_AT91SAM9G45
@@ -504,6 +506,101 @@ config USB_LANGWELL
default USB_GADGET
select USB_GADGET_SELECTED
+menuconfig USB_GADGET_DWC_OTG
+ boolean "Synopsys DWC OTG Controller"
+ depends on 405EZ || 405EX || 460EX || APM82181
+ select USB_GADGET_DUALSPEED
+ help
+ This driver provides USB Device Controller support for the
+ Synopsys DesignWare USB OTG Core used on the AMCC 405EZ/405EX/
+ 460EX/APM82181.
+
+ Note that on the 405EZ, this Core provides USB Device Controller
+ function only. It does not act as a true OTG device, and the
+ 'OTG' is slightly misleading.
+
+if USB_GADGET_DWC_OTG
+
+config USB_DWC_OTG
+ tristate
+ depends on USB_GADGET_DWC_OTG
+ default USB_GADGET
+ select USB_GADGET_SELECTED
+
+config DWC_LEGACY_405EX
+ bool "Enable 405EX Legacy Support (lower performance)"
+ default n
+ depends on 405EX
+ select DWC_SLAVE
+ help
+ Enable Legacy 405EX Chip support (Rev 1.0) where DWC DMA is broken.
+ Selecting this option will cause lower performance.
+ Don't select this unless you want to support Rev 1.0 405EX chips (obsolete).
+
+menuconfig DWC_OTG_MODE
+ bool "DWC OTG Mode"
+ depends on 405EX || 460EX || APM82181
+ help
+ Enable selection whether USB OTG should operate in Host or
+ Device mode only
+
+if DWC_OTG_MODE
+config DWC_HOST_ONLY
+ bool "DWC Host Only Mode"
+ depends on 405EX || 460EX || APM82181
+ help
+ DWC Core in only host mode.
+
+config DWC_DEVICE_ONLY
+ bool "DWC Device Only Mode"
+ default y if 405EZ
+ help
+ DWC Core in only device mode.
+
+endif
+
+config DWC_SLAVE
+ bool "DWC Slave Mode"
+ depends on 405EX || 460EX || APM82181
+ default n
+ help
+ Slave mode uses the processor to transfer data.
+ In Slave mode, processor DMA channels can be used if available.
+
+config DWC_USE_PLB_DMA
+ bool "Use PPC4xx PLB DMA (Only for Slave Mode)"
+ depends on DWC_SLAVE
+ depends on 405EX
+ default n
+ select OTG_PLB_DMA
+ select OTG_PLB_DMA_TASKLET
+ select PPC4xx_EDMA
+ help
+ Enable use of PPC4xx DMA engines in Slave Mode.
+ Please ensure PLB DMA channels not in use by any other block.
+
+if DWC_USE_PLB_DMA
+config OTG_PLB_DMA
+ bool
+ default n
+
+config OTG_PLB_DMA_TASKLET
+ bool
+ default n
+
+config PPC4xx_EDMA
+ bool
+ default n
+endif
+
+config DWC_DEBUG
+ bool "Enable DWC Debugging"
+ default n
+ help
+ Enable DWC driver debugging
+
+endif #USB_GADGET_DWC_OTG
+
#
# LAST -- dummy/emulated controller
@@ -541,7 +638,7 @@ config USB_DUMMY_HCD
# NOTE: Please keep dummy_hcd LAST so that "real hardware" appears
# first and will be selected by default.
-endchoice
+endif # USB_PERIPHERAL_CONTROLLER
config USB_GADGET_DUALSPEED
bool
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 9d7b87c52e9..3fdde74c993 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
obj-$(CONFIG_USB_CI13XXX) += ci13xxx_udc.o
obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o
obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
+obj-$(CONFIG_USB_DWC_OTG) += dwc_otg/
#
# USB gadget drivers
diff --git a/drivers/usb/gadget/dwc_otg/Makefile b/drivers/usb/gadget/dwc_otg/Makefile
new file mode 100644
index 00000000000..4d690a46658
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for DWC_otg Highspeed USB controller driver
+#
+
+KBUILD_CPPFLAGS += -Dlinux
+
+obj-$(CONFIG_USB_DWC_OTG) += dwc_otg.o
+
+dwc_otg-y := dwc_otg_driver.o dwc_otg_attr.o dwc_otg_cil.o \
+ dwc_otg_cil_intr.o dwc_otg_pcd.o dwc_otg_pcd_intr.o \
+ dwc_otg_hcd.o dwc_otg_hcd_intr.o dwc_otg_hcd_queue.o
+dwc_otg-$(CONFIG_DWC_USE_PLB_DMA) += ppc4xx_dma.o
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_attr.c b/drivers/usb/gadget/dwc_otg/dwc_otg_attr.c
new file mode 100644
index 00000000000..a3e1e7dff55
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_attr.c
@@ -0,0 +1,785 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_attr.c $
+ * $Revision: #5 $
+ * $Date: 2005/09/15 $
+ * $Change: 537387 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+/** @file
+ *
+ * The diagnostic interface will provide access to the controller for
+ * bringing up the hardware and testing. The Linux driver attributes
+ * feature will be used to provide the Linux Diagnostic
+ * Interface. These attributes are accessed through sysfs.
+ */
+
+/** @page "Linux Module Attributes"
+ *
+ * The Linux module attributes feature is used to provide the Linux
+ * Diagnostic Interface. These attributes are accessed through sysfs.
+ * The diagnostic interface will provide access to the controller for
+ * bringing up the hardware and testing.
+
+
+ The following table shows the attributes.
+ <table>
+ <tr>
+ <td><b> Name</b></td>
+ <td><b> Description</b></td>
+ <td><b> Access</b></td>
+ </tr>
+
+ <tr>
+ <td> mode </td>
+ <td> Returns the current mode: 0 for device mode, 1 for host mode</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hnpcapable </td>
+ <td> Gets or sets the "HNP-capable" bit in the Core USB Configuration Register.
+ Read returns the current value.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> srpcapable </td>
+ <td> Gets or sets the "SRP-capable" bit in the Core USB Configuration Register.
+ Read returns the current value.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> hnp </td>
+ <td> Initiates the Host Negotiation Protocol. Read returns the status.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> srp </td>
+ <td> Initiates the Session Request Protocol. Read returns the status.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> buspower </td>
+ <td> Gets or sets the Power State of the bus (0 - Off or 1 - On)</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> bussuspend </td>
+ <td> Suspends the USB bus.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> busconnected </td>
+ <td> Gets the connection status of the bus</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> gotgctl </td>
+ <td> Gets or sets the Core Control Status Register.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gusbcfg </td>
+ <td> Gets or sets the Core USB Configuration Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> grxfsiz </td>
+ <td> Gets or sets the Receive FIFO Size Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gnptxfsiz </td>
+ <td> Gets or sets the non-periodic Transmit Size Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gpvndctl </td>
+ <td> Gets or sets the PHY Vendor Control Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> ggpio </td>
+ <td> Gets the value in the lower 16-bits of the General Purpose IO Register
+ or sets the upper 16 bits.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> guid </td>
+ <td> Gets or sets the value of the User ID Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> gsnpsid </td>
+ <td> Gets the value of the Synopsys ID Register</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> devspeed </td>
+ <td> Gets or sets the device speed setting in the DCFG register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> enumspeed </td>
+ <td> Gets the device enumeration Speed.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hptxfsiz </td>
+ <td> Gets the value of the Host Periodic Transmit FIFO</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hprt0 </td>
+ <td> Gets or sets the value in the Host Port Control and Status Register</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regoffset </td>
+ <td> Sets the register offset for the next Register Access</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regvalue </td>
+ <td> Gets or sets the value of the register at the offset in the regoffset attribute.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> remote_wakeup </td>
+ <td> On read, shows the status of Remote Wakeup. On write, initiates a remote
+ wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote
+ Wakeup signalling bit in the Device Control Register is set for 1
+ milli-second.</td>
+ <td> Read/Write</td>
+ </tr>
+
+ <tr>
+ <td> regdump </td>
+ <td> Dumps the contents of core registers.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hcddump </td>
+ <td> Dumps the current HCD state.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> hcd_frrem </td>
+ <td> Shows the average value of the Frame Remaining
+ field in the Host Frame Number/Frame Remaining register when an SOF interrupt
+ occurs. This can be used to determine the average interrupt latency. Also
+ shows the average Frame Remaining value for start_transfer and the "a" and
+ "b" sample points. The "a" and "b" sample points may be used during debugging
+ to determine how long it takes to execute a section of the HCD code.</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> rd_reg_test </td>
+ <td> Displays the time required to read the GNPTXFSIZ register many times
+ (the output shows the number of times the register is read).</td>
+ <td> Read</td>
+ </tr>
+
+ <tr>
+ <td> wr_reg_test </td>
+ <td> Displays the time required to write the GNPTXFSIZ register many times
+ (the output shows the number of times the register is written).</td>
+ <td> Read</td>
+ </tr>
+
+ </table>
+
+ Example usage:
+ To get the current mode:
+ cat /sys/devices/lm0/mode
+
+ To power down the USB:
+ echo 0 > /sys/devices/lm0/buspower
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/stat.h> /* permission constants */
+
+#include <asm/io.h>
+
+#include "linux/dwc_otg_plat.h"
+#include "dwc_otg_attr.h"
+#include "dwc_otg_driver.h"
+#include "dwc_otg_pcd.h"
+#include "dwc_otg_hcd.h"
+
+/*
+ * MACROs for defining sysfs attribute
+ */
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \
+{ \
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\
+ uint32_t val; \
+ val = dwc_read_reg32 (_addr_); \
+ val = (val & (_mask_)) >> _shift_; \
+ return sprintf (buf, "%s = 0x%x\n", _string_, val); \
+}
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\
+ uint32_t set = simple_strtoul(buf, NULL, 16); \
+ uint32_t clear = set; \
+ clear = ((~clear) << _shift_) & _mask_; \
+ set = (set << _shift_) & _mask_; \
+ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \
+ dwc_modify_reg32(_addr_, clear, set); \
+ return count; \
+}
+
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store);
+
+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL);
+
+/*
+ * MACROs for defining sysfs attribute for 32-bit registers
+ */
+#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \
+static ssize_t _otg_attr_name_##_show (struct device *_dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\
+ uint32_t val; \
+ val = dwc_read_reg32 (_addr_); \
+ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \
+}
+#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \
+static ssize_t _otg_attr_name_##_store (struct device *_dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\
+ uint32_t val = simple_strtoul(buf, NULL, 16); \
+ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \
+ dwc_write_reg32(_addr_, val); \
+ return count; \
+}
+
+#define DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \
+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \
+DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store);
+
+#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \
+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \
+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL);
+
+
+/** @name Functions for Show/Store of Attributes */
+/**@{*/
+
+/**
+ * Show the register offset of the Register Access.
+ */
+static ssize_t regoffset_show( struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ return snprintf(buf, sizeof("0xFFFFFFFF\n")+1,"0x%08x\n", otg_dev->reg_offset);
+}
+
+/**
+ * Set the register offset for the next Register Access Read/Write
+ */
+static ssize_t regoffset_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t offset = simple_strtoul(buf, NULL, 16);
+ //dev_dbg(_dev, "Offset=0x%08x\n", offset);
+ if (offset < SZ_256K ) {
+ otg_dev->reg_offset = offset;
+ }
+ else {
+ dev_err( _dev, "invalid offset\n" );
+ }
+
+ return count;
+}
+DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, regoffset_show, regoffset_store);
+
+/**
+ * Show the value of the register at the offset in the reg_offset
+ * attribute.
+ */
+static ssize_t regvalue_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t val;
+ volatile uint32_t *addr;
+
+ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) {
+ /* Calculate the address */
+ addr = (uint32_t*)(otg_dev->reg_offset +
+ (uint8_t*)otg_dev->base);
+ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr);
+ val = dwc_read_reg32( addr );
+ return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1,
+ "Reg@0x%06x = 0x%08x\n",
+ otg_dev->reg_offset, val);
+ } else {
+ dev_err(_dev, "Invalid offset (0x%0x)\n",
+ otg_dev->reg_offset);
+ return sprintf(buf, "invalid offset\n" );
+ }
+}
+
+/**
+ * Store the value in the register at the offset in the reg_offset
+ * attribute.
+ *
+ */
+static ssize_t regvalue_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ volatile uint32_t * addr;
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val);
+ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) {
+ /* Calculate the address */
+ addr = (uint32_t*)(otg_dev->reg_offset +
+ (uint8_t*)otg_dev->base);
+ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr);
+ dwc_write_reg32( addr, val );
+ } else {
+ dev_err(_dev, "Invalid Register Offset (0x%08x)\n",
+ otg_dev->reg_offset);
+ }
+ return count;
+}
+DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store);
+
+/*
+ * Attributes
+ */
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<20),20,"Mode");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"Mode");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<8),8,"Mode");
+
+//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode");
+//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,otg_dev->core_if->host_if->hprt0,0x01,0,"Bus Connected");
+
+DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,&(otg_dev->core_if->core_global_regs->gotgctl),"GOTGCTL");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,&(otg_dev->core_if->core_global_regs->gusbcfg),"GUSBCFG");
+DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,&(otg_dev->core_if->core_global_regs->grxfsiz),"GRXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,&(otg_dev->core_if->core_global_regs->gnptxfsiz),"GNPTXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,&(otg_dev->core_if->core_global_regs->gpvndctl),"GPVNDCTL");
+DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,&(otg_dev->core_if->core_global_regs->ggpio),"GGPIO");
+DWC_OTG_DEVICE_ATTR_REG32_RW(guid,&(otg_dev->core_if->core_global_regs->guid),"GUID");
+DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,&(otg_dev->core_if->core_global_regs->gsnpsid),"GSNPSID");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),0x3,0,"Device Speed");
+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dsts),0x6,1,"Device Enumeration Speed");
+
+DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,&(otg_dev->core_if->core_global_regs->hptxfsiz),"HPTXFSIZ");
+DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,otg_dev->core_if->host_if->hprt0,"HPRT0");
+
+
+/**
+ * @todo Add code to initiate the HNP.
+ */
+/**
+ * Show the HNP status bit
+ */
+static ssize_t hnp_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ gotgctl_data_t val;
+ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl));
+ return sprintf (buf, "HstNegScs = 0x%x\n", val.b.hstnegscs);
+}
+
+/**
+ * Set the HNP Request bit
+ */
+static ssize_t hnp_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t in = simple_strtoul(buf, NULL, 16);
+ uint32_t *addr = (uint32_t *)&(otg_dev->core_if->core_global_regs->gotgctl);
+ gotgctl_data_t mem;
+ mem.d32 = dwc_read_reg32(addr);
+ mem.b.hnpreq = in;
+ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32);
+ dwc_write_reg32(addr, mem.d32);
+ return count;
+}
+DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store);
+
+/**
+ * @todo Add code to initiate the SRP.
+ */
+/**
+ * Show the SRP status bit
+ */
+static ssize_t srp_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifndef CONFIG_DWC_HOST_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ gotgctl_data_t val;
+ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl));
+ return sprintf (buf, "SesReqScs = 0x%x\n", val.b.sesreqscs);
+#else
+ return sprintf(buf, "Host Only Mode!\n");
+#endif
+}
+
+/**
+ * Set the SRP Request bit
+ */
+static ssize_t srp_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+#ifndef CONFIG_DWC_HOST_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_pcd_initiate_srp(otg_dev->pcd);
+#endif
+ return count;
+}
+DEVICE_ATTR(srp, 0644, srp_show, srp_store);
+
+/**
+ * @todo Need to do more for power on/off?
+ */
+/**
+ * Show the Bus Power status
+ */
+static ssize_t buspower_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ hprt0_data_t val;
+ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0);
+ return sprintf (buf, "Bus Power = 0x%x\n", val.b.prtpwr);
+}
+
+
+/**
+ * Set the Bus Power status
+ */
+static ssize_t buspower_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t on = simple_strtoul(buf, NULL, 16);
+ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0;
+ hprt0_data_t mem;
+
+ mem.d32 = dwc_read_reg32(addr);
+ mem.b.prtpwr = on;
+
+ //dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32);
+ dwc_write_reg32(addr, mem.d32);
+
+ return count;
+}
+DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store);
+
+/**
+ * @todo Need to do more for suspend?
+ */
+/**
+ * Show the Bus Suspend status
+ */
+static ssize_t bussuspend_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ hprt0_data_t val;
+ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0);
+ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp);
+}
+
+/**
+ * Set the Bus Suspend status
+ */
+static ssize_t bussuspend_store( struct device *_dev, struct device_attribute *attr, const char *buf,
+ size_t count )
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t in = simple_strtoul(buf, NULL, 16);
+ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0;
+ hprt0_data_t mem;
+ mem.d32 = dwc_read_reg32(addr);
+ mem.b.prtsusp = in;
+ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32);
+ dwc_write_reg32(addr, mem.d32);
+ return count;
+}
+DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store);
+
+/**
+ * Show the status of Remote Wakeup.
+ */
+static ssize_t remote_wakeup_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifndef CONFIG_DWC_HOST_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dctl_data_t val;
+ val.d32 = dwc_read_reg32( &otg_dev->core_if->dev_if->dev_global_regs->dctl);
+ return sprintf( buf, "Remote Wakeup = %d Enabled = %d\n",
+ val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable);
+#else
+ return sprintf(buf, "Host Only Mode!\n");
+#endif
+}
+
+/**
+ * Initiate a remote wakeup of the host. The Device control register
+ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable
+ * flag is set.
+ *
+ */
+static ssize_t remote_wakeup_store( struct device *_dev, struct device_attribute *attr,
+ const char *buf, size_t count )
+{
+#ifndef CONFIG_DWC_HOST_ONLY
+ uint32_t val = simple_strtoul(buf, NULL, 16);
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ if (val&1) {
+ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1);
+ }
+ else {
+ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0);
+ }
+#endif
+ return count;
+}
+DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show,
+ remote_wakeup_store);
+
+/**
+ * Dump global registers and either host or device registers (depending on the
+ * current mode of the core).
+ */
+static ssize_t regdump_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ printk("%s otg_dev=0x%p\n", __FUNCTION__, otg_dev);
+ dwc_otg_dump_global_registers( otg_dev->core_if);
+ if (dwc_otg_is_host_mode(otg_dev->core_if)) {
+ dwc_otg_dump_host_registers( otg_dev->core_if);
+ } else {
+ dwc_otg_dump_dev_registers( otg_dev->core_if);
+ }
+ return sprintf( buf, "Register Dump\n" );
+}
+DEVICE_ATTR(regdump, S_IRUGO|S_IWUSR, regdump_show, 0);
+
+/**
+ * Dump the current hcd state.
+ */
+static ssize_t hcddump_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifndef CONFIG_DWC_DEVICE_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_hcd_dump_state(otg_dev->hcd);
+#endif
+ return sprintf( buf, "HCD Dump\n" );
+}
+DEVICE_ATTR(hcddump, S_IRUGO|S_IWUSR, hcddump_show, 0);
+
+/**
+ * Dump the average frame remaining at SOF. This can be used to
+ * determine average interrupt latency. Frame remaining is also shown for
+ * start transfer and two additional sample points.
+ */
+static ssize_t hcd_frrem_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+#ifndef CONFIG_DWC_DEVICE_ONLY
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_hcd_dump_frrem(otg_dev->hcd);
+#endif
+ return sprintf( buf, "HCD Dump Frame Remaining\n" );
+}
+
+DEVICE_ATTR(hcd_frrem, S_IRUGO|S_IWUSR, hcd_frrem_show, 0);
+
+/**
+ * Displays the time required to read the GNPTXFSIZ register many times (the
+ * output shows the number of times the register is read).
+ */
+#define RW_REG_COUNT 10000000
+#define MSEC_PER_JIFFIE 1000/HZ
+static ssize_t rd_reg_test_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ int i;
+ int time;
+ int start_jiffies;
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+
+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
+ start_jiffies = jiffies;
+ for (i = 0; i < RW_REG_COUNT; i++) {
+ dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz);
+ }
+ time = jiffies - start_jiffies;
+ return sprintf( buf, "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n",
+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time );
+}
+
+DEVICE_ATTR(rd_reg_test, S_IRUGO|S_IWUSR, rd_reg_test_show, 0);
+
+/**
+ * Displays the time required to write the GNPTXFSIZ register many times (the
+ * output shows the number of times the register is written).
+ */
+static ssize_t wr_reg_test_show( struct device *_dev, struct device_attribute *attr, char *buf)
+{
+ int i;
+ int time;
+ int start_jiffies;
+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+ uint32_t reg_val;
+
+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n",
+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy);
+ reg_val = dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz);
+ start_jiffies = jiffies;
+ for (i = 0; i < RW_REG_COUNT; i++) {
+ dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz, reg_val);
+ }
+ time = jiffies - start_jiffies;
+ return sprintf( buf, "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n",
+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time);
+}
+DEVICE_ATTR(wr_reg_test, S_IRUGO|S_IWUSR, wr_reg_test_show, 0);
+
+/**
+ * Create the device files
+ */
+void dwc_otg_attr_create (struct device *dev)
+{
+ int ret_val = 0;
+ ret_val = device_create_file(dev, &dev_attr_regoffset);
+ ret_val = device_create_file(dev, &dev_attr_regvalue);
+ ret_val = device_create_file(dev, &dev_attr_mode);
+ ret_val = device_create_file(dev, &dev_attr_hnpcapable);
+ ret_val = device_create_file(dev, &dev_attr_srpcapable);
+ ret_val = device_create_file(dev, &dev_attr_hnp);
+ ret_val = device_create_file(dev, &dev_attr_srp);
+ ret_val = device_create_file(dev, &dev_attr_buspower);
+ ret_val = device_create_file(dev, &dev_attr_bussuspend);
+ ret_val = device_create_file(dev, &dev_attr_busconnected);
+ ret_val = device_create_file(dev, &dev_attr_gotgctl);
+ ret_val = device_create_file(dev, &dev_attr_gusbcfg);
+ ret_val = device_create_file(dev, &dev_attr_grxfsiz);
+ ret_val = device_create_file(dev, &dev_attr_gnptxfsiz);
+ ret_val = device_create_file(dev, &dev_attr_gpvndctl);
+ ret_val = device_create_file(dev, &dev_attr_ggpio);
+ ret_val = device_create_file(dev, &dev_attr_guid);
+ ret_val = device_create_file(dev, &dev_attr_gsnpsid);
+ ret_val = device_create_file(dev, &dev_attr_devspeed);
+ ret_val = device_create_file(dev, &dev_attr_enumspeed);
+ ret_val = device_create_file(dev, &dev_attr_hptxfsiz);
+ ret_val = device_create_file(dev, &dev_attr_hprt0);
+ ret_val = device_create_file(dev, &dev_attr_remote_wakeup);
+ ret_val = device_create_file(dev, &dev_attr_regdump);
+ ret_val = device_create_file(dev, &dev_attr_hcddump);
+ ret_val = device_create_file(dev, &dev_attr_hcd_frrem);
+ ret_val = device_create_file(dev, &dev_attr_rd_reg_test);
+ ret_val = device_create_file(dev, &dev_attr_wr_reg_test);
+}
+
+/**
+ * Remove the device files
+ */
+void dwc_otg_attr_remove (struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_regoffset);
+ device_remove_file(dev, &dev_attr_regvalue);
+ device_remove_file(dev, &dev_attr_mode);
+ device_remove_file(dev, &dev_attr_hnpcapable);
+ device_remove_file(dev, &dev_attr_srpcapable);
+ device_remove_file(dev, &dev_attr_hnp);
+ device_remove_file(dev, &dev_attr_srp);
+ device_remove_file(dev, &dev_attr_buspower);
+ device_remove_file(dev, &dev_attr_bussuspend);
+ device_remove_file(dev, &dev_attr_busconnected);
+ device_remove_file(dev, &dev_attr_gotgctl);
+ device_remove_file(dev, &dev_attr_gusbcfg);
+ device_remove_file(dev, &dev_attr_grxfsiz);
+ device_remove_file(dev, &dev_attr_gnptxfsiz);
+ device_remove_file(dev, &dev_attr_gpvndctl);
+ device_remove_file(dev, &dev_attr_ggpio);
+ device_remove_file(dev, &dev_attr_guid);
+ device_remove_file(dev, &dev_attr_gsnpsid);
+ device_remove_file(dev, &dev_attr_devspeed);
+ device_remove_file(dev, &dev_attr_enumspeed);
+ device_remove_file(dev, &dev_attr_hptxfsiz);
+ device_remove_file(dev, &dev_attr_hprt0);
+ device_remove_file(dev, &dev_attr_remote_wakeup);
+ device_remove_file(dev, &dev_attr_regdump);
+ device_remove_file(dev, &dev_attr_hcddump);
+ device_remove_file(dev, &dev_attr_hcd_frrem);
+ device_remove_file(dev, &dev_attr_rd_reg_test);
+ device_remove_file(dev, &dev_attr_wr_reg_test);
+}
+
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_attr.h b/drivers/usb/gadget/dwc_otg/dwc_otg_attr.h
new file mode 100644
index 00000000000..d04a7d06189
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_attr.h
@@ -0,0 +1,67 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_attr.h $
+ * $Revision: #1 $
+ * $Date: 2005/07/07 $
+ * $Change: 510275 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_ATTR_H__)
+#define __DWC_OTG_ATTR_H__
+
+/** @file
+ * This file contains the interface to the Linux device attributes.
+ */
+extern struct device_attribute dev_attr_regoffset;
+extern struct device_attribute dev_attr_regvalue;
+
+extern struct device_attribute dev_attr_mode;
+extern struct device_attribute dev_attr_hnpcapable;
+extern struct device_attribute dev_attr_srpcapable;
+extern struct device_attribute dev_attr_hnp;
+extern struct device_attribute dev_attr_srp;
+extern struct device_attribute dev_attr_buspower;
+extern struct device_attribute dev_attr_bussuspend;
+extern struct device_attribute dev_attr_busconnected;
+extern struct device_attribute dev_attr_gotgctl;
+extern struct device_attribute dev_attr_gusbcfg;
+extern struct device_attribute dev_attr_grxfsiz;
+extern struct device_attribute dev_attr_gnptxfsiz;
+extern struct device_attribute dev_attr_gpvndctl;
+extern struct device_attribute dev_attr_ggpio;
+extern struct device_attribute dev_attr_guid;
+extern struct device_attribute dev_attr_gsnpsid;
+extern struct device_attribute dev_attr_devspeed;
+extern struct device_attribute dev_attr_enumspeed;
+extern struct device_attribute dev_attr_hptxfsiz;
+extern struct device_attribute dev_attr_hprt0;
+
+void dwc_otg_attr_create (struct device *dev);
+void dwc_otg_attr_remove (struct device *dev);
+
+#endif
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c b/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c
new file mode 100644
index 00000000000..61a8879fc10
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_cil.c
@@ -0,0 +1,3237 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil.c $
+ * $Revision: #24 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+
+/** @file
+ *
+ * The Core Interface Layer provides basic services for accessing and
+ * managing the DWC_otg hardware. These services are used by both the
+ * Host Controller Driver and the Peripheral Controller Driver.
+ *
+ * The CIL manages the memory map for the core so that the HCD and PCD
+ * don't have to do this separately. It also handles basic tasks like
+ * reading/writing the registers and data FIFOs in the controller.
+ * Some of the data access functions provide encapsulation of several
+ * operations required to perform a task, such as writing multiple
+ * registers to start a transfer. Finally, the CIL performs basic
+ * services that are not specific to either the host or device modes
+ * of operation. These services include management of the OTG Host
+ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
+ * Diagnostic API is also provided to allow testing of the controller
+ * hardware.
+ *
+ * The Core Interface Layer has the following requirements:
+ * - Provides basic controller operations.
+ * - Minimal use of OS services.
+ * - The OS services used will be abstracted by using inline functions
+ * or macros.
+ *
+ */
+#include <asm/unaligned.h>
+#ifdef CONFIG_DWC_DEBUG
+#include <linux/jiffies.h>
+#endif /* */
+
+#include <asm/dcr.h>
+
+#include "linux/dwc_otg_plat.h"
+#include "dwc_otg_regs.h"
+#include "dwc_otg_cil.h"
+
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+atomic_t release_later = ATOMIC_INIT(0);
+#endif
+/**
+ * This function is called to initialize the DWC_otg CSR data
+ * structures. The register addresses in the device and host
+ * structures are initialized from the base address supplied by the
+ * caller. The calling function must make the OS calls to get the
+ * base address of the DWC_otg controller registers. The core_params
+ * argument holds the parameters that specify how the core should be
+ * configured.
+ *
+ * @param[in] _reg_base_addr Base address of DWC_otg core registers
+ * @param[in] _core_params Pointer to the core configuration parameters
+ *
+ */
+dwc_otg_core_if_t * dwc_otg_cil_init(const uint32_t * _reg_base_addr,
+				       dwc_otg_core_params_t *_core_params)
+{
+	dwc_otg_core_if_t * core_if = 0;
+	dwc_otg_dev_if_t * dev_if = 0;
+	dwc_otg_host_if_t * host_if = 0;
+	uint8_t * reg_base = (uint8_t *) _reg_base_addr;
+	int i = 0;
+	DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, _reg_base_addr,
+		    _core_params);
+	/* NOTE(review): kmalloc+memset could be kzalloc; left as-is since this
+	 * is imported vendor code. */
+	core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL);
+	if (core_if == 0) {
+		DWC_DEBUGPL(DBG_CIL,"Allocation of dwc_otg_core_if_t failed\n");
+		return 0;
+	}
+	memset(core_if, 0, sizeof(dwc_otg_core_if_t));
+	core_if->core_params = _core_params;
+	/* The global CSR block sits at the very start of the register window. */
+	core_if->core_global_regs = (dwc_otg_core_global_regs_t *) reg_base;
+
+	/*
+	 * Allocate the Device Mode structures.
+	 */
+	dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL);
+	if (dev_if == 0) {
+		DWC_DEBUGPL(DBG_CIL,"Allocation of dwc_otg_dev_if_t failed\n");
+		kfree(core_if);
+		return 0;
+	}
+	/* Device-mode register pointers are computed from fixed offsets off the
+	 * base; one IN and one OUT EP register set per endpoint/channel slot. */
+	dev_if->dev_global_regs = (dwc_otg_device_global_regs_t *)(reg_base +
+					DWC_DEV_GLOBAL_REG_OFFSET);
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
+		    (reg_base + DWC_DEV_IN_EP_REG_OFFSET + (i * DWC_EP_REG_OFFSET));
+		dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
+		    (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + (i * DWC_EP_REG_OFFSET));
+		DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", i,
+			    &dev_if->in_ep_regs[i]->diepctl);
+		DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", i,
+			    &dev_if->out_ep_regs[i]->doepctl);
+	}
+	dev_if->speed = 0;	// unknown until enumeration completes
+	core_if->dev_if = dev_if;
+
+	/*
+	 * Allocate the Host Mode structures.
+	 */
+	host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL);
+	if (host_if == 0) {
+		DWC_DEBUGPL(DBG_CIL,"Allocation of dwc_otg_host_if_t failed\n");
+		kfree(dev_if);
+		kfree(core_if);
+		return 0;
+	}
+	host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
+	    (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
+	host_if->hprt0 = (uint32_t *) (reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);
+
+	/* One host-channel register set per channel slot. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
+		    (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + (i * DWC_OTG_CHAN_REGS_OFFSET));
+		DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n", i,&host_if->hc_regs[i]->hcchar);
+	}
+
+	host_if->num_host_channels = MAX_EPS_CHANNELS;
+	core_if->host_if = host_if;
+	/* Data FIFO access windows, one fixed-size window per channel/EP. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		core_if->data_fifo[i] =
+		    (uint32_t *) (reg_base + DWC_OTG_DATA_FIFO_OFFSET +
+				  (i * DWC_OTG_DATA_FIFO_SIZE));
+		DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n", i,
+			    (unsigned)core_if->data_fifo[i]);
+	}
+	core_if->pcgcctl = (uint32_t *) (reg_base + DWC_OTG_PCGCCTL_OFFSET);
+
+	/*
+	 * Store the contents of the hardware configuration registers here for
+	 * easy access later.
+	 */
+	core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1);
+	core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2);
+#ifdef CONFIG_DWC_SLAVE
+	/* Force slave-only architecture regardless of what GHWCFG2 reports. */
+	core_if->hwcfg2.b.architecture = DWC_SLAVE_ONLY_ARCH;
+#endif
+	core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3);
+	core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4);
+	DWC_DEBUGPL(DBG_CILV, "hwcfg1=%08x\n", core_if->hwcfg1.d32);
+	DWC_DEBUGPL(DBG_CILV, "hwcfg2=%08x\n", core_if->hwcfg2.d32);
+	DWC_DEBUGPL(DBG_CILV, "hwcfg3=%08x\n", core_if->hwcfg3.d32);
+	DWC_DEBUGPL(DBG_CILV, "hwcfg4=%08x\n", core_if->hwcfg4.d32);
+	DWC_DEBUGPL(DBG_CILV, "op_mode=%0x\n", core_if->hwcfg2.b.op_mode);
+	DWC_DEBUGPL(DBG_CILV, "arch=%0x\n", core_if->hwcfg2.b.architecture);
+	DWC_DEBUGPL(DBG_CILV, "num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep + 1);
+	DWC_DEBUGPL(DBG_CILV, "num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan);
+	DWC_DEBUGPL(DBG_CILV, "nonperio_tx_q_depth=0x%0x\n",
+		     core_if->hwcfg2.b.nonperio_tx_q_depth);
+	DWC_DEBUGPL(DBG_CILV, "host_perio_tx_q_depth=0x%0x\n",
+		     core_if->hwcfg2.b.host_perio_tx_q_depth);
+	DWC_DEBUGPL(DBG_CILV, "dev_token_q_depth=0x%0x\n",
+		     core_if->hwcfg2.b.dev_token_q_depth);
+	DWC_DEBUGPL(DBG_CILV, "Total FIFO SZ=%d\n",
+		     core_if->hwcfg3.b.dfifo_depth);
+	DWC_DEBUGPL(DBG_CILV, "xfer_size_cntr_width=%0x\n",
+		     core_if->hwcfg3.b.xfer_size_cntr_width);
+
+	/*
+	 * Clear the SRP success/timer state for FS-I2C
+	 */
+	core_if->srp_success = 0;
+	core_if->srp_timer_started = 0;
+	return core_if;
+}
+
+
+/**
+ * This function frees the structures allocated by dwc_otg_cil_init().
+ *
+ * @param[in] _core_if The core interface pointer returned from
+ * dwc_otg_cil_init().
+ *
+ */
+void dwc_otg_cil_remove(dwc_otg_core_if_t * _core_if)
+{
+	/* Disable all interrupts: clear GAHBCFG bit 0 (global interrupt
+	 * enable), then mask every source in GINTMSK. */
+	dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, 1, 0);
+	dwc_write_reg32(&_core_if->core_global_regs->gintmsk, 0);
+	/* NOTE(review): kfree(NULL) is a no-op, so the NULL guards are
+	 * redundant; kept byte-identical in this vendor import. */
+	if (_core_if->dev_if) {
+		kfree(_core_if->dev_if);
+	}
+	if (_core_if->host_if) {
+		kfree(_core_if->host_if);
+	}
+	kfree(_core_if);
+}
+
+
+/**
+ * This function enables the controller's Global Interrupt in the AHB Config
+ * register.
+ *
+ * @param[in] _core_if Programming view of DWC_otg controller.
+ */
+/* NOTE(review): `extern` on a function *definition* is redundant in C;
+ * harmless, but the keyword normally belongs on the header declaration. */
+extern void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t * _core_if)
+{
+	gahbcfg_data_t ahbcfg = {.d32 = 0};
+	ahbcfg.b.glblintrmsk = 1;	/* Set GAHBCFG.GlblIntrMsk to unmask the core's interrupt line */
+	dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
+}
+
+/**
+ * This function disables the controller's Global Interrupt in the AHB Config
+ * register.
+ *
+ * @param[in] _core_if Programming view of DWC_otg controller.
+ */
+/* NOTE(review): `extern` on a function *definition* is redundant in C. */
+extern void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t * _core_if)
+{
+	gahbcfg_data_t ahbcfg = {.d32 = 0};
+	ahbcfg.b.glblintrmsk = 1;	/* Build the mask for GAHBCFG.GlblIntrMsk; it is CLEARED below */
+	dwc_modify_reg32(&_core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
+}
+
+/**
+ * This function initializes the common interrupts, used in both
+ * device and host modes.
+ *
+ * @param[in] _core_if Programming view of the DWC_otg controller
+ *
+ */
+static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t * _core_if)
+{
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+
+	/* Clear any pending OTG Interrupts (write-1-to-clear) */
+	dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF);
+
+	/* Clear any pending interrupts (write-1-to-clear) */
+	dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+	/*
+	 * Enable the interrupts in the GINTMSK.
+	 */
+	intr_mask.b.modemismatch = 1;
+	intr_mask.b.otgintr = 1;
+	/* RxFIFO level interrupt is only needed in slave (PIO) mode; in DMA
+	 * mode the core moves data without software intervention. */
+	if (!_core_if->dma_enable) {
+		intr_mask.b.rxstsqlvl = 1;
+	}
+	intr_mask.b.conidstschng = 1;
+	intr_mask.b.wkupintr = 1;
+	intr_mask.b.disconnect = 1;
+	intr_mask.b.usbsuspend = 1;
+	intr_mask.b.sessreqintr = 1;
+	/* NOTE(review): this overwrites GINTMSK wholesale rather than OR-ing,
+	 * so any previously enabled mode-specific bits are dropped; callers
+	 * re-enable them afterwards (see enable_device/host_interrupts). */
+	dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32);
+}
+
+
+/**
+ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
+ * type.
+ */
+static void init_fslspclksel(dwc_otg_core_if_t * _core_if)
+{
+	uint32_t val;
+	hcfg_data_t hcfg;
+	/* A dedicated FS PHY (or ULPI FS/LS mode with hs_phy_type==2 &&
+	 * fs_phy_type==1) runs the PHY clock at 48 MHz; otherwise the HS PHY
+	 * clock is 30/60 MHz. */
+	if (((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+	      (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+	      (_core_if->core_params->ulpi_fs_ls)) ||
+	     (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
+		/* Full speed PHY */
+		val = DWC_HCFG_48_MHZ;
+	} else {
+		/* High speed PHY running at full speed or high speed */
+		val = DWC_HCFG_30_60_MHZ;
+	}
+	DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
+	/* Read-modify-write: preserve the other HCFG fields. */
+	hcfg.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hcfg);
+	hcfg.b.fslspclksel = val;
+	dwc_write_reg32(&_core_if->host_if->host_global_regs->hcfg, hcfg.d32);
+}
+
+
+/**
+ * Initializes the DevSpd field of the DCFG register depending on the PHY type
+ * and the enumeration speed of the device.
+ */
+static void init_devspd(dwc_otg_core_if_t * _core_if)
+{
+	uint32_t val;
+	dcfg_data_t dcfg;
+	/* DevSpd encoding written below: 0x3 = FS with dedicated FS PHY,
+	 * 0x1 = FS on an HS PHY, 0x0 = HS on an HS PHY. */
+	if (((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+	      (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+	      (_core_if->core_params->ulpi_fs_ls)) ||
+	     (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
+		/* Full speed PHY */
+		val = 0x3;
+	} else if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
+		/* High speed PHY running at full speed */
+		val = 0x1;
+	} else {
+		/* High speed PHY running at high speed */
+		val = 0x0;
+	}
+	DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
+	/* Read-modify-write: preserve the other DCFG fields. */
+	dcfg.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dcfg);
+	dcfg.b.devspd = val;
+	dwc_write_reg32(&_core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
+}
+
+
+/**
+ * This function calculates the number of IN EPS
+ * using GHWCFG1 and GHWCFG2 registers values
+ *
+ * @param _pcd the pcd structure.
+ */
+static uint32_t calc_num_in_eps(dwc_otg_core_if_t * _core_if)
+{
+	uint32_t num_in_eps = 0;
+	uint32_t num_eps = _core_if->hwcfg2.b.num_dev_ep;
+	/* GHWCFG1 holds 2 bits of direction info per endpoint; skip the two
+	 * bits for EP0 (always bidirectional) by pre-shifting. */
+	uint32_t hwcfg1 = _core_if->hwcfg1.d32 >> 2;
+	uint32_t num_tx_fifos = _core_if->hwcfg4.b.num_in_eps;
+	int i;
+	for (i = 0; i < num_eps; ++i) {
+		/* Bit 0 of each 2-bit field clear => endpoint supports IN. */
+		if (!(hwcfg1 & 0x1))
+			num_in_eps++;
+		hwcfg1 >>= 2;
+	}
+	/* With dedicated TX FIFOs, usable IN EPs are capped by the number of
+	 * FIFOs the core was synthesized with. */
+	if (_core_if->hwcfg4.b.ded_fifo_en) {
+		num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps;
+	}
+	return num_in_eps;
+}
+
+
+/**
+ * This function calculates the number of OUT EPS
+ * using GHWCFG1 and GHWCFG2 registers values
+ *
+ * @param _pcd the pcd structure.
+ */
+static uint32_t calc_num_out_eps(dwc_otg_core_if_t * _core_if)
+{
+	uint32_t num_out_eps = 0;
+	uint32_t num_eps = _core_if->hwcfg2.b.num_dev_ep;
+	/* Skip the two GHWCFG1 bits belonging to EP0 (always bidirectional). */
+	uint32_t hwcfg1 = _core_if->hwcfg1.d32 >> 2;
+	int i;
+	for (i = 0; i < num_eps; ++i) {
+		/* Bit 1 of each 2-bit field clear => endpoint supports OUT. */
+		if (!(hwcfg1 & 0x2))
+			num_out_eps++;
+		hwcfg1 >>= 2;
+	}
+	return num_out_eps;
+}
+
+
+/**
+ * This function initializes the DWC_otg controller registers and
+ * prepares the core for device mode or host mode operation.
+ *
+ * @param _core_if Programming view of the DWC_otg controller
+ *
+ */
+void dwc_otg_core_init(dwc_otg_core_if_t * _core_if)
+{
+	int i = 0;
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+	gahbcfg_data_t ahbcfg = {.d32 = 0};
+	gusbcfg_data_t usbcfg = {.d32 = 0};
+	gi2cctl_data_t i2cctl = {.d32 = 0};
+	DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", _core_if);
+
+	/* Common Initialization */
+	usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+	DWC_DEBUGPL(DBG_CIL, "USB config register: 0x%08x\n", usbcfg.d32);
+
+	/* Program the ULPI External VBUS bit if needed */
+#if defined(OTG_EXT_CHG_PUMP) || defined(CONFIG_460EX) || defined(CONFIG_APM82181)
+	/* These boards always use an external VBUS supply/charge pump. */
+	usbcfg.b.ulpi_ext_vbus_drv = 1;
+#else
+	//usbcfg.b.ulpi_ext_vbus_drv = 0;
+	usbcfg.b.ulpi_ext_vbus_drv =
+	    (_core_if->core_params->phy_ulpi_ext_vbus ==
+	     DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
+#endif
+
+	/* Set external TS Dline pulsing */
+	usbcfg.b.term_sel_dl_pulse = (_core_if->core_params->ts_dline == 1) ? 1 : 0;
+	dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+	/* Reset the Controller */
+	dwc_otg_core_reset(_core_if);
+
+	/* Initialize parameters from Hardware configuration registers. */
+	dev_if->num_in_eps = calc_num_in_eps(_core_if);
+	dev_if->num_out_eps = calc_num_out_eps(_core_if);
+	DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",
+		     _core_if->hwcfg4.b.num_dev_perio_in_ep);
+	DWC_DEBUGPL(DBG_CIL, "Is power optimization enabled?  %s\n",
+		     _core_if->hwcfg4.b.power_optimiz ? "Yes" : "No");
+	DWC_DEBUGPL(DBG_CIL, "vbus_valid filter enabled?  %s\n",
+		     _core_if->hwcfg4.b.vbus_valid_filt_en ? "Yes" : "No");
+	DWC_DEBUGPL(DBG_CIL, "iddig filter enabled?  %s\n",
+		     _core_if->hwcfg4.b.iddig_filt_en ? "Yes" : "No");
+
+	/* FIFO sizes live in the upper 16 bits of each DPTXFSIZ register. */
+	for (i = 0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+		dev_if->perio_tx_fifo_size[i] =
+			dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+		DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", i,
+			     dev_if->perio_tx_fifo_size[i]);
+	}
+	for (i = 0; i < _core_if->hwcfg4.b.num_in_eps; i++) {
+		dev_if->tx_fifo_size[i] =
+			dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16;
+		/* NOTE(review): this debug print shows perio_tx_fifo_size[i]
+		 * although tx_fifo_size[i] was just assigned — almost
+		 * certainly a copy/paste slip; debug-only, no functional
+		 * impact. */
+		DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", i,
+			     dev_if->perio_tx_fifo_size[i]);
+	}
+	_core_if->total_fifo_size = _core_if->hwcfg3.b.dfifo_depth;
+	_core_if->rx_fifo_size = dwc_read_reg32(&global_regs->grxfsiz);
+	_core_if->nperio_tx_fifo_size = dwc_read_reg32(&global_regs->gnptxfsiz) >> 16;
+	DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", _core_if->total_fifo_size);
+	DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", _core_if->rx_fifo_size);
+	DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n",_core_if->nperio_tx_fifo_size);
+
+	/* This programming sequence needs to happen in FS mode before any other
+	 * programming occurs */
+	if ((_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) &&
+	     (_core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
+
+		/* If FS mode with FS PHY */
+
+		/* core_init() is now called on every switch so only call the
+		 * following for the first time through.
+		 */
+		if (!_core_if->phy_init_done) {
+			_core_if->phy_init_done = 1;
+			DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
+			usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+			usbcfg.b.physel = 1;
+			dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+			/* Reset after a PHY select */
+			dwc_otg_core_reset(_core_if);
+		}
+
+		/* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS.  Also
+		 * do this on HNP Dev/Host mode switches (done in dev_init and
+		 * host_init).
+		 */
+		if (dwc_otg_is_host_mode(_core_if)) {
+			DWC_DEBUGPL(DBG_CIL, "host mode\n");
+			init_fslspclksel(_core_if);
+		} else {
+			DWC_DEBUGPL(DBG_CIL, "device mode\n");
+			init_devspd(_core_if);
+		}
+
+		if (_core_if->core_params->i2c_enable) {
+			DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
+
+			/* Program GUSBCFG.OtgUtmifsSel to I2C */
+			usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+			usbcfg.b.otgutmifssel = 1;
+			dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+			/* Program GI2CCTL.I2CEn: set the device address with
+			 * I2CEn clear, then enable in a second write. */
+			i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl);
+			i2cctl.b.i2cdevaddr = 1;
+			i2cctl.b.i2cen = 0;
+			dwc_write_reg32(&global_regs->gi2cctl, i2cctl.d32);
+			i2cctl.b.i2cen = 1;
+			dwc_write_reg32(&global_regs->gi2cctl, i2cctl.d32);
+		}
+	} /* endif speed == DWC_SPEED_PARAM_FULL */
+	else {
+		/* High speed PHY. */
+		if (!_core_if->phy_init_done) {
+			_core_if->phy_init_done = 1;
+			DWC_DEBUGPL(DBG_CIL, "High spped PHY\n");
+			/* HS PHY parameters.  These parameters are preserved
+			 * during soft reset so only program the first time.  Do
+			 * a soft reset immediately after setting phyif.
+			 */
+			// test-only: in AMCC 460EX code not used!!!???
+			usbcfg.b.ulpi_utmi_sel = _core_if->core_params->phy_type;
+			if (usbcfg.b.ulpi_utmi_sel == 1) {
+				DWC_DEBUGPL(DBG_CIL, "ULPI\n");
+				/* ULPI interface */
+				usbcfg.b.phyif = 0;
+				usbcfg.b.ddrsel = _core_if->core_params->phy_ulpi_ddr;
+			} else {
+				/* UTMI+ interface */
+				if (_core_if->core_params->phy_utmi_width == 16) {
+					usbcfg.b.phyif = 1;
+					DWC_DEBUGPL(DBG_CIL, "UTMI+ 16\n");
+				} else {
+					DWC_DEBUGPL(DBG_CIL, "UTMI+ 8\n");
+					usbcfg.b.phyif = 0;
+				}
+			}
+			dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+			/* Reset after setting the PHY parameters */
+			dwc_otg_core_reset(_core_if);
+		}
+	}
+	/* ULPI FS/LS serial mode: only valid for the HS-ULPI + FS-dedicated
+	 * PHY combination; otherwise explicitly clear both bits. */
+	if ((_core_if->hwcfg2.b.hs_phy_type == 2) &&
+	     (_core_if->hwcfg2.b.fs_phy_type == 1) &&
+	     (_core_if->core_params->ulpi_fs_ls)) {
+		DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
+		usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+		usbcfg.b.ulpi_fsls = 1;
+		usbcfg.b.ulpi_clk_sus_m = 1;
+		dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+	} else {
+		DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS=0\n");
+		usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+		usbcfg.b.ulpi_fsls = 0;
+		usbcfg.b.ulpi_clk_sus_m = 0;
+		dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+	}
+
+	/* Program the GAHBCFG Register. */
+	switch (_core_if->hwcfg2.b.architecture) {
+	case DWC_SLAVE_ONLY_ARCH:
+		DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
+		ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+		ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
+		_core_if->dma_enable = 0;
+		break;
+	case DWC_EXT_DMA_ARCH:
+		DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n");
+		ahbcfg.b.hburstlen = _core_if->core_params->dma_burst_size;
+		_core_if->dma_enable = (_core_if->core_params->dma_enable != 0);
+		break;
+	case DWC_INT_DMA_ARCH:
+		DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
+		#if defined(CONFIG_APM82181)
+		/* Avoid system hang during concurrently using USB and SATA */
+		ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR16;
+		#else
+		ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR;
+		#endif
+		_core_if->dma_enable = (_core_if->core_params->dma_enable != 0);
+		break;
+	}
+	ahbcfg.b.dmaenable = _core_if->dma_enable;
+	dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32);
+	_core_if->en_multiple_tx_fifo = _core_if->hwcfg4.b.ded_fifo_en;
+
+	/*
+	 * Program the GUSBCFG register.
+	 * HNP/SRP capability bits are derived from the synthesized op_mode
+	 * (GHWCFG2) constrained by the otg_cap module parameter.
+	 */
+	usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+	switch (_core_if->hwcfg2.b.op_mode) {
+	case DWC_MODE_HNP_SRP_CAPABLE:
+		usbcfg.b.hnpcap = (_core_if->core_params->otg_cap ==
+				    DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
+		usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+				    DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+	case DWC_MODE_SRP_ONLY_CAPABLE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+				    DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+	case DWC_MODE_NO_HNP_SRP_CAPABLE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+	case DWC_MODE_SRP_CAPABLE_DEVICE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+				    DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+	case DWC_MODE_NO_SRP_CAPABLE_DEVICE:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+	case DWC_MODE_SRP_CAPABLE_HOST:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = (_core_if->core_params->otg_cap !=
+				    DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
+		break;
+	case DWC_MODE_NO_SRP_CAPABLE_HOST:
+		usbcfg.b.hnpcap = 0;
+		usbcfg.b.srpcap = 0;
+		break;
+	}
+	dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32);
+
+	/* Enable common interrupts */
+	dwc_otg_enable_common_interrupts(_core_if);
+
+	/* Do device or host initialization based on mode during PCD
+	 * and HCD initialization
+	 */
+	if (dwc_otg_is_host_mode(_core_if)) {
+		DWC_DEBUGPL(DBG_ANY, "Host Mode\n");
+		_core_if->op_state = A_HOST;
+	} else {
+		DWC_DEBUGPL(DBG_ANY, "Device Mode\n");
+		_core_if->op_state = B_PERIPHERAL;
+#ifdef CONFIG_DWC_DEVICE_ONLY
+		dwc_otg_core_dev_init(_core_if);
+#endif /*  */
+	}
+}
+
+
+/**
+ * This function enables the Device mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t * _core_if)
+{
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+	/* Disable all interrupts. */
+	dwc_write_reg32(&global_regs->gintmsk, 0);
+
+	/* Clear any pending interrupts (write-1-to-clear) */
+	dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+	/* Enable the common interrupts */
+	dwc_otg_enable_common_interrupts(_core_if);
+
+	/* Enable device-mode interrupts */
+	intr_mask.b.usbreset = 1;
+	intr_mask.b.enumdone = 1;
+	intr_mask.b.inepintr = 1;
+	intr_mask.b.outepintr = 1;
+	intr_mask.b.erlysuspend = 1;
+	/* EP mismatch only matters with the shared (non-dedicated) TX FIFO. */
+	if (_core_if->en_multiple_tx_fifo == 0) {
+		intr_mask.b.epmismatch = 1;
+	}
+
+	/** @todo NGS: Should this be a module parameter? */
+#ifdef USE_PERIODIC_EP
+	intr_mask.b.isooutdrop = 1;
+	intr_mask.b.eopframe = 1;
+	intr_mask.b.incomplisoin = 1;
+	intr_mask.b.incomplisoout = 1;
+#endif /*  */
+	/* OR the device bits into GINTMSK without disturbing common bits. */
+	dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32,
+			  intr_mask.d32);
+
+	DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
+		     dwc_read_reg32(&global_regs->gintmsk));
+}
+
+
+/**
+ * This function initializes the DWC_otg controller registers for
+ * device mode.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ *
+ */
+void dwc_otg_core_dev_init(dwc_otg_core_if_t * _core_if)
+{
+	int i;
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+	dwc_otg_core_params_t * params = _core_if->core_params;
+	dcfg_data_t dcfg = {.d32 = 0};
+	grstctl_t resetctl = {.d32 = 0};
+	uint32_t rx_fifo_size;
+	fifosize_data_t nptxfifosize;
+	fifosize_data_t txfifosize;
+	dthrctl_data_t dthrctl;
+	fifosize_data_t ptxfifosize;
+
+	/* Restart the Phy Clock (writing 0 to PCGCCTL clears stop/gate bits) */
+	dwc_write_reg32(_core_if->pcgcctl, 0);
+
+	/* Device configuration register */
+	init_devspd(_core_if);
+	dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
+	dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80;
+	dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+	/* Configure data FIFO sizes (only when the core supports dynamic
+	 * FIFO sizing and the module parameter asks for it). */
+	if (_core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+		DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
+			     _core_if->total_fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
+			     params->dev_rx_fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
+			     params->dev_nperio_tx_fifo_size);
+
+		/* Rx FIFO */
+		DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
+			     dwc_read_reg32(&global_regs->grxfsiz));
+		rx_fifo_size = params->dev_rx_fifo_size;
+		dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size);
+		DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
+			     dwc_read_reg32(&global_regs->grxfsiz));
+
+		/** Set Periodic Tx FIFO Mask all bits 0 */
+		_core_if->p_tx_msk = 0;
+
+		/** Set Tx FIFO Mask all bits 0 */
+		_core_if->tx_msk = 0;
+		if (_core_if->en_multiple_tx_fifo == 0) {
+			/* Shared-FIFO core: one non-periodic Tx FIFO placed
+			 * directly after the Rx FIFO. */
+			DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+				     dwc_read_reg32(&global_regs->gnptxfsiz));
+			nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+			nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
+			dwc_write_reg32(&global_regs->gnptxfsiz,nptxfifosize.d32);
+			DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+				     dwc_read_reg32(&global_regs->gnptxfsiz));
+
+			/**@todo NGS: Fix Periodic FIFO Sizing! */
+			/*
+			 * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15.
+			 * Indexes of the FIFO size module parameters in the
+			 * dev_perio_tx_fifo_size array and the FIFO size registers in
+			 * the dptxfsiz array run from 0 to 14.
+			 */
+			/** @todo Finish debug of this */
+			ptxfifosize.b.startaddr =
+				nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+			for (i = 0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep;i++) {
+				ptxfifosize.b.depth = params->dev_perio_tx_fifo_size[i];
+				DWC_DEBUGPL(DBG_CIL,"initial dptxfsiz_dieptxf[%d]=%08x\n",
+					i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+				dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i],ptxfifosize.d32);
+				DWC_DEBUGPL(DBG_CIL,"new dptxfsiz_dieptxf[%d]=%08x\n",
+					i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+				ptxfifosize.b.startaddr += ptxfifosize.b.depth;
+			}
+		} else {
+
+			/*
+			 * Tx FIFOs These FIFOs are numbered from 1 to 15.
+			 * Indexes of the FIFO size module parameters in the
+			 * dev_tx_fifo_size array and the FIFO size registers in
+			 * the dptxfsiz_dieptxf array run from 0 to 14.
+			 */
+
+			/* Non-periodic Tx FIFO */
+			DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
+				     dwc_read_reg32(&global_regs->gnptxfsiz));
+			nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
+			nptxfifosize.b.startaddr = params->dev_rx_fifo_size;
+			dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+			DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
+				     dwc_read_reg32(&global_regs->gnptxfsiz));
+			txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+			/* NOTE(review): this loop bound uses
+			 * num_dev_perio_in_ep, but dedicated-FIFO sizing reads
+			 * dev_tx_fifo_size[] — num_in_eps looks like the
+			 * intended bound.  Also the register is written at
+			 * [i-1] while depth is read at [i]; mixed indexing
+			 * appears deliberate for FIFO#1..15 mapping but is
+			 * worth confirming against the databook. */
+			for (i = 1;i < _core_if->hwcfg4.b.num_dev_perio_in_ep;i++) {
+				txfifosize.b.depth = params->dev_tx_fifo_size[i];
+				DWC_DEBUGPL(DBG_CIL,"initial dptxfsiz_dieptxf[%d]=%08x\n",
+					i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]));
+				dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i - 1],txfifosize.d32);
+				DWC_DEBUGPL(DBG_CIL,"new dptxfsiz_dieptxf[%d]=%08x\n",
+					i,dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i-1]));
+				txfifosize.b.startaddr += txfifosize.b.depth;
+			}
+		}
+	}
+
+	/* Flush the FIFOs */
+	dwc_otg_flush_tx_fifo(_core_if, 0x10);	/* all Tx FIFOs */
+	dwc_otg_flush_rx_fifo(_core_if);
+
+	/* Flush the Learning Queue. */
+	resetctl.b.intknqflsh = 1;
+	dwc_write_reg32(&_core_if->core_global_regs->grstctl, resetctl.d32);
+
+	/* Clear all pending Device Interrupts */
+	dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0);
+	dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0);
+	dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
+	dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0);
+	/* Park every IN EP: if still enabled, request disable + set NAK. */
+	for (i = 0; i <= dev_if->num_in_eps; i++) {
+		depctl_data_t depctl;
+		depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
+		if (depctl.b.epena) {
+			depctl.d32 = 0;
+			depctl.b.epdis = 1;
+			depctl.b.snak = 1;
+		} else {
+			depctl.d32 = 0;
+		}
+		dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
+		dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
+		dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0);
+		dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
+	}
+	/* Same parking sequence for every OUT EP. */
+	for (i = 0; i <= dev_if->num_out_eps; i++) {
+		depctl_data_t depctl;
+		depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl);
+		if (depctl.b.epena) {
+			depctl.d32 = 0;
+			depctl.b.epdis = 1;
+			depctl.b.snak = 1;
+		} else {
+			depctl.d32 = 0;
+		}
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0);
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
+	}
+	/* Thresholding only applies with dedicated TX FIFOs + DMA; decode the
+	 * thr_ctl parameter bits: 0=non-ISO TX, 1=ISO TX, 2=RX. */
+	if (_core_if->en_multiple_tx_fifo && _core_if->dma_enable) {
+		dev_if->non_iso_tx_thr_en = _core_if->core_params->thr_ctl & 0x1;
+		dev_if->iso_tx_thr_en = (_core_if->core_params->thr_ctl >> 1) & 0x1;
+		dev_if->rx_thr_en = (_core_if->core_params->thr_ctl >> 2) & 0x1;
+		dev_if->rx_thr_length = _core_if->core_params->rx_thr_length;
+		dev_if->tx_thr_length = _core_if->core_params->tx_thr_length;
+		dthrctl.d32 = 0;
+		dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
+		dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
+		dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
+		dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
+		dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
+		dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl,dthrctl.d32);
+		DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\n"
+			    "Rx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
+			    dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en,
+			    dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len,
+			    dthrctl.b.rx_thr_len);
+	}
+	dwc_otg_enable_device_interrupts(_core_if);
+	/* Additionally unmask the TX FIFO underrun interrupt for IN EPs. */
+	{
+		diepmsk_data_t msk = {.d32 = 0};
+		msk.b.txfifoundrn = 1;
+		dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32,msk.d32);
+	}
+}
+
+
+/**
+ * This function enables the Host mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t * _core_if)
+{
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
+
+	/* Disable all interrupts. */
+	dwc_write_reg32(&global_regs->gintmsk, 0);
+
+	/* Clear any pending interrupts (write-1-to-clear). */
+	dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF);
+
+	/* Enable the common interrupts */
+	dwc_otg_enable_common_interrupts(_core_if);
+
+	/*
+	 * Enable host mode interrupts without disturbing common
+	 * interrupts: SOF, port status change, and host channel.
+	 */
+	intr_mask.b.sofintr = 1;
+	intr_mask.b.portintr = 1;
+	intr_mask.b.hcintr = 1;
+	dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
+}
+
+/**
+ * This function disables the Host Mode interrupts.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ */
+void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t * _core_if)
+{
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__);
+
+	/*
+	 * Disable host mode interrupts without disturbing common
+	 * interrupts.  Also masks the two TX-FIFO-empty sources, which
+	 * the enable path does not set (they are enabled on demand).
+	 */
+	intr_mask.b.sofintr = 1;
+	intr_mask.b.portintr = 1;
+	intr_mask.b.hcintr = 1;
+	intr_mask.b.ptxfempty = 1;
+	intr_mask.b.nptxfempty = 1;
+	dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0);
+}
+
+#if 0
+/* currently not used, keep it here as if needed later */
+/* Dead code (compiled out by the enclosing #if 0): reads a ULPI PHY register
+ * via GPVNDCTL vendor control.  0x02000000 = RegAccess request,
+ * 0x08000000 = VStsDone (completion flag polled below). */
+static int phy_read(dwc_otg_core_if_t * _core_if, int addr)
+{
+	u32 val;
+	int timeout = 10;
+
+	dwc_write_reg32(&_core_if->core_global_regs->gpvndctl,
+			0x02000000 | (addr << 16));
+	val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+	/* Poll for completion, up to ~10 ms; NOTE(review): a timeout is not
+	 * reported to the caller — the function always returns 0. */
+	while (((val & 0x08000000) == 0) && (timeout--)) {
+		udelay(1000);
+		val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+	}
+	val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+	printk("%s: addr=%02x regval=%02x\n", __func__, addr, val & 0x000000ff);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_405EX
+/*
+ * Write one byte (val8) to ULPI PHY register 'addr' through the
+ * GPVNDCTL vendor control register.  Busy-waits (up to ~10ms) for the
+ * completion bit 0x08000000.
+ *
+ * NOTE(review): a timeout is silently ignored and the function always
+ * returns 0, so callers cannot detect a failed PHY write — presumably
+ * acceptable for this one-shot board fixup; confirm if reused.
+ */
+static int phy_write(dwc_otg_core_if_t * _core_if, int addr, int val8)
+{
+	u32 val;
+	int timeout = 10;
+
+	/* 0x02000000 = register request, 0x00400000 = write, data in 7:0 */
+	dwc_write_reg32(&_core_if->core_global_regs->gpvndctl,
+			0x02000000 | 0x00400000 | (addr << 16) | (val8 & 0x000000ff));
+	val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+	while (((val & 0x08000000) == 0) && (timeout--)) {
+		udelay(1000);
+		val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+	}
+	val = dwc_read_reg32(&_core_if->core_global_regs->gpvndctl);
+
+	return 0;
+}
+#endif
+
+/**
+ * This function initializes the DWC_otg controller registers for
+ * host mode.
+ *
+ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the
+ * request queues. Host channels are reset to ensure that they are ready for
+ * performing transfers.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ *
+ */
+void dwc_otg_core_host_init(dwc_otg_core_if_t * _core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ dwc_otg_host_if_t * host_if = _core_if->host_if;
+ dwc_otg_core_params_t * params = _core_if->core_params;
+ hprt0_data_t hprt0 = {.d32 = 0};
+ fifosize_data_t nptxfifosize;
+ fifosize_data_t ptxfifosize;
+ int i;
+ hcchar_data_t hcchar;
+ hcfg_data_t hcfg;
+ dwc_otg_hc_regs_t * hc_regs;
+ int num_channels;
+ gotgctl_data_t gotgctl = {.d32 = 0};
+ DWC_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, _core_if);
+
+ /* Restart the Phy Clock */
+ dwc_write_reg32(_core_if->pcgcctl, 0);
+
+ /* Initialize Host Configuration Register */
+ init_fslspclksel(_core_if);
+ if (_core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
+ hcfg.b.fslssupp = 1;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+ }
+
+ /* Configure data FIFO sizes */
+ if (_core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
+ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", _core_if->total_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->host_rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",params->host_nperio_tx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size);
+
+ /* Rx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",dwc_read_reg32(&global_regs->grxfsiz));
+ dwc_write_reg32(&global_regs->grxfsiz,params->host_rx_fifo_size);
+ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",dwc_read_reg32(&global_regs->grxfsiz));
+
+ /* Non-periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",dwc_read_reg32(&global_regs->gnptxfsiz));
+ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
+ nptxfifosize.b.startaddr = params->host_rx_fifo_size;
+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz));
+
+ /* Periodic Tx FIFO */
+ DWC_DEBUGPL(DBG_CIL, "initial hptxfsiz=%08x\n",dwc_read_reg32(&global_regs->hptxfsiz));
+ ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
+ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth;
+ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32);
+ DWC_DEBUGPL(DBG_CIL, "new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz));
+ }
+
+ /* Clear Host Set HNP Enable in the OTG Control Register */
+ gotgctl.b.hstsethnpen = 1;
+ dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
+
+ /* Make sure the FIFOs are flushed. */
+ dwc_otg_flush_tx_fifo(_core_if, 0x10 /* all Tx FIFOs */ );
+ dwc_otg_flush_rx_fifo(_core_if);
+
+ /* Flush out any leftover queued requests. */
+ num_channels = _core_if->core_params->host_channels;
+ for (i = 0; i < num_channels; i++) {
+ hc_regs = _core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 0;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ }
+
+ /* Halt all channels to put them into a known state. */
+ for (i = 0; i < num_channels; i++) {
+ int count = 0;
+ hc_regs = _core_if->host_if->hc_regs[i];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 1;
+ hcchar.b.epdir = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
+
+ do {
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (++count > 200) {
+ DWC_ERROR
+ ("%s: Unable to clear halt on channel %d\n",
+ __func__, i);
+ break;
+ }
+ udelay(100);
+ } while (hcchar.b.chen);
+ }
+
+ /* Turn on the vbus power. */
+ DWC_PRINT("Init: Port Power? op_state=%d\n", _core_if->op_state);
+ if (_core_if->op_state == A_HOST) {
+ hprt0.d32 = dwc_otg_read_hprt0(_core_if);
+ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr);
+ if (hprt0.b.prtpwr == 0) {
+ hprt0.b.prtpwr = 1;
+ dwc_write_reg32(host_if->hprt0, hprt0.d32);
+ }
+ }
+
+#ifdef CONFIG_405EX
+ /* Write 0x60 to USB PHY register 7:
+ * Modify "Indicator Complement" and "Indicator Pass Thru" of
+ * Interface control register to disable the internal Vbus
+ * comparator, as suggested by RichTek FAE.
+ * This produced better results recognizing and mounting USB
+ * memory sticks on the Makalu 405EX platform. I couldn't see
+ * any difference on Kilauea, but since it seems to be better
+ * on Makalu, let's keep it in here too.
+ */
+ phy_write(_core_if, 7, 0x60);
+#endif
+
+ dwc_otg_enable_host_interrupts(_core_if);
+}
+
+
+/**
+ * Prepares a host channel for transferring packets to/from a specific
+ * endpoint. The HCCHARn register is set up with the characteristics specified
+ * in _hc. Host channel interrupts that may need to be serviced while this
+ * transfer is in progress are enabled.
+ *
+ * @param _core_if Programming view of DWC_otg controller
+ * @param _hc Information needed to initialize the host channel
+ */
+void dwc_otg_hc_init(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+ uint32_t intr_enable;
+ hcintmsk_data_t hc_intr_mask;
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ hcchar_data_t hcchar;
+ hcsplt_data_t hcsplt;
+ uint8_t hc_num = _hc->hc_num;
+ dwc_otg_host_if_t * host_if = _core_if->host_if;
+ dwc_otg_hc_regs_t * hc_regs = host_if->hc_regs[hc_num];
+
+ /* Clear old interrupt conditions for this host channel. */
+ hc_intr_mask.d32 = 0xFFFFFFFF;
+ hc_intr_mask.b.reserved = 0;
+ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32);
+
+ /* Enable channel interrupts required for this transfer. */
+ hc_intr_mask.d32 = 0;
+ hc_intr_mask.b.chhltd = 1;
+ if (_core_if->dma_enable) {
+ hc_intr_mask.b.ahberr = 1;
+ if (_hc->error_state && !_hc->do_split &&
+ _hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
+ hc_intr_mask.b.ack = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.datatglerr = 1;
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_INTR) {
+ hc_intr_mask.b.nak = 1;
+ }
+ }
+ }
+ } else {
+ switch (_hc->ep_type) {
+ case DWC_OTG_EP_TYPE_CONTROL:
+ case DWC_OTG_EP_TYPE_BULK:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.stall = 1;
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.datatglerr = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.bblerr = 1;
+ } else {
+ hc_intr_mask.b.nak = 1;
+ hc_intr_mask.b.nyet = 1;
+ if (_hc->do_ping) {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+ if (_hc->do_split) {
+ hc_intr_mask.b.nak = 1;
+ if (_hc->complete_split) {
+ hc_intr_mask.b.nyet = 1;
+ } else {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+ if (_hc->error_state) {
+ hc_intr_mask.b.ack = 1;
+ }
+ break;
+ case DWC_OTG_EP_TYPE_INTR:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.nak = 1;
+ hc_intr_mask.b.stall = 1;
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.datatglerr = 1;
+ hc_intr_mask.b.frmovrun = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.bblerr = 1;
+ }
+ if (_hc->error_state) {
+ hc_intr_mask.b.ack = 1;
+ }
+ if (_hc->do_split) {
+ if (_hc->complete_split) {
+ hc_intr_mask.b.nyet = 1;
+ } else {
+ hc_intr_mask.b.ack = 1;
+ }
+ }
+ break;
+ case DWC_OTG_EP_TYPE_ISOC:
+ hc_intr_mask.b.xfercompl = 1;
+ hc_intr_mask.b.frmovrun = 1;
+ hc_intr_mask.b.ack = 1;
+ if (_hc->ep_is_in) {
+ hc_intr_mask.b.xacterr = 1;
+ hc_intr_mask.b.bblerr = 1;
+ }
+ break;
+ }
+ }
+ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32);
+
+ /* Enable the top level host channel interrupt. */
+ intr_enable = (1 << hc_num);
+ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
+
+ /* Make sure host channel interrupts are enabled. */
+ gintmsk.b.hcintr = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
+
+ /*
+ * Program the HCCHARn register with the endpoint characteristics for
+ * the current transfer.
+ */
+ hcchar.d32 = 0;
+ hcchar.b.devaddr = _hc->dev_addr;
+ hcchar.b.epnum = _hc->ep_num;
+ hcchar.b.epdir = _hc->ep_is_in;
+ hcchar.b.lspddev = (_hc->speed == DWC_OTG_EP_SPEED_LOW);
+ hcchar.b.eptype = _hc->ep_type;
+ hcchar.b.mps = _hc->max_packet;
+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr);
+ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum);
+ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir);
+ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
+ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype);
+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
+ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt);
+
+ /*
+ * Program the HCSPLIT register for SPLITs
+ */
+ hcsplt.d32 = 0;
+ if (_hc->do_split) {
+ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n",
+ _hc->hc_num,_hc->complete_split ? "CSPLIT" : "SSPLIT");
+ hcsplt.b.compsplt = _hc->complete_split;
+ hcsplt.b.xactpos = _hc->xact_pos;
+ hcsplt.b.hubaddr = _hc->hub_addr;
+ hcsplt.b.prtaddr = _hc->port_addr;
+ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", _hc->complete_split);
+ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", _hc->xact_pos);
+ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", _hc->hub_addr);
+ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", _hc->port_addr);
+ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", _hc->ep_is_in);
+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
+ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", _hc->xfer_len);
+ }
+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
+}
+
+
+/**
+ * Attempts to halt a host channel. This function should only be called in
+ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
+ * normal circumstances in DMA mode, the controller halts the channel when the
+ * transfer is complete or a condition occurs that requires application
+ * intervention.
+ *
+ * In slave mode, checks for a free request queue entry, then sets the Channel
+ * Enable and Channel Disable bits of the Host Channel Characteristics
+ * register of the specified channel to intiate the halt. If there is no free
+ * request queue entry, sets only the Channel Disable bit of the HCCHARn
+ * register to flush requests for this channel. In the latter case, sets a
+ * flag to indicate that the host channel needs to be halted when a request
+ * queue slot is open.
+ *
+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the
+ * HCCHARn register. The controller ensures there is space in the request
+ * queue before submitting the halt request.
+ *
+ * Some time may elapse before the core flushes any posted requests for this
+ * host channel and halts. The Channel Halted interrupt handler completes the
+ * deactivation of the host channel.
+ *
+ * @param _core_if Controller register interface.
+ * @param _hc Host channel to halt.
+ * @param _halt_status Reason for halting the channel.
+ */
+void dwc_otg_hc_halt(dwc_otg_core_if_t * _core_if,
+ dwc_hc_t * _hc, dwc_otg_halt_status_e _halt_status)
+{
+ gnptxsts_data_t nptxsts;
+ hptxsts_data_t hptxsts;
+ hcchar_data_t hcchar;
+ dwc_otg_hc_regs_t * hc_regs;
+ dwc_otg_core_global_regs_t * global_regs;
+ dwc_otg_host_global_regs_t * host_global_regs;
+ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ global_regs = _core_if->core_global_regs;
+ host_global_regs = _core_if->host_if->host_global_regs;
+ WARN_ON(_halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS);
+ if (_halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
+ _halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
+
+ /*
+ * Disable all channel interrupts except Ch Halted. The QTD
+ * and QH state associated with this transfer has been cleared
+ * (in the case of URB_DEQUEUE), so the channel needs to be
+ * shut down carefully to prevent crashes.
+ */
+ hcintmsk_data_t hcintmsk;
+ hcintmsk.d32 = 0;
+ hcintmsk.b.chhltd = 1;
+ dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32);
+
+ /*
+ * Make sure no other interrupts besides halt are currently
+ * pending. Handling another interrupt could cause a crash due
+ * to the QTD and QH state.
+ */
+ dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32);
+
+ /*
+ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
+ * even if the channel was already halted for some other
+ * reason.
+ */
+ _hc->halt_status = _halt_status;
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen == 0) {
+ /*
+ * The channel is either already halted or it hasn't
+ * started yet. In DMA mode, the transfer may halt if
+ * it finishes normally or a condition occurs that
+ * requires driver intervention. Don't want to halt
+ * the channel again. In either Slave or DMA mode,
+ * it's possible that the transfer has been assigned
+ * to a channel, but not started yet when an URB is
+ * dequeued. Don't want to halt a channel that hasn't
+ * started yet.
+ */
+ return;
+ }
+ }
+ if (_hc->halt_pending) {
+
+ /*
+ * A halt has already been issued for this channel. This might
+ * happen when a transfer is aborted by a higher level in
+ * the stack.
+ */
+#ifdef CONFIG_DWC_DEBUG
+ DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n",
+ __func__, _hc->hc_num);
+/* dwc_otg_dump_global_registers(_core_if); */
+/* dwc_otg_dump_host_registers(_core_if); */
+#endif /* */
+ return;
+ }
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 1;
+ if (!_core_if->dma_enable) {
+ /* Check for space in the request queue to issue the halt. */
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL
+ || _hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
+ nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ if (nptxsts.b.nptxqspcavail == 0) {
+ hcchar.b.chen = 0;
+ }
+ } else {
+ hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts);
+ if ((hptxsts.b.ptxqspcavail == 0) ||
+ (_core_if->queuing_high_bandwidth)) {
+ hcchar.b.chen = 0;
+ }
+ }
+ }
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ _hc->halt_status = _halt_status;
+ if (hcchar.b.chen) {
+ _hc->halt_pending = 1;
+ _hc->halt_on_queue = 0;
+ } else {
+ _hc->halt_on_queue = 1;
+ }
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32);
+ DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", _hc->halt_pending);
+ DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", _hc->halt_on_queue);
+ DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", _hc->halt_status);
+ return;
+}
+
+
+/**
+ * Clears the transfer state for a host channel. This function is normally
+ * called after a transfer is done and the host channel is being released.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Identifies the host channel to clean up.
+ */
+void dwc_otg_hc_cleanup(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+ dwc_otg_hc_regs_t * hc_regs;
+ _hc->xfer_started = 0;
+
+ /*
+ * Clear channel interrupt enables and any unhandled channel interrupt
+ * conditions.
+ */
+ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ dwc_write_reg32(&hc_regs->hcintmsk, 0);
+ dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF);
+
+#ifdef CONFIG_DWC_DEBUG
+ del_timer(&_core_if->hc_xfer_timer[_hc->hc_num]);
+ {
+ hcchar_data_t hcchar;
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chdis) {
+ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, _hc->hc_num, hcchar.d32);
+ }
+ }
+#endif /* */
+}
+
+
+/**
+ * Sets the channel property that indicates in which frame a periodic transfer
+ * should occur. This is always set to the _next_ frame. This function has no
+ * effect on non-periodic transfers.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Identifies the host channel to set up and its properties.
+ * @param _hcchar Current value of the HCCHAR register for the specified host
+ * channel.
+ */
+static inline void hc_set_even_odd_frame(dwc_otg_core_if_t * _core_if,
+ dwc_hc_t * _hc, hcchar_data_t * _hcchar)
+{
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ hfnum_data_t hfnum;
+ hfnum.d32 = dwc_read_reg32(&_core_if->host_if->host_global_regs->hfnum);
+
+ /* 1 if _next_ frame is odd, 0 if it's even */
+ _hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
+
+#ifdef CONFIG_DWC_DEBUG
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR && _hc->do_split
+ && !_hc->complete_split) {
+ switch (hfnum.b.frnum & 0x7) {
+ case 7:
+ _core_if->hfnum_7_samples++;
+ _core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
+ break;
+ case 0:
+ _core_if->hfnum_0_samples++;
+ _core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
+ break;
+ default:
+ _core_if->hfnum_other_samples++;
+ _core_if->hfnum_other_frrem_accum +=
+ hfnum.b.frrem;
+ break;
+ }
+ }
+#endif /* */
+ }
+}
+
+#ifdef CONFIG_DWC_DEBUG
+/*
+ * Debug-only watchdog callback: fires if a host channel transfer has not
+ * completed within the 10s window armed in dwc_otg_hc_start_transfer().
+ * Logs the channel number and the HCCHAR value captured at start time.
+ */
+static void hc_xfer_timeout(unsigned long _ptr)
+{
+	hc_xfer_info_t * xfer_info = (hc_xfer_info_t *) _ptr;
+	int hc_num = xfer_info->hc->hc_num;
+	DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num);
+	DWC_WARN("	start_hcchar_val 0x%08x\n",
+		  xfer_info->core_if->start_hcchar_val[hc_num]);
+}
+#endif	/*  */
+
+/*
+ * This function does the setup for a data transfer for a host channel and
+ * starts the transfer. May be called in either Slave mode or DMA mode. In
+ * Slave mode, the caller must ensure that there is sufficient space in the
+ * request queue and Tx Data FIFO.
+ *
+ * For an OUT transfer in Slave mode, it loads a data packet into the
+ * appropriate FIFO. If necessary, additional data packets will be loaded in
+ * the Host ISR.
+ *
+ * For an IN transfer in Slave mode, a data packet is requested. The data
+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
+ * additional data packets are requested in the Host ISR.
+ *
+ * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
+ * register along with a packet count of 1 and the channel is enabled. This
+ * causes a single PING transaction to occur. Other fields in HCTSIZ are
+ * simply set to 0 since no data transfer occurs in this case.
+ *
+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with
+ * all the information required to perform the subsequent data transfer. In
+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
+ * controller performs the entire PING protocol, then starts the data
+ * transfer.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc Information needed to initialize the host channel. The xfer_len
+ * value may be reduced to accommodate the max widths of the XferSize and
+ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed
+ * to reflect the final xfer_len value.
+ */
+void dwc_otg_hc_start_transfer(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ uint16_t num_packets;
+ uint32_t max_hc_xfer_size = _core_if->core_params->max_transfer_size;
+ uint16_t max_hc_pkt_count = _core_if->core_params->max_packet_count;
+ dwc_otg_hc_regs_t * hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ hctsiz.d32 = 0;
+ if (_hc->do_ping) {
+ if (!_core_if->dma_enable) {
+ dwc_otg_hc_do_ping(_core_if, _hc);
+ _hc->xfer_started = 1;
+ return;
+ } else {
+ hctsiz.b.dopng = 1;
+ }
+ }
+ if (_hc->do_split) {
+ num_packets = 1;
+ if (_hc->complete_split && !_hc->ep_is_in) {
+ /* For CSPLIT OUT Transfer, set the size to 0 so the
+ * core doesn't expect any data written to the FIFO */
+ _hc->xfer_len = 0;
+ } else if (_hc->ep_is_in || (_hc->xfer_len > _hc->max_packet)) {
+ _hc->xfer_len = _hc->max_packet;
+ } else if (!_hc->ep_is_in && (_hc->xfer_len > 188)) {
+ _hc->xfer_len = 188;
+ }
+ hctsiz.b.xfersize = _hc->xfer_len;
+ } else {
+ /*
+ * Ensure that the transfer length and packet count will fit
+ * in the widths allocated for them in the HCTSIZn register.
+ */
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR
+ || _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * Make sure the transfer size is no larger than one
+ * (micro)frame's worth of data. (A check was done
+ * when the periodic transfer was accepted to ensure
+ * that a (micro)frame's worth of data can be
+ * programmed into a channel.)
+ */
+ uint32_t max_periodic_len = _hc->multi_count * _hc->max_packet;
+ if (_hc->xfer_len > max_periodic_len) {
+ _hc->xfer_len = max_periodic_len;
+ } else {
+ }
+ } else if (_hc->xfer_len > max_hc_xfer_size) {
+ /* Make sure that xfer_len is a multiple of max packet size. */
+ _hc->xfer_len = max_hc_xfer_size - _hc->max_packet + 1;
+ }
+ if (_hc->xfer_len > 0) {
+ num_packets = (_hc->xfer_len + _hc->max_packet - 1) / _hc->max_packet;
+ if (num_packets > max_hc_pkt_count) {
+ num_packets = max_hc_pkt_count;
+ _hc->xfer_len = num_packets * _hc->max_packet;
+ }
+ } else {
+ /* Need 1 packet for transfer length of 0. */
+ num_packets = 1;
+ }
+ if (_hc->ep_is_in) {
+ /* Always program an integral # of max packets for IN transfers. */
+ _hc->xfer_len = num_packets * _hc->max_packet;
+ }
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR
+ || _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * Make sure that the multi_count field matches the
+ * actual transfer length.
+ */
+ _hc->multi_count = num_packets;
+ }
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /* Set up the initial PID for the transfer. */
+ if (_hc->speed == DWC_OTG_EP_SPEED_HIGH) {
+ if (_hc->ep_is_in) {
+ if (_hc->multi_count == 1) {
+ _hc->data_pid_start =
+ DWC_OTG_HC_PID_DATA0;
+ } else if (_hc->multi_count == 2) {
+ _hc->data_pid_start =
+ DWC_OTG_HC_PID_DATA1;
+ } else {
+ _hc->data_pid_start =
+ DWC_OTG_HC_PID_DATA2;
+ }
+ } else {
+ if (_hc->multi_count == 1) {
+ _hc->data_pid_start =
+ DWC_OTG_HC_PID_DATA0;
+ } else {
+ _hc->data_pid_start =
+ DWC_OTG_HC_PID_MDATA;
+ }
+ }
+ } else {
+ _hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
+ }
+ }
+ hctsiz.b.xfersize = _hc->xfer_len;
+ }
+ _hc->start_pkt_count = num_packets;
+ hctsiz.b.pktcnt = num_packets;
+ hctsiz.b.pid = _hc->data_pid_start;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize);
+ DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt);
+ DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
+ if (_core_if->dma_enable) {
+ dwc_write_reg32(&hc_regs->hcdma, (uint32_t) _hc->xfer_buff);
+ }
+
+ /* Start the split */
+ if (_hc->do_split) {
+ hcsplt_data_t hcsplt;
+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+ hcsplt.b.spltena = 1;
+ dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32);
+ }
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.multicnt = _hc->multi_count;
+ hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+
+#ifdef CONFIG_DWC_DEBUG
+ _core_if->start_hcchar_val[_hc->hc_num] = hcchar.d32;
+ if (hcchar.b.chdis) {
+ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
+ __func__, _hc->hc_num, hcchar.d32);
+ }
+
+#endif /* */
+
+ /* Set host channel enable after all other setup is complete. */
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ _hc->xfer_started = 1;
+ _hc->requests++;
+ if (!_core_if->dma_enable && !_hc->ep_is_in && _hc->xfer_len > 0) {
+ /* Load OUT packet into the appropriate Tx FIFO. */
+ dwc_otg_hc_write_packet(_core_if, _hc);
+ }
+
+#ifdef CONFIG_DWC_DEBUG
+ /* Start a timer for this transfer. */
+ _core_if->hc_xfer_timer[_hc->hc_num].function = hc_xfer_timeout;
+ _core_if->hc_xfer_info[_hc->hc_num].core_if = _core_if;
+ _core_if->hc_xfer_info[_hc->hc_num].hc = _hc;
+ _core_if->hc_xfer_timer[_hc->hc_num].data =
+ (unsigned long)(&_core_if->hc_xfer_info[_hc->hc_num]);
+ _core_if->hc_xfer_timer[_hc->hc_num].expires = jiffies + (HZ * 10);
+ add_timer(&_core_if->hc_xfer_timer[_hc->hc_num]);
+#endif /* */
+}
+
+/**
+ * This function continues a data transfer that was started by previous call
+ * to <code>dwc_otg_hc_start_transfer</code>. The caller must ensure there is
+ * sufficient space in the request queue and Tx Data FIFO. This function
+ * should only be called in Slave mode. In DMA mode, the controller acts
+ * autonomously to complete transfers programmed to a host channel.
+ *
+ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO
+ * if there is any data remaining to be queued. For an IN transfer, another
+ * data packet is always requested. For the SETUP phase of a control transfer,
+ * this function does nothing.
+ *
+ * @return 1 if a new request is queued, 0 if no more requests are required
+ * for this transfer.
+ */
+int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ if (_hc->do_split) {
+ /* SPLITs always queue just once per channel */
+ return 0;
+ } else if (_hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
+ /* SETUPs are queued only once since they can't be NAKed. */
+ return 0;
+ } else if (_hc->ep_is_in) {
+ /*
+ * Always queue another request for other IN transfers. If
+ * back-to-back INs are issued and NAKs are received for both,
+ * the driver may still be processing the first NAK when the
+ * second NAK is received. When the interrupt handler clears
+ * the NAK interrupt for the first NAK, the second NAK will
+ * not be seen. So we can't depend on the NAK interrupt
+ * handler to requeue a NAKed request. Instead, IN requests
+ * are issued each time this function is called. When the
+ * transfer completes, the extra requests for the channel will
+ * be flushed.
+ */
+ hcchar_data_t hcchar;
+ dwc_otg_hc_regs_t * hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32);
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ _hc->requests++;
+ return 1;
+ } else {
+ /* OUT transfers. */
+ if (_hc->xfer_count < _hc->xfer_len) {
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ hcchar_data_t hcchar;
+ dwc_otg_hc_regs_t * hc_regs;
+ hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hc_set_even_odd_frame(_core_if, _hc, &hcchar);
+ }
+
+ /* Load OUT packet into the appropriate Tx FIFO. */
+ dwc_otg_hc_write_packet(_core_if, _hc);
+ _hc->requests++;
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+}
+
+/**
+ * Starts a PING transfer. This function should only be called in Slave mode.
+ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled.
+ */
+void dwc_otg_hc_do_ping(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ dwc_otg_hc_regs_t * hc_regs = _core_if->host_if->hc_regs[_hc->hc_num];
+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, _hc->hc_num);
+ hctsiz.d32 = 0;
+ hctsiz.b.dopng = 1;
+ hctsiz.b.pktcnt = 1;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.chen = 1;
+ hcchar.b.chdis = 0;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+}
+
+
+#ifdef CONFIG_OTG_PLB_DMA  /* PPC_PLB_DMA mode */
+/*
+ * Debug helper: dump the eight per-channel DCRs of one PLB-DMA channel
+ * followed by the global DMA status register.  Debug builds only.
+ */
+void ppc4xx_dump_dma(unsigned int dmanr)
+{
+	unsigned int base = DCRN_DMACR0 + dmanr * 8;
+	int reg;
+
+	printk("%32s:\n", __FUNCTION__);
+	for (reg = 0; reg < 8; reg++)
+		printk("%32s dmanr=%d , 0x%x=0x%x\n", __FUNCTION__, dmanr,
+		       base + reg, mfdcr(base + reg));
+	printk("%32s DCRN_DMASR=0x%x\n", __FUNCTION__, mfdcr(DCRN_DMASR));
+}
+
+/*
+ * This function programs the PLB-DMA engine to perform MEM-MEM transfer
+ * This is used to RD & WR from the DWC_FIFO by the PLB_DMA engine
+ */
+void ppc4xx_start_plb_dma(dwc_otg_core_if_t *_core_if, void *src, void *dst, unsigned int length,
+ unsigned int use_interrupt, unsigned int dma_ch, unsigned int dma_dir)
+{
+ int res = 0;
+ unsigned int control;
+ ppc_dma_ch_t p_init;
+
+ memset((char *)&p_init, sizeof(p_init), 0);
+ p_init.polarity = 0;
+ p_init.pwidth = PW_32;
+ p_init.in_use = 0;
+ if ( dma_dir == OTG_TX_DMA) {
+ p_init.sai = 1;
+ p_init.dai = 0;
+ } else if (dma_dir == OTG_RX_DMA) {
+ p_init.sai = 0;
+ p_init.dai = 1;
+ }
+ res = ppc4xx_init_dma_channel(dma_ch, &p_init);
+ if (res) {
+ printk("%32s: nit_dma_channel return %d %d bytes dest %p\n",
+ __FUNCTION__, res, length, dst);
+ }
+ res = ppc4xx_clr_dma_status(dma_ch);
+ if (res) {
+ printk("%32s: ppc4xx_clr_dma_status %d\n", __FUNCTION__, res);
+ }
+
+ if (dma_dir == OTG_TX_DMA) {
+ ppc4xx_set_src_addr(dma_ch, virt_to_bus (src));
+ ppc4xx_set_dst_addr(dma_ch, (_core_if->phys_addr +
+ (dst - (void *)(_core_if->core_global_regs))) );
+ } else if (dma_dir == OTG_RX_DMA) {
+ ppc4xx_set_src_addr(dma_ch, (_core_if->phys_addr +
+ (src - (void *)(_core_if->core_global_regs))) );
+ ppc4xx_set_dst_addr(dma_ch, virt_to_bus (dst));
+ }
+
+ ppc4xx_set_dma_mode(dma_ch, DMA_MODE_MM);
+ ppc4xx_set_dma_count(dma_ch, length);
+
+ /* flush cache before enabling DMA transfer */
+ if (dma_dir == OTG_TX_DMA) {
+ flush_dcache_range((unsigned long)src,
+ (unsigned long)(src + length));
+ } else if (dma_dir == OTG_RX_DMA) {
+ flush_dcache_range((unsigned long)dst,
+ (unsigned long)(dst + length));
+ }
+
+ if (use_interrupt) {
+ res = ppc4xx_enable_dma_interrupt(dma_ch);
+ } else {
+ res = ppc4xx_disable_dma_interrupt(dma_ch);
+ }
+ if (res) {
+ printk("%32s: en/disable_dma_interrupt %d return %d per %d\n",
+ __FUNCTION__, use_interrupt, res,
+ ppc4xx_get_peripheral_width(dma_ch));
+ }
+
+ control = mfdcr(DCRN_DMACR0 + (dma_ch * 8));
+
+ control &= ~(SET_DMA_BEN(1));
+ control &= ~(SET_DMA_PSC(3));
+ control &= ~(SET_DMA_PWC(0x3f));
+ control &= ~(SET_DMA_PHC(0x7));
+ control &= ~(SET_DMA_PL(1));
+
+ mtdcr(DCRN_DMACR0 + (dma_ch * 8), control);
+
+#ifdef OTG_PLB_DMA_DBG
+ ppc4xx_dump_dma(dma_ch);
+#endif
+ ppc4xx_enable_dma(dma_ch);
+}
+#endif
+
+/*
+ * This function writes a packet into the Tx FIFO associated with the Host
+ * Channel. For a channel associated with a non-periodic EP, the non-periodic
+ * Tx FIFO is written. For a channel associated with a periodic EP, the
+ * periodic Tx FIFO is written. This function should only be called in Slave
+ * mode.
+ *
+ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by
+ * the number of bytes written to the Tx FIFO.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _hc      Host channel whose Tx FIFO is to be written.
+ */
+void dwc_otg_hc_write_packet(dwc_otg_core_if_t * _core_if, dwc_hc_t * _hc)
+{
+#ifndef CONFIG_OTG_PLB_DMA
+	uint32_t i;
+#endif
+	uint32_t remaining_count;
+	uint32_t byte_count;
+	uint32_t dword_count;
+	uint32_t * data_buff = (uint32_t *) (_hc->xfer_buff);
+	uint32_t * data_fifo = _core_if->data_fifo[_hc->hc_num];
+#if !defined( CONFIG_OTG_PLB_DMA_TASKLET) && defined(CONFIG_OTG_PLB_DMA)
+	uint32_t dma_sts = 0;
+#endif
+	/* Write at most one max-packet's worth of the remaining transfer. */
+	remaining_count = _hc->xfer_len - _hc->xfer_count;
+	if (remaining_count > _hc->max_packet) {
+		byte_count = _hc->max_packet;
+	} else {
+		byte_count = remaining_count;
+	}
+	/* FIFO accesses are 32 bits wide; round up to whole DWORDs. */
+	dword_count = (byte_count + 3) / 4;
+
+#ifdef CONFIG_OTG_PLB_DMA
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+
+	/* Small transfers are copied inline (PIO); larger ones are handed
+	 * off to the PLB DMA tasklet with global interrupts disabled. */
+	if ( _hc->xfer_len < USB_BUFSIZ) {
+		int i;
+		if ((((unsigned long)data_buff) & 0x3) == 0) {
+			/* xfer_buff is DWORD aligned. */
+			for (i = 0; i < dword_count; i++, data_buff++) {
+				dwc_write_datafifo32(data_fifo, *data_buff);
+			}
+		} else {
+			/* xfer_buff is not DWORD aligned. */
+			for (i = 0; i < dword_count; i++, data_buff++) {
+				dwc_write_datafifo32(data_fifo, get_unaligned(data_buff));
+			}
+		}
+	} else {
+		DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n", __func__, dword_count);
+		atomic_set(& release_later, 1);
+		//disable_irq_nosync(94);
+		dwc_otg_disable_global_interrupts(_core_if);
+
+		/* Describe the copy for the tasklet, then schedule it. */
+		_core_if->dma_xfer.dma_data_buff = data_buff;
+		_core_if->dma_xfer.dma_data_fifo = (void *)data_fifo;
+		_core_if->dma_xfer.dma_count = dword_count;
+		_core_if->dma_xfer.dma_dir = OTG_TX_DMA;
+		tasklet_schedule(_core_if->plbdma_tasklet);
+	}
+#else /* !CONFIG_OTG_PLB_DMA_TASKLET */
+	if ((((unsigned long)data_buff) & 0x3) == 0) {
+		/* call tx_dma - src,dest,len,intr */
+		ppc4xx_start_plb_dma(_core_if, (void *)data_buff, data_fifo,
+			(dword_count * 4), PLB_DMA_INT_DIS, PLB_DMA_CH, OTG_TX_DMA);
+	} else {
+		ppc4xx_start_plb_dma(_core_if, (void *)get_unaligned(data_buff),
+			data_fifo, (dword_count * 4), PLB_DMA_INT_DIS, PLB_DMA_CH, OTG_TX_DMA);
+	}
+
+	/* Busy-wait until the DMA channel's enable bit clears. */
+	while (mfdcr(DCRN_DMACR0 + (PLB_DMA_CH*8)) & DMA_CE_ENABLE) {
+	}
+	dma_sts = (uint32_t)ppc4xx_get_dma_status();
+#ifdef OTG_PLB_DMA_DBG
+	if (!(dma_sts & DMA_CS0)) {
+		printk("Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
+	}
+#endif
+	if (dma_sts & DMA_CH0_ERR) {
+		printk("Status (Channel Error) 0x%08x\n", mfdcr(DCRN_DMASR));
+	}
+	ppc4xx_clr_dma_status(PLB_DMA_CH);
+#ifdef OTG_PLB_DMA_DBG
+	printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR)); /* vj_dbg */
+#endif
+
+#endif /* CONFIG_OTG_PLB_DMA_TASKLET */
+
+
+#else
+	if ((((unsigned long)data_buff) & 0x3) == 0) {
+		/* xfer_buff is DWORD aligned. */
+		for (i = 0; i < dword_count; i++, data_buff++) {
+			dwc_write_datafifo32(data_fifo, *data_buff);
+		}
+	} else {
+		/* xfer_buff is not DWORD aligned. */
+		for (i = 0; i < dword_count; i++, data_buff++) {
+			dwc_write_datafifo32(data_fifo, get_unaligned(data_buff));
+		}
+	}
+#endif
+	/* NOTE(review): counters advance here even on the tasklet path,
+	 * i.e. before the deferred DMA copy has actually run — confirm the
+	 * tasklet relies on the dma_xfer snapshot taken above. */
+	_hc->xfer_count += byte_count;
+	_hc->xfer_buff += byte_count;
+}
+
+/**
+ * Gets the current USB frame number. This is the frame number from the last
+ * SOF packet.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @return The SOFFN field of the Device Status register (DSTS).
+ */
+uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t * _core_if)
+{
+	dsts_data_t dsts;
+	dsts.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dsts);
+	/* read current frame/microframe number from DSTS register */
+	return dsts.b.soffn;
+}
+
+
+/**
+ * This function reads a setup packet from the Rx FIFO into the destination
+ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl)
+ * Interrupt routine when a SETUP packet has been received in Slave mode.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _dest Destination buffer for packet data (must hold 8 bytes).
+ */
+void dwc_otg_read_setup_packet(dwc_otg_core_if_t * _core_if, uint32_t * _dest)
+{
+	/* Get the 8 bytes of a setup transaction data */
+
+	/* Pop 2 DWORDS off the receive data FIFO into memory; the two reads
+	 * from the same FIFO address return consecutive words. */
+	_dest[0] = dwc_read_datafifo32(_core_if->data_fifo[0]);
+	_dest[1] = dwc_read_datafifo32(_core_if->data_fifo[0]);
+}
+
+/**
+ * This function enables EP0 OUT to receive SETUP packets and configures EP0
+ * IN for transmitting packets. It is normally called when the
+ * "Enumeration Done" interrupt occurs.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_activate(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+	dsts_data_t dsts;
+	depctl_data_t diepctl;
+	depctl_data_t doepctl;
+	dctl_data_t dctl = {.d32 = 0};
+
+	/* Read the Device Status and Endpoint 0 Control registers */
+	dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts);
+	diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl);
+	doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
+
+	/* Set the MPS of the IN EP based on the enumeration speed:
+	 * 64 bytes for HS/FS, 8 bytes for LS. */
+	switch (dsts.b.enumspd) {
+	case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
+	case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
+	case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
+		diepctl.b.mps = DWC_DEP0CTL_MPS_64;
+		break;
+	case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
+		diepctl.b.mps = DWC_DEP0CTL_MPS_8;
+		break;
+	}
+	dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
+
+	/* Enable OUT EP for receive */
+	doepctl.b.epena = 1;
+	dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
+
+#ifdef VERBOSE
+	DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
+		     dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
+	DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
+		     dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
+
+#endif /* */
+	/* Clear the global non-periodic IN NAK (CGNPInNak) in DCTL. */
+	dctl.b.cgnpinnak = 1;
+	dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+	DWC_DEBUGPL(DBG_PCDV, "dctl=%0x\n",
+		     dwc_read_reg32(&dev_if->dev_global_regs->dctl));
+}
+
+
+/**
+ * This function activates an EP. The Device EP control register for
+ * the EP is configured as defined in the ep structure. Note: This
+ * function is not used for EP0.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to activate.
+ */
+void dwc_otg_ep_activate(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+	depctl_data_t depctl;
+	volatile uint32_t *addr;
+	daint_data_t daintmsk = {.d32 = 0};
+	DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, _ep->num,
+		     (_ep->is_in ? "IN" : "OUT"));
+
+	/* Read DEPCTLn register; also build the DAINT mask bit for this EP. */
+	if (_ep->is_in == 1) {
+		addr = &dev_if->in_ep_regs[_ep->num]->diepctl;
+		daintmsk.ep.in = 1 << _ep->num;
+	} else {
+		addr = &dev_if->out_ep_regs[_ep->num]->doepctl;
+		daintmsk.ep.out = 1 << _ep->num;
+	}
+
+	/* If the EP is already active don't change the EP Control
+	 * register.
+	 */
+	depctl.d32 = dwc_read_reg32(addr);
+	if (!depctl.b.usbactep) {
+		depctl.b.mps = _ep->maxpacket;
+		depctl.b.eptype = _ep->type;
+		depctl.b.txfnum = _ep->tx_fifo_num;
+		/* NOTE(review): both branches set setd0pid identically; the
+		 * ISOC case was presumably intended to differ (see "???"). */
+		if (_ep->type == DWC_OTG_EP_TYPE_ISOC) {
+			depctl.b.setd0pid = 1;	// ???
+		} else {
+			depctl.b.setd0pid = 1;
+		}
+		depctl.b.usbactep = 1;
+		dwc_write_reg32(addr, depctl.d32);
+		DWC_DEBUGPL(DBG_PCDV, "DEPCTL=%08x\n", dwc_read_reg32(addr));
+	}
+
+	/* Enable the Interrupt for this EP */
+	dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk, 0,
+			  daintmsk.d32);
+	DWC_DEBUGPL(DBG_PCDV, "DAINTMSK=%0x\n",
+		     dwc_read_reg32(&dev_if->dev_global_regs->daintmsk));
+	_ep->stall_clear_flag = 0;
+	return;
+}
+
+
+/**
+ * This function deactivates an EP. This is done by clearing the USB Active
+ * EP bit in the Device EP control register. Note: This function is not used
+ * for EP0. EP0 cannot be deactivated.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to deactivate.
+ */
+void dwc_otg_ep_deactivate(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	depctl_data_t depctl = {.d32 = 0};
+	volatile uint32_t *addr;
+	daint_data_t daintmsk = {.d32 = 0};
+
+	/* Read DEPCTLn register */
+	if (_ep->is_in == 1) {
+		addr = &_core_if->dev_if->in_ep_regs[_ep->num]->diepctl;
+		daintmsk.ep.in = 1 << _ep->num;
+	} else {
+		addr = &_core_if->dev_if->out_ep_regs[_ep->num]->doepctl;
+		daintmsk.ep.out = 1 << _ep->num;
+	}
+	/* NOTE(review): depctl was zero-initialized and never read from the
+	 * register, so this write clears ALL DEPCTL fields (MPS, type, ...),
+	 * not just usbactep — confirm that is the intent. */
+	depctl.b.usbactep = 0;
+	dwc_write_reg32(addr, depctl.d32);
+
+	/* Disable the Interrupt for this EP */
+	dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->daintmsk,
+			  daintmsk.d32, 0);
+	return;
+}
+
+
+/**
+ * This function does the setup for a data transfer for an EP and
+ * starts the transfer. For an IN transfer, the packets will be
+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
+ * the packets are unloaded from the Rx FIFO in the ISR.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to start the transfer on.
+ */
+void dwc_otg_ep_start_transfer(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	/** @todo Refactor this function to check the transfer size
+	 * count value does not exceed the number of bits in the Transfer
+	 * count register. */
+	depctl_data_t depctl;
+	deptsiz_data_t deptsiz;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+
+#ifdef CHECK_PACKET_COUNTER_WIDTH
+	const uint32_t MAX_XFER_SIZE = _core_if->core_params->max_transfer_size;
+	const uint32_t MAX_PKT_COUNT = _core_if->core_params->max_packet_count;
+	uint32_t num_packets;
+	uint32_t transfer_len;
+	dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num];
+	dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[_ep->num];
+	gnptxsts_data_t txstatus;
+	int lvl = SET_DEBUG_LEVEL(DBG_PCD);
+	DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+		    "xfer_buff=%p start_xfer_buff=%p\n", _ep->num,
+		    (_ep->is_in ? "IN" : "OUT"), _ep->xfer_len,
+		    _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff);
+	/* Clamp the remaining length to the core's maximum transfer size. */
+	transfer_len = _ep->xfer_len - _ep->xfer_count;
+	if (transfer_len > MAX_XFER_SIZE) {
+		transfer_len = MAX_XFER_SIZE;
+	}
+	if (transfer_len == 0) {
+		num_packets = 1;
+
+		/* OUT EP to receive Zero-length packet set transfer
+		 * size to maxpacket size. */
+		if (!_ep->is_in) {
+			transfer_len = _ep->maxpacket;
+		}
+	} else {
+		num_packets = (transfer_len + _ep->maxpacket - 1) / _ep->maxpacket;
+		if (num_packets > MAX_PKT_COUNT) {
+			num_packets = MAX_PKT_COUNT;
+		}
+	}
+	DWC_DEBUGPL(DBG_PCD, "transfer_len=%d #pckt=%d\n", transfer_len,
+		    num_packets);
+	deptsiz.b.xfersize = transfer_len;
+	deptsiz.b.pktcnt = num_packets;
+
+	/* IN endpoint */
+	if (_ep->is_in == 1) {
+		depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+	} /* OUT endpoint */
+	else {
+		depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
+	}
+
+	/* EP enable, IN data in FIFO */
+	depctl.b.cnak = 1;
+	depctl.b.epena = 1;
+
+	/* IN endpoint */
+	if (_ep->is_in == 1) {
+		txstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+		if (txstatus.b.nptxqspcavail == 0) {
+			DWC_DEBUGPL(DBG_ANY, "TX Queue Full (0x%0x)\n",
+				    txstatus.d32);
+			return;
+		}
+		dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+		dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+		/**
+		 * Enable the Non-Periodic Tx FIFO empty interrupt, the
+		 * data will be written into the fifo by the ISR.
+		 */
+		if (_core_if->dma_enable) {
+			dwc_write_reg32(&in_regs->diepdma, (uint32_t) _ep->xfer_buff);
+		} else {
+			if (_core_if->en_multiple_tx_fifo == 0) {
+				intr_mask.b.nptxfempty = 1;
+				dwc_modify_reg32(&_core_if->core_global_regs->gintsts,
+						 intr_mask.d32, 0);
+				dwc_modify_reg32(&_core_if->core_global_regs->gintmsk,
+						 intr_mask.d32,intr_mask.d32);
+			} else {
+				/* Enable the Tx FIFO Empty Interrupt for this EP */
+				if (_ep->xfer_len > 0 &&
+				    _ep->type != DWC_OTG_EP_TYPE_ISOC) {
+					uint32_t fifoemptymsk = 0;
+					fifoemptymsk = (0x1 << _ep->num);
+					dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->
+							 dtknqr4_fifoemptymsk,0, fifoemptymsk);
+				}
+			}
+		}
+	} else {
+		/* OUT endpoint */
+		dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+		dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+		if (_core_if->dma_enable) {
+			dwc_write_reg32(&out_regs->doepdma,(uint32_t) _ep->xfer_buff);
+		}
+	}
+	DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
+		    dwc_read_reg32(&out_regs->doepctl),
+		    dwc_read_reg32(&out_regs->doeptsiz));
+	DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
+		    dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk),
+		    dwc_read_reg32(&_core_if->core_global_regs->gintmsk));
+	SET_DEBUG_LEVEL(lvl);
+
+#endif /* */
+	DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__);
+	DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+		    "xfer_buff=%p start_xfer_buff=%p\n", _ep->num,
+		    (_ep->is_in ? "IN" : "OUT"), _ep->xfer_len,
+		    _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff);
+
+	/* IN endpoint */
+	if (_ep->is_in == 1) {
+		dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[_ep->num];
+		gnptxsts_data_t gtxstatus;
+		/* Bail out early if the shared non-periodic Tx queue is full. */
+		gtxstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+		if (_core_if->en_multiple_tx_fifo == 0 &&
+		    gtxstatus.b.nptxqspcavail == 0) {
+#ifdef CONFIG_DWC_DEBUG
+			DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32);
+#endif /* */
+			return;
+		}
+		depctl.d32 = dwc_read_reg32(&(in_regs->diepctl));
+		deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
+
+		/* Zero Length Packet? */
+		if (_ep->xfer_len == 0) {
+			deptsiz.b.xfersize = 0;
+			deptsiz.b.pktcnt = 1;
+		} else {
+			/* Program the transfer size and packet count
+			 * as follows: xfersize = N * maxpacket +
+			 * short_packet pktcnt = N + (short_packet
+			 * exist ? 1 : 0)
+			 */
+
+#ifdef CONFIG_405EZ
+			/*
+			 * Added-sr: 2007-07-26
+			 *
+			 * Since the 405EZ (Ultra) only support 2047 bytes as
+			 * max transfer size, we have to split up bigger transfers
+			 * into multiple transfers of 1024 bytes sized messages.
+			 * It happens often, that transfers of 4096 bytes are
+			 * required (zero-gadget, file_storage-gadget).
+			 */
+			if (_ep->xfer_len > MAX_XFER_LEN) {
+				_ep->bytes_pending = _ep->xfer_len - MAX_XFER_LEN;
+				_ep->xfer_len = MAX_XFER_LEN;
+			}
+#endif
+
+			deptsiz.b.xfersize = _ep->xfer_len;
+			deptsiz.b.pktcnt = (_ep->xfer_len - 1 + _ep->maxpacket) / _ep->maxpacket;
+		}
+		dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+
+		/* Write the DMA register */
+		if (_core_if->dma_enable) {
+			dwc_write_reg32(&(in_regs->diepdma), (uint32_t) _ep->dma_addr);
+		} else {
+			if (_ep->type != DWC_OTG_EP_TYPE_ISOC) {
+				/**
+				 * Enable the Non-Periodic Tx FIFO empty interrupt,
+				 * or the Tx FIFO empty interrupt in dedicated Tx FIFO mode,
+				 * the data will be written into the fifo by the ISR.
+				 */
+				if (_core_if->en_multiple_tx_fifo == 0) {
+					intr_mask.b.nptxfempty = 1;
+					dwc_modify_reg32(&_core_if->core_global_regs->gintsts,
+							 intr_mask.d32, 0);
+					dwc_modify_reg32(&_core_if->core_global_regs->gintmsk,
+							 intr_mask.d32, intr_mask.d32);
+				} else {
+					/* Enable the Tx FIFO Empty Interrupt for this EP */
+					if (_ep->xfer_len > 0) {
+						uint32_t fifoemptymsk = 0;
+						fifoemptymsk = 1 << _ep->num;
+						dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->
+								 dtknqr4_fifoemptymsk,0,fifoemptymsk);
+					}
+				}
+			}
+		}
+
+		/* EP enable, IN data in FIFO */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+		/* In DMA mode chain this EP into EP0's NextEp field. */
+		if (_core_if->dma_enable) {
+			depctl.d32 = dwc_read_reg32(&_core_if->dev_if->in_ep_regs[0]->diepctl);
+			depctl.b.nextep = _ep->num;
+			dwc_write_reg32(&_core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32);
+		}
+	} else {
+		/* OUT endpoint */
+		dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num];
+		depctl.d32 = dwc_read_reg32(&(out_regs->doepctl));
+		deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz));
+
+		/* Program the transfer size and packet count as follows:
+		 *
+		 * pktcnt = N
+		 * xfersize = N * maxpacket
+		 */
+		if (_ep->xfer_len == 0) {
+			/* Zero Length Packet */
+			deptsiz.b.xfersize = _ep->maxpacket;
+			deptsiz.b.pktcnt = 1;
+		} else {
+			deptsiz.b.pktcnt = (_ep->xfer_len + (_ep->maxpacket - 1)) / _ep->maxpacket;
+			deptsiz.b.xfersize = deptsiz.b.pktcnt * _ep->maxpacket;
+		}
+		dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+		DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n",
+			    _ep->num, deptsiz.b.xfersize, deptsiz.b.pktcnt);
+		if (_core_if->dma_enable) {
+			dwc_write_reg32(&(out_regs->doepdma),
+					(uint32_t) _ep->dma_addr);
+		}
+		if (_ep->type == DWC_OTG_EP_TYPE_ISOC) {
+			/** @todo NGS: dpid is read-only. Use setd0pid
+			 * or setd1pid. */
+			if (_ep->even_odd_frame) {
+				depctl.b.setd1pid = 1;
+			} else {
+				depctl.b.setd0pid = 1;
+			}
+		}
+
+		/* EP enable */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		dwc_write_reg32(&out_regs->doepctl, depctl.d32);
+		DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n",
+			    dwc_read_reg32(&out_regs->doepctl),
+			    dwc_read_reg32(&out_regs->doeptsiz));
+		DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n",
+			    dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk),
+			    dwc_read_reg32(&_core_if->core_global_regs->gintmsk));
+	}
+}
+
+/**
+ * This function does the setup for a data transfer for EP0 and starts
+ * the transfer. For an IN transfer, the packets will be loaded into
+ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are
+ * unloaded from the Rx FIFO in the ISR.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	volatile depctl_data_t depctl;
+	volatile deptsiz0_data_t deptsiz;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d "
+		    "xfer_buff=%p start_xfer_buff=%p total_len=%d\n",
+		    _ep->num, (_ep->is_in ? "IN" : "OUT"), _ep->xfer_len,
+		    _ep->xfer_count, _ep->xfer_buff, _ep->start_xfer_buff,
+		    _ep->total_len);
+	/* Remember the full request length; xfer_len may be clamped below
+	 * and dwc_otg_ep0_continue_transfer() uses total_len to resume. */
+	_ep->total_len = _ep->xfer_len;
+
+	/* IN endpoint */
+	if (_ep->is_in == 1) {
+		dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[0];
+		gnptxsts_data_t gtxstatus;
+		gtxstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+		if (_core_if->en_multiple_tx_fifo == 0 &&
+		    gtxstatus.b.nptxqspcavail == 0) {
+#ifdef CONFIG_DWC_DEBUG
+			deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+			DWC_DEBUGPL(DBG_PCD, "DIEPCTL0=%0x\n",
+				    dwc_read_reg32(&in_regs->diepctl));
+			DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n",
+				    deptsiz.d32, deptsiz.b.xfersize,deptsiz.b.pktcnt);
+			DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", gtxstatus.d32);
+#endif /* */
+			printk("TX Queue or FIFO Full!!!!\n"); // test-only
+			return;
+		}
+		depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+		deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+
+		/* Zero Length Packet? */
+		if (_ep->xfer_len == 0) {
+			deptsiz.b.xfersize = 0;
+			deptsiz.b.pktcnt = 1;
+		} else {
+			/* Program the transfer size and packet count
+			 * as follows: xfersize = N * maxpacket +
+			 * short_packet pktcnt = N + (short_packet
+			 * exist ? 1 : 0)
+			 */
+			/* EP0 sends at most one max-packet per programmed
+			 * transfer; longer data is continued later. */
+			if (_ep->xfer_len > _ep->maxpacket) {
+				_ep->xfer_len = _ep->maxpacket;
+				deptsiz.b.xfersize = _ep->maxpacket;
+			} else {
+				deptsiz.b.xfersize = _ep->xfer_len;
+			}
+			deptsiz.b.pktcnt = 1;
+		}
+		dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+		DWC_DEBUGPL(DBG_PCDV,"IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+			    _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt, deptsiz.d32);
+
+		/* Write the DMA register */
+		if (_core_if->dma_enable) {
+			dwc_write_reg32(&(in_regs->diepdma), (uint32_t) _ep->dma_addr);
+		}
+
+		/* EP enable, IN data in FIFO */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+		/**
+		 * Enable the Non-Periodic Tx FIFO empty interrupt, the
+		 * data will be written into the fifo by the ISR.
+		 */
+		if (!_core_if->dma_enable) {
+			if (_core_if->en_multiple_tx_fifo == 0) {
+				intr_mask.b.nptxfempty = 1;
+				dwc_modify_reg32(&_core_if->core_global_regs->gintsts, intr_mask.d32, 0);
+				dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, intr_mask.d32,
+						 intr_mask.d32);
+			} else {
+				/* Enable the Tx FIFO Empty Interrupt for this EP */
+				if (_ep->xfer_len > 0) {
+					uint32_t fifoemptymsk = 0;
+					fifoemptymsk |= 1 << _ep->num;
+					dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk,
+							 0, fifoemptymsk);
+				}
+			}
+		}
+	} else {
+		/* OUT endpoint */
+		dwc_otg_dev_out_ep_regs_t * out_regs = _core_if->dev_if->out_ep_regs[_ep->num];
+		depctl.d32 = dwc_read_reg32(&out_regs->doepctl);
+		deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz);
+
+		/* Program the transfer size and packet count as follows:
+		 * xfersize = N * (maxpacket + 4 - (maxpacket % 4))
+		 * pktcnt = N */
+		if (_ep->xfer_len == 0) {
+			/* Zero Length Packet */
+			deptsiz.b.xfersize = _ep->maxpacket;
+			deptsiz.b.pktcnt = 1;
+		} else {
+			deptsiz.b.pktcnt = (_ep->xfer_len + (_ep->maxpacket - 1)) / _ep->maxpacket;
+			deptsiz.b.xfersize = deptsiz.b.pktcnt * _ep->maxpacket;
+		}
+		dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32);
+		DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n",
+			    _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt);
+		if (_core_if->dma_enable) {
+			dwc_write_reg32(&(out_regs->doepdma), (uint32_t) _ep->dma_addr);
+		}
+
+		/* EP enable */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		dwc_write_reg32(&(out_regs->doepctl), depctl.d32);
+	}
+}
+
+
+/**
+ * This function continues control IN transfers started by
+ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a
+ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one
+ * bit for the packet count.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP0 data.
+ */
+void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t * _core_if,
+				   dwc_ep_t * _ep)
+{
+	depctl_data_t depctl;
+	deptsiz0_data_t deptsiz;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	/* Only IN transfers are continued; OUT is a no-op here. */
+	if (_ep->is_in == 1) {
+		dwc_otg_dev_in_ep_regs_t * in_regs = _core_if->dev_if->in_ep_regs[0];
+		gnptxsts_data_t tx_status = {.d32 = 0};
+		/* NOTE(review): tx_status is read but never checked (see the
+		 * @todo below). */
+		tx_status.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+
+		/** @todo Should there be check for room in the Tx
+		 * Status Queue. If not remove the code above this comment. */
+		depctl.d32 = dwc_read_reg32(&in_regs->diepctl);
+		deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz);
+
+		/* Program the transfer size and packet count
+		 * as follows: xfersize = N * maxpacket +
+		 * short_packet pktcnt = N + (short_packet
+		 * exist ? 1 : 0)
+		 */
+		/* Next chunk: remaining bytes, capped at one max-packet. */
+		deptsiz.b.xfersize = (_ep->total_len - _ep->xfer_count) >
+			_ep->maxpacket ? _ep->maxpacket : (_ep->total_len -
+			_ep->xfer_count);
+		deptsiz.b.pktcnt = 1;
+		_ep->xfer_len += deptsiz.b.xfersize;
+		dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+		DWC_DEBUGPL(DBG_PCDV,"IN len=%d xfersize=%d pktcnt=%d [%08x]\n",
+			    _ep->xfer_len, deptsiz.b.xfersize,deptsiz.b.pktcnt, deptsiz.d32);
+
+		/* Write the DMA register */
+		if (_core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) {
+			dwc_write_reg32(&(in_regs->diepdma),(uint32_t) _ep->dma_addr);
+		}
+
+		/* EP enable, IN data in FIFO */
+		depctl.b.cnak = 1;
+		depctl.b.epena = 1;
+		dwc_write_reg32(&in_regs->diepctl, depctl.d32);
+
+		/**
+		 * Enable the Non-Periodic Tx FIFO empty interrupt, the
+		 * data will be written into the fifo by the ISR.
+		 */
+		if (!_core_if->dma_enable) {
+			/* First clear it from GINTSTS */
+			intr_mask.b.nptxfempty = 1;
+			dwc_write_reg32(&_core_if->core_global_regs->gintsts,
+					intr_mask.d32);
+			dwc_modify_reg32(&_core_if->core_global_regs->gintmsk,
+					 intr_mask.d32, intr_mask.d32);
+		}
+	}
+}
+
+#ifdef CONFIG_DWC_DEBUG
+/* Hex-dump up to 511 bytes of a buffer via DWC_PRINT, 16 bytes per line
+ * with an extra space after the eighth byte. Buffers of 512 bytes or more
+ * are silently skipped. */
+void dump_msg(const u8 * buf, unsigned int length)
+{
+	unsigned int offset = 0;
+	char hexline[52];
+
+	if (length >= 512)
+		return;
+
+	while (length > 0) {
+		unsigned int chunk = min(length, 16u);
+		unsigned int i;
+		char *out = hexline;
+
+		for (i = 0; i < chunk; ++i) {
+			if (i == 8)
+				*out++ = ' ';	/* visual gap mid-line */
+			sprintf(out, " %02x", buf[i]);
+			out += 3;
+		}
+		*out = 0;
+		DWC_PRINT("%6x: %s\n", offset, hexline);
+		buf += chunk;
+		offset += chunk;
+		length -= chunk;
+	}
+}
+
+
+#else /* */
+/* CONFIG_DWC_DEBUG disabled: hex dumps compile away to nothing. */
+static inline void dump_msg(const u8 * buf, unsigned int length)
+{
+}
+#endif /* */
+
+/**
+ * This function writes a packet into the Tx FIFO associated with the
+ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For
+ * periodic EPs the periodic Tx FIFO associated with the EP is written
+ * with all packets for the next micro-frame.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to write packet for.
+ * @param _dma Indicates if DMA is being used (non-zero skips the FIFO copy).
+ */
+void dwc_otg_ep_write_packet(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep,
+			     int _dma)
+{
+	/**
+	 * The buffer is padded to DWORD on a per packet basis in
+	 * slave/dma mode if the MPS is not DWORD aligned. The last
+	 * packet, if short, is also padded to a multiple of DWORD.
+	 *
+	 * ep->xfer_buff always starts DWORD aligned in memory and is a
+	 * multiple of DWORD in length
+	 *
+	 * ep->xfer_len can be any number of bytes
+	 *
+	 * ep->xfer_count is a multiple of ep->maxpacket until the last
+	 * packet
+	 *
+	 * FIFO access is DWORD */
+#ifndef CONFIG_OTG_PLB_DMA
+	uint32_t i;
+#endif
+	uint32_t byte_count;
+	uint32_t dword_count;
+	uint32_t * fifo;
+	uint32_t * data_buff = (uint32_t *) _ep->xfer_buff;
+#if !defined( CONFIG_OTG_PLB_DMA_TASKLET) && defined(CONFIG_OTG_PLB_DMA)
+	uint32_t dma_sts = 0;
+#endif
+	//DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, _core_if, _ep);
+	/* Nothing left to send for this EP. */
+	if (_ep->xfer_count >= _ep->xfer_len) {
+		DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s() No data for EP%d!!!\n", __func__, _ep->num);
+		return;
+	}
+
+	/* Find the byte length of the packet either short packet or MPS */
+	if ((_ep->xfer_len - _ep->xfer_count) < _ep->maxpacket) {
+		byte_count = _ep->xfer_len - _ep->xfer_count;
+	} else {
+		byte_count = _ep->maxpacket;
+	}
+
+	/* Find the DWORD length, padded by extra bytes as necessary if MPS
+	 * is not a multiple of DWORD */
+	dword_count = (byte_count + 3) / 4;
+
+#ifdef VERBOSE
+	dump_msg(_ep->xfer_buff, byte_count);
+#endif /* */
+
+	/**@todo NGS Where are the Periodic Tx FIFO addresses
+	 * initialized? What should this be? */
+	fifo = _core_if->data_fifo[_ep->num];
+	DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n",
+		    fifo, data_buff, *data_buff, byte_count);
+	/* In DMA mode the controller fetches data itself; only copy to the
+	 * FIFO in slave mode. */
+	if (!_dma) {
+#ifdef CONFIG_OTG_PLB_DMA
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+		/* Small packets are copied inline; larger ones are deferred
+		 * to the PLB DMA tasklet with global interrupts disabled. */
+		if (byte_count < USB_BUFSIZ) {
+			int i;
+			for (i = 0; i < dword_count; i++, data_buff++) {
+				dwc_write_datafifo32(fifo, *data_buff);
+			}
+		}
+		else {
+			DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n", __func__, dword_count);
+			atomic_set(& release_later, 1);
+			//disable_irq_nosync(94);
+			dwc_otg_disable_global_interrupts(_core_if);
+
+			_core_if->dma_xfer.dma_data_buff = data_buff;
+			_core_if->dma_xfer.dma_data_fifo = fifo;
+			_core_if->dma_xfer.dma_count = dword_count;
+			_core_if->dma_xfer.dma_dir = OTG_TX_DMA;
+			tasklet_schedule(_core_if->plbdma_tasklet);
+		}
+#else /* !CONFIG_OTG_PLB_DMA_TASKLET */
+		ppc4xx_start_plb_dma(_core_if, data_buff, fifo, (dword_count * 4),
+			PLB_DMA_INT_DIS , PLB_DMA_CH, OTG_TX_DMA);
+		/* Busy-wait until the DMA channel's enable bit clears. */
+		while (mfdcr(DCRN_DMACR0 + (DMA_CH0*8)) & DMA_CE_ENABLE) {
+		}
+		dma_sts = (uint32_t)ppc4xx_get_dma_status();
+#ifdef OTG_PLB_DMA_DBG
+		if (!(dma_sts & DMA_CS0)) {
+			printk("DMA Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
+		}
+#endif
+		if (dma_sts & DMA_CH0_ERR) {
+			printk("DMA Status (Channel 0 Error) 0x%08x\n", mfdcr(DCRN_DMASR));
+		}
+		ppc4xx_clr_dma_status(PLB_DMA_CH);
+#ifdef OTG_PLB_DMA_DBG
+		printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR)); /* vj_dbg */
+#endif
+#endif /* CONFIG_OTG_PLB_DMA_TASKLET */
+
+#else /* DWC_SLAVE mode */
+		for (i = 0; i < dword_count; i++, data_buff++) {
+			dwc_write_datafifo32(fifo, *data_buff);
+		}
+#endif
+	}
+
+	/* Advance bookkeeping by the bytes consumed from this packet. */
+	_ep->xfer_count += byte_count;
+	_ep->xfer_buff += byte_count;
+	_ep->dma_addr += byte_count;
+}
+
+
+/**
+ * Set the EP STALL.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to set the stall on.
+ */
+void dwc_otg_ep_set_stall(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	depctl_data_t depctl;
+	volatile uint32_t *depctl_addr;
+	DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num,
+		    (_ep->is_in ? "IN" : "OUT"));
+	if (_ep->is_in == 1) {
+		depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl);
+		depctl.d32 = dwc_read_reg32(depctl_addr);
+
+		/* set the disable and stall bits; an IN EP that is currently
+		 * enabled must also be disabled (epdis). */
+		if (depctl.b.epena) {
+			depctl.b.epdis = 1;
+		}
+		depctl.b.stall = 1;
+		dwc_write_reg32(depctl_addr, depctl.d32);
+	} else {
+		depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl);
+		depctl.d32 = dwc_read_reg32(depctl_addr);
+
+		/* set the stall bit */
+		depctl.b.stall = 1;
+		dwc_write_reg32(depctl_addr, depctl.d32);
+	}
+	DWC_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", dwc_read_reg32(depctl_addr));
+	return;
+}
+
+
+/**
+ * Clear the EP STALL.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _ep The EP to clear stall from.
+ */
+void dwc_otg_ep_clear_stall(dwc_otg_core_if_t * _core_if, dwc_ep_t * _ep)
+{
+	depctl_data_t depctl;
+	volatile uint32_t *depctl_addr;
+	DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, _ep->num,
+		    (_ep->is_in ? "IN" : "OUT"));
+	if (_ep->is_in == 1) {
+		depctl_addr = &(_core_if->dev_if->in_ep_regs[_ep->num]->diepctl);
+	} else {
+		depctl_addr = &(_core_if->dev_if->out_ep_regs[_ep->num]->doepctl);
+	}
+	depctl.d32 = dwc_read_reg32(depctl_addr);
+
+	/* clear the stall bits */
+	depctl.b.stall = 0;
+
+	/*
+	 * USB Spec 9.4.5: For endpoints using data toggle, regardless
+	 * of whether an endpoint has the Halt feature set, a
+	 * ClearFeature(ENDPOINT_HALT) request always results in the
+	 * data toggle being reinitialized to DATA0.
+	 */
+	if (_ep->type == DWC_OTG_EP_TYPE_INTR ||
+	    _ep->type == DWC_OTG_EP_TYPE_BULK) {
+		depctl.b.setd0pid = 1; /* DATA0 */
+	}
+	dwc_write_reg32(depctl_addr, depctl.d32);
+	DWC_DEBUGPL(DBG_PCD, "DEPCTL=%0x\n", dwc_read_reg32(depctl_addr));
+	return;
+}
+
+
+/**
+ * This function reads a packet from the Rx FIFO into the destination
+ * buffer. To read SETUP data use dwc_otg_read_setup_packet.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _dest Destination buffer for the packet.
+ * @param _bytes Number of bytes to copy to the destination.
+ */
+void dwc_otg_read_packet(dwc_otg_core_if_t * _core_if,
+			 uint8_t * _dest, uint16_t _bytes)
+{
+#ifndef CONFIG_OTG_PLB_DMA
+	int i;
+#endif
+	/* FIFO pops are 32-bit; round the byte count up to whole words. */
+	int word_count = (_bytes + 3) / 4;
+	volatile uint32_t *fifo = _core_if->data_fifo[0];
+	uint32_t * data_buff = (uint32_t *) _dest;
+#if !defined( CONFIG_OTG_PLB_DMA_TASKLET) && defined(CONFIG_OTG_PLB_DMA)
+	uint32_t dma_sts = 0;
+#endif
+
+	/**
+	 * @todo Account for the case where _dest is not dword aligned. This
+	 * requires reading data from the FIFO into a uint32_t temp buffer,
+	 * then moving it into the data buffer.
+	 */
+	DWC_DEBUGPL((DBG_PCDV | DBG_CILV | DBG_SP), "%s(%p,%p,%d)\n", __func__,
+		    _core_if, _dest, _bytes);
+#ifdef CONFIG_OTG_PLB_DMA
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+	/* Small packets are popped inline; larger ones are deferred to the
+	 * PLB DMA tasklet with global interrupts disabled. */
+	if ( _bytes < USB_BUFSIZ) {
+		int i;
+		for (i = 0; i < word_count; i++, data_buff++) {
+			*data_buff = dwc_read_datafifo32(fifo);
+		}
+	} else {
+		DWC_DEBUGPL(DBG_SP, "%s set release_later %d\n", __func__, _bytes);
+		atomic_set(& release_later, 1);
+		//disable_irq_nosync(94);
+		dwc_otg_disable_global_interrupts(_core_if);
+
+		/* plbdma tasklet */
+		_core_if->dma_xfer.dma_data_buff = data_buff;
+		_core_if->dma_xfer.dma_data_fifo = (void *)fifo;
+		_core_if->dma_xfer.dma_count = word_count;
+		_core_if->dma_xfer.dma_dir = OTG_RX_DMA;
+		tasklet_schedule(_core_if->plbdma_tasklet);
+	}
+#else /* !CONFIG_OTG_PLB_DMA_TASKLET */
+	ppc4xx_start_plb_dma(_core_if,(void *)fifo,data_buff, (word_count * 4),
+		PLB_DMA_INT_DIS, PLB_DMA_CH, OTG_RX_DMA);
+	/* Busy-wait until the DMA channel's enable bit clears. */
+	while (mfdcr(DCRN_DMACR0 + (DMA_CH0*8)) & DMA_CE_ENABLE) {
+	}
+	dma_sts = (uint32_t)ppc4xx_get_dma_status();
+#ifdef OTG_PLB_DMA_DBG
+	if (!(dma_sts & DMA_CS0)) {
+		printk("DMA Status (Terminal Count not occured) 0x%08x\n", mfdcr(DCRN_DMASR));
+	}
+#endif
+	if (dma_sts & DMA_CH0_ERR) {
+		printk("DMA Status (Channel 0 Error) 0x%08x\n", mfdcr(DCRN_DMASR));
+	}
+	ppc4xx_clr_dma_status(PLB_DMA_CH);
+#ifdef OTG_PLB_DMA_DBG
+	printk("%32s DMA Status =0x%08x\n", __FUNCTION__, mfdcr(DCRN_DMASR));
+	printk(" Rxed buffer \n");
+	for( i=0; i< _bytes; i++) {
+		printk(" 0x%02x",*(_dest +i));
+	}
+	printk(" \n End of Rxed buffer \n");
+#endif
+#endif /* CONFIG_OTG_PLB_DMA_TASKLET */
+
+#else /* DWC_SLAVE mode */
+	for (i = 0; i < word_count; i++, data_buff++) {
+		*data_buff = dwc_read_datafifo32(fifo);
+	}
+#endif
+	return;
+}
+
+
+/**
+ * This function reads the device-mode registers and prints each register's
+ * address and current value via DWC_PRINT.
+ *
+ * The DTKNQR2/3/4 registers are dumped only when the hardware's token
+ * queue depth (hwcfg2.b.dev_token_q_depth) indicates they exist.
+ *
+ * NOTE(review): reads go through dwc_read_reg32() in a fixed order; some
+ * OTG registers may have read side effects, so do not reorder these reads
+ * without checking the databook.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_dev_registers(dwc_otg_core_if_t * _core_if)
+{
+	int i;
+	volatile uint32_t *addr;
+	DWC_PRINT("Device Global Registers\n");
+	addr = &_core_if->dev_if->dev_global_regs->dcfg;
+	DWC_PRINT("DCFG	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->dev_if->dev_global_regs->dctl;
+	DWC_PRINT("DCTL	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->dev_if->dev_global_regs->dsts;
+	DWC_PRINT("DSTS	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->dev_if->dev_global_regs->diepmsk;
+	DWC_PRINT("DIEPMSK	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->dev_if->dev_global_regs->doepmsk;
+	DWC_PRINT("DOEPMSK	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->dev_if->dev_global_regs->daint;
+	DWC_PRINT("DAINT	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->dev_if->dev_global_regs->dtknqr1;
+	DWC_PRINT("DTKNQR1	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	/* Deeper token-queue registers exist only on cores configured with
+	 * a large enough device token queue. */
+	if (_core_if->hwcfg2.b.dev_token_q_depth > 6) {
+		addr = &_core_if->dev_if->dev_global_regs->dtknqr2;
+		DWC_PRINT("DTKNQR2	 @0x%08X : 0x%08X\n",
+			  (uint32_t) addr, dwc_read_reg32(addr));
+	}
+	addr = &_core_if->dev_if->dev_global_regs->dvbusdis;
+	DWC_PRINT("DVBUSID	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->dev_if->dev_global_regs->dvbuspulse;
+	DWC_PRINT("DVBUSPULSE	@0x%08X : 0x%08X\n",
+		  (uint32_t) addr, dwc_read_reg32(addr));
+	if (_core_if->hwcfg2.b.dev_token_q_depth > 14) {
+		addr = &_core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
+		DWC_PRINT("DTKNQR3	 @0x%08X : 0x%08X\n",
+			  (uint32_t) addr, dwc_read_reg32(addr));
+	}
+	if (_core_if->hwcfg2.b.dev_token_q_depth > 22) {
+		addr = &_core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
+		DWC_PRINT("DTKNQR4	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+	}
+	/* Per-endpoint register dumps: note <= bounds, so EP0 is included
+	 * in addition to num_in_eps/num_out_eps data endpoints. */
+	for (i = 0; i <= _core_if->dev_if->num_in_eps; i++) {
+		DWC_PRINT("Device IN EP %d Registers\n", i);
+		addr = &_core_if->dev_if->in_ep_regs[i]->diepctl;
+		DWC_PRINT("DIEPCTL	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->dev_if->in_ep_regs[i]->diepint;
+		DWC_PRINT("DIEPINT	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->dev_if->in_ep_regs[i]->dieptsiz;
+		DWC_PRINT("DIETSIZ	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->dev_if->in_ep_regs[i]->diepdma;
+		DWC_PRINT("DIEPDMA	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->dev_if->in_ep_regs[i]->dtxfsts;
+		DWC_PRINT("DTXFSTS	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+	}
+	for (i = 0; i <= _core_if->dev_if->num_out_eps; i++) {
+		DWC_PRINT("Device OUT EP %d Registers\n", i);
+		addr = &_core_if->dev_if->out_ep_regs[i]->doepctl;
+		DWC_PRINT("DOEPCTL	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->dev_if->out_ep_regs[i]->doepfn;
+		DWC_PRINT("DOEPFN	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->dev_if->out_ep_regs[i]->doepint;
+		DWC_PRINT("DOEPINT	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->dev_if->out_ep_regs[i]->doeptsiz;
+		DWC_PRINT("DOETSIZ	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->dev_if->out_ep_regs[i]->doepdma;
+		DWC_PRINT("DOEPDMA	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+	}
+	return;
+}
+
+
+/**
+ * This function reads the host-mode registers and prints each register's
+ * address and current value via DWC_PRINT.
+ *
+ * Dumps the host global registers, HPRT0, and then the per-channel
+ * registers for every configured host channel.
+ *
+ * NOTE(review): reads go through dwc_read_reg32() in a fixed order; some
+ * OTG registers may have read side effects, so do not reorder the reads.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_host_registers(dwc_otg_core_if_t * _core_if)
+{
+	int i;
+	volatile uint32_t *addr;
+	DWC_PRINT("Host Global Registers\n");
+	addr = &_core_if->host_if->host_global_regs->hcfg;
+	DWC_PRINT("HCFG	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->host_if->host_global_regs->hfir;
+	DWC_PRINT("HFIR	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->host_if->host_global_regs->hfnum;
+	DWC_PRINT("HFNUM	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->host_if->host_global_regs->hptxsts;
+	DWC_PRINT("HPTXSTS	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->host_if->host_global_regs->haint;
+	DWC_PRINT("HAINT	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->host_if->host_global_regs->haintmsk;
+	DWC_PRINT("HAINTMSK	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	/* hprt0 is stored as a pointer, not an embedded register field,
+	 * hence no address-of operator here. */
+	addr = _core_if->host_if->hprt0;
+	DWC_PRINT("HPRT0	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	for (i = 0; i < _core_if->core_params->host_channels; i++) {
+		DWC_PRINT("Host Channel %d Specific Registers\n", i);
+		addr = &_core_if->host_if->hc_regs[i]->hcchar;
+		DWC_PRINT("HCCHAR	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->host_if->hc_regs[i]->hcsplt;
+		DWC_PRINT("HCSPLT	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->host_if->hc_regs[i]->hcint;
+		DWC_PRINT("HCINT	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->host_if->hc_regs[i]->hcintmsk;
+		DWC_PRINT("HCINTMSK	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->host_if->hc_regs[i]->hctsiz;
+		DWC_PRINT("HCTSIZ	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+		addr = &_core_if->host_if->hc_regs[i]->hcdma;
+		DWC_PRINT("HCDMA	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+			  dwc_read_reg32(addr));
+	}
+	return;
+}
+
+
+/**
+ * This function reads the core global registers and prints each register's
+ * address and current value via DWC_PRINT.
+ *
+ * Fix: the opening banner was missing its trailing newline, so the first
+ * register line was appended to the banner on the same output line; every
+ * other print in this file terminates with '\n'.
+ *
+ * NOTE(review): reads go through dwc_read_reg32() in a fixed order; some
+ * OTG registers may have read side effects, so do not reorder the reads.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_dump_global_registers(dwc_otg_core_if_t * _core_if)
+{
+	int i;
+	volatile uint32_t *addr;
+	DWC_PRINT("Core Global Registers\n");
+	addr = &_core_if->core_global_regs->gotgctl;
+	DWC_PRINT("GOTGCTL	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gotgint;
+	DWC_PRINT("GOTGINT	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gahbcfg;
+	DWC_PRINT("GAHBCFG	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gusbcfg;
+	DWC_PRINT("GUSBCFG	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->grstctl;
+	DWC_PRINT("GRSTCTL	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gintsts;
+	DWC_PRINT("GINTSTS	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gintmsk;
+	DWC_PRINT("GINTMSK	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->grxstsr;
+	DWC_PRINT("GRXSTSR	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+
+	/* GRXSTSP is deliberately not dumped: NOTE(review): it appears to be
+	 * the "pop" variant of GRXSTSR, and reading it would presumably
+	 * dequeue an RxFIFO status entry -- confirm against the databook. */
+	//addr=&_core_if->core_global_regs->grxstsp;
+	//DWC_PRINT("GRXSTSP	 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->grxfsiz;
+	DWC_PRINT("GRXFSIZ	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gnptxfsiz;
+	DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gnptxsts;
+	DWC_PRINT("GNPTXSTS	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gi2cctl;
+	DWC_PRINT("GI2CCTL	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gpvndctl;
+	DWC_PRINT("GPVNDCTL	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->ggpio;
+	DWC_PRINT("GGPIO	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->guid;
+	DWC_PRINT("GUID		 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->gsnpsid;
+	DWC_PRINT("GSNPSID	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->ghwcfg1;
+	DWC_PRINT("GHWCFG1	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->ghwcfg2;
+	DWC_PRINT("GHWCFG2	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->ghwcfg3;
+	DWC_PRINT("GHWCFG3	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->ghwcfg4;
+	DWC_PRINT("GHWCFG4	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	addr = &_core_if->core_global_regs->hptxfsiz;
+	DWC_PRINT("HPTXFSIZ	 @0x%08X : 0x%08X\n", (uint32_t) addr,
+		  dwc_read_reg32(addr));
+	/* One periodic Tx FIFO size register per device periodic IN EP. */
+	for (i = 0; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
+		addr = &_core_if->core_global_regs->dptxfsiz_dieptxf[i];
+		DWC_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n", i,
+			  (uint32_t) addr, dwc_read_reg32(addr));
+	}
+}
+
+
+/**
+ * Flush a Tx FIFO.
+ *
+ * Writes GRSTCTL with txfflsh set and the FIFO number, then polls until
+ * the core clears the txfflsh bit (self-clearing on completion). Gives up
+ * with a warning after ~10 ms (10000 x udelay(1)) instead of hanging.
+ *
+ * NOTE(review): 'extern' on a function definition is redundant (external
+ * linkage is the default) -- kept to avoid touching code in this patch.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _num Tx FIFO to flush.
+ */
+extern void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t * _core_if,
+				  const int _num)
+{
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	/* greset starts zeroed, so only the fields set below are written. */
+	volatile grstctl_t greset = {.d32 = 0 };
+	int count = 0;
+	DWC_DEBUGPL((DBG_CIL | DBG_PCDV), "Flush Tx FIFO %d\n", _num);
+	greset.b.txfflsh = 1;
+	greset.b.txfnum = _num;
+	dwc_write_reg32(&global_regs->grstctl, greset.d32);
+
+	/* Poll for hardware to clear the flush bit; bail out on timeout. */
+	do {
+		greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+		if (++count > 10000) {
+			DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n",
+				 __func__, greset.d32, dwc_read_reg32(&global_regs->gnptxsts));
+			break;
+		}
+		udelay(1);
+	} while (greset.b.txfflsh == 1);
+	/* Wait for 3 PHY Clocks */
+	UDELAY(1);
+}
+
+
+/**
+ * Flush Rx FIFO.
+ *
+ * Writes GRSTCTL with only the rxfflsh bit set, then polls until the core
+ * clears it (self-clearing on completion). Gives up with a warning after
+ * ~10 ms (10000 x udelay(1)) instead of hanging.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+extern void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t * _core_if)
+{
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	volatile grstctl_t greset = {.d32 = 0 };
+	int count = 0;
+	DWC_DEBUGPL((DBG_CIL | DBG_PCDV), "%s\n", __func__);
+
+	/*
+	 * greset was zero-initialized above, so this write sets only the
+	 * rxfflsh bit in GRSTCTL.
+	 */
+	greset.b.rxfflsh = 1;
+	dwc_write_reg32(&global_regs->grstctl, greset.d32);
+
+	/* Poll for hardware to clear the flush bit; bail out on timeout. */
+	do {
+		greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+		if (++count > 10000) {
+			DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, greset.d32);
+			break;
+		}
+		udelay(1);
+	} while (greset.b.rxfflsh == 1);
+
+	/* Wait for 3 PHY Clocks */
+	UDELAY(1);
+}
+
+
+/**
+ * Do core a soft reset of the core. Be careful with this because it
+ * resets all the internal state machines of the core.
+ *
+ * Sequence: (1) wait up to ~1 s for the AHB master to go idle
+ * (grstctl.ahbidle), aborting with a warning if it never does; (2) assert
+ * csftrst and poll up to ~10 ms for the core to self-clear it; (3) delay.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+void dwc_otg_core_reset(dwc_otg_core_if_t * _core_if)
+{
+	dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+	volatile grstctl_t greset = {.d32 = 0 };
+	int count = 0;
+	DWC_DEBUGPL(DBG_CILV, "%s\n", __func__);
+
+	/* Wait for AHB master IDLE state. */
+	do {
+		UDELAY(10);
+		greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+		if (++count > 100000) {
+			DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__, greset.d32);
+			/* Do not attempt the soft reset if the bus never idled. */
+			return;
+		}
+	} while (greset.b.ahbidle == 0);
+
+	/* Core Soft Reset */
+	count = 0;
+	greset.b.csftrst = 1;
+	dwc_write_reg32(&global_regs->grstctl, greset.d32);
+
+	/* csftrst is self-clearing; poll until the core finishes the reset. */
+	do {
+		greset.d32 = dwc_read_reg32(&global_regs->grstctl);
+		if (++count > 10000) {
+			DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, greset.d32);
+			break;
+		}
+		udelay(1);
+	} while (greset.b.csftrst == 1);
+
+	/* Wait for 3 PHY Clocks */
+	/* NOTE(review): 100 ms is far longer than 3 PHY clocks -- presumably
+	 * a conservative settle time chosen empirically; confirm before
+	 * shortening. */
+	//DWC_PRINT("100ms\n");
+	MDELAY(100);
+}
+
+
+/**
+ * Register HCD callbacks. The callbacks are used to start and stop
+ * the HCD for interrupt processing.
+ *
+ * Stores the callback-struct pointer in the core interface and records the
+ * caller-supplied context pointer inside the callback struct.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _cb the HCD callback structure.
+ * @param _p pointer to be passed to callback function (usb_hcd*).
+ */
+extern void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t * _core_if,
+					       dwc_otg_cil_callbacks_t * _cb, void *_p)
+{
+	/* Attach the context first, then publish the callback table. */
+	_cb->p = _p;
+	_core_if->hcd_cb = _cb;
+}
+
+/**
+ * Register PCD callbacks. The callbacks are used to start and stop
+ * the PCD for interrupt processing.
+ *
+ * Stores the callback-struct pointer in the core interface and records the
+ * caller-supplied context pointer inside the callback struct.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _cb the PCD callback structure.
+ * @param _p pointer to be passed to callback function (pcd*).
+ */
+extern void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t * _core_if,
+					       dwc_otg_cil_callbacks_t * _cb, void *_p)
+{
+	/* Attach the context first, then publish the callback table. */
+	_cb->p = _p;
+	_core_if->pcd_cb = _cb;
+}
+
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_cil.h b/drivers/usb/gadget/dwc_otg/dwc_otg_cil.h
new file mode 100644
index 00000000000..adc0086e572
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_cil.h
@@ -0,0 +1,991 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil.h $
+ * $Revision: #12 $
+ * $Date: 2007/02/08 $
+ * $Change: 792294 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_CIL_H__)
+#define __DWC_CIL_H__
+
+#include "linux/dwc_otg_plat.h"
+#include "dwc_otg_regs.h"
+#ifdef CONFIG_DWC_DEBUG
+#include "linux/timer.h"
+#endif
+
+#ifdef CONFIG_OTG_PLB_DMA
+#include "ppc4xx_dma.h"
+#include <asm/cacheflush.h>
+#include <linux/interrupt.h>
+#include <asm/time.h>
+#include <asm/unaligned.h>
+
+#undef OTG_PLB_DMA_DBG
+#define OTG_TX_DMA 0 /* TX DMA direction */
+#define OTG_RX_DMA 1 /* RX DMA direction */
+#define PLB_DMA_CH DMA_CH0 /* plb dma channel */
+#define PLB_DMA_CH_INT 12
+#define PLB_DMA_INT_ENA 1
+#define PLB_DMA_INT_DIS 0
+#define USB_BUFSIZ 512
+
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+#ifndef CONFIG_OTG_PLB_DMA
+#define CONFIG_OTG_PLB_DMA
+#endif
+
+extern atomic_t release_later;
+#endif
+#endif
+
+/**
+ * @file
+ * This file contains the interface to the Core Interface Layer.
+ */
+
+#ifdef CONFIG_405EZ
+/*
+ * Added-sr: 2007-07-26
+ *
+ * Since the 405EZ (Ultra) only supports 2047 bytes as
+ * max transfer size, we have to split up bigger transfers
+ * into multiple transfers of 1024 bytes sized messages.
+ * It happens often that transfers of 4096 bytes are
+ * required (zero-gadget, file_storage-gadget).
+ *
+ * MAX_XFER_LEN is set to 1024 right now, but could be 2047,
+ * since the xfer-size field in the 405EZ USB device controller
+ * implementation has 11 bits. Using 1024 seems to work for now.
+ */
+#define MAX_XFER_LEN 1024
+#endif
+
+/**
+ * The <code>dwc_ep</code> structure represents the state of a single
+ * endpoint when acting in device mode. It contains the data items
+ * needed for an endpoint to be activated and transfer packets.
+ */
+typedef struct dwc_ep {
+	/** EP number used for register address lookup */
+	uint8_t num;
+	/** EP direction 0 = OUT */
+	unsigned is_in : 1;
+	/** EP active. */
+	unsigned active : 1;
+
+	/** Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic Tx FIFO
+	    If dedicated Tx FIFOs are enabled for all IN Eps - Tx FIFO # FOR IN EPs*/
+	unsigned tx_fifo_num : 4;
+	/** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */
+	unsigned type : 2;
+#define DWC_OTG_EP_TYPE_CONTROL	   0
+#define DWC_OTG_EP_TYPE_ISOC	   1
+#define DWC_OTG_EP_TYPE_BULK	   2
+#define DWC_OTG_EP_TYPE_INTR	   3
+
+	/** DATA start PID for INTR and BULK EP */
+	unsigned data_pid_start : 1;
+	/** Frame (even/odd) for ISOC EP */
+	unsigned even_odd_frame : 1;
+	/** Max Packet bytes */
+	unsigned maxpacket : 11;
+
+	/** @name Transfer state */
+	/** @{ */
+
+	/**
+	 * Pointer to the beginning of the transfer buffer -- do not modify
+	 * during transfer.
+	 */
+
+	/* NOTE(review): presumably the bus/DMA address of the transfer
+	 * buffer for DMA mode -- confirm whether this is a mapped address. */
+	uint32_t dma_addr;
+
+	uint8_t *start_xfer_buff;
+	/** pointer to the transfer buffer */
+	uint8_t *xfer_buff;
+	/** Number of bytes to transfer */
+	unsigned xfer_len : 19;
+	/** Number of bytes transferred. */
+	unsigned xfer_count : 19;
+	/** Sent ZLP */
+	unsigned sent_zlp : 1;
+	/** Total len for control transfer */
+	unsigned total_len : 19;
+
+	/** stall clear flag */
+	unsigned stall_clear_flag : 1;
+
+#ifdef CONFIG_405EZ
+	/*
+	 * Added-sr: 2007-07-26
+	 *
+	 * Since the 405EZ (Ultra) only supports 2047 bytes as
+	 * max transfer size, we have to split up bigger transfers
+	 * into multiple transfers of 1024 bytes sized messages.
+	 * It happens often that transfers of 4096 bytes are
+	 * required (zero-gadget, file_storage-gadget).
+	 *
+	 * "bytes_pending" will hold the amount of bytes that are
+	 * still pending to be send in further messages to complete
+	 * the bigger transfer.
+	 */
+	u32 bytes_pending;
+#endif
+
+	/** @} */
+} dwc_ep_t;
+
+/*
+ * Reasons for halting a host channel. Stored in dwc_hc.halt_status; the
+ * NO_HALT_STATUS value indicates the channel has no pending halt reason.
+ */
+typedef enum dwc_otg_halt_status {
+	DWC_OTG_HC_XFER_NO_HALT_STATUS,
+	DWC_OTG_HC_XFER_COMPLETE,
+	DWC_OTG_HC_XFER_URB_COMPLETE,
+	DWC_OTG_HC_XFER_ACK,
+	DWC_OTG_HC_XFER_NAK,
+	DWC_OTG_HC_XFER_NYET,
+	DWC_OTG_HC_XFER_STALL,
+	DWC_OTG_HC_XFER_XACT_ERR,
+	DWC_OTG_HC_XFER_FRAME_OVERRUN,
+	DWC_OTG_HC_XFER_BABBLE_ERR,
+	DWC_OTG_HC_XFER_DATA_TOGGLE_ERR,
+	DWC_OTG_HC_XFER_AHB_ERR,
+	DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE,
+	DWC_OTG_HC_XFER_URB_DEQUEUE
+} dwc_otg_halt_status_e;
+
+/**
+ * Host channel descriptor. This structure represents the state of a single
+ * host channel when acting in host mode. It contains the data items needed to
+ * transfer packets to an endpoint via a host channel.
+ */
+typedef struct dwc_hc {
+	/** Host channel number used for register address lookup */
+	uint8_t hc_num;
+
+	/** Device to access */
+	unsigned dev_addr : 7;
+
+	/** EP to access */
+	unsigned ep_num : 4;
+
+	/** EP direction. 0: OUT, 1: IN */
+	unsigned ep_is_in : 1;
+
+	/**
+	 * EP speed.
+	 * One of the following values:
+	 *	- DWC_OTG_EP_SPEED_LOW
+	 *	- DWC_OTG_EP_SPEED_FULL
+	 *	- DWC_OTG_EP_SPEED_HIGH
+	 */
+	unsigned speed : 2;
+#define DWC_OTG_EP_SPEED_LOW	0
+#define DWC_OTG_EP_SPEED_FULL	1
+#define DWC_OTG_EP_SPEED_HIGH	2
+
+	/**
+	 * Endpoint type.
+	 * One of the following values:
+	 *	- DWC_OTG_EP_TYPE_CONTROL: 0
+	 *	- DWC_OTG_EP_TYPE_ISOC: 1
+	 *	- DWC_OTG_EP_TYPE_BULK: 2
+	 *	- DWC_OTG_EP_TYPE_INTR: 3
+	 */
+	unsigned ep_type : 2;
+
+	/** Max packet size in bytes */
+	unsigned max_packet : 11;
+
+	/**
+	 * PID for initial transaction.
+	 * 0: DATA0,<br>
+	 * 1: DATA2,<br>
+	 * 2: DATA1,<br>
+	 * 3: MDATA (non-Control EP),
+	 *	  SETUP (Control EP)
+	 */
+	unsigned data_pid_start : 2;
+/* Note: MDATA and SETUP intentionally share encoding 3; which one applies
+ * depends on whether the endpoint is a control endpoint. */
+#define DWC_OTG_HC_PID_DATA0 0
+#define DWC_OTG_HC_PID_DATA2 1
+#define DWC_OTG_HC_PID_DATA1 2
+#define DWC_OTG_HC_PID_MDATA 3
+#define DWC_OTG_HC_PID_SETUP 3
+
+	/** Number of periodic transactions per (micro)frame */
+	unsigned multi_count: 2;
+
+	/** @name Transfer State */
+	/** @{ */
+
+	/** Pointer to the current transfer buffer position. */
+	uint8_t *xfer_buff;
+	/** Total number of bytes to transfer. */
+	uint32_t xfer_len;
+	/** Number of bytes transferred so far. */
+	uint32_t xfer_count;
+	/** Packet count at start of transfer.*/
+	uint16_t start_pkt_count;
+
+	/**
+	 * Flag to indicate whether the transfer has been started. Set to 1 if
+	 * it has been started, 0 otherwise.
+	 */
+	uint8_t xfer_started;
+
+	/**
+	 * Set to 1 to indicate that a PING request should be issued on this
+	 * channel. If 0, process normally.
+	 */
+	uint8_t do_ping;
+
+	/**
+	 * Set to 1 to indicate that the error count for this transaction is
+	 * non-zero. Set to 0 if the error count is 0.
+	 */
+	uint8_t error_state;
+
+	/**
+	 * Set to 1 to indicate that this channel should be halted the next
+	 * time a request is queued for the channel. This is necessary in
+	 * slave mode if no request queue space is available when an attempt
+	 * is made to halt the channel.
+	 */
+	uint8_t halt_on_queue;
+
+	/**
+	 * Set to 1 if the host channel has been halted, but the core is not
+	 * finished flushing queued requests. Otherwise 0.
+	 */
+	uint8_t halt_pending;
+
+	/**
+	 * Reason for halting the host channel (dwc_otg_halt_status_e).
+	 */
+	dwc_otg_halt_status_e	halt_status;
+
+	/*
+	 * Split settings for the host channel
+	 */
+	uint8_t do_split;		   /**< Enable split for the channel */
+	uint8_t complete_split;	   /**< Enable complete split */
+	uint8_t hub_addr;		   /**< Address of high speed hub */
+
+	uint8_t port_addr;		   /**< Port of the low/full speed device */
+	/** Split transaction position
+	 * One of the following values:
+	 *	  - DWC_HCSPLIT_XACTPOS_MID
+	 *	  - DWC_HCSPLIT_XACTPOS_BEGIN
+	 *	  - DWC_HCSPLIT_XACTPOS_END
+	 *	  - DWC_HCSPLIT_XACTPOS_ALL */
+	uint8_t xact_pos;
+
+	/** Set when the host channel does a short read. */
+	uint8_t short_read;
+
+	/**
+	 * Number of requests issued for this channel since it was assigned to
+	 * the current transfer (not counting PINGs).
+	 */
+	uint8_t requests;
+
+	/**
+	 * Queue Head for the transfer being processed by this channel.
+	 */
+	struct dwc_otg_qh *qh;
+
+	/** @} */
+
+	/** Entry in list of host channels. */
+	struct list_head hc_list_entry;
+} dwc_hc_t;
+
+/**
+ * The following parameters may be specified when starting the module. These
+ * parameters define how the DWC_otg controller should be configured.
+ * Parameter values are passed to the CIL initialization function
+ * dwc_otg_cil_init.
+ */
+typedef struct dwc_otg_core_params
+{
+	int32_t opt;
+#define dwc_param_opt_default 1
+
+	/**
+	 * Specifies the OTG capabilities. The driver will automatically
+	 * detect the value for this parameter if none is specified.
+	 * 0 - HNP and SRP capable (default)
+	 * 1 - SRP Only capable
+	 * 2 - No HNP/SRP capable
+	 */
+	int32_t otg_cap;
+#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0
+#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1
+#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
+#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE
+
+	/**
+	 * Specifies whether to use slave or DMA mode for accessing the data
+	 * FIFOs. The driver will automatically detect the value for this
+	 * parameter if none is specified.
+	 * 0 - Slave
+	 * 1 - DMA (default, if available)
+	 */
+	int32_t dma_enable;
+#define dwc_param_dma_enable_default 1
+
+	/** The DMA Burst size (applicable only for External DMA
+	 * Mode). 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
+	 */
+	int32_t dma_burst_size;	 /* Translate this to GAHBCFG values */
+#define dwc_param_dma_burst_size_default 32
+
+	/**
+	 * Specifies the maximum speed of operation in host and device mode.
+	 * The actual speed depends on the speed of the attached device and
+	 * the value of phy_type.
+	 * 0 - High Speed (default)
+	 * 1 - Full Speed
+	 */
+	int32_t speed;
+#define dwc_param_speed_default 0
+#define DWC_SPEED_PARAM_HIGH 0
+#define DWC_SPEED_PARAM_FULL 1
+
+	/** Specifies whether low power mode is supported when attached
+	 *	to a Full Speed or Low Speed device in host mode.
+	 * 0 - Don't support low power mode (default)
+	 * 1 - Support low power mode
+	 */
+	int32_t host_support_fs_ls_low_power;
+#define dwc_param_host_support_fs_ls_low_power_default 0
+
+	/** Specifies the PHY clock rate in low power mode when connected to a
+	 * Low Speed device in host mode. This parameter is applicable only if
+	 * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
+	 * then defaults to 6 MHZ otherwise 48 MHZ.
+	 *
+	 * 0 - 48 MHz
+	 * 1 - 6 MHz
+	 */
+	int32_t host_ls_low_power_phy_clk;
+#define dwc_param_host_ls_low_power_phy_clk_default 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+
+	/**
+	 * 0 - Use cC FIFO size parameters
+	 * 1 - Allow dynamic FIFO sizing (default)
+	 */
+	int32_t enable_dynamic_fifo;
+#define dwc_param_enable_dynamic_fifo_default 1
+
+	/** Total number of 4-byte words in the data FIFO memory. This
+	 * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
+	 * Tx FIFOs.
+	 * 32 to 32768 (default 8192)
+	 * Note: The total FIFO memory depth in the FPGA configuration is 8192.
+	 */
+	int32_t data_fifo_size;
+#define dwc_param_data_fifo_size_default 8192
+
+	/** Number of 4-byte words in the Rx FIFO in device mode when dynamic
+	 * FIFO sizing is enabled.
+	 * 16 to 32768 (default 1064)
+	 */
+	int32_t dev_rx_fifo_size;
+#define dwc_param_dev_rx_fifo_size_default 1064
+
+	/** Number of 4-byte words in the non-periodic Tx FIFO in device mode
+	 * when dynamic FIFO sizing is enabled.
+	 * 16 to 32768 (default 1024)
+	 */
+	int32_t dev_nperio_tx_fifo_size;
+#define dwc_param_dev_nperio_tx_fifo_size_default 1024
+
+	/** Number of 4-byte words in each of the periodic Tx FIFOs in device
+	 * mode when dynamic FIFO sizing is enabled.
+	 * 4 to 768 (default 256)
+	 */
+	uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS];
+#define dwc_param_dev_perio_tx_fifo_size_default 256
+
+	/** Number of 4-byte words in the Rx FIFO in host mode when dynamic
+	 * FIFO sizing is enabled.
+	 * 16 to 32768 (default 1024)
+	 */
+	int32_t host_rx_fifo_size;
+#define dwc_param_host_rx_fifo_size_default 1024
+
+	/** Number of 4-byte words in the non-periodic Tx FIFO in host mode
+	 * when Dynamic FIFO sizing is enabled in the core.
+	 * 16 to 32768 (default 1024)
+	 */
+	int32_t host_nperio_tx_fifo_size;
+#define dwc_param_host_nperio_tx_fifo_size_default 1024
+
+	/** Number of 4-byte words in the host periodic Tx FIFO when dynamic
+	 * FIFO sizing is enabled.
+	 * 16 to 32768 (default 1024)
+	 */
+	int32_t host_perio_tx_fifo_size;
+#define dwc_param_host_perio_tx_fifo_size_default 1024
+
+	/** The maximum transfer size supported in bytes.
+	 * 2047 to 65,535  (default 65,535)
+	 */
+	int32_t max_transfer_size;
+#define dwc_param_max_transfer_size_default 65535
+
+	/** The maximum number of packets in a transfer.
+	 * 15 to 511  (default 511)
+	 */
+	int32_t max_packet_count;
+#define dwc_param_max_packet_count_default 511
+
+	/** The number of host channel registers to use.
+	 * 1 to 16 (default 12)
+	 * Note: The FPGA configuration supports a maximum of 12 host channels.
+	 */
+	int32_t host_channels;
+#define dwc_param_host_channels_default 12
+
+	/** The number of endpoints in addition to EP0 available for device
+	 * mode operations.
+	 * 1 to 15 (default 6 IN and OUT)
+	 * Note: The FPGA configuration supports a maximum of 6 IN and OUT
+	 * endpoints in addition to EP0.
+	 */
+	int32_t dev_endpoints;
+#define dwc_param_dev_endpoints_default 6
+
+	/**
+	 * Specifies the type of PHY interface to use. By default, the driver
+	 * will automatically detect the phy_type.
+	 *
+	 * 0 - Full Speed PHY
+	 * 1 - UTMI+ (default)
+	 * 2 - ULPI
+	 */
+	int32_t phy_type;
+#define DWC_PHY_TYPE_PARAM_FS 0
+#define DWC_PHY_TYPE_PARAM_UTMI 1
+#define DWC_PHY_TYPE_PARAM_ULPI 2
+#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI
+
+	/**
+	 * Specifies the UTMI+ Data Width.	This parameter is
+	 * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI
+	 * PHY_TYPE, this parameter indicates the data width between
+	 * the MAC and the ULPI Wrapper.) Also, this parameter is
+	 * applicable only if the OTG_HSPHY_WIDTH cC parameter was set
+	 * to "8 and 16 bits", meaning that the core has been
+	 * configured to work at either data path width.
+	 *
+	 * 8 or 16 bits (default 16)
+	 */
+	int32_t phy_utmi_width;
+#define dwc_param_phy_utmi_width_default 16
+
+	/**
+	 * Specifies whether the ULPI operates at double or single
+	 * data rate. This parameter is only applicable if PHY_TYPE is
+	 * ULPI.
+	 *
+	 * 0 - single data rate ULPI interface with 8 bit wide data
+	 * bus (default)
+	 * 1 - double data rate ULPI interface with 4 bit wide data
+	 * bus
+	 */
+	int32_t phy_ulpi_ddr;
+#define dwc_param_phy_ulpi_ddr_default 0
+
+	/**
+	 * Specifies whether to use the internal or external supply to
+	 * drive the vbus with a ULPI phy.
+	 */
+	int32_t phy_ulpi_ext_vbus;
+#define DWC_PHY_ULPI_INTERNAL_VBUS 0
+#define DWC_PHY_ULPI_EXTERNAL_VBUS 1
+#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS
+
+	/**
+	 * Specifies whether to use the I2C interface for full speed PHY. This
+	 * parameter is only applicable if PHY_TYPE is FS.
+	 * 0 - No (default)
+	 * 1 - Yes
+	 */
+	int32_t i2c_enable;
+#define dwc_param_i2c_enable_default 0
+
+	int32_t ulpi_fs_ls;
+#define dwc_param_ulpi_fs_ls_default 0
+
+	int32_t ts_dline;
+#define dwc_param_ts_dline_default 0
+
+	/**
+	 * Specifies whether dedicated transmit FIFOs are
+	 * enabled for non periodic IN endpoints in device mode
+	 * 0 - No
+	 * 1 - Yes
+	 */
+	int32_t en_multiple_tx_fifo;
+#define dwc_param_en_multiple_tx_fifo_default 1
+
+	/** Number of 4-byte words in each of the Tx FIFOs in device
+	 * mode when dynamic FIFO sizing is enabled.
+	 * 4 to 768 (default 256)
+	 */
+	uint32_t dev_tx_fifo_size[MAX_TX_FIFOS];
+#define dwc_param_dev_tx_fifo_size_default 256
+
+	/** Thresholding enable flag-
+	 * bit 0 - enable non-ISO Tx thresholding
+	 * bit 1 - enable ISO Tx thresholding
+	 * bit 2 - enable Rx thresholding
+	 */
+	uint32_t thr_ctl;
+#define dwc_param_thr_ctl_default 0
+
+	/** Thresholding length for Tx
+	 *	FIFOs in 32 bit DWORDs
+	 */
+	uint32_t tx_thr_length;
+#define dwc_param_tx_thr_length_default 64
+
+	/** Thresholding length for Rx
+	 *	FIFOs in 32 bit DWORDs
+	 */
+	uint32_t rx_thr_length;
+#define dwc_param_rx_thr_length_default 64
+
+} dwc_otg_core_params_t;
+
+#ifdef CONFIG_DWC_DEBUG
+struct dwc_otg_core_if;
+/**
+ * Debug-only pairing of a host channel with its owning core interface;
+ * used as the payload for the per-channel hc_xfer_timer entries in
+ * dwc_otg_core_if (debug builds only).
+ */
+typedef struct hc_xfer_info
+{
+	struct dwc_otg_core_if	*core_if;
+	dwc_hc_t		*hc;
+} hc_xfer_info_t;
+#endif
+
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+/**
+ * Deferred PLB DMA transfer descriptor, filled in by the FIFO read/write
+ * paths and consumed by the plbdma tasklet.
+ */
+typedef struct dma_xfer_s
+{
+	/** CPU-side data buffer for the transfer */
+	uint32_t *dma_data_buff;
+	/** FIFO address to transfer to/from */
+	void *dma_data_fifo;
+	/* NOTE(review): presumably a count of 32-bit words, matching the
+	 * word_count used by the FIFO read path -- confirm. */
+	uint32_t dma_count;
+	/** Transfer direction: OTG_TX_DMA or OTG_RX_DMA */
+	uint32_t dma_dir;
+} dma_xfer_t;
+#endif
+
+/**
+ * The <code>dwc_otg_core_if</code> structure contains information needed to manage
+ * the DWC_otg controller acting in either host or device mode. It
+ * represents the programming view of the controller as a whole.
+ */
+typedef struct dwc_otg_core_if
+{
+ /** Parameters that define how the core should be configured.*/
+ dwc_otg_core_params_t *core_params;
+
+ /** Core Global registers starting at offset 000h. */
+ dwc_otg_core_global_regs_t *core_global_regs;
+
+ /** Device-specific information */
+ dwc_otg_dev_if_t *dev_if;
+ /** Host-specific information */
+ dwc_otg_host_if_t *host_if;
+
+ /*
+ * Set to 1 if the core PHY interface bits in USBCFG have been
+ * initialized.
+ */
+ uint8_t phy_init_done;
+
+ /*
+ * SRP Success flag, set by srp success interrupt in FS I2C mode
+ */
+ uint8_t srp_success;
+ uint8_t srp_timer_started;
+
+ /* Common configuration information */
+ /** Power and Clock Gating Control Register */
+ volatile uint32_t *pcgcctl;
+#define DWC_OTG_PCGCCTL_OFFSET 0xE00
+
+ /** Push/pop addresses for endpoints or host channels.*/
+ uint32_t *data_fifo[MAX_EPS_CHANNELS];
+#define DWC_OTG_DATA_FIFO_OFFSET 0x1000
+#define DWC_OTG_DATA_FIFO_SIZE 0x1000
+
+ /** Total RAM for FIFOs (Bytes) */
+ uint16_t total_fifo_size;
+ /** Size of Rx FIFO (Bytes) */
+ uint16_t rx_fifo_size;
+ /** Size of Non-periodic Tx FIFO (Bytes) */
+ uint16_t nperio_tx_fifo_size;
+
+
+ /** 1 if DMA is enabled, 0 otherwise. */
+ uint8_t dma_enable;
+
+ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */
+ uint8_t en_multiple_tx_fifo;
+
+ /** Set to 1 if multiple packets of a high-bandwidth transfer is in
+ * process of being queued */
+ uint8_t queuing_high_bandwidth;
+
+ /** Hardware Configuration -- stored here for convenience.*/
+ hwcfg1_data_t hwcfg1;
+ hwcfg2_data_t hwcfg2;
+ hwcfg3_data_t hwcfg3;
+ hwcfg4_data_t hwcfg4;
+
+ /** The operational state.  During transitions
+ * (a_host => a_peripheral and b_device => b_host) this may not
+ * match the core but allows the software to determine
+ * transitions.  Holds one of the A_HOST..B_HOST values below.
+ */
+ uint8_t op_state;
+
+ /**
+ * Set to 1 if the HCD needs to be restarted on a session request
+ * interrupt. This is required if no connector ID status change has
+ * occurred since the HCD was last disconnected.
+ */
+ uint8_t restart_hcd_on_session_req;
+
+ /** Operational-state values stored in op_state. */
+ /** A-Device is a_host */
+#define A_HOST (1)
+ /** A-Device is a_suspend */
+#define A_SUSPEND (2)
+ /** A-Device is a_peripherial */
+#define A_PERIPHERAL (3)
+ /** B-Device is operating as a Peripheral. */
+#define B_PERIPHERAL (4)
+ /** B-Device is operating as a Host. */
+#define B_HOST (5)
+
+ /** HCD callbacks (may be NULL until the HCD registers). */
+ struct dwc_otg_cil_callbacks *hcd_cb;
+ /** PCD callbacks (may be NULL until the PCD registers). */
+ struct dwc_otg_cil_callbacks *pcd_cb;
+
+ /** Device mode Periodic Tx FIFO Mask */
+ uint32_t p_tx_msk;
+ /** Device mode Tx FIFO Mask.  NOTE(review): the original comment
+ * duplicated p_tx_msk's; presumably this is the non-periodic /
+ * dedicated Tx FIFO mask -- confirm against users of tx_msk. */
+ uint32_t tx_msk;
+
+#ifdef CONFIG_DWC_DEBUG
+ uint32_t start_hcchar_val[MAX_EPS_CHANNELS];
+
+ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS];
+ struct timer_list hc_xfer_timer[MAX_EPS_CHANNELS];
+
+ uint32_t hfnum_7_samples;
+ uint64_t hfnum_7_frrem_accum;
+ uint32_t hfnum_0_samples;
+ uint64_t hfnum_0_frrem_accum;
+ uint32_t hfnum_other_samples;
+ uint64_t hfnum_other_frrem_accum;
+#endif
+ resource_size_t phys_addr; /* Added to support PLB DMA : phys-virt mapping */
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ /* Tasklet to do plbdma */
+ struct tasklet_struct *plbdma_tasklet;
+#if 1
+ dma_xfer_t dma_xfer;
+#else
+ uint32_t *dma_data_buff;
+ void *dma_data_fifo;
+ uint32_t dma_count;
+ uint32_t dma_dir;
+#endif
+#endif
+
+
+} dwc_otg_core_if_t;
+
+/*
+ * The following functions support initialization of the CIL driver component
+ * and the DWC_otg controller.
+ */
+extern dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr,
+ dwc_otg_core_params_t *_core_params);
+extern void dwc_otg_cil_remove(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_core_init(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if );
+extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if );
+
+/** @name Device CIL Functions
+ * The following functions support managing the DWC_otg controller in device
+ * mode.
+ */
+/**@{*/
+extern void dwc_otg_wakeup(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_read_setup_packet (dwc_otg_core_if_t *_core_if, uint32_t *_dest);
+extern uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma);
+extern void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep);
+extern void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if);
+/**@}*/
+
+/** @name Host CIL Functions
+ * The following functions support managing the DWC_otg controller in host
+ * mode.
+ */
+/**@{*/
+extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if,
+ dwc_hc_t *_hc,
+ dwc_otg_halt_status_e _halt_status);
+extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc);
+extern void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if);
+extern void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if);
+
+/**
+ * This function Reads HPRT0 in preparation to modify. It keeps the
+ * WC bits 0 so that if they are read as 1, they won't clear when you
+ * write it back
+ */
+static inline uint32_t dwc_otg_read_hprt0(dwc_otg_core_if_t *_core_if)
+{
+ hprt0_data_t val;
+
+ /* Snapshot HPRT0, then zero the write-1-to-clear status bits so a
+  * later read-modify-write of the returned value cannot accidentally
+  * acknowledge pending port events. */
+ val.d32 = dwc_read_reg32(_core_if->host_if->hprt0);
+ val.b.prtovrcurrchng = 0;
+ val.b.prtenchng = 0;
+ val.b.prtconndet = 0;
+ val.b.prtena = 0;
+ return val.d32;
+}
+
+extern void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if);
+/**@}*/
+
+/** @name Common CIL Functions
+ * The following functions support managing the DWC_otg controller in either
+ * device or host mode.
+ */
+/**@{*/
+
+extern void dwc_otg_read_packet(dwc_otg_core_if_t *core_if,
+ uint8_t *dest,
+ uint16_t bytes);
+
+extern void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if);
+
+extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if,
+ const int _num );
+extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if );
+extern void dwc_otg_core_reset( dwc_otg_core_if_t *_core_if );
+
+#ifdef CONFIG_OTG_PLB_DMA
+extern void ppc4xx_start_plb_dma(dwc_otg_core_if_t *_core_if, void *src, void *dst,
+ unsigned int length, unsigned int use_interrupt, unsigned int dma_ch, unsigned int dma_dir);
+#endif
+#define NP_TXFIFO_EMPTY -1
+#define MAX_NP_TXREQUEST_Q_SLOTS 8
+/**
+ * This function returns the endpoint number of the request at
+ * the top of non-periodic TX FIFO, or -1 if the request FIFO is
+ * empty.
+ */
+static inline int dwc_otg_top_nptxfifo_epnum(dwc_otg_core_if_t *_core_if) {
+ gnptxsts_data_t txstatus;
+
+ /* When every request-queue slot is free, nothing is at the top. */
+ txstatus.d32 = dwc_read_reg32(&_core_if->core_global_regs->gnptxsts);
+ if (txstatus.b.nptxqspcavail == MAX_NP_TXREQUEST_Q_SLOTS)
+  return NP_TXFIFO_EMPTY;
+ return txstatus.b.nptxqtop_chnep;
+}
+
+/**
+ * This function returns the Core Interrupt register.
+ */
+static inline uint32_t dwc_otg_read_core_intr(dwc_otg_core_if_t *_core_if)
+{
+ uint32_t status = dwc_read_reg32(&_core_if->core_global_regs->gintsts);
+ uint32_t mask = dwc_read_reg32(&_core_if->core_global_regs->gintmsk);
+
+ /* Only report interrupts that are both pending and unmasked. */
+ return status & mask;
+}
+
+/**
+ * This function returns the OTG Interrupt register.
+ */
+static inline uint32_t dwc_otg_read_otg_intr (dwc_otg_core_if_t *_core_if)
+{
+ /* Raw GOTGINT value; callers decode the individual status bits. */
+ return dwc_read_reg32(&_core_if->core_global_regs->gotgint);
+}
+
+/**
+ * This function reads the Device All Endpoints Interrupt register and
+ * returns the IN endpoint interrupt bits.
+ */
+static inline uint32_t dwc_otg_read_dev_all_in_ep_intr(dwc_otg_core_if_t *_core_if)
+{
+ uint32_t pending;
+
+ pending = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daint);
+ pending &= dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk);
+ /* The IN endpoint bits occupy the low 16 bits of DAINT. */
+ return pending & 0xffff;
+}
+
+/**
+ * This function reads the Device All Endpoints Interrupt register and
+ * returns the OUT endpoint interrupt bits.
+ */
+static inline uint32_t dwc_otg_read_dev_all_out_ep_intr(dwc_otg_core_if_t *_core_if)
+{
+ uint32_t pending;
+
+ pending = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daint);
+ pending &= dwc_read_reg32(&_core_if->dev_if->dev_global_regs->daintmsk);
+ /* The OUT endpoint bits occupy the high 16 bits of DAINT. */
+ return (pending >> 16) & 0xffff;
+}
+
+/**
+ * This function returns the Device IN EP Interrupt register
+ */
+static inline uint32_t dwc_otg_read_dev_in_ep_intr(dwc_otg_core_if_t *_core_if,
+ dwc_ep_t *_ep)
+{
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ uint32_t v, msk, emp;
+ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk);
+ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk);
+ /* Fold this endpoint's bit from the dedicated Tx FIFO empty mask
+  * (DTKNQR4/FIFOEMPTYMSK) into bit 7 of the DIEPMSK image -- bit 7
+  * of DIEPINT is presumably the Tx FIFO empty interrupt; confirm
+  * against diepint_data_t. */
+ msk |= ((emp >> _ep->num) & 0x1) << 7;
+ v = dwc_read_reg32(&dev_if->in_ep_regs[_ep->num]->diepint) & msk;
+ return v;
+}
+/**
+ * This function returns the Device OUT EP Interrupt register
+ */
+static inline uint32_t dwc_otg_read_dev_out_ep_intr(dwc_otg_core_if_t *_core_if,
+ dwc_ep_t *_ep)
+{
+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if;
+ uint32_t pending, mask;
+
+ pending = dwc_read_reg32(&dev_if->out_ep_regs[_ep->num]->doepint);
+ mask = dwc_read_reg32(&dev_if->dev_global_regs->doepmsk);
+ return pending & mask;
+}
+
+/**
+ * This function returns the Host All Channel Interrupt register
+ */
+static inline uint32_t dwc_otg_read_host_all_channels_intr (dwc_otg_core_if_t *_core_if)
+{
+ /* Raw HAINT: one bit per host channel with a pending interrupt. */
+ return dwc_read_reg32(&_core_if->host_if->host_global_regs->haint);
+}
+
+/** Return the raw HCINT register of the given host channel. */
+static inline uint32_t dwc_otg_read_host_channel_intr (dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc)
+{
+ return dwc_read_reg32(&_core_if->host_if->hc_regs[_hc->hc_num]->hcint);
+}
+
+
+/**
+ * This function returns the mode of the operation, host or device.
+ *
+ * @return 0 - Device Mode, 1 - Host Mode
+ */
+static inline uint32_t dwc_otg_mode(dwc_otg_core_if_t *_core_if)
+{
+ uint32_t gintsts = dwc_read_reg32(&_core_if->core_global_regs->gintsts);
+
+ /* Bit 0 of GINTSTS is the current-mode flag (1 = host). */
+ return gintsts & 0x1;
+}
+
+/** True when the core is currently operating in device mode. */
+static inline uint8_t dwc_otg_is_device_mode(dwc_otg_core_if_t *_core_if)
+{
+ return dwc_otg_mode(_core_if) != DWC_HOST_MODE;
+}
+/** True when the core is currently operating in host mode. */
+static inline uint8_t dwc_otg_is_host_mode(dwc_otg_core_if_t *_core_if)
+{
+ return dwc_otg_mode(_core_if) == DWC_HOST_MODE;
+}
+
+extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if );
+
+
+/**@}*/
+
+/**
+ * DWC_otg CIL callback structure. This structure allows the HCD and
+ * PCD to register functions used for starting and stopping the PCD
+ * and HCD for role change on for a DRD.
+ */
+typedef struct dwc_otg_cil_callbacks
+{
+ /** Start function for role change */
+ int (*start) (void *_p);
+ /** Stop Function for role change */
+ int (*stop) (void *_p);
+ /** Disconnect Function for role change */
+ int (*disconnect) (void *_p);
+ /** Resume/Remote wakeup Function */
+ int (*resume_wakeup) (void *_p);
+ /** Suspend function */
+ int (*suspend) (void *_p);
+ /** Session Start (SRP) */
+ int (*session_start) (void *_p);
+ /** Pointer passed to start() and stop() */
+ void *p;
+} dwc_otg_cil_callbacks_t;
+
+extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if,
+ dwc_otg_cil_callbacks_t *_cb,
+ void *_p);
+extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if,
+ dwc_otg_cil_callbacks_t *_cb,
+ void *_p);
+#endif
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_cil_intr.c b/drivers/usb/gadget/dwc_otg/dwc_otg_cil_intr.c
new file mode 100644
index 00000000000..ab6db554e22
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_cil_intr.c
@@ -0,0 +1,701 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_cil_intr.c $
+ * $Revision: #7 $
+ * $Date: 2005/11/02 $
+ * $Change: 553126 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+
+/** @file
+ *
+ * The Core Interface Layer provides basic services for accessing and
+ * managing the DWC_otg hardware. These services are used by both the
+ * Host Controller Driver and the Peripheral Controller Driver.
+ *
+ * This file contains the Common Interrupt handlers.
+ */
+#include "linux/dwc_otg_plat.h"
+#include "dwc_otg_regs.h"
+#include "dwc_otg_cil.h"
+
+/** Map the current operational state (core_if->op_state) to a
+ * human-readable name for log messages. */
+inline const char *op_state_str(dwc_otg_core_if_t * _core_if)
+{
+ switch (_core_if->op_state) {
+ case A_HOST:
+  return "a_host";
+ case A_SUSPEND:
+  return "a_suspend";
+ case A_PERIPHERAL:
+  return "a_peripheral";
+ case B_PERIPHERAL:
+  return "b_peripheral";
+ case B_HOST:
+  return "b_host";
+ default:
+  return "unknown";
+ }
+}
+
+/** This function will log a debug message
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+int32_t dwc_otg_handle_mode_mismatch_intr(dwc_otg_core_if_t * _core_if)
+{
+ gintsts_data_t clear = {.d32 = 0};
+
+ DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n",
+   dwc_otg_mode(_core_if) ? "Host" : "Device");
+
+ /* Acknowledge by writing the write-1-to-clear status bit back. */
+ clear.b.modemismatch = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts, clear.d32);
+ return 1;
+}
+
+/** Start the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+/** Invoke the registered HCD start callback, if any. */
+static inline void hcd_start(dwc_otg_core_if_t * _core_if)
+{
+ struct dwc_otg_cil_callbacks *cb = _core_if->hcd_cb;
+
+ if (cb && cb->start)
+  cb->start(cb->p);
+}
+
+/** Stop the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+/** Invoke the registered HCD stop callback, if any. */
+static inline void hcd_stop(dwc_otg_core_if_t * _core_if)
+{
+ struct dwc_otg_cil_callbacks *cb = _core_if->hcd_cb;
+
+ if (cb && cb->stop)
+  cb->stop(cb->p);
+}
+
+/** Disconnect the HCD. Helper function for using the HCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+/** Invoke the registered HCD disconnect callback, if any. */
+static inline void hcd_disconnect(dwc_otg_core_if_t * _core_if)
+{
+ struct dwc_otg_cil_callbacks *cb = _core_if->hcd_cb;
+
+ if (cb && cb->disconnect)
+  cb->disconnect(cb->p);
+}
+
+/** Inform the HCD the a New Session has begun. Helper function for
+ * using the HCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+/** Notify the HCD (via its session_start callback, if registered)
+ * that a new session has begun. */
+static inline void hcd_session_start(dwc_otg_core_if_t * _core_if)
+{
+ struct dwc_otg_cil_callbacks *cb = _core_if->hcd_cb;
+
+ if (cb && cb->session_start)
+  cb->session_start(cb->p);
+}
+
+/** Start the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+/** Invoke the registered PCD start callback, if any. */
+static inline void pcd_start(dwc_otg_core_if_t * _core_if)
+{
+ struct dwc_otg_cil_callbacks *cb = _core_if->pcd_cb;
+
+ if (cb && cb->start)
+  cb->start(cb->p);
+}
+
+/** Stop the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+/** Invoke the registered PCD stop callback, if any. */
+static inline void pcd_stop(dwc_otg_core_if_t * _core_if)
+{
+ struct dwc_otg_cil_callbacks *cb = _core_if->pcd_cb;
+
+ if (cb && cb->stop)
+  cb->stop(cb->p);
+}
+
+/** Suspend the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+/** Invoke the registered PCD suspend callback, if any. */
+static inline void pcd_suspend(dwc_otg_core_if_t * _core_if)
+{
+ struct dwc_otg_cil_callbacks *cb = _core_if->pcd_cb;
+
+ if (cb && cb->suspend)
+  cb->suspend(cb->p);
+}
+
+/** Resume the PCD. Helper function for using the PCD callbacks.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+/** Invoke the registered PCD resume/remote-wakeup callback, if any. */
+static inline void pcd_resume(dwc_otg_core_if_t * _core_if)
+{
+ struct dwc_otg_cil_callbacks *cb = _core_if->pcd_cb;
+
+ if (cb && cb->resume_wakeup)
+  cb->resume_wakeup(cb->p);
+}
+
+/**
+ * This function handles the OTG Interrupts. It reads the OTG
+ * Interrupt Register (GOTGINT) to determine what interrupt has
+ * occurred.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t * _core_if)
+{
+ dwc_otg_core_global_regs_t * global_regs = _core_if->core_global_regs;
+ gotgint_data_t gotgint;
+ gotgctl_data_t gotgctl;
+ gintmsk_data_t gintmsk;
+ gotgint.d32 = dwc_read_reg32(&global_regs->gotgint);
+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+ DWC_DEBUGPL(DBG_CIL, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint.d32,
+   op_state_str(_core_if));
+
+ //DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32 );
+ /* Session End Detected: the B-cable was pulled or VBUS dropped. */
+ if (gotgint.b.sesenddet) {
+  DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+       "Session End Detected++ (%s)\n",
+       op_state_str(_core_if));
+  gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+  if (_core_if->op_state == B_HOST) {
+   pcd_start(_core_if);
+   _core_if->op_state = B_PERIPHERAL;
+  } else {
+   /* If not B_HOST and Device HNP still set. HNP
+    * Did not succeed!*/
+   if (gotgctl.b.devhnpen) {
+    DWC_DEBUGPL(DBG_ANY, "Session End Detected\n");
+    DWC_ERROR("Device Not Connected/Responding!\n");
+   }
+   /* If Session End Detected the B-Cable has
+    * been disconnected. */
+   /* Reset PCD and Gadget driver to a
+    * clean state. */
+   pcd_stop(_core_if);
+  }
+  /* Clear devhnpen (dwc_modify_reg32 presumably takes
+   * (reg, clear_mask, set_mask) -- confirm). */
+  gotgctl.d32 = 0;
+  gotgctl.b.devhnpen = 1;
+  dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
+ }
+ /* SRP success status change. */
+ if (gotgint.b.sesreqsucstschng) {
+  DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+       "Session Reqeust Success Status Change++\n");
+  gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+  if (gotgctl.b.sesreqscs) {
+   /* In FS mode with I2C PHY control the resume is
+    * deferred; just record the SRP success. */
+   if ((_core_if->core_params->phy_type ==
+        DWC_PHY_TYPE_PARAM_FS)
+       && (_core_if->core_params->i2c_enable)) {
+    _core_if->srp_success = 1;
+   } else {
+    pcd_resume(_core_if);
+
+    /* Clear Session Request */
+    gotgctl.d32 = 0;
+    gotgctl.b.sesreq = 1;
+    dwc_modify_reg32(&global_regs->gotgctl,
+       gotgctl.d32, 0);
+   }
+  }
+ }
+ /* Host Negotiation success status change (HNP completed). */
+ if (gotgint.b.hstnegsucstschng) {
+  /* Print statements during the HNP interrupt handling
+   * can cause it to fail.*/
+  gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl);
+  if (gotgctl.b.hstnegscs) {
+   if (dwc_otg_is_host_mode(_core_if)) {
+    _core_if->op_state = B_HOST;
+
+    /*
+     * Need to disable SOF interrupt immediately.
+     * When switching from device to host, the PCD
+     * interrupt handler won't handle the
+     * interrupt if host mode is already set. The
+     * HCD interrupt handler won't get called if
+     * the HCD state is HALT. This means that the
+     * interrupt does not get handled and Linux
+     * complains loudly.
+     */
+    gintmsk.d32 = 0;
+    gintmsk.b.sofintr = 1;
+    dwc_modify_reg32(&global_regs->gintmsk, gintmsk.d32, 0);
+    pcd_stop(_core_if);
+
+    /*
+     * Initialize the Core for Host mode.
+     */
+    hcd_start(_core_if);
+    /* NOTE(review): op_state was already set to
+     * B_HOST above; this repeat is redundant
+     * unless hcd_start() changed it -- confirm. */
+    _core_if->op_state = B_HOST;
+   }
+  } else {
+   /* HNP failed: clear the HNP request/enable bits. */
+   gotgctl.d32 = 0;
+   gotgctl.b.hnpreq = 1;
+   gotgctl.b.devhnpen = 1;
+   dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0);
+   DWC_DEBUGPL(DBG_ANY, "HNP Failed\n");
+   DWC_ERROR("Device Not Connected/Responding\n");
+  }
+ }
+ /* Host Negotiation Detected: peer initiated HNP role switch. */
+ if (gotgint.b.hstnegdet) {
+
+  /* The disconnect interrupt is set at the same time as
+   * Host Negotiation Detected. During the mode
+   * switch all interrupts are cleared so the disconnect
+   * interrupt handler will not get executed.
+   */
+  DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+       "Host Negotiation Detected++ (%s)\n",
+       (dwc_otg_is_host_mode(_core_if) ? "Host" :
+        "Device"));
+  if (dwc_otg_is_device_mode(_core_if)) {
+   DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n",
+        _core_if->op_state);
+   hcd_disconnect(_core_if);
+   pcd_start(_core_if);
+   _core_if->op_state = A_PERIPHERAL;
+  } else {
+
+   /*
+    * Need to disable SOF interrupt immediately. When
+    * switching from device to host, the PCD interrupt
+    * handler won't handle the interrupt if host mode is
+    * already set. The HCD interrupt handler won't get
+    * called if the HCD state is HALT. This means that
+    * the interrupt does not get handled and Linux
+    * complains loudly.
+    */
+   gintmsk.d32 = 0;
+   gintmsk.b.sofintr = 1;
+   dwc_modify_reg32(&global_regs->gintmsk, gintmsk.d32,
+      0);
+   pcd_stop(_core_if);
+   hcd_start(_core_if);
+   _core_if->op_state = A_HOST;
+  }
+ }
+ if (gotgint.b.adevtoutchng) {
+  DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+       "A-Device Timeout Change++\n");
+ }
+ if (gotgint.b.debdone) {
+  DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: "
+       "Debounce Done++\n");
+ }
+
+ /* Clear GOTGINT */
+ dwc_write_reg32(&_core_if->core_global_regs->gotgint, gotgint.d32);
+ return 1;
+}
+
+/**
+ * This function handles the Connector ID Status Change Interrupt. It
+ * reads the OTG Interrupt Register (GOTCTL) to determine whether this
+ * is a Device to Host Mode transition or a Host Mode to Device
+ * Transition.
+ *
+ * This only occurs when the cable is connected/removed from the PHY
+ * connector.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *
+        _core_if)
+{
+ uint32_t count = 0;
+ gintsts_data_t gintsts = {.d32 = 0};
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ gotgctl_data_t gotgctl = {.d32 = 0};
+
+ /*
+  * Need to disable SOF interrupt immediately. If switching from device
+  * to host, the PCD interrupt handler won't handle the interrupt if
+  * host mode is already set. The HCD interrupt handler won't get
+  * called if the HCD state is HALT. This means that the interrupt does
+  * not get handled and Linux complains loudly.
+  */
+ gintmsk.b.sofintr = 1;
+ dwc_modify_reg32(&_core_if->core_global_regs->gintmsk, gintmsk.d32, 0);
+ DWC_DEBUGPL(DBG_CIL,
+      " ++Connector ID Status Change Interrupt++ (%s)\n",
+      (dwc_otg_is_host_mode(_core_if) ? "Host" : "Device"));
+ gotgctl.d32 = dwc_read_reg32(&_core_if->core_global_regs->gotgctl);
+ DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32);
+ DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts);
+
+ /* B-Device connector (Device Mode) */
+ if (gotgctl.b.conidsts) {
+
+  /* Wait for switch to device mode. */
+  while (!dwc_otg_is_device_mode(_core_if)) {
+   DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n",
+      (dwc_otg_is_host_mode(_core_if) ? "Host" :
+       "Peripheral"));
+   MDELAY(100);
+   /* NOTE(review): after ~1000 s this writes through a
+    * NULL pointer to force a crash/oops.  That is
+    * undefined behavior and may be optimized away --
+    * prefer BUG() or panic(). */
+   if (++count > 10000)
+    *(uint32_t *) NULL = 0;
+  }
+  _core_if->op_state = B_PERIPHERAL;
+  dwc_otg_core_init(_core_if);
+  dwc_otg_enable_global_interrupts(_core_if);
+  pcd_start(_core_if);
+ } else {
+
+  /* A-Device connector (Host Mode) */
+  while (!dwc_otg_is_host_mode(_core_if)) {
+   DWC_PRINT("Waiting for Host Mode, Mode=%s\n",
+      (dwc_otg_is_host_mode(_core_if) ? "Host" :
+       "Peripheral"));
+   MDELAY(100);
+   /* NOTE(review): same deliberate NULL write as above --
+    * undefined behavior; prefer BUG()/panic(). */
+   if (++count > 10000)
+    *(uint32_t *) NULL = 0;
+  }
+  _core_if->op_state = A_HOST;
+
+  /*
+   * Initialize the Core for Host mode.
+   */
+  dwc_otg_core_init(_core_if);
+  dwc_otg_enable_global_interrupts(_core_if);
+  hcd_start(_core_if);
+ }
+
+ /* Set flag and clear interrupt */
+ gintsts.b.conidstschng = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/**
+ * This interrupt indicates that a device is initiating the Session
+ * Request Protocol to request the host to turn on bus power so a new
+ * session can begin. The handler responds by turning on bus power. If
+ * the DWC_otg controller is in low power mode, the handler brings the
+ * controller out of low power mode before turning on bus power.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ */
+int32_t dwc_otg_handle_session_req_intr(dwc_otg_core_if_t * _core_if)
+{
+ gintsts_data_t gintsts;
+
+#ifndef CONFIG_DWC_HOST_ONLY
+ hprt0_data_t hprt0;
+ DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n");
+ if (dwc_otg_is_device_mode(_core_if)) {
+  DWC_PRINT("SRP: Device mode\n");
+ } else {
+  DWC_PRINT("SRP: Host mode\n");
+
+  /* Turn on the port power bit. */
+  hprt0.d32 = dwc_otg_read_hprt0(_core_if);
+  hprt0.b.prtpwr = 1;
+  dwc_write_reg32(_core_if->host_if->hprt0, hprt0.d32);
+
+  /* Start the Connection timer. So a message can be displayed
+   * if connect does not occur within 10 seconds. */
+  hcd_session_start(_core_if);
+ }
+
+#endif /* */
+
+ /* Clear interrupt (done unconditionally, even in host-only builds
+  * where the handling above is compiled out). */
+ gintsts.d32 = 0;
+ gintsts.b.sessreqintr = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/**
+ * This interrupt indicates that the DWC_otg controller has detected a
+ * resume or remote wakeup sequence. If the DWC_otg controller is in
+ * low power mode, the handler must brings the controller out of low
+ * power mode. The controller automatically begins resume
+ * signaling. The handler schedules a time to stop resume signaling.
+ */
+int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t * _core_if)
+{
+ gintsts_data_t gintsts;
+ DWC_DEBUGPL(DBG_ANY,"++Resume and Remote Wakeup Detected Interrupt++\n");
+ if (dwc_otg_is_device_mode(_core_if)) {
+  dctl_data_t dctl = {.d32 = 0};
+  DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n",
+       dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dsts));
+
+#ifdef PARTIAL_POWER_DOWN
+  /* Bring the core out of partial power-down: ungate the PHY
+   * clock, release the power clamp and the reset, in order. */
+  if (_core_if->hwcfg4.b.power_optimiz) {
+   pcgcctl_data_t power = {.d32 = 0};
+   power.d32 = dwc_read_reg32(_core_if->pcgcctl);
+   DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32);
+   power.b.stoppclk = 0;
+   dwc_write_reg32(_core_if->pcgcctl, power.d32);
+   power.b.pwrclmp = 0;
+   dwc_write_reg32(_core_if->pcgcctl, power.d32);
+   power.b.rstpdwnmodule = 0;
+   dwc_write_reg32(_core_if->pcgcctl, power.d32);
+  }
+
+#endif /* */
+  /* Clear the Remote Wakeup Signalling */
+  dctl.b.rmtwkupsig = 1;
+  dwc_modify_reg32(&_core_if->dev_if->dev_global_regs->dctl,
+     dctl.d32, 0);
+  if (_core_if->pcd_cb && _core_if->pcd_cb->resume_wakeup) {
+   _core_if->pcd_cb->resume_wakeup(_core_if->pcd_cb->p);
+  }
+ } else {
+
+  /*
+   * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms
+   * so that OPT tests pass with all PHYs).
+   */
+  hprt0_data_t hprt0 = {.d32 = 0};
+  pcgcctl_data_t pcgcctl = {.d32 = 0};
+
+  /* Restart the Phy Clock (stoppclk is passed as the CLEAR
+   * mask to dwc_modify_reg32, i.e. the bit is cleared). */
+  pcgcctl.b.stoppclk = 1;
+  dwc_modify_reg32(_core_if->pcgcctl, pcgcctl.d32, 0);
+  UDELAY(10);
+
+  /* Now wait for 70 ms. */
+  hprt0.d32 = dwc_otg_read_hprt0(_core_if);
+  DWC_DEBUGPL(DBG_ANY, "Resume: HPRT0=%0x\n", hprt0.d32);
+  MDELAY(70);
+  hprt0.b.prtres = 0; /* Resume */
+  dwc_write_reg32(_core_if->host_if->hprt0, hprt0.d32);
+  DWC_DEBUGPL(DBG_ANY, "Clear Resume: HPRT0=%0x\n",
+       dwc_read_reg32(_core_if->host_if->hprt0));
+ }
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.wkupintr = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/**
+ * This interrupt indicates that a device has been disconnected from
+ * the root port.
+ */
+int32_t dwc_otg_handle_disconnect_intr(dwc_otg_core_if_t * _core_if)
+{
+ gintsts_data_t gintsts;
+ /* NOTE(review): this printk duplicates the DWC_DEBUGPL below at
+  * KERN_ERR severity for a routine event -- looks like debug
+  * leftover; consider removing or lowering the level. */
+ printk(KERN_ERR " Disconnect Detected Interrupt++ (%s) %s\n",
+        (dwc_otg_is_host_mode(_core_if) ? "Host" : "Device"),
+        op_state_str(_core_if));
+ DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n",
+      (dwc_otg_is_host_mode(_core_if) ? "Host" : "Device"),
+      op_state_str(_core_if));
+
+/** @todo Consolidate this if statement. */
+#ifndef CONFIG_DWC_HOST_ONLY
+ if (_core_if->op_state == B_HOST) {
+
+  /* If in device mode Disconnect and stop the HCD, then
+   * start the PCD. */
+  hcd_disconnect(_core_if);
+  pcd_start(_core_if);
+  _core_if->op_state = B_PERIPHERAL;
+ } else if (dwc_otg_is_device_mode(_core_if)) {
+  gotgctl_data_t gotgctl = {.d32 = 0};
+  gotgctl.d32 = dwc_read_reg32(&_core_if->core_global_regs->gotgctl);
+  if (gotgctl.b.hstsethnpen == 1) {
+   /* Do nothing, if HNP in process the OTG
+    * interrupt "Host Negotiation Detected"
+    * interrupt will do the mode switch.
+    */
+  } else if (gotgctl.b.devhnpen == 0) {
+   /* If in device mode Disconnect and stop the HCD, then
+    * start the PCD. */
+   hcd_disconnect(_core_if);
+   pcd_start(_core_if);
+   _core_if->op_state = B_PERIPHERAL;
+  } else {
+   DWC_DEBUGPL(DBG_ANY, "!a_peripheral && !devhnpen\n");
+  }
+ } else {
+  if (_core_if->op_state == A_HOST) {
+   /* A-Cable still connected but device disconnected. */
+   hcd_disconnect(_core_if);
+  }
+ }
+
+#endif /* */
+ /* Acknowledge the disconnect interrupt (write-1-to-clear). */
+ gintsts.d32 = 0;
+ gintsts.b.disconnect = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/**
+ * This interrupt indicates that SUSPEND state has been detected on
+ * the USB.
+ *
+ * For HNP the USB Suspend interrupt signals the change from
+ * "a_peripheral" to "a_host".
+ *
+ * When power management is enabled the core will be put in low power
+ * mode.
+ */
+int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t * _core_if)
+{
+ dsts_data_t dsts;
+ gintsts_data_t gintsts;
+ DWC_DEBUGPL(DBG_ANY, "USB SUSPEND\n");
+ if (dwc_otg_is_device_mode(_core_if)) {
+
+  /* Check the Device status register to determine if the Suspend
+   * state is active. */
+  dsts.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dsts);
+  DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32);
+  DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d "
+       "HWCFG4.power Optimize=%d\n", dsts.b.suspsts,
+       _core_if->hwcfg4.b.power_optimiz);
+
+#ifdef PARTIAL_POWER_DOWN
+/** @todo Add a module parameter for power management. */
+  /* Enter partial power-down: clamp power, assert module
+   * reset, then gate the PHY clock. */
+  if (dsts.b.suspsts && _core_if->hwcfg4.b.power_optimiz) {
+   pcgcctl_data_t power = {.d32 = 0};
+   DWC_DEBUGPL(DBG_CIL, "suspend\n");
+   power.b.pwrclmp = 1;
+   dwc_write_reg32(_core_if->pcgcctl, power.d32);
+   power.b.rstpdwnmodule = 1;
+   dwc_modify_reg32(_core_if->pcgcctl, 0, power.d32);
+   power.b.stoppclk = 1;
+   dwc_modify_reg32(_core_if->pcgcctl, 0, power.d32);
+  } else {
+   DWC_DEBUGPL(DBG_ANY, "disconnect?\n");
+  }
+
+#endif /* */
+  /* PCD callback for suspend. */
+  pcd_suspend(_core_if);
+ } else {
+  if (_core_if->op_state == A_PERIPHERAL) {
+   DWC_DEBUGPL(DBG_ANY, "a_peripheral->a_host\n");
+
+   /* Clear the a_peripheral flag, back to a_host. */
+   pcd_stop(_core_if);
+   hcd_start(_core_if);
+   _core_if->op_state = A_HOST;
+  }
+ }
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.usbsuspend = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts, gintsts.d32);
+ return 1;
+}
+
+/**
+ * This function returns the Core Interrupt register.
+ */
+static inline uint32_t dwc_otg_read_common_intr(dwc_otg_core_if_t * _core_if)
+{
+ gintsts_data_t gintsts;
+ gintmsk_data_t gintmsk;
+ gintmsk_data_t common = {.d32 = 0};
+
+ /* The set of interrupts serviced by the common handler. */
+ common.b.wkupintr = 1;
+ common.b.sessreqintr = 1;
+ common.b.conidstschng = 1;
+ common.b.otgintr = 1;
+ common.b.modemismatch = 1;
+ common.b.disconnect = 1;
+ common.b.usbsuspend = 1;
+
+ /** @todo: The port interrupt occurs while in device
+  * mode. Added code to CIL to clear the interrupt for now!
+  */
+ common.b.portintr = 1;
+
+ gintsts.d32 = dwc_read_reg32(&_core_if->core_global_regs->gintsts);
+ gintmsk.d32 = dwc_read_reg32(&_core_if->core_global_regs->gintmsk);
+
+#ifdef CONFIG_DWC_DEBUG
+ /* Trace whenever any common interrupt is asserted. */
+ if (gintsts.d32 & common.d32) {
+  DWC_DEBUGPL(DBG_ANY, "gintsts=%08x gintmsk=%08x\n",
+    gintsts.d32, gintmsk.d32);
+ }
+
+#endif /* */
+ /* Pending, unmasked, and belonging to the common set. */
+ return (gintsts.d32 & gintmsk.d32) & common.d32;
+}
+
+/**
+ * Common interrupt handler.
+ *
+ * The common interrupts are those that occur in both Host and Device mode.
+ * This handler handles the following interrupts:
+ * - Mode Mismatch Interrupt
+ * - Disconnect Interrupt
+ * - OTG Interrupt
+ * - Connector ID Status Change Interrupt
+ * - Session Request Interrupt.
+ * - Resume / Remote Wakeup Detected Interrupt.
+ *
+ */
+extern int32_t dwc_otg_handle_common_intr(dwc_otg_core_if_t * _core_if)
+{
+ int retval = 0;
+ gintsts_data_t pending;
+
+ pending.d32 = dwc_otg_read_common_intr(_core_if);
+
+ /* Dispatch each pending common interrupt to its handler; the
+  * order matches the original implementation. */
+ if (pending.b.modemismatch)
+  retval |= dwc_otg_handle_mode_mismatch_intr(_core_if);
+ if (pending.b.otgintr)
+  retval |= dwc_otg_handle_otg_intr(_core_if);
+ if (pending.b.conidstschng)
+  retval |= dwc_otg_handle_conn_id_status_change_intr(_core_if);
+ if (pending.b.disconnect)
+  retval |= dwc_otg_handle_disconnect_intr(_core_if);
+ if (pending.b.sessreqintr)
+  retval |= dwc_otg_handle_session_req_intr(_core_if);
+ if (pending.b.wkupintr)
+  retval |= dwc_otg_handle_wakeup_detected_intr(_core_if);
+ if (pending.b.usbsuspend)
+  retval |= dwc_otg_handle_usb_suspend_intr(_core_if);
+
+ if (pending.b.portintr && dwc_otg_is_device_mode(_core_if)) {
+  /* The port interrupt occurs while in device mode with HPRT0
+   * Port Enable/Disable.  Just acknowledge it here. */
+  gintsts_data_t clear;
+  clear.d32 = 0;
+  clear.b.portintr = 1;
+  dwc_write_reg32(&_core_if->core_global_regs->gintsts,
+    clear.d32);
+  retval |= 1;
+ }
+ return retval;
+}
+
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_driver.c b/drivers/usb/gadget/dwc_otg/dwc_otg_driver.c
new file mode 100644
index 00000000000..9896c76ed34
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_driver.c
@@ -0,0 +1,1345 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_driver.c $
+ * $Revision: #12 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+
+/** @file
+ * The dwc_otg_driver module provides the initialization and cleanup entry
+ * points for the DWC_otg driver. This module will be dynamically installed
+ * after Linux is booted using the insmod command. When the module is
+ * installed, the dwc_otg_driver_init function is called. When the module is
+ * removed (using rmmod), the dwc_otg_driver_cleanup function is called.
+ *
+ * This module also defines a data structure for the dwc_otg_driver, which is
+ * used in conjunction with the standard device structure. These
+ * structures allow the OTG driver to comply with the standard Linux driver
+ * model in which devices and drivers are registered with a bus driver. This
+ * has the benefit that Linux can expose attributes of the driver and device
+ * in its special sysfs file system. Users can then read or write files in
+ * this file system to perform diagnostics on the driver components or the
+ * device.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/stat.h> /* permission constants */
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <asm/io.h>
+
+#include "linux/dwc_otg_plat.h"
+#include "dwc_otg_attr.h"
+#include "dwc_otg_driver.h"
+#include "dwc_otg_cil.h"
+#include "dwc_otg_pcd.h"
+#include "dwc_otg_hcd.h"
+
+#define DWC_DRIVER_VERSION "2.60a 22-NOV-2006"
+#define DWC_DRIVER_DESC "HS OTG USB Controller driver"
+static const char dwc_driver_name[] = "dwc_otg";
+
+/*-------------------------------------------------------------------------*/
+/* Encapsulate the module parameter settings */
+static dwc_otg_core_params_t dwc_otg_module_params = {
+	/* A value of -1 means "not set by the user": check_parameters()
+	 * replaces it with the compile-time default and/or a value derived
+	 * from the core's hardware-configuration registers. */
+	.opt = -1,
+	.otg_cap = -1,
+	.dma_enable = -1,
+	.dma_burst_size = -1,
+#if 1
+	.speed = -1,
+#else
+	.speed = 1,		/* test-only: set full-speed for Beagle USB Analyzer */
+#endif
+	.host_support_fs_ls_low_power = -1,
+	.host_ls_low_power_phy_clk = -1,
+	.enable_dynamic_fifo = -1,
+	.data_fifo_size = -1,
+	.dev_rx_fifo_size = -1,
+	.dev_nperio_tx_fifo_size = -1,
+	.dev_perio_tx_fifo_size = {	/* dev_perio_tx_fifo_size_1 */
+		-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+		-1, -1
+	},			/* 15 */
+	.host_rx_fifo_size = -1,
+	.host_nperio_tx_fifo_size =-1,
+	.host_perio_tx_fifo_size = -1,
+	.max_transfer_size = -1,
+	.max_packet_count = -1,
+	.host_channels = -1,
+	.dev_endpoints = -1,
+	.phy_type = -1,
+	.phy_utmi_width = -1,
+	.phy_ulpi_ddr = -1,
+	.phy_ulpi_ext_vbus = -1,
+	.i2c_enable = -1,
+	.ulpi_fs_ls = -1,
+	.ts_dline = -1,
+	.en_multiple_tx_fifo = -1,
+	.dev_tx_fifo_size = {	/* dev_tx_fifo_size */
+		-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
+	},			/* 15 */
+	.thr_ctl = -1,
+	.tx_thr_length = -1,
+	.rx_thr_length = -1,
+};
+
+
+/**
+ * This function shows the Driver Version.
+ */
+/* sysfs "version" attribute: print the driver version string.
+ * sizeof(DWC_DRIVER_VERSION) already includes the NUL, so +2 leaves room
+ * for the trailing newline with a byte to spare. */
+static ssize_t version_show(struct device_driver *dev, char *buf)
+{
+	return snprintf(buf, sizeof(DWC_DRIVER_VERSION) + 2, "%s\n",
+			 DWC_DRIVER_VERSION);
+}
+static DRIVER_ATTR(version, S_IRUGO, version_show, NULL);
+
+/**
+ * Global Debug Level Mask.
+ */
+/* Runtime-settable debug mask consumed by DWC_DEBUGPL(); 0 disables all
+ * debug output.  Writable through the "debuglevel" sysfs attribute below. */
+ uint32_t g_dbg_lvl = 0x0;	/* OFF */
+
+/**
+ * This function shows the driver Debug Level.
+ */
+static ssize_t dbg_level_show(struct device_driver *_drv, char *_buf)
+{
+	return sprintf(_buf, "0x%0x\n", g_dbg_lvl);
+}
+
+
+/**
+ * This function stores the driver Debug Level.
+ *
+ * Parses _buf as a hexadecimal mask and installs it in g_dbg_lvl.
+ * NOTE(review): simple_strtoul() ignores trailing garbage and cannot report
+ * errors; kstrtouint() would be stricter on kernels that provide it.
+ */
+static ssize_t dbg_level_store(struct device_driver *_drv, const char *_buf,
+				size_t _count)
+{
+	g_dbg_lvl = simple_strtoul(_buf, NULL, 16);
+	return _count;
+}
+static DRIVER_ATTR(debuglevel, S_IRUGO | S_IWUSR, dbg_level_show,
+		    dbg_level_store);
+
+/**
+ * This function is called during module initialization to verify that
+ * the module parameters are in a valid state.
+ */
+/*
+ * Validate the module parameters.
+ *
+ * Two passes: first, every user-set value (anything != -1) is checked
+ * against its legal range and reset to the compile-time default on error;
+ * second, unset values get their defaults and everything is cross-checked
+ * against the core's hardware configuration registers (hwcfg2/3/4 and the
+ * FIFO size registers), clamping where the hardware is smaller.
+ *
+ * @param core_if  the core interface whose hardware config is consulted
+ * @return the number of parameter errors detected (0 on success)
+ */
+static int check_parameters(dwc_otg_core_if_t * core_if)
+{
+	int i;
+	int retval = 0;
+
+/* Checks if the parameter is outside of its valid range of values */
+#define DWC_OTG_PARAM_TEST(_param_,_low_,_high_) \
+	((dwc_otg_module_params._param_ < (_low_)) || \
+	(dwc_otg_module_params._param_ > (_high_)))
+/* If the parameter has been set by the user, check that the parameter value is
+ * within the value range of values. If not, report a module error. */
+#define DWC_OTG_PARAM_ERR(_param_,_low_,_high_,_string_) \
+do { \
+	if (dwc_otg_module_params._param_ != -1) { \
+		if (DWC_OTG_PARAM_TEST(_param_, (_low_), (_high_))) { \
+			DWC_ERROR("`%d' invalid for parameter `%s'\n", \
+				  dwc_otg_module_params._param_, _string_); \
+			dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \
+			retval++; \
+		} \
+	} \
+} while (0)
+
+	DWC_OTG_PARAM_ERR(opt, 0, 1, "opt");
+	DWC_OTG_PARAM_ERR(otg_cap, 0, 2, "otg_cap");
+	DWC_OTG_PARAM_ERR(dma_enable, 0, 1, "dma_enable");
+	DWC_OTG_PARAM_ERR(speed, 0, 1, "speed");
+	DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power, 0, 1,
+			   "host_support_fs_ls_low_power");
+	DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk, 0, 1,
+			   "host_ls_low_power_phy_clk");
+	DWC_OTG_PARAM_ERR(enable_dynamic_fifo, 0, 1, "enable_dynamic_fifo");
+	DWC_OTG_PARAM_ERR(data_fifo_size, 32, 32768, "data_fifo_size");
+	DWC_OTG_PARAM_ERR(dev_rx_fifo_size, 16, 32768, "dev_rx_fifo_size");
+	DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size, 16, 32768,
+			   "dev_nperio_tx_fifo_size");
+	DWC_OTG_PARAM_ERR(host_rx_fifo_size, 16, 32768, "host_rx_fifo_size");
+	DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size, 16, 32768,
+			   "host_nperio_tx_fifo_size");
+	DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size, 16, 32768,
+			   "host_perio_tx_fifo_size");
+	DWC_OTG_PARAM_ERR(max_transfer_size, 2047, 524288,
+			   "max_transfer_size");
+	DWC_OTG_PARAM_ERR(max_packet_count, 15, 511, "max_packet_count");
+	DWC_OTG_PARAM_ERR(host_channels, 1, 16, "host_channels");
+	DWC_OTG_PARAM_ERR(dev_endpoints, 1, 15, "dev_endpoints");
+	DWC_OTG_PARAM_ERR(phy_type, 0, 2, "phy_type");
+	DWC_OTG_PARAM_ERR(phy_ulpi_ddr, 0, 1, "phy_ulpi_ddr");
+	DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus, 0, 1, "phy_ulpi_ext_vbus");
+	DWC_OTG_PARAM_ERR(i2c_enable, 0, 1, "i2c_enable");
+	DWC_OTG_PARAM_ERR(ulpi_fs_ls, 0, 1, "ulpi_fs_ls");
+	DWC_OTG_PARAM_ERR(ts_dline, 0, 1, "ts_dline");
+	/* dma_burst_size must be a power of two between 1 and 256. */
+	if (dwc_otg_module_params.dma_burst_size != -1) {
+		if (DWC_OTG_PARAM_TEST(dma_burst_size, 1, 1)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 4, 4)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 8, 8)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 16, 16)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 32, 32)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 64, 64)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 128, 128)
+		     && DWC_OTG_PARAM_TEST(dma_burst_size, 256, 256)) {
+			DWC_ERROR
+			    ("`%d' invalid for parameter `dma_burst_size'\n",
+			     dwc_otg_module_params.dma_burst_size);
+			dwc_otg_module_params.dma_burst_size = 32;
+			retval++;
+		}
+	}
+	/* phy_utmi_width must be exactly 8 or 16 bits. */
+	if (dwc_otg_module_params.phy_utmi_width != -1) {
+		if (DWC_OTG_PARAM_TEST(phy_utmi_width, 8, 8)
+		     && DWC_OTG_PARAM_TEST(phy_utmi_width, 16, 16)) {
+			DWC_ERROR("`%d'invalid for parameter `phy_utmi_width'\n",
+				  dwc_otg_module_params.phy_utmi_width);
+			dwc_otg_module_params.phy_utmi_width = 8; /*fscz 16*/
+			retval++;
+		}
+	}
+	for (i = 0; i < 15; i++) {
+		/** @todo should be like above */
+		//DWC_OTG_PARAM_ERR(dev_perio_tx_fifo_size[i],4,768,"dev_perio_tx_fifo_size");
+		if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) {
+			if (DWC_OTG_PARAM_TEST
+			     (dev_perio_tx_fifo_size[i], 4, 768)) {
+				DWC_ERROR("`%d' invalid for parameter `%s_%d'\n",
+					  dwc_otg_module_params.dev_perio_tx_fifo_size[i],
+					  "dev_perio_tx_fifo_size", i);
+				dwc_otg_module_params.
+				    dev_perio_tx_fifo_size[i] =
+				    dwc_param_dev_perio_tx_fifo_size_default;
+				retval++;
+			}
+		}
+	}
+	DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo");
+	for (i = 0; i < 15; i++) {
+		/** @todo should be like above */
+		//DWC_OTG_PARAM_ERR(dev_tx_fifo_size[i],4,768,"dev_tx_fifo_size");
+		if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) {
+			if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) {
+				DWC_ERROR("`%d' invalid for parameter `%s_%d'\n",
+					  dwc_otg_module_params.dev_tx_fifo_size[i],
+					  "dev_tx_fifo_size", i);
+				dwc_otg_module_params.dev_tx_fifo_size[i] =
+				    dwc_param_dev_tx_fifo_size_default;
+				retval++;
+			}
+		}
+	}
+	DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl");
+	DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length");
+	DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length");
+
+	/* At this point, all module parameters that have been set by the user
+	 * are valid, and those that have not are left unset.  Now set their
+	 * default values and/or check the parameters against the hardware
+	 * configurations of the OTG core. */
+
+/* This sets the parameter to the default value if it has not been set by the
+ * user */
+#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \
+({ \
+	int changed = 1; \
+	if (dwc_otg_module_params._param_ == -1) { \
+		changed = 0; \
+		dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \
+	} \
+	changed; \
+})
+
+/* This checks the macro against the hardware configuration to see if it is
+ * valid.  It is possible that the default value could be invalid.  In this
+ * case, it will report a module error if the user touched the parameter.
+ * Otherwise it will adjust the value without any error. */
+#define DWC_OTG_PARAM_CHECK_VALID(_param_,_str_,_is_valid_,_set_valid_) \
+( { \
+	int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \
+	int error = 0; \
+	if (!(_is_valid_)) { \
+		if (changed) { \
+			DWC_ERROR("`%d' invalid for parameter `%s' Check HW configuration.\n", \
+				  dwc_otg_module_params._param_, _str_); \
+			error = 1; \
+		} \
+		dwc_otg_module_params._param_ = (_set_valid_); \
+	} \
+	error; \
+} )
+
+	/* OTG Cap */
+	retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap, "otg_cap",
+		( {
+			int valid;
+			valid = 1;
+			switch(dwc_otg_module_params.otg_cap) {
+			case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE:
+				if (core_if->hwcfg2.b.op_mode !=
+				     DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
+					valid = 0;
+				break;
+			case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE:
+				if((core_if->hwcfg2.b.op_mode !=
+				     DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) &&
+				    (core_if->hwcfg2.b.op_mode !=
+				     DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) &&
+				    (core_if->hwcfg2.b.op_mode !=
+				     DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) &&
+				    (core_if->hwcfg2.b.op_mode !=
+				     DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) {
+					valid = 0;
+				}
+				break;
+			case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE:
+				/* always valid */
+				break;
+			}
+			valid;
+		} ),
+		(((core_if->hwcfg2.b.op_mode ==
+		   DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) ||
+		  (core_if->hwcfg2.b.op_mode ==
+		   DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) ||
+		  (core_if->hwcfg2.b.op_mode ==
+		   DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) ||
+		  (core_if->hwcfg2.b.op_mode ==
+		   DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ?
+		 DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE :
+		 DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE)) ;
+	retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable, "dma_enable",
+		((dwc_otg_module_params.dma_enable == 1) &&
+		 (core_if->hwcfg2.b.architecture == 0)) ? 0 : 1, 0);
+	retval += DWC_OTG_PARAM_CHECK_VALID(opt, "opt", 1, 0);
+	DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size);
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power,
+		"host_support_fs_ls_low_power", 1, 0);
+	retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo,
+		"enable_dynamic_fifo",
+		((dwc_otg_module_params.enable_dynamic_fifo == 0) ||
+		 (core_if->hwcfg2.b.dynamic_fifo == 1)), 0);
+	retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size, "data_fifo_size",
+		(dwc_otg_module_params.data_fifo_size <=
+		 core_if->hwcfg3.b.dfifo_depth),
+		core_if->hwcfg3.b.dfifo_depth);
+	retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size, "dev_rx_fifo_size",
+		(dwc_otg_module_params.dev_rx_fifo_size <=
+		 dwc_read_reg32(&core_if->core_global_regs->grxfsiz)),
+		dwc_read_reg32(&core_if->core_global_regs->grxfsiz));
+	retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size,
+		"dev_nperio_tx_fifo_size",
+		(dwc_otg_module_params.dev_nperio_tx_fifo_size <=
+		 (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)),
+		(dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16));
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size, "host_rx_fifo_size",
+		(dwc_otg_module_params.host_rx_fifo_size <=
+		 dwc_read_reg32(&core_if->core_global_regs->grxfsiz)),
+		dwc_read_reg32(&core_if->core_global_regs->grxfsiz));
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size,
+		"host_nperio_tx_fifo_size",
+		(dwc_otg_module_params.host_nperio_tx_fifo_size <=
+		 (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)),
+		(dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16));
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size,
+		"host_perio_tx_fifo_size",
+		(dwc_otg_module_params.host_perio_tx_fifo_size <=
+		 ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))),
+		((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16)));
+	retval += DWC_OTG_PARAM_CHECK_VALID(max_transfer_size, "max_transfer_size",
+		(dwc_otg_module_params.max_transfer_size <
+		 (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))),
+		((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1));
+	retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count, "max_packet_count",
+		(dwc_otg_module_params.max_packet_count <
+		 (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))),
+		((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1));
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_channels, "host_channels",
+		(dwc_otg_module_params.host_channels <=
+		 (core_if->hwcfg2.b.num_host_chan + 1)),
+		(core_if->hwcfg2.b.num_host_chan + 1));
+	retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints, "dev_endpoints",
+		(dwc_otg_module_params.dev_endpoints <=
+		 (core_if->hwcfg2.b.num_dev_ep)),
+		core_if->hwcfg2.b.num_dev_ep);
+
+/*
+ * Define the following to disable the FS PHY Hardware checking.  This is for
+ * internal testing only.
+ *
+ * #define NO_FS_PHY_HW_CHECKS
+ */
+
+#ifdef NO_FS_PHY_HW_CHECKS
+	retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, "phy_type", 1, 0);
+#else	/* */
+	retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, "phy_type", ( {
+		int valid = 0;
+		if ((dwc_otg_module_params.phy_type ==
+		      DWC_PHY_TYPE_PARAM_UTMI) &&
+		     ((core_if->hwcfg2.b.hs_phy_type == 1) ||
+		      (core_if->hwcfg2.b.hs_phy_type == 3))) {
+			valid = 1;
+		}
+		else if ((dwc_otg_module_params.phy_type ==
+			   DWC_PHY_TYPE_PARAM_ULPI) &&
+			  ((core_if->hwcfg2.b.hs_phy_type == 2) ||
+			   (core_if->hwcfg2.b.hs_phy_type == 3))) {
+			valid = 1;
+		}
+		else if ((dwc_otg_module_params.phy_type ==
+			   DWC_PHY_TYPE_PARAM_FS) &&
+			  (core_if->hwcfg2.b.fs_phy_type == 1)) {
+			valid = 1;
+		}
+		valid;
+	} ),
+	( {
+		int set = DWC_PHY_TYPE_PARAM_FS;
+		if (core_if->hwcfg2.b.hs_phy_type) {
+			if ((core_if->hwcfg2.b.hs_phy_type == 3) ||
+			     (core_if->hwcfg2.b.hs_phy_type ==1)) {
+				set = DWC_PHY_TYPE_PARAM_UTMI;
+			}
+			else {
+				set = DWC_PHY_TYPE_PARAM_ULPI;
+			}
+		}
+		set;
+	} ) ) ;
+
+#endif	/* */
+	retval += DWC_OTG_PARAM_CHECK_VALID(speed, "speed",
+		(dwc_otg_module_params.speed == 0) &&
+		(dwc_otg_module_params.phy_type ==
+		 DWC_PHY_TYPE_PARAM_FS) ? 0 : 1,
+		dwc_otg_module_params.phy_type ==
+		DWC_PHY_TYPE_PARAM_FS ? 1 : 0);
+	retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk,
+		"host_ls_low_power_phy_clk",
+		((dwc_otg_module_params.host_ls_low_power_phy_clk ==
+		  DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) &&
+		 (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1),
+		((dwc_otg_module_params.phy_type ==
+		  DWC_PHY_TYPE_PARAM_FS) ? DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ :
+		 DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ));
+	DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr);
+	DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus);
+	DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width);
+	DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls);
+	DWC_OTG_PARAM_SET_DEFAULT(ts_dline);
+
+#ifdef NO_FS_PHY_HW_CHECKS
+	retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable", 1, 0);
+
+#else	/* */
+	retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable",
+		(dwc_otg_module_params.i2c_enable == 1) &&
+		(core_if->hwcfg3.b.i2c == 0) ? 0 : 1, 0);
+#endif	/* */
+	/* Clamp each periodic Tx FIFO size to the hardware FIFO depth.
+	 * NOTE: loop bound is 15, matching the 15-element
+	 * dev_perio_tx_fifo_size[] array (indexing to 16 would read and
+	 * write one element past the end of both the parameter array and
+	 * dptxfsiz_dieptxf[]). */
+	for (i = 0; i < 15; i++) {
+		int changed = 1;
+		int error = 0;
+		if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) {
+			changed = 0;
+			dwc_otg_module_params.dev_perio_tx_fifo_size[i] =
+			    dwc_param_dev_perio_tx_fifo_size_default;
+		}
+		if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <=
+		       (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) {
+			if (changed) {
+				DWC_ERROR("`%d' invalid for parameter `dev_perio_tx_fifo_size_%d'."
+					  "Check HW configuration.\n",
+					  dwc_otg_module_params.dev_perio_tx_fifo_size[i], i);
+				error = 1;
+			}
+			dwc_otg_module_params.dev_perio_tx_fifo_size[i] =
+			    dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]);
+		}
+		retval += error;
+	}
+
+	retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo,
+		"en_multiple_tx_fifo",
+		((dwc_otg_module_params.en_multiple_tx_fifo == 1) &&
+		 (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 0 : 1, 0);
+
+	/* Same clamping for the dedicated Tx FIFOs; again 15 entries. */
+	for (i = 0; i < 15; i++) {
+		int changed = 1;
+		int error = 0;
+		if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) {
+			changed = 0;
+			dwc_otg_module_params.dev_tx_fifo_size[i] =
+			    dwc_param_dev_tx_fifo_size_default;
+		}
+		if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <=
+		       (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) {
+			if (changed) {
+				DWC_ERROR("%d' invalid for parameter `dev_tx_fifo_size_%d'."
+					  "Check HW configuration.\n",dwc_otg_module_params.dev_tx_fifo_size[i],i);
+				error = 1;
+			}
+			dwc_otg_module_params.dev_tx_fifo_size[i] =
+			    dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]);
+		}
+		retval += error;
+	}
+	DWC_OTG_PARAM_SET_DEFAULT(thr_ctl);
+	DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length);
+	DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length);
+	return retval;
+}
+
+/**
+ * This function is the top level interrupt handler for the Common
+ * (Device and host modes) interrupts.
+ */
+/* Top-level IRQ handler for the common (host- and device-mode) interrupts:
+ * delegates to dwc_otg_handle_common_intr() and reports whether anything
+ * was serviced so the shared-IRQ core can account for it. */
+static irqreturn_t dwc_otg_common_irq(int _irq, void *_dev)
+{
+	dwc_otg_device_t * otg_dev = _dev;
+	int32_t retval = IRQ_NONE;
+	retval = dwc_otg_handle_common_intr(otg_dev->core_if);
+	return IRQ_RETVAL(retval);
+}
+
+#ifdef OTG_EXT_CHG_PUMP
+/**
+ * This function is the interrupt handler for the OverCurrent condition
+ * from the external charge pump (if enabled).
+ *
+ * In host mode it records the over-current event and drops VBUS by
+ * clearing HPRT0.PrtPwr; in device mode the condition is not applicable
+ * and is only logged.
+ */
+static irqreturn_t dwc_otg_externalchgpump_irq(int _irq, void *_dev)
+{
+	dwc_otg_device_t * otg_dev = _dev;
+	int32_t retval = IRQ_NONE;
+	dwc_otg_hcd_t *_dwc_otg_hcd = NULL;
+
+	DWC_DEBUGPL(DBG_OFF," ++OTG OverCurrent Detected (ExtChgPump Interrupt)++ \n");
+
+	/* NOTE(review): the commented-out mtdcr/mfdcr DCR accesses below look
+	 * like board-specific IRQ2 mask/ack handling left for reference. */
+// 	mtdcr(0x0D2, mfdcr(0x0D2) & ~0x00000020); //Disable IRQ2 - 58
+// 	mtdcr(0x0C0, 0x00000002);
+
+	if(dwc_otg_is_host_mode(otg_dev->core_if)) {
+		hprt0_data_t hprt0 = {.d32 = 0};
+		_dwc_otg_hcd = otg_dev->hcd;
+		_dwc_otg_hcd->flags.b.port_over_current_change = 1;
+
+		/* Cut port power in response to the over-current event. */
+		hprt0.b.prtpwr = 0;
+		dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0,
+				 hprt0.d32);
+	} else {
+		/* Device mode - This int is n/a for device mode */
+		DWC_ERROR(" DeviceMode: OTG OverCurrent Detected \n");
+	}
+
+// 	mtdcr(0x0D0, 0x00000020); //Clear the sts
+// 	mtdcr(0x0D2, mfdcr(0x0D2) | 0x00000020); //Enable IRQ2 - 58
+
+	retval |= 1; //dwc_otg_handle_common_intr(otg_dev->core_if);
+	return IRQ_RETVAL(retval);
+}
+#endif
+
+/**
+ * This function is called when a device is unregistered with the
+ * dwc_otg_driver. This happens, for example, when the rmmod command is
+ * executed. The device may or may not be electrically present. If it is
+ * present, the driver stops device processing. Any resources used on behalf
+ * of this device are freed.
+ *
+ * @param[in] _dev
+ */
+/**
+ * Tear down everything dwc_otg_driver_probe() set up, in reverse order:
+ * common IRQ, HCD, PCD, CIL, sysfs attributes, and the mapped/reserved
+ * register region.  Safe to call from the probe failure path, where any
+ * subset of the resources may still be unallocated.
+ *
+ * @param[in] pdev  the platform device being unbound
+ */
+static int dwc_otg_driver_remove(struct platform_device *pdev)
+{
+	dwc_otg_device_t * otg_dev = platform_get_drvdata(pdev);
+	/* BUGFIX: this used to print `_dev', an identifier that does not
+	 * exist in this function (stale LM-device naming), which broke the
+	 * build with CONFIG_DWC_DEBUG enabled. */
+	DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, pdev);
+	if (otg_dev == NULL) {
+		/* Memory allocation for the dwc_otg_device failed. */
+		return 0;
+	}
+	/*
+	 * Free the IRQ
+	 */
+	if (otg_dev->common_irq_installed) {
+		free_irq(otg_dev->irq, otg_dev);
+	}
+
+#ifndef CONFIG_DWC_DEVICE_ONLY
+	if (otg_dev->hcd != NULL) {
+		dwc_otg_hcd_remove(&pdev->dev);
+	}
+#endif	/* */
+
+#ifndef CONFIG_DWC_HOST_ONLY
+	if (otg_dev->pcd != NULL) {
+		dwc_otg_pcd_remove(&pdev->dev);
+	}
+
+#endif	/* */
+	if (otg_dev->core_if != NULL) {
+		dwc_otg_cil_remove(otg_dev->core_if);
+	}
+
+	/*
+	 * Remove the device attributes
+	 */
+	dwc_otg_attr_remove(&pdev->dev);
+
+	/*
+	 * Return the memory.
+	 */
+	if (otg_dev->base != NULL) {
+		iounmap(otg_dev->base);
+	}
+	if (otg_dev->phys_addr != 0) {
+		release_mem_region(otg_dev->phys_addr, otg_dev->base_len);
+	}
+	kfree(otg_dev);
+
+	/*
+	 * Clear the drvdata pointer.
+	 */
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+/**
+ * PLB DMA completion interrupt handler (tasklet-assisted DMA mode).
+ *
+ * Acks DMA channel 0 status, clears the `release_later' gate so deferred
+ * work can proceed, and re-enables the core's global interrupts.
+ * NOTE(review): `release_later' is a global declared elsewhere — confirm
+ * its ownership before changing this ordering.
+ */
+static irqreturn_t dwc_otg_plbdma(int _irq, void *_dev)
+{
+	dwc_otg_device_t * otg_dev = _dev;
+	int32_t retval = IRQ_HANDLED;
+
+	ppc4xx_clr_dma_status(0);
+	DWC_DEBUGPL(DBG_SP, "%s reset release_later\n", __func__);
+	atomic_set(& release_later, 0);
+	dwc_otg_enable_global_interrupts(otg_dev->core_if);
+	//enable_irq(94);
+	return IRQ_RETVAL(retval);
+}
+#endif
+
+/**
+ * This function is called when an device is bound to a
+ * dwc_otg_driver. It creates the driver components required to
+ * control the device (CIL, HCD, and PCD) and it initializes the
+ * device. The driver components are stored in a dwc_otg_device
+ * structure. A reference to the dwc_otg_device is saved in the
+ * device. This allows the driver to access the dwc_otg_device
+ * structure on subsequent calls to driver methods for this device.
+ *
+ * @param[in] _dev device definition
+ */
+/**
+ * Bind the driver to a DWC OTG platform device: map the register space,
+ * verify the core's SNPSID, initialize the CIL, validate module parameters,
+ * install the common IRQ handler, and bring up the PCD and/or HCD.
+ * On any failure, dwc_otg_driver_remove() unwinds whatever was allocated.
+ *
+ * @param[in] pdev  platform device definition
+ * @return 0 on success, negative errno on failure
+ */
+static int dwc_otg_driver_probe(struct platform_device *pdev)
+{
+	int retval = 0;
+	dwc_otg_device_t * dwc_otg_device;
+	int32_t snpsid;
+	struct resource *res;
+	gusbcfg_data_t usbcfg = {.d32 = 0};
+#if defined(OTG_EXT_CHG_PUMP) || defined(CONFIG_OTG_PLB_DMA_TASKLET)
+	int irq;
+#endif
+
+	dev_dbg(&pdev->dev, "dwc_otg_driver_probe (%p)\n", pdev);
+	dwc_otg_device = kmalloc(sizeof(dwc_otg_device_t), GFP_KERNEL);
+	if (dwc_otg_device == NULL) {
+		dev_err(&pdev->dev, "kmalloc of dwc_otg_device failed\n");
+		retval = -ENOMEM;
+		goto fail;
+	}
+	memset(dwc_otg_device, 0, sizeof(*dwc_otg_device));
+	dwc_otg_device->reg_offset = 0xFFFFFFFF;
+
+	/*
+	 * Retrieve the memory and IRQ resources.
+	 *
+	 * BUGFIX: platform_get_irq() returns a negative errno on failure
+	 * (some platforms may also return 0); the old `== 0' test let a
+	 * negative error code be used as an IRQ number.
+	 */
+	retval = platform_get_irq(pdev, 0);
+	if (retval <= 0) {
+		dev_err(&pdev->dev, "no device irq\n");
+		retval = -ENODEV;
+		goto fail;
+	}
+	dwc_otg_device->irq = retval;
+	retval = 0;
+	dev_dbg(&pdev->dev, "OTG - device irq: %d\n", dwc_otg_device->irq);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "no CSR address\n");
+		retval = -ENODEV;
+		goto fail;
+	}
+	dev_dbg(&pdev->dev, "OTG - ioresource_mem start0x%08x: end:0x%08x\n",
+		(unsigned)res->start, (unsigned)res->end);
+	dwc_otg_device->phys_addr = res->start;
+	dwc_otg_device->base_len = res->end - res->start + 1;
+	if (request_mem_region(dwc_otg_device->phys_addr, dwc_otg_device->base_len,
+			       dwc_driver_name) == NULL) {
+		dev_err(&pdev->dev, "request_mem_region failed\n");
+		retval = -EBUSY;
+		goto fail;
+	}
+
+	/*
+	 * Map the DWC_otg Core memory into virtual address space.
+	 */
+	dwc_otg_device->base =
+	    ioremap(dwc_otg_device->phys_addr, dwc_otg_device->base_len);
+	if (dwc_otg_device->base == NULL) {
+		dev_err(&pdev->dev, "ioremap() failed\n");
+		retval = -ENOMEM;
+		goto fail;
+	}
+	dev_dbg(&pdev->dev, "mapped base=0x%08x\n", (unsigned)dwc_otg_device->base);
+
+	/*
+	 * Attempt to ensure this device is really a DWC_otg Controller.
+	 * Read and verify the SNPSID register contents.  The value should be
+	 * 0x4F542XXX, which corresponds to "OT2", as in "OTG version 2.XX".
+	 * (The check was previously missing; the value was read and ignored.)
+	 */
+	snpsid = dwc_read_reg32((uint32_t *) ((uint8_t *) dwc_otg_device->base + 0x40));
+	if ((snpsid & 0xFFFFF000) != 0x4F542000) {
+		dev_err(&pdev->dev, "Bad value for SNPSID: 0x%08x\n", snpsid);
+		retval = -EINVAL;
+		goto fail;
+	}
+	/*
+	 * Initialize driver data to point to the global DWC_otg
+	 * Device structure.
+	 */
+	platform_set_drvdata(pdev, dwc_otg_device);
+	dev_dbg(&pdev->dev, "dwc_otg_device=0x%p\n", dwc_otg_device);
+	dwc_otg_device->core_if = dwc_otg_cil_init(dwc_otg_device->base, &dwc_otg_module_params);
+	if (dwc_otg_device->core_if == 0) {
+		dev_err(&pdev->dev, "CIL initialization failed!\n");
+		retval = -ENOMEM;
+		goto fail;
+	}
+	/*
+	 * Validate parameter values.
+	 */
+	if (check_parameters(dwc_otg_device->core_if) != 0) {
+		retval = -EINVAL;
+		goto fail;
+	}
+
+	/* Added for PLB DMA phys virt mapping */
+	dwc_otg_device->core_if->phys_addr = dwc_otg_device->phys_addr;
+
+	/*
+	 * Create Device Attributes in sysfs
+	 */
+	dwc_otg_attr_create(&pdev->dev);
+	/*
+	 * Disable the global interrupt until all the interrupt
+	 * handlers are installed.
+	 */
+	dwc_otg_disable_global_interrupts(dwc_otg_device->core_if);
+
+	/*
+	 * Install the interrupt handler for the common interrupts before
+	 * enabling common interrupts in core_init below.
+	 */
+	DWC_DEBUGPL(DBG_CIL, "registering (common) handler for irq%d\n",
+		     dwc_otg_device->irq);
+	retval = request_irq(dwc_otg_device->irq, dwc_otg_common_irq, IRQF_SHARED,
+			      "dwc_otg", dwc_otg_device);
+	if (retval != 0) {
+		DWC_ERROR("request of irq%d failed retval: %d\n", dwc_otg_device->irq, retval);
+		retval = -EBUSY;
+		goto fail;
+	} else {
+		dwc_otg_device->common_irq_installed = 1;
+	}
+
+#ifdef CONFIG_MACH_IPMATE
+	set_irq_type(_lmdev->irq, IRQT_LOW);
+#endif	/* */
+	/*
+	 * Initialize the DWC_otg core.
+	 */
+	dwc_otg_core_init(dwc_otg_device->core_if);
+
+#ifdef OTG_EXT_CHG_PUMP
+	/* configure GPIO to use IRQ2, IRQ=58 (IRQ2) */
+	irq = platform_get_irq(pdev, 1);
+	if (irq <= 0) {
+		DWC_ERROR("no ext-chg-pump irq resource\n");
+		retval = -ENODEV;
+		goto fail;
+	}
+	retval = request_irq(irq, dwc_otg_externalchgpump_irq, IRQF_SHARED,
+			      "dwc_otg_ext_chg_pump", dwc_otg_device);
+	if (retval != 0) {
+		DWC_ERROR("request of irq:2(ExtInt) failed retval: %d\n",
+			  retval);
+		retval = -EBUSY;
+		goto fail;
+	} else {
+		printk(KERN_INFO "%s: (ExtChgPump-OverCurrent Detection)"
+		       " IRQ2 registered\n", dwc_driver_name);
+	}
+#endif
+
+#ifndef CONFIG_DWC_HOST_ONLY
+	/*
+	 * Initialize the PCD
+	 */
+	retval = dwc_otg_pcd_init(&pdev->dev);
+
+	if (retval != 0) {
+		DWC_ERROR("dwc_otg_pcd_init failed\n");
+		dwc_otg_device->pcd = NULL;
+		goto fail;
+	}
+
+#endif	/* */
+#ifndef CONFIG_DWC_DEVICE_ONLY
+	/*
+	 * Initialize the HCD
+	 */
+#if 1	/*fscz*/
+	/* force_host_mode */
+	usbcfg.d32 = dwc_read_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg);
+	usbcfg.b.force_host_mode = 1;
+	dwc_write_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg, usbcfg.d32);
+#endif
+	retval = dwc_otg_hcd_init(&pdev->dev, dwc_otg_device);
+	if (retval != 0) {
+		DWC_ERROR("dwc_otg_hcd_init failed\n");
+		dwc_otg_device->hcd = NULL;
+		goto fail;
+	}
+
+#endif	/* */
+	/*
+	 * Enable the global interrupt after all the interrupt
+	 * handlers are installed.
+	 */
+	dwc_otg_enable_global_interrupts(dwc_otg_device->core_if);
+#if 1	/*fscz*/
+	usbcfg.d32 = dwc_read_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg);
+	usbcfg.b.force_host_mode = 0;
+	dwc_write_reg32(&dwc_otg_device->core_if->core_global_regs ->gusbcfg, usbcfg.d32);
+#endif
+
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+	atomic_set(&release_later, 0);
+	irq = platform_get_irq(pdev, 2);
+	if (irq <= 0) {
+		DWC_ERROR("no PLB DMA irq resource\n");
+		retval = -ENODEV;
+		goto fail;
+	}
+	retval = request_irq(irq, dwc_otg_plbdma, IRQF_SHARED,
+			      "dwc_otg_plbdma", dwc_otg_device);
+	if (retval != 0) {
+		DWC_ERROR("Request of irq %d failed retval: %d\n", PLB_DMA_CH_INT, retval);
+		retval = -EBUSY;
+		goto fail;
+	} else {
+		DWC_DEBUGPL(DBG_CIL, "%s Irq %d registered\n", dwc_driver_name, PLB_DMA_CH_INT);
+	}
+#endif
+	return 0;
+
+fail:
+	dwc_otg_driver_remove(pdev);
+	return retval;
+}
+
+
+/**
+ * This structure defines the methods to be called by a bus driver
+ * during the lifecycle of a device on that bus. Both drivers and
+ * devices are registered with a bus driver. The bus driver matches
+ * devices to drivers based on information in the device and driver
+ * structures.
+ *
+ * The probe function is called when the bus driver matches a device
+ * to this driver. The remove function is called when a device is
+ * unregistered with the bus driver.
+ */
+/* Platform-bus glue: matches on the "dwc_otg" name and wires probe/remove.
+ * NOTE(review): setting .driver.bus explicitly is redundant — the platform
+ * core assigns it in platform_driver_register(); confirm before removing. */
+static struct platform_driver dwc_otg_driver = {
+	.probe = dwc_otg_driver_probe,
+	.remove = __devexit_p(dwc_otg_driver_remove),
+	.driver = {
+		.name = (char *)dwc_driver_name,
+		.bus = &platform_bus_type,
+	},
+};
+
+/**
+ * This function is called when the dwc_otg_driver is installed with the
+ * insmod command. It registers the dwc_otg_driver structure with the
+ * appropriate bus driver. This will cause the dwc_otg_driver_probe function
+ * to be called. In addition, the bus driver will automatically expose
+ * attributes defined for the device and driver in the special sysfs file
+ * system.
+ *
+ * @return
+ */
+/**
+ * Module entry point: register the platform driver and create the driver's
+ * sysfs attributes (version, debuglevel).
+ *
+ * BUGFIX: driver_create_file() is __must_check; previously both results
+ * were stored into one variable (the first silently overwritten) and never
+ * examined.  Attribute creation failure is not fatal to the driver, so it
+ * is now reported with a warning instead of being ignored.
+ *
+ * @return 0 on success, or the platform_driver_register() error code
+ */
+static int __init dwc_otg_driver_init(void)
+{
+	int retval;
+	int ret;
+	printk(KERN_INFO "%s: version %s\n", dwc_driver_name,
+	       DWC_DRIVER_VERSION);
+	retval = platform_driver_register(&dwc_otg_driver);
+	if (retval < 0) {
+		printk(KERN_ERR "%s registration failed. retval=%d\n",
+		       dwc_driver_name, retval);
+		return retval;
+	}
+	ret = driver_create_file(&dwc_otg_driver.driver, &driver_attr_version);
+	if (ret != 0)
+		printk(KERN_WARNING "%s: failed to create version attribute: %d\n",
+		       dwc_driver_name, ret);
+	ret = driver_create_file(&dwc_otg_driver.driver, &driver_attr_debuglevel);
+	if (ret != 0)
+		printk(KERN_WARNING "%s: failed to create debuglevel attribute: %d\n",
+		       dwc_driver_name, ret);
+	return retval;
+}
+
+module_init(dwc_otg_driver_init);
+
+/**
+ * This function is called when the driver is removed from the kernel
+ * with the rmmod command. The driver unregisters itself with its bus
+ * driver.
+ *
+ */
+/* Module exit: remove the sysfs attributes in reverse creation order, then
+ * unregister the platform driver (which triggers dwc_otg_driver_remove()
+ * for any bound device). */
+static void __exit dwc_otg_driver_cleanup(void)
+{
+	printk(KERN_DEBUG "dwc_otg_driver_cleanup()\n");
+	driver_remove_file(&dwc_otg_driver.driver, &driver_attr_debuglevel);
+	driver_remove_file(&dwc_otg_driver.driver, &driver_attr_version);
+	platform_driver_unregister(&dwc_otg_driver);
+	printk(KERN_INFO "%s module removed\n", dwc_driver_name);
+} module_exit(dwc_otg_driver_cleanup);
+
+MODULE_DESCRIPTION(DWC_DRIVER_DESC);
+MODULE_AUTHOR("Synopsys Inc.");
+MODULE_LICENSE("GPL");
+
+module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444);
+MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None");
+module_param_named(opt, dwc_otg_module_params.opt, int, 0444);
+MODULE_PARM_DESC(opt, "OPT Mode");
+module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444);
+MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled");
+module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int,0444);
+MODULE_PARM_DESC(dma_burst_size,"DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256");
+module_param_named(speed, dwc_otg_module_params.speed, int, 0444);
+MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed");
+module_param_named(host_support_fs_ls_low_power,
+ dwc_otg_module_params.host_support_fs_ls_low_power, int,0444);
+MODULE_PARM_DESC(host_support_fs_ls_low_power,
+ "Support Low Power w/FS or LS 0=Support 1=Don't Support");
+module_param_named(host_ls_low_power_phy_clk,
+ dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444);
+MODULE_PARM_DESC(host_ls_low_power_phy_clk,
+ "Low Speed Low Power Clock 0=48Mhz 1=6Mhz");
+module_param_named(enable_dynamic_fifo,
+ dwc_otg_module_params.enable_dynamic_fifo, int, 0444);
+MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing");
+module_param_named(data_fifo_size,
+ dwc_otg_module_params.data_fifo_size, int,0444);
+MODULE_PARM_DESC(data_fifo_size,
+ "Total number of words in the data FIFO memory 32-32768");
+module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size,
+ int, 0444);
+MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+module_param_named(dev_nperio_tx_fifo_size,
+ dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(dev_nperio_tx_fifo_size,
+ "Number of words in the non-periodic Tx FIFO 16-32768");
+module_param_named(dev_perio_tx_fifo_size_1,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_1,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_2,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_2,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_3,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_3,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_4,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_4,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_5,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_5,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_6,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_6,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_7,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_7,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_8,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_8,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_9,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_9,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_10,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_10,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_11,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[10], int,0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_11,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_12,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[11], int,0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_12,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_13,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[12], int,0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_13,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_14,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[13], int,0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_14,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(dev_perio_tx_fifo_size_15,
+ dwc_otg_module_params.dev_perio_tx_fifo_size[14], int,0444);
+MODULE_PARM_DESC(dev_perio_tx_fifo_size_15,
+ "Number of words in the periodic Tx FIFO 4-768");
+module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size,
+ int, 0444);
+MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768");
+module_param_named(host_nperio_tx_fifo_size,
+ dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_nperio_tx_fifo_size,
+ "Number of words in the non-periodic Tx FIFO 16-32768");
+module_param_named(host_perio_tx_fifo_size,
+ dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444);
+MODULE_PARM_DESC(host_perio_tx_fifo_size,
+ "Number of words in the host periodic Tx FIFO 16-32768");
+module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size,
+ int, 0444);
+
+/** @todo Set the max to 512K, modify checks */
+MODULE_PARM_DESC(max_transfer_size,
+ "The maximum transfer size supported in bytes 2047-65535");
+module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count,
+ int, 0444);
+MODULE_PARM_DESC(max_packet_count,
+ "The maximum number of packets in a transfer 15-511");
+module_param_named(host_channels, dwc_otg_module_params.host_channels, int, 0444);
+MODULE_PARM_DESC(host_channels,
+ "The number of host channel registers to use 1-16");
+module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int, 0444);
+MODULE_PARM_DESC(dev_endpoints,
+ "The number of endpoints in addition to EP0 available for device mode 1-15");
+module_param_named(phy_type, dwc_otg_module_params.phy_type, int, 0444);
+MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI");
+module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int,
+ 0444);
+MODULE_PARM_DESC(phy_utmi_width,
+ "Specifies the UTMI+ Data Width 8 or 16 bits");
+module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int,
+		   0444);
+/* Restore the meaningful modinfo description; it was stubbed out to "0"
+ * by an "#if 1 / #else" debug hack that dead-coded the real text. */
+MODULE_PARM_DESC(phy_ulpi_ddr,
+		 "ULPI at double or single data rate 0=Single 1=Double");
+module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus,
+ int, 0444);
+MODULE_PARM_DESC(phy_ulpi_ext_vbus,
+ "ULPI PHY using internal or external vbus 0=Internal");
+module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444);
+MODULE_PARM_DESC(i2c_enable, "FS PHY Interface");
+module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444);
+MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only");
+module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444);
+MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs");
+module_param_named(debug, g_dbg_lvl, int, 0444);
+MODULE_PARM_DESC(debug, "0");
+module_param_named(en_multiple_tx_fifo,
+ dwc_otg_module_params.en_multiple_tx_fifo, int, 0444);
+MODULE_PARM_DESC(en_multiple_tx_fifo,
+ "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled");
+module_param_named(dev_tx_fifo_size_1,
+ dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_2,
+ dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_3,
+ dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_4,
+ dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_5,
+ dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_6,
+ dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_7,
+ dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_8,
+ dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_9,
+ dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_10,
+ dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_11,
+ dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_12,
+ dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_13,
+ dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_14,
+ dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768");
+module_param_named(dev_tx_fifo_size_15,
+ dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444);
+MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768");
+module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444);
+MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit"
+ "0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled");
+module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int, 0444);
+MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs");
+module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int, 0444);
+MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs");
+
+/** @page "Module Parameters"
+ *
+ * The following parameters may be specified when starting the module.
+ * These parameters define how the DWC_otg controller should be
+ * configured. Parameter values are passed to the CIL initialization
+ * function dwc_otg_cil_init
+ *
+ * Example: <code>modprobe dwc_otg speed=1 otg_cap=1</code>
+ *
+
+ <table>
+ <tr><td>Parameter Name</td><td>Meaning</td></tr>
+
+ <tr>
+ <td>otg_cap</td>
+ <td>Specifies the OTG capabilities. The driver will automatically detect the
+ value for this parameter if none is specified.
+ - 0: HNP and SRP capable (default, if available)
+ - 1: SRP Only capable
+ - 2: No HNP/SRP capable
+ </td></tr>
+
+ <tr>
+ <td>dma_enable</td>
+ <td>Specifies whether to use slave or DMA mode for accessing the data FIFOs.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: Slave
+ - 1: DMA (default, if available)
+ </td></tr>
+
+ <tr>
+ <td>dma_burst_size</td>
+ <td>The DMA Burst size (applicable only for External DMA Mode).
+ - Values: 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
+ </td></tr>
+
+ <tr>
+ <td>speed</td>
+ <td>Specifies the maximum speed of operation in host and device mode. The
+ actual speed depends on the speed of the attached device and the value of
+ phy_type.
+ - 0: High Speed (default)
+ - 1: Full Speed
+ </td></tr>
+
+ <tr>
+ <td>host_support_fs_ls_low_power</td>
+ <td>Specifies whether low power mode is supported when attached to a Full
+ Speed or Low Speed device in host mode.
+ - 0: Don't support low power mode (default)
+ - 1: Support low power mode
+ </td></tr>
+
+ <tr>
+ <td>host_ls_low_power_phy_clk</td>
+ <td>Specifies the PHY clock rate in low power mode when connected to a Low
+ Speed device in host mode. This parameter is applicable only if
+ HOST_SUPPORT_FS_LS_LOW_POWER is enabled.
+ - 0: 48 MHz (default)
+ - 1: 6 MHz
+ </td></tr>
+
+ <tr>
+ <td>enable_dynamic_fifo</td>
+ <td> Specifies whether FIFOs may be resized by the driver software.
+ - 0: Use cC FIFO size parameters
+ - 1: Allow dynamic FIFO sizing (default)
+ </td></tr>
+
+ <tr>
+ <td>data_fifo_size</td>
+ <td>Total number of 4-byte words in the data FIFO memory. This memory
+ includes the Rx FIFO, non-periodic Tx FIFO, and periodic Tx FIFOs.
+ - Values: 32 to 32768 (default 8192)
+
+ Note: The total FIFO memory depth in the FPGA configuration is 8192.
+ </td></tr>
+
+ <tr>
+ <td>dev_rx_fifo_size</td>
+ <td>Number of 4-byte words in the Rx FIFO in device mode when dynamic
+ FIFO sizing is enabled.
+ - Values: 16 to 32768 (default 1064)
+ </td></tr>
+
+ <tr>
+ <td>dev_nperio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the non-periodic Tx FIFO in device mode when
+ dynamic FIFO sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>dev_perio_tx_fifo_size_n (n = 1 to 15)</td>
+ <td>Number of 4-byte words in each of the periodic Tx FIFOs in device mode
+ when dynamic FIFO sizing is enabled.
+ - Values: 4 to 768 (default 256)
+ </td></tr>
+
+ <tr>
+ <td>host_rx_fifo_size</td>
+ <td>Number of 4-byte words in the Rx FIFO in host mode when dynamic FIFO
+ sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>host_nperio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the non-periodic Tx FIFO in host mode when
+ dynamic FIFO sizing is enabled in the core.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>host_perio_tx_fifo_size</td>
+ <td>Number of 4-byte words in the host periodic Tx FIFO when dynamic FIFO
+ sizing is enabled.
+ - Values: 16 to 32768 (default 1024)
+ </td></tr>
+
+ <tr>
+ <td>max_transfer_size</td>
+ <td>The maximum transfer size supported in bytes.
+ - Values: 2047 to 65,535 (default 65,535)
+ </td></tr>
+
+ <tr>
+ <td>max_packet_count</td>
+ <td>The maximum number of packets in a transfer.
+ - Values: 15 to 511 (default 511)
+ </td></tr>
+
+ <tr>
+ <td>host_channels</td>
+ <td>The number of host channel registers to use.
+ - Values: 1 to 16 (default 12)
+
+ Note: The FPGA configuration supports a maximum of 12 host channels.
+ </td></tr>
+
+ <tr>
+ <td>dev_endpoints</td>
+ <td>The number of endpoints in addition to EP0 available for device mode
+ operations.
+ - Values: 1 to 15 (default 6 IN and OUT)
+
+ Note: The FPGA configuration supports a maximum of 6 IN and OUT endpoints in
+ addition to EP0.
+ </td></tr>
+
+ <tr>
+ <td>phy_type</td>
+ <td>Specifies the type of PHY interface to use. By default, the driver will
+ automatically detect the phy_type.
+ - 0: Full Speed
+ - 1: UTMI+ (default, if available)
+ - 2: ULPI
+ </td></tr>
+
+ <tr>
+ <td>phy_utmi_width</td>
+ <td>Specifies the UTMI+ Data Width. This parameter is applicable for a
+ phy_type of UTMI+. Also, this parameter is applicable only if the
+ OTG_HSPHY_WIDTH cC parameter was set to "8 and 16 bits", meaning that the
+ core has been configured to work at either data path width.
+ - Values: 8 or 16 bits (default 16)
+ </td></tr>
+
+ <tr>
+ <td>phy_ulpi_ddr</td>
+ <td>Specifies whether the ULPI operates at double or single data rate. This
+ parameter is only applicable if phy_type is ULPI.
+ - 0: single data rate ULPI interface with 8 bit wide data bus (default)
+ - 1: double data rate ULPI interface with 4 bit wide data bus
+ </td></tr>
+
+ <tr>
+ <td>i2c_enable</td>
+ <td>Specifies whether to use the I2C interface for full speed PHY. This
+ parameter is only applicable if PHY_TYPE is FS.
+ - 0: Disabled (default)
+ - 1: Enabled
+ </td></tr>
+
+ <tr>
+ <td>en_multiple_tx_fifo</td>
+ <td>Specifies whether dedicated Tx FIFOs are enabled for non-periodic IN EPs.
+ The driver will automatically detect the value for this parameter if none is
+ specified.
+ - 0: Disabled
+ - 1: Enabled (default, if available)
+ </td></tr>
+
+ <tr>
+ <td>dev_tx_fifo_size_n (n = 1 to 15)</td>
+ <td>Number of 4-byte words in each of the Tx FIFOs in device mode
+ when dynamic FIFO sizing is enabled.
+ - Values: 4 to 768 (default 256)
+ </td></tr>
+
+*/
+
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_driver.h b/drivers/usb/gadget/dwc_otg/dwc_otg_driver.h
new file mode 100644
index 00000000000..e4ccd24accb
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_driver.h
@@ -0,0 +1,82 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_driver.h $
+ * $Revision: #2 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_DRIVER_H__)
+#define __DWC_OTG_DRIVER_H__
+
+/** @file
+ * This file contains the interface to the Linux driver.
+ */
+#include "dwc_otg_cil.h"
+
+/* Type declarations */
+struct dwc_otg_pcd;
+struct dwc_otg_hcd;
+
+/**
+ * This structure is a wrapper that encapsulates the driver components used to
+ * manage a single DWC_otg controller.
+ */
+typedef struct dwc_otg_device
+{
+	/** Base address returned from ioremap().
+	 * NOTE(review): not __iomem-annotated; sparse would flag accesses —
+	 * confirm how the register accessors use it before changing. */
+	void *base;
+
+	/** Pointer to the core interface structure. */
+	dwc_otg_core_if_t *core_if;
+
+	/** Register offset for Diagnostic API.*/
+	uint32_t reg_offset;
+
+	/** Pointer to the PCD structure (device-mode driver component). */
+	struct dwc_otg_pcd *pcd;
+
+	/** Pointer to the HCD structure (host-mode driver component). */
+	struct dwc_otg_hcd *hcd;
+
+	/** Flag to indicate whether the common IRQ handler is installed. */
+	uint8_t common_irq_installed;
+
+	/** Interrupt request number. */
+	unsigned int irq;
+
+	/** Physical address of Control and Status registers, used by
+	 * release_mem_region().
+	 */
+	resource_size_t phys_addr;
+
+	/** Length of memory region, used by release_mem_region(). */
+	unsigned long base_len;
+} dwc_otg_device_t;
+
+#endif
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c
new file mode 100644
index 00000000000..a813327bf40
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.c
@@ -0,0 +1,2900 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd.c $
+ * $Revision: #16 $
+ * $Date: 2006/12/05 $
+ * $Change: 762293 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef DWC_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the implementation of the HCD. In Linux, the HCD
+ * implements the hc_driver API.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+extern atomic_t release_later;
+
+static u64 dma_mask = DMA_BIT_MASK(32);
+
+static const char dwc_otg_hcd_name[] = "dwc_otg_hcd";
+
+static int dwc_otg_hcd_suspend(struct usb_hcd *hcd)
+{
+ /* FIXME: Write code to right suspend processing */
+ return 0;
+}
+
+static int dwc_otg_hcd_resume(struct usb_hcd *hcd)
+{
+ /* FIXME: Write code to right resume processing */
+ return 0;
+}
+
+/* hc_driver method table handed to the USB core; hcd_priv_size reserves
+ * room for the dwc_otg_hcd_t state that hcd_to_dwc_otg_hcd() recovers. */
+static const struct hc_driver dwc_otg_hc_driver =
+{
+	.description = dwc_otg_hcd_name,
+	.product_desc = "DWC OTG Controller",
+	.hcd_priv_size = sizeof(dwc_otg_hcd_t),
+	.irq = dwc_otg_hcd_irq,
+	/* Memory-mapped registers, USB 2.0 host. */
+	.flags = HCD_MEMORY | HCD_USB2,
+	//.reset =
+	.start = dwc_otg_hcd_start,
+#ifdef CONFIG_PM
+	.bus_suspend = dwc_otg_hcd_suspend,
+	.bus_resume = dwc_otg_hcd_resume,
+#endif
+	.stop = dwc_otg_hcd_stop,
+	.urb_enqueue = dwc_otg_hcd_urb_enqueue,
+	.urb_dequeue = dwc_otg_hcd_urb_dequeue,
+	.endpoint_disable = dwc_otg_hcd_endpoint_disable,
+	.get_frame_number = dwc_otg_hcd_get_frame_number,
+	.hub_status_data = dwc_otg_hcd_hub_status_data,
+	.hub_control = dwc_otg_hcd_hub_control,
+	//.hub_suspend =
+	//.hub_resume =
+};
+
+
+/**
+ * Work queue function for starting the HCD when A-Cable is connected.
+ * The dwc_otg_hcd_start() must be called in a process context.
+ */
+/**
+ * Deferred-start worker: dwc_otg_hcd_start() must run in process
+ * context, so the start callback schedules this work item instead of
+ * calling it directly from interrupt context.
+ */
+static void hcd_start_func(struct work_struct *work)
+{
+	struct dwc_otg_hcd *dwc_hcd =
+	    container_of(work, struct dwc_otg_hcd, start_work);
+	struct usb_hcd *usb_hcd = (struct usb_hcd *)dwc_hcd->_p;
+
+	DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd);
+	if (!usb_hcd)
+		return;
+	dwc_otg_hcd_start(usb_hcd);
+}
+
+
+/**
+ * HCD Callback function for starting the HCD when A-Cable is
+ * connected.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_start_cb(void *_p)
+{
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+	dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if;
+	hprt0_data_t hprt0;
+	if (core_if->op_state == B_HOST) {
+		/*
+		 * Reset the port. During a HNP mode switch the reset
+		 * needs to occur within 1ms and have a duration of at
+		 * least 50ms.
+		 */
+		hprt0.d32 = dwc_otg_read_hprt0(core_if);
+		hprt0.b.prtrst = 1;
+		dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+		((struct usb_hcd *)_p)->self.is_b_host = 1;
+	} else {
+		((struct usb_hcd *)_p)->self.is_b_host = 0;
+	}
+	/* Need to start the HCD in a non-interrupt context, so defer the
+	 * actual dwc_otg_hcd_start() call to the hcd_start_func work item. */
+	INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func);
+	dwc_otg_hcd->_p = _p;
+	schedule_work(&dwc_otg_hcd->start_work);
+	/* Returns 1 unconditionally; the CIL callback contract for the
+	 * return value is not visible here. */
+	return 1;
+}
+
+
+/**
+ * HCD Callback function for stopping the HCD.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+/**
+ * CIL "stop" callback: forwards straight to dwc_otg_hcd_stop().
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_stop_cb(void *_p)
+{
+	DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+	dwc_otg_hcd_stop((struct usb_hcd *)_p);
+	return 1;
+}
+/* Cancel the per-host-channel transfer timeout timers. The timers (and
+ * therefore this function's body) exist only in DEBUG builds. */
+static void del_xfer_timers(dwc_otg_hcd_t * _hcd)
+{
+
+#ifdef DEBUG
+	int i;
+	int num_channels = _hcd->core_if->core_params->host_channels;
+	for (i = 0; i < num_channels; i++) {
+		del_timer(&_hcd->core_if->hc_xfer_timer[i]);
+	}
+
+#endif /* */
+}
+/* Cancel all HCD timers: the DEBUG-only transfer timers and the
+ * 10-second connection timer. */
+static void del_timers(dwc_otg_hcd_t * _hcd)
+{
+	del_xfer_timers(_hcd);
+	del_timer(&_hcd->conn_timer);
+}
+
+/**
+ * Processes all the URBs in a single list of QHs. Completes them with
+ * -ETIMEDOUT and frees the QTD.
+ */
+static void kill_urbs_in_qh_list(dwc_otg_hcd_t * _hcd,
+				  struct list_head *_qh_list)
+{
+	struct list_head *qh_item;
+	dwc_otg_qh_t * qh;
+	struct list_head *qtd_item;
+	dwc_otg_qtd_t * qtd;
+	list_for_each(qh_item, _qh_list) {
+		qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry);
+		/* The inner loop re-reads qh->qtd_list.next each iteration
+		 * rather than using list_for_each, because the current QTD is
+		 * freed inside the loop body. */
+		for (qtd_item = qh->qtd_list.next; qtd_item != &qh->qtd_list;
+		     qtd_item = qh->qtd_list.next) {
+			qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry);
+			if (qtd->urb != NULL) {
+				/* Complete the URB with a timeout status. */
+				dwc_otg_hcd_complete_urb(_hcd, qtd->urb,-ETIMEDOUT);
+			}
+			dwc_otg_hcd_qtd_remove_and_free(qtd);
+		}
+	}
+}
+
+/**
+ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic
+ * and periodic schedules. The QTD associated with each URB is removed from
+ * the schedule and freed. This function may be called when a disconnect is
+ * detected or when the HCD is being stopped.
+ */
+static void kill_all_urbs(dwc_otg_hcd_t * _hcd)
+{
+	/* Walk every non-periodic and periodic schedule list in turn. */
+	kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_deferred);
+	kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_inactive);
+	kill_urbs_in_qh_list(_hcd, &_hcd->non_periodic_sched_active);
+	kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_inactive);
+	kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_ready);
+	kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_assigned);
+	kill_urbs_in_qh_list(_hcd, &_hcd->periodic_sched_queued);
+}
+
+/**
+ * HCD Callback function for disconnect of the HCD.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_disconnect_cb(void *_p)
+{
+	gintsts_data_t intr;
+	dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_p);
+
+	//DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+
+	/*
+	 * Set status flags for the hub driver.
+	 */
+	dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+	dwc_otg_hcd->flags.b.port_connect_status = 0;
+
+	/*
+	 * Shutdown any transfers in process by clearing the Tx FIFO Empty
+	 * interrupt mask and status bits and disabling subsequent host
+	 * channel interrupts.
+	 */
+	intr.d32 = 0;
+	intr.b.nptxfempty = 1;
+	intr.b.ptxfempty = 1;
+	intr.b.hcintr = 1;
+	dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk,
+			  intr.d32, 0);
+	dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts,
+			  intr.d32, 0);
+	/* Stop the connect timer and (DEBUG-only) transfer timers. */
+	del_timers(dwc_otg_hcd);
+
+	/*
+	 * Turn off the vbus power only if the core has transitioned to device
+	 * mode. If still in host mode, need to keep power on to detect a
+	 * reconnection.
+	 */
+	if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) {
+		if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) {
+			hprt0_data_t hprt0 = {.d32 = 0};
+			DWC_PRINT("Disconnect: PortPower off\n");
+			hprt0.b.prtpwr = 0;
+			dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0,
+					hprt0.d32);
+		}
+		dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if);
+	}
+
+	/* Respond with an error status to all URBs in the schedule. */
+	kill_all_urbs(dwc_otg_hcd);
+	if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) {
+		/* Clean up any host channels that were in use. */
+		int num_channels;
+		int i;
+		dwc_hc_t * channel;
+		dwc_otg_hc_regs_t * hc_regs;
+		hcchar_data_t hcchar;
+		num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
+		/* NOTE(review): list_empty(&channel->hc_list_entry) is used
+		 * below as "channel is not on the free list", i.e. in use —
+		 * confirm against the channel allocation code. */
+		if (!dwc_otg_hcd->core_if->dma_enable) {
+			/* Flush out any channel requests in slave mode. */
+			for (i = 0; i < num_channels; i++) {
+				channel = dwc_otg_hcd->hc_ptr_array[i];
+				if (list_empty(&channel->hc_list_entry)) {
+					hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
+					hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+					if (hcchar.b.chen) {
+						hcchar.b.chen = 0;
+						hcchar.b.chdis = 1;
+						hcchar.b.epdir = 0;
+						dwc_write_reg32(&hc_regs->hcchar,hcchar.d32);
+					}
+				}
+			}
+		}
+		for (i = 0; i < num_channels; i++) {
+			channel = dwc_otg_hcd->hc_ptr_array[i];
+			if (list_empty(&channel->hc_list_entry)) {
+				hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i];
+				hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+				if (hcchar.b.chen) {
+					/* Halt the channel. */
+					hcchar.b.chdis = 1;
+					dwc_write_reg32(&hc_regs->hcchar,hcchar.d32);
+				}
+				dwc_otg_hc_cleanup(dwc_otg_hcd->core_if,channel);
+				/* Return the channel to the free pool. */
+				list_add_tail(&channel->hc_list_entry,
+					      &dwc_otg_hcd->free_hc_list);
+			}
+		}
+	}
+
+	/* A disconnect will end the session so the B-Device is no
+	 * longer a B-host. */
+	((struct usb_hcd *)_p)->self.is_b_host = 0;
+	return 1;
+}
+
+/**
+ * Connection timeout function. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds.
+ */
+void dwc_otg_hcd_connect_timeout(unsigned long _ptr)
+{
+	/* Timer callback: only logs; no state is changed on timeout. */
+	DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)_ptr);
+	DWC_PRINT("Connect Timeout\n");
+	DWC_ERROR("Device Not Connected/Responding\n");
+}
+
+/**
+ * Start the connection timer. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds. The
+ * timer is deleted if a port connect interrupt occurs before the
+ * timer expires.
+ */
+static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t * _hcd)
+{
+	/* Legacy (pre-timer_setup) kernel timer API: the timer is
+	 * re-initialized each time it is armed. */
+	init_timer(&_hcd->conn_timer);
+	_hcd->conn_timer.function = dwc_otg_hcd_connect_timeout;
+	/* data is unused by dwc_otg_hcd_connect_timeout(), hence 0. */
+	_hcd->conn_timer.data = (unsigned long)0;
+	_hcd->conn_timer.expires = jiffies + (HZ * 10);
+	add_timer(&_hcd->conn_timer);
+}
+
+/**
+ * HCD Callback function for session start. Starts the 10 second
+ * connection timer; the timer is deleted if a port connect interrupt
+ * occurs before it expires.
+ *
+ * @param _p void pointer to the <code>struct usb_hcd</code>
+ */
+static int32_t dwc_otg_hcd_session_start_cb(void *_p)
+{
+	dwc_otg_hcd_t *otg_hcd = hcd_to_dwc_otg_hcd(_p);
+
+	DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, _p);
+	dwc_otg_hcd_start_connect_timer(otg_hcd);
+	return 1;
+}
+
+
+/**
+ * HCD Callback structure for handling mode switching.
+ */
+/* Callbacks registered with the CIL for host-mode events; .p (the opaque
+ * context pointer passed back to each callback) is filled in later. */
+static dwc_otg_cil_callbacks_t hcd_cil_callbacks =
+{
+	.start = dwc_otg_hcd_start_cb,
+	.stop = dwc_otg_hcd_stop_cb,
+	.disconnect = dwc_otg_hcd_disconnect_cb,
+	.session_start = dwc_otg_hcd_session_start_cb,
+	.p = 0,
+};
+
+
+/**
+ * Reset tasklet function
+ */
+static void reset_tasklet_func(unsigned long data)
+{
+	dwc_otg_hcd_t * dwc_otg_hcd = (dwc_otg_hcd_t *) data;
+	dwc_otg_core_if_t * core_if = dwc_otg_hcd->core_if;
+	hprt0_data_t hprt0;
+	DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n");
+	/* Drive port reset: assert prtrst, hold 60 ms, then deassert.
+	 * mdelay in a tasklet busy-waits, but the reset pulse width must
+	 * be maintained. */
+	hprt0.d32 = dwc_otg_read_hprt0(core_if);
+	hprt0.b.prtrst = 1;
+	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+	mdelay(60);
+	hprt0.b.prtrst = 0;
+	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+	/* Tell the hub driver a reset-change occurred. */
+	dwc_otg_hcd->flags.b.port_reset_change = 1;
+	return;
+}
+/* Tasklet driving the deferred port reset in reset_tasklet_func();
+ * .data is overwritten with the dwc_otg_hcd pointer in dwc_otg_hcd_init(). */
+static struct tasklet_struct reset_tasklet =
+{
+ .next = NULL,
+ .state = 0,
+ .count = ATOMIC_INIT(0),
+ .func = reset_tasklet_func,
+ .data = 0,
+};
+
+
+#ifdef OTG_PLB_DMA_TASKLET
+/**
+ * PLB DMA tasklet. Starts the deferred PLB DMA transfer described by
+ * core_if->dma_xfer, in the direction (OTG_TX_DMA / OTG_RX_DMA)
+ * recorded when the tasklet was scheduled. Local IRQs are disabled
+ * for the whole register-programming sequence.
+ */
+static void plbdma_tasklet_func(unsigned long data)
+{
+ unsigned long flags;
+ dwc_otg_core_if_t * _core_if = (dwc_otg_core_if_t *) data;
+ dma_xfer_t * dma_xfer = &_core_if->dma_xfer;
+
+ local_irq_save(flags);
+ DWC_DEBUGPL(DBG_SP, "Plbdma tasklet called\n");
+
+ if (_core_if->dma_xfer.dma_dir == OTG_TX_DMA) {
+ /* Word-aligned buffers go to the DMA engine directly; otherwise the
+ * pointer is fetched via get_unaligned().
+ * NOTE(review): get_unaligned() on the pointer itself (not on data it
+ * points to) looks suspicious — confirm intent against the PLB DMA
+ * helper's expectations. */
+ if ((((unsigned long)dma_xfer->dma_data_buff) & 0x3) == 0) {
+ /* call tx_dma - src,dest,len,intr */
+ ppc4xx_start_plb_dma(_core_if, (void *)dma_xfer->dma_data_buff,
+ dma_xfer->dma_data_fifo, (dma_xfer->dma_count * 4), PLB_DMA_INT_ENA,
+ PLB_DMA_CH, OTG_TX_DMA);
+ } else {
+ ppc4xx_start_plb_dma(_core_if, (void *)get_unaligned(dma_xfer->dma_data_buff),
+ dma_xfer->dma_data_fifo, (dma_xfer->dma_count * 4), PLB_DMA_INT_ENA,
+ PLB_DMA_CH, OTG_TX_DMA);
+ }
+ }
+ else {
+ /* RX direction: FIFO is the source, buffer the destination. */
+ DWC_DEBUGPL(DBG_HCD, "0x%p 0x%p %d\n", (void *)dma_xfer->dma_data_fifo,
+ dma_xfer->dma_data_buff, dma_xfer->dma_count );
+
+ ppc4xx_start_plb_dma(_core_if, (void *)dma_xfer->dma_data_fifo,
+ dma_xfer->dma_data_buff, (dma_xfer->dma_count * 4), PLB_DMA_INT_ENA,
+ PLB_DMA_CH, OTG_RX_DMA);
+ }
+
+ local_irq_restore(flags);
+ return;
+}
+/* Tasklet driving plbdma_tasklet_func(); .data is overwritten with the
+ * core_if pointer in dwc_otg_hcd_init(). */
+static struct tasklet_struct plbdma_tasklet =
+{
+ .next = NULL,
+ .state = 0,
+ .count = ATOMIC_INIT(0),
+ .func = plbdma_tasklet_func,
+ .data = 0,
+};
+
+#endif
+
+/**
+ * Initializes the HCD. This function allocates memory for and initializes the
+ * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the
+ * USB bus with the core and calls the hc_driver->start() function. It returns
+ * a negative error on failure.
+ *
+ * @param _dev            device providing registers/IRQ via its drvdata
+ * @param dwc_otg_device  OTG device state, stored back into _dev's drvdata
+ * @return 0 on success, -ENOMEM on allocation failure, or the error from
+ *         usb_add_hcd().
+ */
+int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device)
+{
+	struct usb_hcd *hcd = NULL;
+	dwc_otg_hcd_t *dwc_otg_hcd = NULL;
+	dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+	int num_channels;
+	int i;
+	dwc_hc_t *channel;
+	int retval = 0;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n");
+
+	/*
+	 * Allocate memory for the base HCD plus the DWC OTG HCD.
+	 * Initialize the base HCD.
+	 */
+	hcd = usb_create_hcd(&dwc_otg_hc_driver, _dev, dev_name(_dev));
+	if (hcd == NULL) {
+		retval = -ENOMEM;
+		goto error1;
+	}
+	dev_set_drvdata(_dev, dwc_otg_device);	/* fscz restore */
+	hcd->regs = otg_dev->base;
+	hcd->self.otg_port = 1;
+
+	/* Initialize the DWC OTG HCD. */
+	dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd);
+	dwc_otg_hcd->core_if = otg_dev->core_if;
+	otg_dev->hcd = dwc_otg_hcd;
+
+	/* Register the HCD CIL Callbacks */
+	dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if,
+					   &hcd_cil_callbacks, hcd);
+
+	/* Initialize the non-periodic schedule. */
+	INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive);
+	INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active);
+	INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_deferred);
+
+	/* Initialize the periodic schedule. */
+	INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive);
+	INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready);
+	INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned);
+	INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued);
+
+	/*
+	 * Create a host channel descriptor for each host channel implemented
+	 * in the controller. Initialize the channel descriptor array.
+	 */
+	INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list);
+	num_channels = dwc_otg_hcd->core_if->core_params->host_channels;
+	for (i = 0; i < num_channels; i++) {
+		/* kzalloc replaces the original kmalloc+memset pair. */
+		channel = kzalloc(sizeof(dwc_hc_t), GFP_KERNEL);
+		if (channel == NULL) {
+			retval = -ENOMEM;
+			DWC_ERROR("%s: host channel allocation failed\n", __func__);
+			goto error2;
+		}
+		channel->hc_num = i;
+		dwc_otg_hcd->hc_ptr_array[i] = channel;
+#ifdef DEBUG
+		init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]);
+#endif /* */
+		DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel);
+	}
+
+	/* Initialize the Connection timeout timer. */
+	init_timer(&dwc_otg_hcd->conn_timer);
+
+	/* Initialize reset tasklet. */
+	reset_tasklet.data = (unsigned long)dwc_otg_hcd;
+	dwc_otg_hcd->reset_tasklet = &reset_tasklet;
+
+#ifdef OTG_PLB_DMA_TASKLET
+	/* Initialize plbdma tasklet. */
+	plbdma_tasklet.data = (unsigned long)dwc_otg_hcd->core_if;
+	dwc_otg_hcd->core_if->plbdma_tasklet = &plbdma_tasklet;
+#endif
+
+	/* Set device flags indicating whether the HCD supports DMA. */
+	if (otg_dev->core_if->dma_enable) {
+		DWC_PRINT("Using DMA mode\n");
+		_dev->dma_mask = &dma_mask;
+		_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	} else {
+		DWC_PRINT("Using Slave mode\n");
+		_dev->dma_mask = (void *)0;
+		_dev->coherent_dma_mask = 0;
+	}
+
+	/*
+	 * Finish generic HCD initialization and start the HCD. This function
+	 * allocates the DMA buffer pool, registers the USB bus, requests the
+	 * IRQ line, and calls dwc_otg_hcd_start method.
+	 */
+	retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED);
+	if (retval < 0) {
+		goto error2;
+	}
+
+	/*
+	 * Allocate space for storing data on status transactions. Normally no
+	 * data is sent, but this space acts as a bit bucket. This must be
+	 * done after usb_add_hcd since that function allocates the DMA buffer
+	 * pool.
+	 */
+	if (otg_dev->core_if->dma_enable) {
+		dwc_otg_hcd->status_buf =
+		    dma_alloc_coherent(_dev, DWC_OTG_HCD_STATUS_BUF_SIZE,
+				       &dwc_otg_hcd->status_buf_dma,
+				       GFP_KERNEL | GFP_DMA);
+	} else {
+		dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE, GFP_KERNEL);
+	}
+	if (dwc_otg_hcd->status_buf == NULL) {
+		retval = -ENOMEM;
+		DWC_ERROR("%s: status_buf allocation failed\n", __func__);
+		goto error3;
+	}
+	DWC_DEBUGPL(DBG_HCD,
+		    "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n",
+		    _dev->bus_id, hcd->self.busnum);
+	return 0;
+
+	/* Error conditions: unwind in reverse order of acquisition.
+	 * dwc_otg_hcd_free() releases any partially-built channel array. */
+error3:
+	usb_remove_hcd(hcd);
+error2:
+	dwc_otg_hcd_free(hcd);
+	usb_put_hcd(hcd);
+error1:
+	return retval;
+}
+
+
+/**
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ */
+void dwc_otg_hcd_remove(struct device *_dev)
+{
+	dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
+	dwc_otg_hcd_t *otg_hcd = otg_dev->hcd;
+	struct usb_hcd *hcd = dwc_otg_hcd_to_hcd(otg_hcd);
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n");
+
+	/* Turn off all interrupts: clear gintmsk and bit 0 of gahbcfg. */
+	dwc_write_reg32(&otg_hcd->core_if->core_global_regs->gintmsk, 0);
+	dwc_modify_reg32(&otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0);
+
+	usb_remove_hcd(hcd);
+	dwc_otg_hcd_free(hcd);
+	usb_put_hcd(hcd);
+}
+
+
+/* =========================================================================
+ * Linux HC Driver Functions
+ * ========================================================================= */
+
+/**
+ * Initializes dynamic portions of the DWC_otg HCD state: clears flags and
+ * channel counters, rebuilds the free-channel list with every channel in a
+ * clean state, then re-initializes the core for host mode.
+ */
+static void hcd_reinit(dwc_otg_hcd_t * _hcd)
+{
+	int i;
+	int num_channels;
+	dwc_hc_t *ch;
+
+	_hcd->flags.d32 = 0;
+	_hcd->non_periodic_qh_ptr = &_hcd->non_periodic_sched_active;
+	_hcd->non_periodic_channels = 0;
+	_hcd->periodic_channels = 0;
+
+	/* Empty the free list, then re-add every channel in a clean state. */
+	while (!list_empty(&_hcd->free_hc_list))
+		list_del(_hcd->free_hc_list.next);
+
+	num_channels = _hcd->core_if->core_params->host_channels;
+	for (i = 0; i < num_channels; i++) {
+		ch = _hcd->hc_ptr_array[i];
+		list_add_tail(&ch->hc_list_entry, &_hcd->free_hc_list);
+		dwc_otg_hc_cleanup(_hcd->core_if, ch);
+	}
+
+	/* Initialize the DWC core for host mode operation. */
+	dwc_otg_core_host_init(_hcd->core_if);
+}
+
+
+/** Initializes the DWC_otg controller and its root hub and prepares it for host
+ * mode operation. Activates the root port. Returns 0 on success and a negative
+ * error code on failure.
+ *
+ * Bug fix: the original dereferenced udev->speed BEFORE checking the
+ * usb_alloc_dev() result, crashing on allocation failure. */
+int dwc_otg_hcd_start(struct usb_hcd *_hcd)
+{
+	dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	struct usb_device *udev;
+	struct usb_bus *bus;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n");
+	bus = hcd_to_bus(_hcd);
+
+	/* Initialize the bus state. */
+	_hcd->state = HC_STATE_RUNNING;
+
+	/* Initialize and connect root hub if one is not already attached */
+	if (bus->root_hub) {
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n");
+
+		/* Inform the HUB driver to resume. */
+		usb_hcd_resume_root_hub(_hcd);
+	} else {
+		udev = usb_alloc_dev(NULL, bus, 0);
+		if (!udev) {
+			DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n");
+			return -ENODEV;
+		}
+		/* Only touch the device after the NULL check above. */
+		udev->speed = USB_SPEED_HIGH;
+	}
+	hcd_reinit(dwc_otg_hcd);
+	return 0;
+}
+/**
+ * Kill any remaining URBs on the QHs of _qh_list, then remove and free
+ * every QH. A list whose next pointer is still NULL was never
+ * initialized and is left alone.
+ */
+static void qh_list_free(dwc_otg_hcd_t * _hcd, struct list_head *_qh_list)
+{
+	dwc_otg_qh_t *qh;
+
+	/* The list hasn't been initialized yet: nothing to free. */
+	if (_qh_list->next == NULL)
+		return;
+
+	/* Ensure there are no QTDs or URBs left. */
+	kill_urbs_in_qh_list(_hcd, _qh_list);
+
+	while (!list_empty(_qh_list)) {
+		qh = list_entry(_qh_list->next, dwc_otg_qh_t, qh_list_entry);
+		dwc_otg_hcd_qh_remove_and_free(_hcd, qh);
+	}
+}
+
+
+/**
+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ */
+void dwc_otg_hcd_stop(struct usb_hcd *_hcd)
+{
+	dwc_otg_hcd_t *otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	hprt0_data_t hprt0 = {.d32 = 0};
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n");
+
+	/* Turn off all host-specific interrupts. */
+	dwc_otg_disable_host_interrupts(otg_hcd->core_if);
+
+	/*
+	 * The root hub should be disconnected before this function is called.
+	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
+	 * and the QH lists (via ..._hcd_endpoint_disable).
+	 */
+
+	/* Turn off the vbus power */
+	DWC_PRINT("PortPower off\n");
+	hprt0.b.prtpwr = 0;
+	dwc_write_reg32(otg_hcd->core_if->host_if->hprt0, hprt0.d32);
+}
+
+
+/** Returns the current frame number, read from the HFNUM host register. */
+int dwc_otg_hcd_get_frame_number(struct usb_hcd *_hcd)
+{
+	dwc_otg_hcd_t *otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	hfnum_data_t hfnum;
+
+	hfnum.d32 =
+	    dwc_read_reg32(&otg_hcd->core_if->host_if->host_global_regs->hfnum);
+#ifdef DEBUG_SOF
+	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n",
+		    hfnum.b.frnum);
+#endif /* */
+	return hfnum.b.frnum;
+}
+
+
+/**
+ * Frees secondary storage associated with the dwc_otg_hcd structure contained
+ * in the struct usb_hcd field: the QH/QTD schedule lists, the host channel
+ * descriptors allocated in dwc_otg_hcd_init(), and the status buffer.
+ */
+void dwc_otg_hcd_free(struct usb_hcd *_hcd)
+{
+	dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	int i;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n");
+	del_timers(dwc_otg_hcd);
+
+	/* Free memory for QH/QTD lists */
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_deferred);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned);
+	qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued);
+
+	/* Free memory for the host channels. */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i];
+		if (hc != NULL) {
+			DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc);
+			kfree(hc);
+		}
+	}
+	if (dwc_otg_hcd->core_if->dma_enable) {
+		if (dwc_otg_hcd->status_buf_dma) {
+			dma_free_coherent(_hcd->self.controller,
+					  DWC_OTG_HCD_STATUS_BUF_SIZE,
+					  dwc_otg_hcd->status_buf,
+					  dwc_otg_hcd->status_buf_dma);
+		}
+	} else {
+		/* kfree(NULL) is a no-op; the old NULL guard was redundant. */
+		kfree(dwc_otg_hcd->status_buf);
+	}
+}
+
+
+#ifdef DEBUG
+/* Debug-only helper: prints every interesting field of an URB (pipe,
+ * endpoint type, device speed, buffer pointers/DMA handles, and per-packet
+ * ISO descriptors). _fn_name tags the output with the caller's name. */
+static void dump_urb_info(struct urb *_urb, char *_fn_name)
+{
+ DWC_PRINT("%s, urb %p\n", _fn_name, _urb);
+ DWC_PRINT(" Device address: %d\n", usb_pipedevice(_urb->pipe));
+ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(_urb->pipe),
+ (usb_pipein(_urb->pipe) ? "IN" : "OUT"));
+ /* GCC statement-expression: the switch selects the type string, and the
+ * trailing 'pipetype;' is the expression's value. */
+ DWC_PRINT(" Endpoint type: %s\n", ( {
+ char *pipetype;
+ switch (usb_pipetype(_urb->pipe)) {
+ case PIPE_CONTROL:
+ pipetype = "CONTROL"; break;
+ case PIPE_BULK:
+ pipetype = "BULK"; break;
+ case PIPE_INTERRUPT:
+ pipetype = "INTERRUPT"; break;
+ case PIPE_ISOCHRONOUS:
+ pipetype = "ISOCHRONOUS"; break;
+ default:
+ pipetype = "UNKNOWN"; break;
+ };
+ pipetype;
+ } )) ;
+ /* Same statement-expression idiom for the device speed string. */
+ DWC_PRINT(" Speed: %s\n", ( {
+ char *speed;
+ switch (_urb->dev->speed) {
+ case USB_SPEED_HIGH:
+ speed = "HIGH"; break;
+ case USB_SPEED_FULL:
+ speed = "FULL"; break;
+ case USB_SPEED_LOW:
+ speed = "LOW"; break;
+ default:
+ speed = "UNKNOWN"; break;
+ };
+ speed;
+ } )) ;
+ DWC_PRINT(" Max packet size: %d\n",
+ usb_maxpacket(_urb->dev, _urb->pipe, usb_pipeout(_urb->pipe)));
+ DWC_PRINT(" Data buffer length: %d\n", _urb->transfer_buffer_length);
+ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n",
+ _urb->transfer_buffer, (void *)_urb->transfer_dma);
+ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", _urb->setup_packet,
+ (void *)_urb->setup_dma);
+ DWC_PRINT(" Interval: %d\n", _urb->interval);
+ /* ISO URBs additionally carry one descriptor per packet. */
+ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+ for (i = 0; i < _urb->number_of_packets; i++) {
+ DWC_PRINT(" ISO Desc %d:\n", i);
+ DWC_PRINT(" offset: %d, length %d\n",
+ _urb->iso_frame_desc[i].offset,
+ _urb->iso_frame_desc[i].length);
+ }
+ }
+}
+/* Debug-only helper: prints the register and software state of the channel
+ * assigned to a QH, plus the three non-periodic schedule lists and the
+ * channel pointer array.
+ *
+ * Bug fix: the original printed the deferred list under the "NP active
+ * sched" heading and the active list under "NP deferred sched"; the
+ * headings now match the lists actually walked. The inner loop variable
+ * no longer shadows the outer 'hc'. */
+static void dump_channel_info(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * qh)
+{
+	if (qh->channel != NULL) {
+		dwc_hc_t *hc = qh->channel;
+		struct list_head *item;
+		dwc_otg_qh_t *qh_item;
+		int num_channels = _hcd->core_if->core_params->host_channels;
+		int i;
+		dwc_otg_hc_regs_t *hc_regs;
+		hcchar_data_t hcchar;
+		hcsplt_data_t hcsplt;
+		hctsiz_data_t hctsiz;
+		uint32_t hcdma;
+
+		hc_regs = _hcd->core_if->host_if->hc_regs[hc->hc_num];
+		hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+		hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt);
+		hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz);
+		hcdma = dwc_read_reg32(&hc_regs->hcdma);
+		DWC_PRINT(" Assigned to channel %p:\n", hc);
+		DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+		DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+		DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+			  hc->dev_addr, hc->ep_num, hc->ep_is_in);
+		DWC_PRINT(" ep_type: %d\n", hc->ep_type);
+		DWC_PRINT(" max_packet: %d\n", hc->max_packet);
+		DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
+		DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
+		DWC_PRINT(" halt_status: %d\n", hc->halt_status);
+		DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
+		DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
+		DWC_PRINT(" qh: %p\n", hc->qh);
+		DWC_PRINT(" NP inactive sched:\n");
+		list_for_each(item, &_hcd->non_periodic_sched_inactive) {
+			qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+			DWC_PRINT(" %p\n", qh_item);
+		}
+		DWC_PRINT(" NP active sched:\n");
+		list_for_each(item, &_hcd->non_periodic_sched_active) {
+			qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+			DWC_PRINT(" %p\n", qh_item);
+		}
+		DWC_PRINT(" NP deferred sched:\n");
+		list_for_each(item, &_hcd->non_periodic_sched_deferred) {
+			qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry);
+			DWC_PRINT(" %p\n", qh_item);
+		}
+		DWC_PRINT(" Channels: \n");
+		for (i = 0; i < num_channels; i++) {
+			dwc_hc_t *ch = _hcd->hc_ptr_array[i];
+			DWC_PRINT(" %2d: %p\n", i, ch);
+		}
+	}
+}
+
+#endif /* */
+
+/** Starts processing a USB transfer request specified by a USB Request Block
+ * (URB). mem_flags indicates the type of memory allocation to use while
+ * processing this URB.
+ *
+ * Runs with local IRQs disabled while the URB is linked to its endpoint and
+ * a QTD is created and queued. Returns 0 on success, -ENODEV if the port is
+ * no longer connected, -ENOMEM if the QTD cannot be allocated, or the error
+ * from usb_hcd_link_urb_to_ep()/dwc_otg_hcd_qtd_add().
+ *
+ * NOTE(review): the error paths after a successful link return without
+ * calling usb_hcd_unlink_urb_from_ep() — verify the USB core tolerates
+ * this, or that the dequeue path always runs afterwards. */
+int dwc_otg_hcd_urb_enqueue(struct usb_hcd *_hcd,
+ struct urb *_urb,
+ gfp_t _mem_flags)
+{
+ unsigned long flags;
+ int retval;
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+ dwc_otg_qtd_t * qtd;
+
+ local_irq_save(flags);
+ /* Hook the URB into the core's per-endpoint list first. */
+ retval = usb_hcd_link_urb_to_ep(_hcd, _urb);
+ if (retval) {
+ local_irq_restore(flags);
+ return retval;
+ }
+#ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ dump_urb_info(_urb, "dwc_otg_hcd_urb_enqueue");
+ }
+#endif /* */
+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ /* No longer connected. */
+ local_irq_restore(flags);
+ return -ENODEV;
+ }
+ /* Wrap the URB in a QTD and queue it on the matching QH. */
+ qtd = dwc_otg_hcd_qtd_create(_urb);
+ if (qtd == NULL) {
+ local_irq_restore(flags);
+ DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n");
+ return -ENOMEM;
+ }
+ retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd);
+ if (retval < 0) {
+ DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. "
+ "Error status %d\n", retval);
+ dwc_otg_hcd_qtd_free(qtd);
+ }
+ local_irq_restore(flags);
+ return retval;
+}
+
+
+/** Aborts/cancels a USB transfer request. Always returns 0 to indicate
+ * success once usb_hcd_check_unlink_urb() has validated the unlink.
+ *
+ * Runs with local IRQs disabled. If the URB's QTD is currently assigned to
+ * a host channel and the port is still connected, the channel is halted so
+ * it can be reused; the QTD is then freed and the QH deactivated or removed.
+ * The URB is always given back to the USB core with _status. */
+int dwc_otg_hcd_urb_dequeue(struct usb_hcd *_hcd, struct urb *_urb, int _status)
+{
+ unsigned long flags;
+ dwc_otg_hcd_t * dwc_otg_hcd;
+ dwc_otg_qtd_t * urb_qtd;
+ dwc_otg_qh_t * qh;
+ int retval;
+
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n");
+ local_irq_save(flags);
+ retval = usb_hcd_check_unlink_urb(_hcd, _urb, _status);
+ if (retval) {
+ local_irq_restore(flags);
+ return retval;
+ }
+
+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+
+ urb_qtd = (dwc_otg_qtd_t *) _urb->hcpriv;
+ if (urb_qtd == NULL) {
+ printk("urb_qtd is NULL for _urb %08x\n",(unsigned)_urb);
+ goto done;
+ }
+ qh = (dwc_otg_qh_t *) urb_qtd->qtd_qh_ptr;
+ if (qh == NULL) {
+ goto done;
+ }
+#ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ dump_urb_info(_urb, "dwc_otg_hcd_urb_dequeue");
+ if (urb_qtd == qh->qtd_in_process) {
+ dump_channel_info(dwc_otg_hcd, qh);
+ }
+ }
+
+#endif /* */
+ if (urb_qtd == qh->qtd_in_process) {
+ /* The QTD is in process (it has been assigned to a channel). */
+ if (dwc_otg_hcd->flags.b.port_connect_status) {
+
+ /*
+ * If still connected (i.e. in host mode), halt the
+ * channel so it can be used for other transfers. If
+ * no longer connected, the host registers can't be
+ * written to halt the channel since the core is in
+ * device mode.
+ */
+ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel,
+ DWC_OTG_HC_XFER_URB_DEQUEUE);
+ }
+ }
+
+ /*
+ * Free the QTD and clean up the associated QH. Leave the QH in the
+ * schedule if it has any remaining QTDs.
+ * NOTE(review): urb_qtd dangles after the free below; only its pointer
+ * VALUE is compared, never dereferenced — confirm qtd_in_process cannot
+ * be recycled between the free and the comparison.
+ */
+ dwc_otg_hcd_qtd_remove_and_free(urb_qtd);
+ if (urb_qtd == qh->qtd_in_process) {
+ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0);
+ qh->channel = NULL;
+ qh->qtd_in_process = NULL;
+ } else if (list_empty(&qh->qtd_list)) {
+ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh);
+ }
+done:
+ local_irq_restore(flags);
+ _urb->hcpriv = NULL;
+
+ /* Higher layer software sets URB status. */
+#if 1 /* Fixed bug relate kernel hung when unplug cable */
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+ usb_hcd_giveback_urb(_hcd, _urb, _status);
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ DWC_PRINT("Called usb_hcd_giveback_urb()\n");
+ DWC_PRINT(" urb->status = %d\n", _status);
+ }
+#else
+ if (_status != -ECONNRESET) {
+ usb_hcd_unlink_urb_from_ep(_hcd, _urb);
+ usb_hcd_giveback_urb(_hcd, _urb, _status);
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ DWC_PRINT("Called usb_hcd_giveback_urb()\n");
+ DWC_PRINT(" urb->status = %d\n", _status);
+ }
+ }
+#endif
+ return 0;
+}
+
+
+/** Frees resources in the DWC_otg controller related to a given endpoint. Also
+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ * must already be dequeued. */
+void dwc_otg_hcd_endpoint_disable(struct usb_hcd *_hcd,
+				  struct usb_host_endpoint *_ep)
+{
+	dwc_otg_hcd_t *otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	dwc_otg_qh_t *qh = (dwc_otg_qh_t *)_ep->hcpriv;
+
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, "
+		    "endpoint=%d\n", _ep->desc.bEndpointAddress,
+		    dwc_ep_addr_to_endpoint(_ep->desc.bEndpointAddress));
+
+	if (qh == NULL)
+		return;
+#ifdef DEBUG
+	/** Check that the QTD list is really empty */
+	if (!list_empty(&qh->qtd_list)) {
+		DWC_WARN("DWC OTG HCD EP DISABLE:"
+			 " QTD List for this endpoint is not empty\n");
+	}
+#endif /* */
+	dwc_otg_hcd_qh_remove_and_free(otg_hcd, qh);
+	_ep->hcpriv = NULL;
+}
+
+extern int fscz_debug;
+/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE
+ * if there was no interrupt to handle, IRQ_HANDLED if there was a valid
+ * interrupt.
+ *
+ * This function is called by the USB core when an interrupt occurs */
+irqreturn_t dwc_otg_hcd_irq(struct usb_hcd * _hcd)
+{
+	dwc_otg_hcd_t *otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+
+	return IRQ_RETVAL(dwc_otg_hcd_handle_intr(otg_hcd));
+}
+
+/** Creates Status Change bitmap for the root hub and root port. Bit 0 is the
+ * status change indicator for the root hub (never set here); bit 1 is the
+ * indicator for the single root port. Returns 1 if either change indicator
+ * is 1, otherwise returns 0. */
+int dwc_otg_hcd_hub_status_data(struct usb_hcd *_hcd, char *_buf)
+{
+	dwc_otg_hcd_t *otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+	int port_changed = otg_hcd->flags.b.port_connect_status_change
+	    || otg_hcd->flags.b.port_reset_change
+	    || otg_hcd->flags.b.port_enable_change
+	    || otg_hcd->flags.b.port_suspend_change
+	    || otg_hcd->flags.b.port_over_current_change;
+
+	_buf[0] = port_changed << 1;
+
+#ifdef DEBUG
+	if (_buf[0]) {
+		DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:"
+			    " Root port status changed\n");
+		DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n",
+			    otg_hcd->flags.b.port_connect_status_change);
+		DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n",
+			    otg_hcd->flags.b.port_reset_change);
+		DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n",
+			    otg_hcd->flags.b.port_enable_change);
+		DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n",
+			    otg_hcd->flags.b.port_suspend_change);
+		DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n",
+			    otg_hcd->flags.b.port_over_current_change);
+	}
+
+#endif /* */
+	return (_buf[0] != 0);
+}
+
+
+#ifdef DWC_HS_ELECT_TST
+/*
+ * Quick and dirty hack to implement the HS Electrical Test
+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature.
+ *
+ * This code was copied from our userspace app "hset". It sends a
+ * Get Device Descriptor control sequence in two parts, first the
+ * Setup packet by itself, followed some time later by the In and
+ * Ack packets. Rather than trying to figure out how to add this
+ * functionality to the normal driver code, we just hijack the
+ * hardware, using these two function to drive the hardware
+ * directly.
+ */
+/* Global shortcuts to the core/host/channel register blocks and the FIFO,
+ * used only by the DWC_HS_ELECT_TST helpers below.
+ * NOTE(review): their initialization is not visible in this chunk; confirm
+ * they are assigned before do_setup()/do_in_ack() run. */
+dwc_otg_core_global_regs_t * global_regs;
+dwc_otg_host_global_regs_t * hc_global_regs;
+dwc_otg_hc_regs_t * hc_regs;
+uint32_t * data_fifo;
+
+/* HS electrical test helper: sends the Setup stage of a Get Device
+ * Descriptor control transfer by programming channel 0's registers
+ * directly. Busy-waits on GINTSTS (no interrupts, no scheduling), so it
+ * must only run in the test context where blocking the CPU is acceptable. */
+static void do_setup(void)
+{
+ gintsts_data_t gintsts;
+ hctsiz_data_t hctsiz;
+ hcchar_data_t hcchar;
+ haint_data_t haint;
+ hcint_data_t hcint;
+
+ /* Enable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+ /* Enable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /*
+ * Send Setup packet (Get Device Descriptor)
+ */
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+
+ //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+
+// hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ //sleep(1);
+ /* 1 s settle time after requesting the channel disable. */
+ mdelay(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ: one 8-byte SETUP packet */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 8;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR: control OUT to EP0, mps 8, enable the channel */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 0;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ /* Fill FIFO with Setup data for Get Device Descriptor */
+ data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
+ dwc_write_reg32(data_fifo++, 0x01000680);
+ dwc_write_reg32(data_fifo++, 0x00080000);
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt (busy-wait, no timeout) */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Disable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+ /* Disable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+}
+
+static void do_in_ack(void)
+{
+ gintsts_data_t gintsts;
+ hctsiz_data_t hctsiz;
+ hcchar_data_t hcchar;
+ haint_data_t haint;
+ hcint_data_t hcint;
+ host_grxsts_data_t grxsts;
+
+ /* Enable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001);
+
+ /* Enable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /*
+ * Receive Control In packet
+ */
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ if (hcchar.b.chen) {
+ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ //sleep(1);
+ mdelay(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 8;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 1;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for receive status queue interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.rxstsqlvl == 0);
+
+ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read RXSTS */
+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+
+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+ /* Clear RXSTSQLVL in GINTSTS */
+ gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1;
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+ switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN:
+ /* Read the data into the host buffer */
+ if (grxsts.b.bcnt > 0) {
+ int i;
+ int word_count = (grxsts.b.bcnt + 3) / 4;
+ data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
+ for (i = 0; i < word_count; i++) {
+ (void)dwc_read_reg32(data_fifo++);
+ }
+ }
+ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt);
+ break;
+ default:
+ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n");
+ break;
+ }
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for receive status queue interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.rxstsqlvl == 0);
+
+ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read RXSTS */
+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp);
+
+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32);
+
+ /* Clear RXSTSQLVL in GINTSTS */
+ gintsts.d32 = 0;
+ gintsts.b.rxstsqlvl = 1;
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+ switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+ break;
+ default:
+ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n");
+ break;
+ }
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ // usleep(100000);
+ // mdelay(100);
+ mdelay(1);
+
+ /*
+ * Send handshake packet
+ */
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Make sure channel is disabled */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ if (hcchar.b.chen) {
+
+ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32);
+ hcchar.b.chdis = 1;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+
+ //sleep(1);
+ mdelay(1000);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //if (hcchar.b.chen) {
+ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32);
+ //}
+ }
+
+ /* Set HCTSIZ */
+ hctsiz.d32 = 0;
+ hctsiz.b.xfersize = 0;
+ hctsiz.b.pktcnt = 1;
+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1;
+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32);
+
+ /* Set HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL;
+ hcchar.b.epdir = 0;
+ hcchar.b.epnum = 0;
+ hcchar.b.mps = 8;
+ hcchar.b.chen = 1;
+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32);
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Wait for host channel interrupt */
+ do {
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+ } while (gintsts.b.hcintr == 0);
+
+ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32);
+
+ /* Disable HCINTs */
+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000);
+
+ /* Disable HAINTs */
+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000);
+
+ /* Read HAINT */
+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint);
+
+ //fprintf(stderr, "HAINT: %08x\n", haint.d32);
+
+ /* Read HCINT */
+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+
+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32);
+
+ /* Read HCCHAR */
+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar);
+
+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32);
+
+ /* Clear HCINT */
+ dwc_write_reg32(&hc_regs->hcint, hcint.d32);
+
+ /* Clear HAINT */
+ dwc_write_reg32(&hc_global_regs->haint, haint.d32);
+
+ /* Clear GINTSTS */
+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+ /* Read GINTSTS */
+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts);
+
+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32);
+}
+
+#endif /* DWC_HS_ELECT_TST */
+
+/** Handles hub class-specific requests.*/
+int dwc_otg_hcd_hub_control(struct usb_hcd *_hcd, u16 _typeReq, u16 _wValue,
+ u16 _wIndex, char *_buf, u16 _wLength)
+{
+ int retval = 0;
+ dwc_otg_hcd_t * dwc_otg_hcd = hcd_to_dwc_otg_hcd(_hcd);
+ dwc_otg_core_if_t * core_if = hcd_to_dwc_otg_hcd(_hcd)->core_if;
+ struct usb_hub_descriptor *desc;
+ hprt0_data_t hprt0 = {.d32 = 0};
+ uint32_t port_status;
+ switch (_typeReq) {
+ case ClearHubFeature:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearHubFeature 0x%x\n", _wValue);
+ switch (_wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* Nothing required here */
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - ClearHubFeature request %xh unknown\n",
+ _wValue);
+ }
+ break;
+ case ClearPortFeature:
+ if (!_wIndex || _wIndex > 1)
+ goto error;
+ switch (_wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_ENABLE\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtena = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtres = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+ /* Clear Resume bit */
+ mdelay(100);
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_POWER:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_POWER\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_INDICATOR:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+
+ /* Port inidicator not supported */
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ /* Clears drivers internal connect status change
+ * flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
+ dwc_otg_hcd->flags.b.port_connect_status_change = 0;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* Clears the driver's internal Port Reset Change
+ * flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_RESET\n");
+ dwc_otg_hcd->flags.b.port_reset_change = 0;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ /* Clears the driver's internal Port
+ * Enable/Disable Change flag */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
+ dwc_otg_hcd->flags.b.port_enable_change = 0;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Clears the driver's internal Port Suspend
+ * Change flag, which is set when resume signaling on
+ * the host port is complete */
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
+ dwc_otg_hcd->flags.b.port_suspend_change = 0;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
+ dwc_otg_hcd->flags.b.port_over_current_change = 0;
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - "
+ "ClearPortFeature request %xh "
+ "unknown or unsupported\n", _wValue);
+ }
+ break;
+ case GetHubDescriptor:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetHubDescriptor\n");
+ desc = (struct usb_hub_descriptor *)_buf;
+ desc->bDescLength = 9;
+ desc->bDescriptorType = 0x29;
+ desc->bNbrPorts = 1;
+ desc->wHubCharacteristics = 0x08;
+ desc->bPwrOn2PwrGood = 1;
+ desc->bHubContrCurrent = 0;
+ desc->bitmap[0] = 0;
+ desc->bitmap[1] = 0xff;
+ break;
+ case GetHubStatus:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetHubStatus\n");
+ memset(_buf, 0, 4);
+ break;
+ case GetPortStatus:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "GetPortStatus\n");
+ if (!_wIndex || _wIndex > 1)
+ goto error;
+ port_status = 0;
+ if (dwc_otg_hcd->flags.b.port_connect_status_change)
+ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
+ if (dwc_otg_hcd->flags.b.port_enable_change)
+ port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
+ if (dwc_otg_hcd->flags.b.port_suspend_change)
+ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND);
+ if (dwc_otg_hcd->flags.b.port_reset_change)
+ port_status |= (1 << USB_PORT_FEAT_C_RESET);
+ if (dwc_otg_hcd->flags.b.port_over_current_change) {
+ DWC_ERROR("Device Not Supported\n");
+ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT);
+ }
+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return 0's for the remainder of the port status
+ * since the port register can't be read if the core
+ * is in device mode.
+ */
+ *((__le32 *) _buf) = cpu_to_le32(port_status);
+ break;
+ }
+ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0);
+ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32);
+ if (hprt0.b.prtconnsts)
+ port_status |= (1 << USB_PORT_FEAT_CONNECTION);
+ if (hprt0.b.prtena)
+ port_status |= (1 << USB_PORT_FEAT_ENABLE);
+ if (hprt0.b.prtsusp)
+ port_status |= (1 << USB_PORT_FEAT_SUSPEND);
+ if (hprt0.b.prtovrcurract)
+ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
+ if (hprt0.b.prtrst)
+ port_status |= (1 << USB_PORT_FEAT_RESET);
+ if (hprt0.b.prtpwr)
+ port_status |= (1 << USB_PORT_FEAT_POWER);
+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED)
+ port_status |= (1 << USB_PORT_FEAT_HIGHSPEED);
+
+ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED)
+ port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
+ if (hprt0.b.prttstctl)
+ port_status |= (1 << USB_PORT_FEAT_TEST);
+
+ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */
+ *((__le32 *) _buf) = cpu_to_le32(port_status);
+ break;
+ case SetHubFeature:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetHubFeature\n");
+
+ /* No HUB features supported */
+ break;
+ case SetPortFeature:
+ if (_wValue != USB_PORT_FEAT_TEST && (!_wIndex || _wIndex > 1))
+ goto error;
+ if (!dwc_otg_hcd->flags.b.port_connect_status) {
+ /*
+ * The port is disconnected, which means the core is
+ * either in device mode or it soon will be. Just
+ * return without doing anything since the port
+ * register can't be written if the core is in device
+ * mode.
+ */
+ break;
+ }
+ switch (_wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
+ if (_hcd->self.otg_port == _wIndex
+ && _hcd->self.b_hnp_enable) {
+ gotgctl_data_t gotgctl = {.d32 = 0};
+ gotgctl.b.hstsethnpen = 1;
+ dwc_modify_reg32(&core_if->core_global_regs->
+ gotgctl, 0, gotgctl.d32);
+ core_if->op_state = A_SUSPEND;
+ }
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+
+ //DWC_PRINT( "SUSPEND: HPRT0=%0x\n", hprt0.d32);
+ /* Suspend the Phy Clock */
+ {
+ pcgcctl_data_t pcgcctl = {.d32 = 0};
+ pcgcctl.b.stoppclk = 1;
+ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32);
+ }
+
+ /* For HNP the bus must be suspended for at least 200ms. */
+ if (_hcd->self.b_hnp_enable) {
+ mdelay(200);
+
+ //DWC_PRINT( "SUSPEND: wait complete! (%d)\n", _hcd->state);
+ }
+ break;
+ case USB_PORT_FEAT_POWER:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_POWER\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtpwr = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+ case USB_PORT_FEAT_RESET:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_RESET\n");
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+
+ /* When B-Host the Port reset bit is set in
+ * the Start HCD Callback function, so that
+ * the reset is started within 1ms of the HNP
+ * success interrupt. */
+ if (!_hcd->self.is_b_host) {
+ hprt0.b.prtrst = 1;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ }
+
+ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+ MDELAY(60);
+ hprt0.b.prtrst = 0;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ break;
+
+#ifdef DWC_HS_ELECT_TST
+ case USB_PORT_FEAT_TEST:
+ {
+ uint32_t t;
+ gintmsk_data_t gintmsk;
+ t = (_wIndex >> 8); /* MSB wIndex USB */
+ DWC_DEBUGPL(DBG_HCD,
+ "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_TEST %d\n",
+ t);
+ warn("USB_PORT_FEAT_TEST %d\n", t);
+ if (t < 6) {
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prttstctl = t;
+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
+ } else {
+ /* Setup global vars with reg addresses (quick and
+ * dirty hack, should be cleaned up)
+ */
+ global_regs = core_if->core_global_regs;
+ hc_global_regs = core_if->host_if->host_global_regs;
+ hc_regs = (dwc_otg_hc_regs_t *) ((char *) global_regs + 0x500);
+ data_fifo = (uint32_t *) ((char *)global_regs + 0x1000);
+ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */
+ /* Save current interrupt mask */
+ gintmsk.d32 =dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Drive suspend on the root port */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 1;
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0,hprt0.d32);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Drive resume on the root port */
+ hprt0.d32 = dwc_otg_read_hprt0(core_if);
+ hprt0.b.prtsusp = 0;
+ hprt0.b.prtres = 1;
+ dwc_write_reg32(core_if->host_if->hprt0,hprt0.d32);
+ mdelay(100);
+
+ /* Clear the resume bit */
+ hprt0.b.prtres = 0;
+ dwc_write_reg32(core_if->host_if->hprt0,hprt0.d32);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk,gintmsk.d32);
+ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */
+ /* Save current interrupt mask */
+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* 15 second delay per the test spec */
+ mdelay(15000);
+
+ /* Send the Setup packet */
+ do_setup();
+
+ /* 15 second delay so nothing else happens for awhile */
+ mdelay(15000);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk,gintmsk.d32);
+ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */
+ /* Save current interrupt mask */
+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk);
+
+ /* Disable all interrupts while we muck with
+ * the hardware directly
+ */
+ dwc_write_reg32(&global_regs->gintmsk, 0);
+
+ /* Send the Setup packet */
+ do_setup();
+
+ /* 15 second delay so nothing else happens for awhile */
+ mdelay(15000);
+
+ /* Send the In and Ack packets */
+ do_in_ack();
+
+ /* 15 second delay so nothing else happens for awhile */
+ mdelay(15000);
+
+ /* Restore interrupts */
+ dwc_write_reg32(&global_regs->gintmsk,gintmsk.d32);
+ }
+ }
+ break;
+ }
+
+#endif /* DWC_HS_ELECT_TST */
+ case USB_PORT_FEAT_INDICATOR:
+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - "
+ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+ /* Not supported */
+ break;
+ default:
+ retval = -EINVAL;
+ DWC_ERROR("DWC OTG HCD - "
+ "SetPortFeature request %xh "
+ "unknown or unsupported\n", _wValue);
+ break;
+ }
+ break;
+ default:
+ error:retval = -EINVAL;
+ DWC_WARN("DWC OTG HCD - "
+ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n",
+ _typeReq, _wIndex, _wValue);
+ break;
+ }
+ return retval;
+}
+
+
+/**
+ * Assigns transactions from a QTD to a free host channel and initializes the
+ * host channel to perform the transactions. The host channel is removed from
+ * the free list.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _qh Transactions from the first QTD for this QH are selected and
+ * assigned to a free host channel.
+ */
+static void assign_and_init_hc(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ dwc_hc_t * hc;
+ dwc_otg_qtd_t * qtd;
+ struct urb *urb;
+ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, _hcd, _qh);
+ hc = list_entry(_hcd->free_hc_list.next, dwc_hc_t, hc_list_entry);
+
+ /* Remove the host channel from the free list. */
+ list_del_init(&hc->hc_list_entry);
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+ urb = qtd->urb;
+ _qh->channel = hc;
+ _qh->qtd_in_process = qtd;
+
+ /*
+ * Use usb_pipedevice to determine device address. This address is
+ * 0 before the SET_ADDRESS command and the correct address afterward.
+ */
+ hc->dev_addr = usb_pipedevice(urb->pipe);
+ hc->ep_num = usb_pipeendpoint(urb->pipe);
+ if (urb->dev->speed == USB_SPEED_LOW) {
+ hc->speed = DWC_OTG_EP_SPEED_LOW;
+ } else if (urb->dev->speed == USB_SPEED_FULL) {
+ hc->speed = DWC_OTG_EP_SPEED_FULL;
+ } else {
+ hc->speed = DWC_OTG_EP_SPEED_HIGH;
+ }
+ hc->max_packet = dwc_max_packet(_qh->maxp);
+ hc->xfer_started = 0;
+ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS;
+ hc->error_state = (qtd->error_count > 0);
+ hc->halt_on_queue = 0;
+ hc->halt_pending = 0;
+ hc->requests = 0;
+
+ /*
+ * The following values may be modified in the transfer type section
+ * below. The xfer_len value may be reduced when the transfer is
+ * started to accommodate the max widths of the XferSize and PktCnt
+ * fields in the HCTSIZn register.
+ */
+ hc->do_ping = _qh->ping_state;
+ hc->ep_is_in = (usb_pipein(urb->pipe) != 0);
+ hc->data_pid_start = _qh->data_toggle;
+ hc->multi_count = 1;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff =
+ (uint8_t *)(u32)urb->transfer_dma + urb->actual_length;
+ } else {
+ hc->xfer_buff =
+ (uint8_t *) urb->transfer_buffer + urb->actual_length;
+ }
+ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length;
+ hc->xfer_count = 0;
+
+ /*
+ * Set the split attributes
+ */
+ hc->do_split = 0;
+ if (_qh->do_split) {
+ hc->do_split = 1;
+ hc->xact_pos = qtd->isoc_split_pos;
+ hc->complete_split = qtd->complete_split;
+ hc->hub_addr = urb->dev->tt->hub->devnum;
+ hc->port_addr = urb->dev->ttport;
+ }
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL;
+ switch (qtd->control_phase) {
+ case DWC_OTG_CONTROL_SETUP:
+ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n");
+ hc->do_ping = 0;
+ hc->ep_is_in = 0;
+ hc->data_pid_start = DWC_OTG_HC_PID_SETUP;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)urb->setup_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *) urb->setup_packet;
+ }
+ hc->xfer_len = 8;
+ break;
+ case DWC_OTG_CONTROL_DATA:
+ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n");
+ hc->data_pid_start = qtd->data_toggle;
+ break;
+ case DWC_OTG_CONTROL_STATUS:
+
+ /*
+ * Direction is opposite of data direction or IN if no
+ * data.
+ */
+ DWC_DEBUGPL(DBG_HCDV,
+ " Control status transaction\n");
+ if (urb->transfer_buffer_length == 0) {
+ hc->ep_is_in = 1;
+ } else {
+ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN);
+ }
+ if (hc->ep_is_in) {
+ hc->do_ping = 0;
+ }
+ hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
+ hc->xfer_len = 0;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)_hcd->status_buf_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *) _hcd->status_buf;
+ }
+ break;
+ }
+ break;
+ case PIPE_BULK:
+ hc->ep_type = DWC_OTG_EP_TYPE_BULK;
+ break;
+ case PIPE_INTERRUPT:
+ hc->ep_type = DWC_OTG_EP_TYPE_INTR;
+ break;
+ case PIPE_ISOCHRONOUS:
+ {
+ struct usb_iso_packet_descriptor *frame_desc;
+ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index];
+ hc->ep_type = DWC_OTG_EP_TYPE_ISOC;
+ if (_hcd->core_if->dma_enable) {
+ hc->xfer_buff = (uint8_t *)(u32)urb->transfer_dma;
+ } else {
+ hc->xfer_buff = (uint8_t *) urb->transfer_buffer;
+ }
+ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset;
+ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset;
+ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) {
+ if (hc->xfer_len <= 188) {
+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ } else {
+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN;
+ }
+ }
+ }
+ break;
+ }
+
+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR
+ || hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+ /*
+ * This value may be modified when the transfer is started to
+ * reflect the actual transfer length.
+ */
+ hc->multi_count = dwc_hb_mult(_qh->maxp);
+ }
+ dwc_otg_hc_init(_hcd->core_if, hc);
+ hc->qh = _qh;
+}
+
+
+/**
+ * This function selects transactions from the HCD transfer schedule and
+ * assigns them to available host channels. It is called from HCD interrupt
+ * handler functions.
+ *
+ * @param _hcd The HCD state structure.
+ *
+ * @return The types of new transactions that were assigned to host channels.
+ */
+dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd)
+{
+ struct list_head *qh_ptr;
+ dwc_otg_qh_t * qh;
+ int num_channels;
+ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE;
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n");
+#endif /* */
+
+ /* Process entries in the periodic ready list. */
+ qh_ptr = _hcd->periodic_sched_ready.next;
+ while (qh_ptr != &_hcd->periodic_sched_ready
+ && !list_empty(&_hcd->free_hc_list)) {
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ assign_and_init_hc(_hcd, qh);
+ /*
+ * Move the QH from the periodic ready schedule to the
+ * periodic assigned schedule.
+ */
+ qh_ptr = qh_ptr->next;
+ list_move(&qh->qh_list_entry, &_hcd->periodic_sched_assigned);
+ ret_val = DWC_OTG_TRANSACTION_PERIODIC;
+ }
+ /*
+ * Process entries in the deferred portion of the non-periodic list.
+ * A NAK put them here and, at the right time, they need to be
+ * placed on the sched_inactive list.
+ */
+ qh_ptr = _hcd->non_periodic_sched_deferred.next;
+ while (qh_ptr != &_hcd->non_periodic_sched_deferred) {
+ uint16_t frame_number =
+ dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
+ unsigned long flags;
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ qh_ptr = qh_ptr->next;
+
+ if (dwc_frame_num_le(qh->sched_frame, frame_number)) {
+ // NAK did this
+ /*
+ * Move the QH from the non periodic deferred schedule to
+ * the non periodic inactive schedule.
+ */
+ local_irq_save(flags);
+ list_move(&qh->qh_list_entry,
+ &_hcd->non_periodic_sched_inactive);
+ local_irq_restore(flags);
+ }
+ }
+
+ /*
+ * Process entries in the inactive portion of the non-periodic
+ * schedule. Some free host channels may not be used if they are
+ * reserved for periodic transfers.
+ */
+ qh_ptr = _hcd->non_periodic_sched_inactive.next;
+ num_channels = _hcd->core_if->core_params->host_channels;
+ while (qh_ptr != &_hcd->non_periodic_sched_inactive &&
+ (_hcd->non_periodic_channels <
+ num_channels - _hcd->periodic_channels)
+ && !list_empty(&_hcd->free_hc_list)) {
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+ assign_and_init_hc(_hcd, qh);
+
+ /*
+ * Move the QH from the non-periodic inactive schedule to the
+ * non-periodic active schedule.
+ */
+ qh_ptr = qh_ptr->next;
+ list_move(&qh->qh_list_entry,
+ &_hcd->non_periodic_sched_active);
+ if (ret_val == DWC_OTG_TRANSACTION_NONE) {
+ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC;
+ } else {
+ ret_val = DWC_OTG_TRANSACTION_ALL;
+ }
+ _hcd->non_periodic_channels++;
+ }
+ return ret_val;
+}
+
+/**
+ * Attempts to queue a single transaction request for a host channel
+ * associated with either a periodic or non-periodic transfer. This function
+ * assumes that there is space available in the appropriate request queue. For
+ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space
+ * is available in the appropriate Tx FIFO.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _hc Host channel descriptor associated with either a periodic or
+ * non-periodic transfer.
+ * @param _fifo_dwords_avail Number of DWORDs available in the periodic Tx
+ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic
+ * transfers.
+ *
+ * @return 1 if a request is queued and more requests may be needed to
+ * complete the transfer, 0 if no more requests are required for this
+ * transfer, -1 if there is insufficient space in the Tx FIFO.
+ */
+static int queue_transaction(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, uint16_t _fifo_dwords_avail)
+{
+ int retval;
+ if (_hcd->core_if->dma_enable) {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ _hc->qh->ping_state = 0;
+ }
+ retval = 0;
+ } else if (_hc->halt_pending) {
+ /* Don't queue a request if the channel has been halted. */
+ retval = 0;
+ } else if (_hc->halt_on_queue) {
+ dwc_otg_hc_halt(_hcd->core_if, _hc, _hc->halt_status);
+ retval = 0;
+ } else if (_hc->do_ping) {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ }
+ retval = 0;
+ } else if (!_hc->ep_is_in || _hc->data_pid_start == DWC_OTG_HC_PID_SETUP) {
+ if ((_fifo_dwords_avail * 4) >= _hc->max_packet) {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ retval = 1;
+ } else {
+ retval = dwc_otg_hc_continue_transfer(_hcd->core_if, _hc);
+ }
+ } else {
+ retval = -1;
+ }
+ } else {
+ if (!_hc->xfer_started) {
+ dwc_otg_hc_start_transfer(_hcd->core_if, _hc);
+ retval = 1;
+ } else {
+ retval = dwc_otg_hc_continue_transfer(_hcd->core_if, _hc);
+ }
+ }
+ return retval;
+}
+
+
+/**
+ * Processes active non-periodic channels and queues transactions for these
+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx
+ * FIFO Empty interrupt is enabled if there are more transactions to queue as
+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
+ * FIFO Empty interrupt is disabled.
+ */
+static void process_non_periodic_channels(dwc_otg_hcd_t * _hcd)
+{
+ gnptxsts_data_t tx_status;
+ struct list_head *orig_qh_ptr;
+ dwc_otg_qh_t * qh;
+ int status;
+ int no_queue_space = 0;
+ int no_fifo_space = 0;
+ int more_to_do = 0;
+ dwc_otg_core_global_regs_t * global_regs =
+ _hcd->core_if->core_global_regs;
+ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n");
+
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n",
+ tx_status.b.nptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n",
+ tx_status.b.nptxfspcavail);
+#endif /* */
+ /*
+ * Keep track of the starting point. Skip over the start-of-list
+ * entry.
+ */
+ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_sched_active) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+ orig_qh_ptr = _hcd->non_periodic_qh_ptr;
+
+ /*
+ * Process once through the active list or until no more space is
+ * available in the request queue or the Tx FIFO.
+ */
+ do {
+
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ if (!_hcd->core_if->dma_enable
+ && tx_status.b.nptxqspcavail == 0) {
+ no_queue_space = 1;
+ break;
+ }
+ qh =
+ list_entry(_hcd->non_periodic_qh_ptr, dwc_otg_qh_t,
+ qh_list_entry);
+ status =
+ queue_transaction(_hcd, qh->channel,
+ tx_status.b.nptxfspcavail);
+
+ if (status > 0) {
+ more_to_do = 1;
+ } else if (status < 0) {
+ no_fifo_space = 1;
+ break;
+ }
+#ifdef OTG_PLB_DMA_TASKLET
+ if (atomic_read(&release_later)) {
+ break;
+ }
+#endif
+
+ /* Advance to next QH, skipping start-of-list entry. */
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ if (_hcd->non_periodic_qh_ptr == &_hcd->non_periodic_sched_active) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+ } while (_hcd->non_periodic_qh_ptr != orig_qh_ptr);
+ if (!_hcd->core_if->dma_enable) {
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ intr_mask.b.nptxfempty = 1;
+
+#ifndef OTG_PLB_DMA_TASKLET
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n",
+ tx_status.b.nptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n",
+ tx_status.b.nptxfspcavail);
+#endif /* */
+#endif
+
+ if (more_to_do || no_queue_space || no_fifo_space) {
+
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the non-periodic
+ * Tx FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ dwc_modify_reg32(&global_regs->gintmsk, 0,intr_mask.d32);
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ dwc_modify_reg32(&global_regs->gintmsk,intr_mask.d32, 0);
+ }
+ }
+}
+
+/**
+ * Processes periodic channels for the next frame and queues transactions for
+ * these channels to the DWC_otg controller. After queueing transactions, the
+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
+ * to queue as Periodic Tx FIFO or request queue space becomes available.
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
+ */
+static void process_periodic_channels(dwc_otg_hcd_t * _hcd)
+{
+ hptxsts_data_t tx_status;
+ struct list_head *qh_ptr;
+ dwc_otg_qh_t * qh;
+ int status;
+ int no_queue_space = 0;
+ int no_fifo_space = 0;
+ dwc_otg_host_global_regs_t * host_regs;
+ host_regs = _hcd->core_if->host_if->host_global_regs;
+ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n");
+
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n",
+ tx_status.b.ptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n",
+ tx_status.b.ptxfspcavail);
+
+#endif /* */
+ /*
+ * Walk every QH assigned for this frame, stopping early when the
+ * periodic request queue or the periodic Tx FIFO runs out of space.
+ */
+ qh_ptr = _hcd->periodic_sched_assigned.next;
+ while (qh_ptr != &_hcd->periodic_sched_assigned) {
+ /* Re-read HPTXSTS each pass: space is consumed as requests post. */
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ if (tx_status.b.ptxqspcavail == 0) {
+ no_queue_space = 1;
+ break;
+ }
+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry);
+
+ /*
+ * Set a flag if we're queuing high-bandwidth in slave mode.
+ * The flag prevents any halts to get into the request queue in
+ * the middle of multiple high-bandwidth packets getting queued.
+ */
+ if ((!_hcd->core_if->dma_enable) &&
+ (qh->channel->multi_count > 1)) {
+ _hcd->core_if->queuing_high_bandwidth = 1;
+ }
+ /* Negative status: the transfer did not fit in the remaining
+ * periodic Tx FIFO space. */
+ status = queue_transaction(_hcd, qh->channel,tx_status.b.ptxfspcavail);
+ if (status < 0) {
+ no_fifo_space = 1;
+ break;
+ }
+
+ /*
+ * In Slave mode, stay on the current transfer until there is
+ * nothing more to do or the high-bandwidth request count is
+ * reached. In DMA mode, only need to queue one request. The
+ * controller automatically handles multiple packets for
+ * high-bandwidth transfers.
+ *
+ * NOTE(review): in slave mode the QH is advanced when
+ * queue_transaction() returns 0 OR the request count hits
+ * multi_count — confirm against queue_transaction()'s return
+ * contract in this file.
+ */
+ if (_hcd->core_if->dma_enable ||
+ (status == 0 || qh->channel->requests == qh->channel->multi_count)) {
+ qh_ptr = qh_ptr->next;
+
+ /*
+ * Move the QH from the periodic assigned schedule to
+ * the periodic queued schedule.
+ */
+ list_move(&qh->qh_list_entry,
+ &_hcd->periodic_sched_queued);
+
+ /* done queuing high bandwidth */
+ _hcd->core_if->queuing_high_bandwidth = 0;
+ }
+ }
+ /*
+ * Slave mode only: arm or disarm the periodic Tx FIFO empty interrupt
+ * depending on whether more transactions remain to be queued. In DMA
+ * mode the controller fetches data itself, so no interrupt is needed.
+ */
+ if (!_hcd->core_if->dma_enable) {
+ dwc_otg_core_global_regs_t * global_regs;
+ gintmsk_data_t intr_mask = {.d32 = 0};
+ global_regs = _hcd->core_if->core_global_regs;
+ intr_mask.b.ptxfempty = 1;
+
+#ifdef DEBUG
+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts);
+ DWC_DEBUGPL(DBG_HCDV," P Tx Req Queue Space Avail (after queue): %d\n",
+ tx_status.b.ptxqspcavail);
+ DWC_DEBUGPL(DBG_HCDV," P Tx FIFO Space Avail (after queue): %d\n",
+ tx_status.b.ptxfspcavail);
+
+#endif /* */
+ if (!(list_empty(&_hcd->periodic_sched_assigned))
+ || no_queue_space || no_fifo_space) {
+
+ /*
+ * May need to queue more transactions as the request
+ * queue or Tx FIFO empties. Enable the periodic Tx
+ * FIFO empty interrupt. (Always use the half-empty
+ * level to ensure that new requests are loaded as
+ * soon as possible.)
+ */
+ dwc_modify_reg32(&global_regs->gintmsk, 0,intr_mask.d32);
+ } else {
+ /*
+ * Disable the Tx FIFO empty interrupt since there are
+ * no more transactions that need to be queued right
+ * now. This function is called from interrupt
+ * handlers to queue more transactions as transfer
+ * states change.
+ */
+ dwc_modify_reg32(&global_regs->gintmsk,intr_mask.d32, 0);
+ }
+ }
+}
+
+
+
+/**
+ * This function processes the currently active host channels and queues
+ * transactions for these channels to the DWC_otg controller. It is called
+ * from HCD interrupt handler functions.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _tr_type The type(s) of transactions to queue (non-periodic,
+ * periodic, or both).
+ */
+void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t * _hcd,
+ dwc_otg_transaction_type_e _tr_type)
+{
+ int do_periodic = (_tr_type == DWC_OTG_TRANSACTION_PERIODIC
+ || _tr_type == DWC_OTG_TRANSACTION_ALL);
+ int do_non_periodic = (_tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC
+ || _tr_type == DWC_OTG_TRANSACTION_ALL);
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n");
+
+#endif /* */
+ /* Periodic side: only worth running when QHs have been assigned. */
+ if (do_periodic && !list_empty(&_hcd->periodic_sched_assigned)) {
+ process_periodic_channels(_hcd);
+ }
+
+ /* Non-periodic side. */
+ if (do_non_periodic) {
+ if (list_empty(&_hcd->non_periodic_sched_active)) {
+ /*
+ * Nothing active: make sure the NP Tx FIFO empty
+ * interrupt stays masked so it does not fire with
+ * no work to do.
+ */
+ gintmsk_data_t np_mask = {.d32 = 0};
+ np_mask.b.nptxfempty = 1;
+ dwc_modify_reg32(&_hcd->core_if->core_global_regs->gintmsk, np_mask.d32, 0);
+ } else {
+ process_non_periodic_channels(_hcd);
+ }
+ }
+}
+
+/**
+ * Sets the final status of an URB and returns it to the device driver. Any
+ * required cleanup of the URB is performed.
+ */
+void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t * _hcd, struct urb *_urb,
+ int _status)
+__releases(_hcd->lock)
+__acquires(_hcd->lock)
+{
+
+#ifdef DEBUG
+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) {
+ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n",
+ __func__, _urb, usb_pipedevice(_urb->pipe),
+ usb_pipeendpoint(_urb->pipe),
+ usb_pipein(_urb->pipe) ? "IN" : "OUT", _status);
+ if (usb_pipetype(_urb->pipe) == PIPE_ISOCHRONOUS) {
+ int i;
+ for (i = 0; i < _urb->number_of_packets; i++) {
+ DWC_PRINT(" ISO Desc %d status: %d\n", i,
+ _urb->iso_frame_desc[i].status);
+ }
+ }
+ }
+
+#endif /* */
+ /* Break the URB's link to the HCD before handing it back. */
+ _urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(dwc_otg_hcd_to_hcd(_hcd), _urb);
+ /*
+ * The HCD lock is dropped around the giveback (see the
+ * __releases/__acquires annotations above). NOTE(review): presumably
+ * because the URB's completion handler may re-enter the HCD, e.g. to
+ * resubmit — confirm against usb_hcd_giveback_urb() locking rules.
+ */
+ spin_unlock(&_hcd->lock);
+ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(_hcd), _urb, _status);
+ spin_lock(&_hcd->lock);
+}
+
+
+/*
+ * Returns the Queue Head for an URB.
+ */
+dwc_otg_qh_t * dwc_urb_to_qh(struct urb *_urb)
+{
+ /* The QH is cached on the endpoint's HCD-private pointer. */
+ return (dwc_otg_qh_t *)(dwc_urb_to_endpoint(_urb)->hcpriv);
+}
+
+
+#ifdef DEBUG
+/**
+ * Dumps the eight bytes of a control-transfer SETUP packet in
+ * human-readable form. Only emits output when DBG_HCD tracing is on.
+ *
+ * @param setup Pointer to the 8-byte SETUP data, in USB wire order
+ * (multi-byte fields little-endian).
+ */
+void dwc_print_setup_data(uint8_t * setup)
+{
+ int i;
+ if (CHK_DEBUG_LEVEL(DBG_HCD)) {
+ DWC_PRINT("Setup Data = MSB ");
+ for (i = 7; i >= 0; i--)
+ DWC_PRINT("%02x ", setup[i]);
+ DWC_PRINT("\n");
+ DWC_PRINT(" bmRequestType Transfer = %s\n",
+ (setup[0] & 0x80) ? "Device-to-Host" :
+ "Host-to-Device");
+ DWC_PRINT(" bmRequestType Type = ");
+ switch ((setup[0] & 0x60) >> 5) {
+ case 0:
+ DWC_PRINT("Standard\n");
+ break;
+ case 1:
+ DWC_PRINT("Class\n");
+ break;
+ case 2:
+ DWC_PRINT("Vendor\n");
+ break;
+ case 3:
+ DWC_PRINT("Reserved\n");
+ break;
+ }
+ DWC_PRINT(" bmRequestType Recipient = ");
+ switch (setup[0] & 0x1f) {
+ case 0:
+ DWC_PRINT("Device\n");
+ break;
+ case 1:
+ DWC_PRINT("Interface\n");
+ break;
+ case 2:
+ DWC_PRINT("Endpoint\n");
+ break;
+ case 3:
+ DWC_PRINT("Other\n");
+ break;
+ default:
+ DWC_PRINT("Reserved\n");
+ break;
+ }
+ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]);
+ /*
+ * wValue/wIndex/wLength are little-endian on the wire. Assemble
+ * them byte-by-byte instead of dereferencing a cast uint16_t
+ * pointer, which printed byte-swapped values on big-endian CPUs
+ * (this driver targets PowerPC) and is a potentially unaligned
+ * access.
+ */
+ DWC_PRINT(" wValue = 0x%0x\n",
+ (uint16_t)(setup[2] | ((uint16_t)setup[3] << 8)));
+ DWC_PRINT(" wIndex = 0x%0x\n",
+ (uint16_t)(setup[4] | ((uint16_t)setup[5] << 8)));
+ DWC_PRINT(" wLength = 0x%0x\n\n",
+ (uint16_t)(setup[6] | ((uint16_t)setup[7] << 8)));
+ }
+}
+
+
+#endif /* */
+/**
+ * Dumps averaged "frame remaining at SOF" statistics.
+ *
+ * The statistics dump depended on CONFIG_DWC_DEBUG-only counters and was
+ * shipped entirely commented out; that dead block has been removed. The
+ * (now empty) function is kept because it is part of the HCD debug
+ * interface and is still called from dwc_otg_hcd_dump_state().
+ */
+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t * _hcd)
+{
+}
+
+/**
+ * Dumps the HCD state: per-channel configuration and transfer state, URB
+ * details for in-progress transfers, schedule counters, and the Tx
+ * queue/FIFO status registers. Compiles to a no-op unless DEBUG is set.
+ */
+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t * _hcd)
+{
+
+#ifdef DEBUG
+ int num_channels;
+ int i;
+ gnptxsts_data_t np_tx_status;
+ hptxsts_data_t p_tx_status;
+ num_channels = _hcd->core_if->core_params->host_channels;
+ DWC_PRINT("\n");
+ DWC_PRINT("************************************************************\n");
+ DWC_PRINT("HCD State:\n");
+ DWC_PRINT(" Num channels: %d\n", num_channels);
+ for (i = 0; i < num_channels; i++) {
+ dwc_hc_t * hc = _hcd->hc_ptr_array[i];
+ DWC_PRINT(" Channel %d:\n", i);
+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+ hc->dev_addr, hc->ep_num, hc->ep_is_in);
+ DWC_PRINT(" speed: %d\n", hc->speed);
+ DWC_PRINT(" ep_type: %d\n", hc->ep_type);
+ DWC_PRINT(" max_packet: %d\n", hc->max_packet);
+ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start);
+ DWC_PRINT(" multi_count: %d\n", hc->multi_count);
+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started);
+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff);
+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len);
+ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count);
+ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue);
+ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending);
+ DWC_PRINT(" halt_status: %d\n", hc->halt_status);
+ DWC_PRINT(" do_split: %d\n", hc->do_split);
+ DWC_PRINT(" complete_split: %d\n", hc->complete_split);
+ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr);
+ DWC_PRINT(" port_addr: %d\n", hc->port_addr);
+ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos);
+ DWC_PRINT(" requests: %d\n", hc->requests);
+ DWC_PRINT(" qh: %p\n", hc->qh);
+ /* Active transfers: also dump the live channel registers. */
+ if (hc->xfer_started) {
+ hfnum_data_t hfnum;
+ hcchar_data_t hcchar;
+ hctsiz_data_t hctsiz;
+ hcint_data_t hcint;
+ hcintmsk_data_t hcintmsk;
+ hfnum.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum);
+ hcchar.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcchar);
+ hctsiz.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hctsiz);
+ hcint.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcint);
+ hcintmsk.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->hc_regs[i]->hcintmsk);
+ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32);
+ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32);
+ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32);
+ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32);
+ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32);
+ }
+ /* If a QTD is being processed, dump its URB as well. */
+ if (hc->xfer_started && (hc->qh != NULL)
+ && (hc->qh->qtd_in_process != NULL)) {
+ dwc_otg_qtd_t * qtd;
+ struct urb *urb;
+ qtd = hc->qh->qtd_in_process;
+ urb = qtd->urb;
+ DWC_PRINT(" URB Info:\n");
+ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb);
+ if (urb != NULL) {
+ DWC_PRINT(" Dev: %d, EP: %d %s\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "IN" : "OUT");
+ DWC_PRINT(" Max packet size: %d\n",
+ usb_maxpacket(urb->dev, urb->pipe,
+ usb_pipeout(urb->pipe)));
+ DWC_PRINT(" transfer_buffer: %p\n",
+ urb->transfer_buffer);
+ DWC_PRINT(" transfer_dma: %p\n",
+ (void *)urb->transfer_dma);
+ DWC_PRINT(" transfer_buffer_length: %d\n",
+ urb->transfer_buffer_length);
+ DWC_PRINT(" actual_length: %d\n",
+ urb->actual_length);
+ }
+ }
+ }
+ DWC_PRINT(" non_periodic_channels: %d\n",
+ _hcd->non_periodic_channels);
+ DWC_PRINT(" periodic_channels: %d\n", _hcd->periodic_channels);
+ DWC_PRINT(" periodic_usecs: %d\n", _hcd->periodic_usecs);
+ np_tx_status.d32 =
+ dwc_read_reg32(&_hcd->core_if->core_global_regs->gnptxsts);
+ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n",
+ np_tx_status.b.nptxqspcavail);
+ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n",
+ np_tx_status.b.nptxfspcavail);
+ p_tx_status.d32 =
+ dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hptxsts);
+ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n",
+ p_tx_status.b.ptxqspcavail);
+ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail);
+ dwc_otg_hcd_dump_frrem(_hcd);
+ dwc_otg_dump_global_registers(_hcd->core_if);
+ dwc_otg_dump_host_registers(_hcd->core_if);
+ DWC_PRINT("************************************************************\n");
+ DWC_PRINT("\n");
+
+#endif /* */
+}
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h
new file mode 100644
index 00000000000..ae2b8c3e179
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd.h
@@ -0,0 +1,660 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd.h $
+ * $Revision: #6 $
+ * $Date: 2006/12/05 $
+ * $Change: 762293 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef CONFIG_DWC_DEVICE_ONLY
+#if !defined(__DWC_HCD_H__)
+#define __DWC_HCD_H__
+
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <../drivers/usb/core/hcd.h>
+
+struct lm_device;
+struct dwc_otg_device;
+
+#include "dwc_otg_cil.h"
+
+/**
+ * @file
+ *
+ * This file contains the structures, constants, and interfaces for
+ * the Host Contoller Driver (HCD).
+ *
+ * The Host Controller Driver (HCD) is responsible for translating requests
+ * from the USB Driver into the appropriate actions on the DWC_otg controller.
+ * It isolates the USBD from the specifics of the controller by providing an
+ * API to the USBD.
+ */
+
+/**
+ * Phases for control transfers. A control transfer always has a SETUP
+ * stage, an optional DATA stage, and a STATUS (handshake) stage.
+ */
+typedef enum dwc_otg_control_phase {
+ DWC_OTG_CONTROL_SETUP, /**< SETUP stage. */
+ DWC_OTG_CONTROL_DATA, /**< Optional DATA stage. */
+ DWC_OTG_CONTROL_STATUS /**< STATUS (handshake) stage. */
+} dwc_otg_control_phase_e;
+
+/** Transaction types, used to select which schedules
+ * dwc_otg_hcd_queue_transactions() should process. */
+typedef enum dwc_otg_transaction_type {
+ DWC_OTG_TRANSACTION_NONE, /**< Nothing to queue. */
+ DWC_OTG_TRANSACTION_PERIODIC, /**< Periodic schedule only. */
+ DWC_OTG_TRANSACTION_NON_PERIODIC, /**< Non-periodic schedule only. */
+ DWC_OTG_TRANSACTION_ALL /**< Both schedules. */
+} dwc_otg_transaction_type_e;
+
+/**
+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
+ * interrupt, or isochronous transfer. A single QTD is created for each URB
+ * (of one of these types) submitted to the HCD. The transfer associated with
+ * a QTD may require one or multiple transactions.
+ *
+ * A QTD is linked to a Queue Head, which is entered in either the
+ * non-periodic or periodic schedule for execution. When a QTD is chosen for
+ * execution, some or all of its transactions may be executed. After
+ * execution, the state of the QTD is updated. The QTD may be retired if all
+ * its transactions are complete or if an error occurred. Otherwise, it
+ * remains in the schedule so more transactions can be executed later.
+ */
+
+struct dwc_otg_qh;
+
+typedef struct dwc_otg_qtd {
+ /**
+ * Determines the PID of the next data packet for the data phase of
+ * control transfers. Ignored for other transfer types.<br>
+ * One of the following values:
+ * - DWC_OTG_HC_PID_DATA0
+ * - DWC_OTG_HC_PID_DATA1
+ */
+ uint8_t data_toggle;
+
+ /** Current phase for control transfers (Setup, Data, or Status). */
+ dwc_otg_control_phase_e control_phase;
+
+ /** Keep track of the current split type
+ * for FS/LS endpoints on a HS Hub */
+ uint8_t complete_split;
+
+ /** How many bytes transferred during SSPLIT OUT */
+ uint32_t ssplit_out_xfer_count;
+
+ /**
+ * Holds the number of bus errors that have occurred for a transaction
+ * within this transfer.
+ */
+ uint8_t error_count;
+
+ /**
+ * Index of the next frame descriptor for an isochronous transfer. A
+ * frame descriptor describes the buffer position and length of the
+ * data to be transferred in the next scheduled (micro)frame of an
+ * isochronous transfer. It also holds status for that transaction.
+ * The frame index starts at 0.
+ */
+ int isoc_frame_index;
+
+ /** Position of the ISOC split on full/low speed */
+ uint8_t isoc_split_pos;
+
+ /** Position of the ISOC split in the buffer for the current frame */
+ uint16_t isoc_split_offset;
+
+ /** URB for this transfer */
+ struct urb *urb;
+
+ /** Entry in the parent QH's qtd_list. */
+ struct list_head qtd_list_entry;
+
+ /* Back-pointer to the QH this QTD is linked under. */
+ struct dwc_otg_qh *qtd_qh_ptr;
+} dwc_otg_qtd_t;
+
+/**
+ * A Queue Head (QH) holds the static characteristics of an endpoint and
+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
+ * be entered in either the non-periodic or periodic schedule.
+ */
+typedef struct dwc_otg_qh {
+ /**
+ * Endpoint type.
+ * One of the following values:
+ * - USB_ENDPOINT_XFER_CONTROL
+ * - USB_ENDPOINT_XFER_ISOC
+ * - USB_ENDPOINT_XFER_BULK
+ * - USB_ENDPOINT_XFER_INT
+ */
+ uint8_t ep_type;
+ /** Endpoint direction flag (nonzero presumably means IN —
+ * confirm at call sites). */
+ uint8_t ep_is_in;
+
+ /** wMaxPacketSize Field of Endpoint Descriptor. */
+ uint16_t maxp;
+
+ /**
+ * Determines the PID of the next data packet for non-control
+ * transfers. Ignored for control transfers.<br>
+ * One of the following values:
+ * - DWC_OTG_HC_PID_DATA0
+ * - DWC_OTG_HC_PID_DATA1
+ */
+ uint8_t data_toggle;
+
+ /** Ping state if 1. */
+ uint8_t ping_state;
+
+ /**
+ * List of QTDs for this QH.
+ */
+ struct list_head qtd_list;
+
+ /** Host channel currently processing transfers for this QH. */
+ dwc_hc_t *channel;
+
+ /** QTD currently assigned to a host channel for this QH. */
+ dwc_otg_qtd_t *qtd_in_process;
+
+ /** Full/low speed endpoint on high-speed hub requires split. */
+ uint8_t do_split;
+
+ /** @name Periodic schedule information */
+ /** @{ */
+
+ /** Bandwidth in microseconds per (micro)frame. */
+ uint8_t usecs;
+
+ /** Interval between transfers in (micro)frames. */
+ uint16_t interval;
+
+ /**
+ * (micro)frame to initialize a periodic transfer. The transfer
+ * executes in the following (micro)frame.
+ */
+ uint16_t sched_frame;
+
+ /** (micro)frame at which last start split was initialized. */
+ uint16_t start_split_frame;
+
+ /** @} */
+
+ /** Entry for QH in either the periodic or non-periodic schedule. */
+ struct list_head qh_list_entry;
+} dwc_otg_qh_t;
+
+/**
+ * This structure holds the state of the HCD, including the non-periodic and
+ * periodic schedules.
+ */
+typedef struct dwc_otg_hcd {
+
+ /** Protects HCD state. Dropped around usb_hcd_giveback_urb() in
+ * dwc_otg_hcd_complete_urb(). */
+ spinlock_t lock;
+
+ /** DWC OTG Core Interface Layer */
+ dwc_otg_core_if_t *core_if;
+
+ /** Internal DWC HCD Flags */
+ volatile union dwc_otg_hcd_internal_flags {
+ uint32_t d32;
+ struct {
+ unsigned port_connect_status_change : 1;
+ unsigned port_connect_status : 1;
+ unsigned port_reset_change : 1;
+ unsigned port_enable_change : 1;
+ unsigned port_suspend_change : 1;
+ unsigned port_over_current_change : 1;
+ unsigned reserved : 27;
+ } b;
+ } flags;
+
+ /**
+ * Inactive items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are not
+ * currently assigned to a host channel.
+ */
+ struct list_head non_periodic_sched_inactive;
+
+ /**
+ * Deferred items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are not
+ * currently assigned to a host channel.
+ * When we get an NAK, the QH goes here.
+ */
+ struct list_head non_periodic_sched_deferred;
+
+ /**
+ * Active items in the non-periodic schedule. This is a list of
+ * Queue Heads. Transfers associated with these Queue Heads are
+ * currently assigned to a host channel.
+ */
+ struct list_head non_periodic_sched_active;
+
+ /**
+ * Pointer to the next Queue Head to process in the active
+ * non-periodic schedule.
+ */
+ struct list_head *non_periodic_qh_ptr;
+
+ /**
+ * Inactive items in the periodic schedule. This is a list of QHs for
+ * periodic transfers that are _not_ scheduled for the next frame.
+ * Each QH in the list has an interval counter that determines when it
+ * needs to be scheduled for execution. This scheduling mechanism
+ * allows only a simple calculation for periodic bandwidth used (i.e.
+ * must assume that all periodic transfers may need to execute in the
+ * same frame). However, it greatly simplifies scheduling and should
+ * be sufficient for the vast majority of OTG hosts, which need to
+ * connect to a small number of peripherals at one time.
+ *
+ * Items move from this list to periodic_sched_ready when the QH
+ * interval counter is 0 at SOF.
+ */
+ struct list_head periodic_sched_inactive;
+
+ /**
+ * List of periodic QHs that are ready for execution in the next
+ * frame, but have not yet been assigned to host channels.
+ *
+ * Items move from this list to periodic_sched_assigned as host
+ * channels become available during the current frame.
+ */
+ struct list_head periodic_sched_ready;
+
+ /**
+ * List of periodic QHs to be executed in the next frame that are
+ * assigned to host channels.
+ *
+ * Items move from this list to periodic_sched_queued as the
+ * transactions for the QH are queued to the DWC_otg controller.
+ */
+ struct list_head periodic_sched_assigned;
+
+ /**
+ * List of periodic QHs that have been queued for execution.
+ *
+ * Items move from this list to either periodic_sched_inactive or
+ * periodic_sched_ready when the channel associated with the transfer
+ * is released. If the interval for the QH is 1, the item moves to
+ * periodic_sched_ready because it must be rescheduled for the next
+ * frame. Otherwise, the item moves to periodic_sched_inactive.
+ */
+ struct list_head periodic_sched_queued;
+
+ /**
+ * Total bandwidth claimed so far for periodic transfers. This value
+ * is in microseconds per (micro)frame. The assumption is that all
+ * periodic transfers may occur in the same (micro)frame.
+ */
+ uint16_t periodic_usecs;
+
+ /**
+ * Frame number read from the core at SOF. The value ranges from 0 to
+ * DWC_HFNUM_MAX_FRNUM.
+ */
+ uint16_t frame_number;
+
+ /**
+ * Free host channels in the controller. This is a list of
+ * dwc_hc_t items.
+ */
+ struct list_head free_hc_list;
+
+ /**
+ * Number of host channels assigned to periodic transfers. Currently
+ * assuming that there is a dedicated host channel for each periodic
+ * transaction and at least one host channel available for
+ * non-periodic transactions.
+ */
+ int periodic_channels;
+
+ /**
+ * Number of host channels assigned to non-periodic transfers.
+ */
+ int non_periodic_channels;
+
+ /**
+ * Array of pointers to the host channel descriptors. Allows accessing
+ * a host channel descriptor given the host channel number. This is
+ * useful in interrupt handlers.
+ */
+ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS];
+
+ /**
+ * Buffer to use for any data received during the status phase of a
+ * control transfer. Normally no data is transferred during the status
+ * phase. This buffer is used as a bit bucket.
+ */
+ uint8_t *status_buf;
+
+ /**
+ * DMA address for status_buf.
+ */
+ dma_addr_t status_buf_dma;
+#define DWC_OTG_HCD_STATUS_BUF_SIZE 64
+
+ /**
+ * Structure to allow starting the HCD in a non-interrupt context
+ * during an OTG role change.
+ */
+ struct work_struct start_work;
+ /* NOTE(review): presumably the usb_hcd consumed by start_work —
+ * confirm against the work handler before renaming. */
+ struct usb_hcd *_p;
+
+ /**
+ * Connection timer. An OTG host must display a message if the device
+ * does not connect. Started when the VBus power is turned on via
+ * sysfs attribute "buspower".
+ */
+ struct timer_list conn_timer;
+
+ /* Tasket to do a reset */
+ struct tasklet_struct *reset_tasklet;
+
+ /* "Frame remaining" statistics, sampled by dwc_sample_frrem(). */
+#ifdef CONFIG_DWC_DEBUG
+ uint32_t frrem_samples;
+ uint64_t frrem_accum;
+
+ uint32_t hfnum_7_samples_a;
+ uint64_t hfnum_7_frrem_accum_a;
+ uint32_t hfnum_0_samples_a;
+ uint64_t hfnum_0_frrem_accum_a;
+ uint32_t hfnum_other_samples_a;
+ uint64_t hfnum_other_frrem_accum_a;
+
+ uint32_t hfnum_7_samples_b;
+ uint64_t hfnum_7_frrem_accum_b;
+ uint32_t hfnum_0_samples_b;
+ uint64_t hfnum_0_frrem_accum_b;
+ uint32_t hfnum_other_samples_b;
+ uint64_t hfnum_other_frrem_accum_b;
+#endif
+
+} dwc_otg_hcd_t;
+
+/** Returns the dwc_otg_hcd stored in a struct usb_hcd's private area. */
+static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd)
+{
+ return (dwc_otg_hcd_t *)hcd->hcd_priv;
+}
+
+/** Returns the struct usb_hcd whose private area holds this dwc_otg_hcd_t. */
+static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd)
+{
+ struct usb_hcd *hcd;
+ hcd = container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv);
+ return hcd;
+}
+
+/** @name HCD Create/Destroy Functions */
+/** @{ */
+extern int __init dwc_otg_hcd_init(struct device *_dev, dwc_otg_device_t * dwc_otg_device);
+extern void dwc_otg_hcd_remove(struct device *_dev);
+/** @} */
+
+/** @name Linux HC Driver API Functions */
+/** @{ */
+
+extern int dwc_otg_hcd_start(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_stop(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd);
+extern void dwc_otg_hcd_free(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags);
+extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd,
+/* struct usb_host_endpoint *ep,*/
+ struct urb *urb, int status);
+extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep);
+extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd);
+extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd,
+ char *buf);
+extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength);
+
+/** @} */
+
+/** @name Transaction Execution Functions */
+/** @{ */
+extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *_hcd);
+extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *_hcd,
+ dwc_otg_transaction_type_e _tr_type);
+extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *_urb,
+ int _status);
+/** @} */
+
+/** @name Interrupt Handler Functions */
+/** @{ */
+extern int32_t dwc_otg_hcd_handle_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_sof_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_port_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_disconnect_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_hc_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_hc_n_intr (dwc_otg_hcd_t *_dwc_otg_hcd, uint32_t _num);
+extern int32_t dwc_otg_hcd_handle_session_req_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr (dwc_otg_hcd_t *_dwc_otg_hcd);
+/** @} */
+
+
+/** @name Schedule Queue Functions */
+/** @{ */
+
+/* Implemented in dwc_otg_hcd_queue.c */
+extern dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *_hcd, struct urb *_urb);
+extern void dwc_otg_hcd_qh_init (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, struct urb *_urb);
+extern void dwc_otg_hcd_qh_free (dwc_otg_qh_t *_qh);
+extern int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh);
+extern void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh);
+extern void dwc_otg_hcd_qh_deactivate (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int sched_csplit);
+extern int dwc_otg_hcd_qh_deferr (dwc_otg_hcd_t *_hcd, dwc_otg_qh_t *_qh, int delay);
+
+/** Removes the QH from its schedule, then frees its memory.
+ * @param[in] _hcd HCD that owns the schedule lists.
+ * @param[in] _qh QH to remove and free. */
+static inline void dwc_otg_hcd_qh_remove_and_free (dwc_otg_hcd_t *_hcd,
+ dwc_otg_qh_t *_qh)
+{
+ dwc_otg_hcd_qh_remove (_hcd, _qh);
+ dwc_otg_hcd_qh_free (_qh);
+}
+
+/** Allocates memory for a QH structure.
+ * @return Pointer to the allocated (uninitialized) QH, or NULL on
+ * allocation failure. */
+static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc (void)
+{
+ /* kmalloc() returns void *, so no cast is needed in C. */
+ return kmalloc (sizeof(dwc_otg_qh_t), GFP_KERNEL);
+}
+
+extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb);
+extern void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb);
+extern int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd);
+
+/** Allocates memory for a QTD structure.
+ * @return Pointer to the allocated (uninitialized) QTD, or NULL on
+ * allocation failure. */
+static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc (void)
+{
+ /* kmalloc() returns void *, so no cast is needed in C. */
+ return kmalloc (sizeof(dwc_otg_qtd_t), GFP_KERNEL);
+}
+
+/** Frees the memory for a QTD structure. The QTD must already have been
+ * removed from its QH's list (see dwc_otg_hcd_qtd_remove()).
+ * @param[in] _qtd QTD to free.*/
+static inline void dwc_otg_hcd_qtd_free (dwc_otg_qtd_t *_qtd)
+{
+ kfree (_qtd);
+}
+
+/** Removes a QTD from its list. Local interrupts are disabled around the
+ * unlink so interrupt context on this CPU cannot observe a half-removed
+ * entry. NOTE(review): this uses IRQ disabling rather than the HCD
+ * spinlock — confirm no other CPU walks the list concurrently.
+ * @param[in] _qtd QTD to remove from list. */
+static inline void dwc_otg_hcd_qtd_remove (dwc_otg_qtd_t *_qtd)
+{
+ unsigned long flags;
+ local_irq_save (flags);
+ list_del (&_qtd->qtd_list_entry);
+ local_irq_restore (flags);
+}
+
+/** Removes the QTD from its list, then frees its memory.
+ * @param[in] _qtd QTD to remove and free. */
+static inline void dwc_otg_hcd_qtd_remove_and_free (dwc_otg_qtd_t *_qtd)
+{
+ dwc_otg_hcd_qtd_remove (_qtd);
+ dwc_otg_hcd_qtd_free (_qtd);
+}
+
+/** @} */
+
+
+/** @name Internal Functions */
+/** @{ */
+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *_urb);
+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *_hcd);
+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *_hcd);
+/** @} */
+
+/** Returns the usb_host_endpoint an URB is addressed to, selecting the
+ * IN or OUT table of the device based on the pipe direction. */
+static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *_urb)
+{
+ struct usb_device *udev = _urb->dev;
+ int epnum = usb_pipeendpoint(_urb->pipe);
+
+ return usb_pipein(_urb->pipe) ? udev->ep_in[epnum]
+ : udev->ep_out[epnum];
+}
+
+/**
+ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is
+ * qualified with its direction (possible 32 endpoints per device).
+ * Arguments are parenthesized so callers may pass arbitrary expressions
+ * without precedence surprises.
+ */
+#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) \
+ (((_bEndpointAddress_) & USB_ENDPOINT_NUMBER_MASK) | \
+ (((_bEndpointAddress_) & USB_DIR_IN) != 0) << 4)
+
+/** Gets the QH that contains the list_head */
+#define dwc_list_to_qh(_list_head_ptr_) (container_of(_list_head_ptr_,dwc_otg_qh_t,qh_list_entry))
+
+/** Gets the QTD that contains the list_head */
+#define dwc_list_to_qtd(_list_head_ptr_) (container_of(_list_head_ptr_,dwc_otg_qtd_t,qtd_list_entry))
+
+/** Check if QH is non-periodic (bulk or control endpoint). */
+#define dwc_qh_is_non_per(_qh_ptr_) (((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK) || \
+ ((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL))
+
+/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */
+#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+
+/** Packet size for any kind of endpoint descriptor */
+#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
+
+/**
+ * Returns true if _frame1 is less than or equal to _frame2, with the
+ * comparison taken modulo DWC_HFNUM_MAX_FRNUM so frame-number rollover
+ * is handled correctly.
+ */
+static inline int dwc_frame_num_le(uint16_t _frame1, uint16_t _frame2)
+{
+ uint16_t delta = (_frame2 - _frame1) & DWC_HFNUM_MAX_FRNUM;
+
+ return delta <= (DWC_HFNUM_MAX_FRNUM >> 1);
+}
+
+/**
+ * Returns true if _frame1 is strictly greater than _frame2, with the
+ * comparison taken modulo DWC_HFNUM_MAX_FRNUM so frame-number rollover
+ * is handled correctly.
+ */
+static inline int dwc_frame_num_gt(uint16_t _frame1, uint16_t _frame2)
+{
+ uint16_t delta = (_frame1 - _frame2) & DWC_HFNUM_MAX_FRNUM;
+
+ return (_frame1 != _frame2) && (delta < (DWC_HFNUM_MAX_FRNUM >> 1));
+}
+
+/**
+ * Adds _inc to _frame modulo DWC_HFNUM_MAX_FRNUM and returns the
+ * wrapped result.
+ */
+static inline uint16_t dwc_frame_num_inc(uint16_t _frame, uint16_t _inc)
+{
+ uint16_t sum = _frame + _inc;
+
+ return sum & DWC_HFNUM_MAX_FRNUM;
+}
+
+/** Converts a (micro)frame counter value to a full-frame number by
+ * masking to the valid range and dropping the 3 microframe bits. */
+static inline uint16_t dwc_full_frame_num (uint16_t _frame)
+{
+ return (uint16_t)((_frame & DWC_HFNUM_MAX_FRNUM) >> 3);
+}
+
+/** Extracts the microframe part (low 3 bits) of a frame counter value. */
+static inline uint16_t dwc_micro_frame_num (uint16_t _frame)
+{
+ return (uint16_t)(_frame & 0x7);
+}
+
+#ifdef CONFIG_DWC_DEBUG
+/**
+ * Macro to sample the remaining PHY clocks left in the current frame. This
+ * may be used during debugging to determine the average time it takes to
+ * execute sections of code. There are two possible sample points, "a" and
+ * "b", so the _letter argument must be one of these values.
+ *
+ * Samples are only taken for interrupt transfers in the middle of a split
+ * (start_split_frame set, complete_split not yet pending), and are bucketed
+ * by microframe number (7, 0, other) into the _hcd accumulators.
+ *
+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
+ * example, "cat /sys/devices/lm0/hcd_frrem".
+ *
+ * Compiles to a no-op unless CONFIG_DWC_DEBUG is defined.
+ */
+#define dwc_sample_frrem(_hcd, _qh, _letter) \
+{ \
+	hfnum_data_t hfnum; \
+	dwc_otg_qtd_t *qtd; \
+	qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \
+	if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \
+		hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \
+		switch (hfnum.b.frnum & 0x7) { \
+		case 7: \
+			_hcd->hfnum_7_samples_##_letter++; \
+			_hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \
+			break; \
+		case 0: \
+			_hcd->hfnum_0_samples_##_letter++; \
+			_hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \
+			break; \
+		default: \
+			_hcd->hfnum_other_samples_##_letter++; \
+			_hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \
+			break; \
+		} \
+	} \
+}
+#else
+#define dwc_sample_frrem(_hcd, _qh, _letter)
+#endif
+#endif
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c
new file mode 100644
index 00000000000..39a995f9948
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_intr.c
@@ -0,0 +1,1746 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_intr.c $
+ * $Revision: #7 $
+ * $Date: 2005/11/02 $
+ * $Change: 553126 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef CONFIG_DWC_DEVICE_ONLY
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+/* Non-zero means the usb09 erratum workaround is in place; when set,
+ * release_channel() skips re-queuing transactions from the release path. */
+const int erratum_usb09_patched = 0;
+/* Enables deferral (delayed retry) of NAKed/NYETed split transactions. */
+const int deferral_on = 1;
+/* Retry delay used for NAKed split transactions; runtime-tunable. */
+int nak_deferral_delay = 20;
+module_param(nak_deferral_delay, int, 0644);
+/* Retry delay used for NYETed split transactions. */
+const int nyet_deferral_delay = 1;
+
+/** @file
+ * This file contains the implementation of the HCD Interrupt handlers.
+ */
+
+/** This function handles interrupts for the HCD. Reads the masked core
+ * interrupt status and dispatches each asserted host-mode interrupt bit to
+ * its dedicated handler. Returns the OR of the handler return values
+ * (non-zero if anything was serviced), or 0 when not in host mode or when
+ * no interrupt is pending. */
+int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ int retval = 0;
+ dwc_otg_core_if_t * core_if = _dwc_otg_hcd->core_if;
+ gintsts_data_t gintsts;
+
+#ifdef CONFIG_DWC_DEBUG
+ dwc_otg_core_global_regs_t * global_regs = core_if->core_global_regs;
+
+#endif /* */
+
+ /* Check if HOST Mode */
+ if (dwc_otg_is_host_mode(core_if)) {
+ /* Snapshot of gintsts masked with gintmsk; handlers below clear
+ * the individual status bits themselves. */
+ gintsts.d32 = dwc_otg_read_core_intr(core_if);
+ if (!gintsts.d32) {
+ return 0;
+ }
+#ifdef CONFIG_DWC_DEBUG
+ /* Don't print debug message in the interrupt handler on SOF */
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+ DWC_DEBUGPL(DBG_HCD, "\n");
+#endif /* */
+
+#ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+ DWC_DEBUGPL(DBG_HCD,"DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
+ gintsts.d32);
+#endif /* */
+ /* Dispatch each asserted status bit to its handler. */
+ if (gintsts.b.sofintr) {
+ retval |= dwc_otg_hcd_handle_sof_intr(_dwc_otg_hcd);
+ }
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ /* With the PLB DMA tasklet, FIFO servicing is suppressed while a
+ * deferred release is pending (release_later). */
+ if (!atomic_read(&release_later) && gintsts.b.rxstsqlvl) {
+#else
+ if (gintsts.b.rxstsqlvl) {
+#endif
+ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(_dwc_otg_hcd);
+ }
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+ if (!atomic_read(&release_later) && gintsts.b.nptxfempty) {
+#else
+ if (gintsts.b.nptxfempty) {
+#endif
+ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(_dwc_otg_hcd);
+ }
+ if (gintsts.b.i2cintr) {
+ /** @todo Implement i2cintr handler. */
+ }
+ if (gintsts.b.portintr) {
+ retval |= dwc_otg_hcd_handle_port_intr(_dwc_otg_hcd);
+ }
+ if (gintsts.b.hcintr) {
+ retval |= dwc_otg_hcd_handle_hc_intr(_dwc_otg_hcd);
+ }
+ if (gintsts.b.ptxfempty) {
+ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(_dwc_otg_hcd);
+ }
+
+#ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+ {
+ DWC_DEBUGPL(DBG_HCD,
+ "DWC OTG HCD Finished Servicing Interrupts\n");
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n",
+ dwc_read_reg32(&global_regs->gintsts));
+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n",
+ dwc_read_reg32(&global_regs->gintmsk));
+ }
+#endif /* */
+
+#ifdef CONFIG_DWC_DEBUG
+#ifndef DEBUG_SOF
+ if (gintsts.d32 != DWC_SOF_INTR_MASK)
+#endif
+ DWC_DEBUGPL(DBG_HCD, "\n");
+#endif /* */
+ }
+ return retval;
+}
+
+
+#ifdef DWC_TRACK_MISSED_SOFS
+#warning Compiling code to track missed SOFs
+#define FRAME_NUM_ARRAY_SIZE 1000
+/**
+ * This function is for debug only.
+ */
+/* Records every SOF whose frame number is not exactly last+1 (mod
+ * DWC_HFNUM_MAX_FRNUM). Once FRAME_NUM_ARRAY_SIZE discontinuities have
+ * been collected, the table is dumped to the log exactly once. */
+static inline void track_missed_sofs(uint16_t _curr_frame_number)
+{
+ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE];
+ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
+ static int frame_num_idx = 0;
+ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM;
+ static int dumped_frame_num_array = 0;
+ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+ /* A gap means at least one SOF interrupt was missed. */
+ if ((((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) !=
+ _curr_frame_number)) {
+ frame_num_array[frame_num_idx] = _curr_frame_number;
+ last_frame_num_array[frame_num_idx++] = last_frame_num;
+ }
+ } else if (!dumped_frame_num_array) {
+ int i;
+ printk(KERN_EMERG USB_DWC "Frame Last Frame\n");
+ printk(KERN_EMERG USB_DWC "----- ----------\n");
+ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n",
+ frame_num_array[i], last_frame_num_array[i]);
+ }
+ dumped_frame_num_array = 1;
+ }
+ last_frame_num = _curr_frame_number;
+}
+#endif /* */
+
+/**
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the DWC_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller for the
+ * next (micro)frame.
+ */
+int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t * _hcd)
+{
+ hfnum_data_t hfnum;
+ struct list_head *qh_entry;
+ dwc_otg_qh_t * qh;
+ dwc_otg_transaction_type_e tr_type;
+ gintsts_data_t gintsts = {.d32 = 0};
+ /* Latch the current (micro)frame number from HFNUM. */
+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum);
+
+#ifdef DEBUG_SOF
+ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n");
+#endif /* */
+ _hcd->frame_number = hfnum.b.frnum;
+#ifdef CONFIG_DWC_DEBUG
+ _hcd->frrem_accum += hfnum.b.frrem;
+ _hcd->frrem_samples++;
+#endif /* */
+
+#ifdef DWC_TRACK_MISSED_SOFS
+ track_missed_sofs(_hcd->frame_number);
+#endif /* */
+
+ /* Determine whether any periodic QHs should be executed. */
+ qh_entry = _hcd->periodic_sched_inactive.next;
+ while (qh_entry != &_hcd->periodic_sched_inactive) {
+ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry);
+ /* Advance before a possible list_move invalidates qh_entry. */
+ qh_entry = qh_entry->next;
+ if (dwc_frame_num_le(qh->sched_frame, _hcd->frame_number)) {
+ /*
+ * Move QH to the ready list to be executed next
+ * (micro)frame.
+ */
+ list_move(&qh->qh_list_entry,&_hcd->periodic_sched_ready);
+ }
+ }
+ tr_type = dwc_otg_hcd_select_transactions(_hcd);
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, tr_type);
+ //schedule_work(&_hcd->hcd_queue_work);
+ }
+
+ /* Clear interrupt */
+ gintsts.b.sofintr = 1;
+ dwc_write_reg32(&_hcd->core_if->core_global_regs->gintsts,gintsts.d32);
+ return 1;
+}
+
+/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at
+ * least one packet in the Rx FIFO. The packets are moved from the FIFO to
+ * memory if the DWC_otg controller is operating in Slave mode. */
+int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *_dwc_otg_hcd)
+{
+ host_grxsts_data_t grxsts;
+ dwc_hc_t * hc = NULL;
+ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n");
+ /* Reading GRXSTSP pops the top status entry from the queue. */
+ grxsts.d32 = dwc_read_reg32(&_dwc_otg_hcd->core_if->core_global_regs->grxstsp);
+ hc = _dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum];
+
+ /* Packet Status */
+ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum);
+ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt);
+ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid,
+ hc->data_pid_start);
+ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts);
+ switch (grxsts.b.pktsts) {
+ case DWC_GRXSTS_PKTSTS_IN:
+ /* Read the data into the host buffer. */
+ if (grxsts.b.bcnt > 0) {
+ dwc_otg_read_packet(_dwc_otg_hcd->core_if,
+ hc->xfer_buff, grxsts.b.bcnt);
+
+ /* Update the HC fields for the next packet received. */
+ hc->xfer_count += grxsts.b.bcnt;
+ hc->xfer_buff += grxsts.b.bcnt;
+ }
+ /* Intentional fallthrough to the common break below. */
+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP:
+ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR:
+ case DWC_GRXSTS_PKTSTS_CH_HALTED:
+ /* Handled in interrupt, just ignore data */
+ break;
+ default:
+ DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n",
+ grxsts.b.pktsts);
+ break;
+ }
+ return 1;
+}
+
+
+/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
+ * data packets may be written to the FIFO for OUT transfers. More requests
+ * may be written to the non-periodic request queue for IN transfers. This
+ * interrupt is enabled only in Slave mode.
+ *
+ * Always returns 1 (interrupt handled). */
+int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *
+ _dwc_otg_hcd)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n");
+ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
+ DWC_OTG_TRANSACTION_NON_PERIODIC);
+ //schedule_work(&_dwc_otg_hcd->hcd_queue_work);
+ return 1;
+}
+
+
+/** This interrupt occurs when the periodic Tx FIFO is half-empty. More data
+ * packets may be written to the FIFO for OUT transfers. More requests may be
+ * written to the periodic request queue for IN transfers. This interrupt is
+ * enabled only in Slave mode.
+ *
+ * Always returns 1 (interrupt handled). */
+int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *
+ _dwc_otg_hcd)
+{
+ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n");
+ dwc_otg_hcd_queue_transactions(_dwc_otg_hcd,
+ DWC_OTG_TRANSACTION_PERIODIC);
+ //schedule_work(&_dwc_otg_hcd->hcd_queue_work);
+ return 1;
+}
+
+
+/** There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately. Returns non-zero if any port condition was serviced. */
+int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ int retval = 0;
+ hprt0_data_t hprt0;
+ hprt0_data_t hprt0_modify;
+ hprt0.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0);
+ hprt0_modify.d32 = dwc_read_reg32(_dwc_otg_hcd->core_if->host_if->hprt0);
+
+ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in
+ * GINTSTS */
+ /* NOTE(review): the change bits appear to be write-1-to-clear; only the
+ * bits serviced below are set back to 1 before the final write, and
+ * prtena is kept 0 — confirm against the DWC_otg databook that writing
+ * prtena=1 would disable the port. */
+ hprt0_modify.b.prtena = 0;
+ hprt0_modify.b.prtconndet = 0;
+ hprt0_modify.b.prtenchng = 0;
+ hprt0_modify.b.prtovrcurrchng = 0;
+
+ /* Port Connect Detected
+ * Set flag and clear if detected */
+ if (hprt0.b.prtconndet) {
+ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x "
+ "Port Connect Detected--\n", hprt0.d32);
+ _dwc_otg_hcd->flags.b.port_connect_status_change = 1;
+ _dwc_otg_hcd->flags.b.port_connect_status = 1;
+ hprt0_modify.b.prtconndet = 1;
+
+ /* B-Device has connected, Delete the connection timer. */
+ del_timer(&_dwc_otg_hcd->conn_timer);
+
+ /* The Hub driver asserts a reset when it sees port connect
+ * status change flag
+ */
+ retval |= 1;
+ }
+
+ /* Port Enable Changed
+ * Clear if detected - Set internal flag if disabled */
+ if (hprt0.b.prtenchng) {
+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Enable Changed--\n", hprt0.d32);
+ hprt0_modify.b.prtenchng = 1;
+ if (hprt0.b.prtena == 1) {
+ int do_reset = 0;
+ dwc_otg_core_params_t * params =
+ _dwc_otg_hcd->core_if->core_params;
+ dwc_otg_core_global_regs_t * global_regs =
+ _dwc_otg_hcd->core_if->core_global_regs;
+ dwc_otg_host_if_t * host_if =
+ _dwc_otg_hcd->core_if->host_if;
+
+ /* Check if we need to adjust the PHY clock speed for
+ * low power and adjust it */
+ if (params->host_support_fs_ls_low_power) {
+ gusbcfg_data_t usbcfg;
+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) ||
+ (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED)) {
+ /*
+ * Low power
+ */
+ hcfg_data_t hcfg;
+ if (usbcfg.b.phylpwrclksel == 0) {
+ /* Set PHY low power clock select for FS/LS devices */
+ usbcfg.b.phylpwrclksel = 1;
+ dwc_write_reg32(&global_regs->gusbcfg,usbcfg.d32);
+ do_reset = 1;
+ }
+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg);
+ if ((hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) &&
+ (params->host_ls_low_power_phy_clk ==
+ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ)) {
+ /* 6 MHZ */
+ DWC_DEBUGPL(DBG_CIL,"FS_PHY programming HCFG to 6 MHz (Low Power)\n");
+ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) {
+ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+ do_reset = 1;
+ }
+ } else {
+ /* 48 MHZ */
+ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n");
+ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) {
+ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;
+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32);
+ do_reset = 1;
+ }
+ }
+ } else {
+ /*
+ * Not low power
+ */
+ if (usbcfg.b.phylpwrclksel == 1) {
+ usbcfg.b.phylpwrclksel = 0;
+ dwc_write_reg32(&global_regs->gusbcfg,usbcfg.d32);
+ do_reset = 1;
+ }
+ }
+ if (do_reset) {
+ /* Clock change requires a port reset; defer it to
+ * tasklet context. */
+ tasklet_schedule(_dwc_otg_hcd->reset_tasklet);
+ }
+ }
+ if (!do_reset) {
+ /* Port has been enabled set the reset change flag */
+ _dwc_otg_hcd->flags.b.port_reset_change = 1;
+ }
+ } else {
+ _dwc_otg_hcd->flags.b.port_enable_change = 1;
+ }
+ retval |= 1;
+ }
+
+ /** Overcurrent Change Interrupt */
+ if (hprt0.b.prtovrcurrchng) {
+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x "
+ "Port Overcurrent Changed--\n", hprt0.d32);
+ _dwc_otg_hcd->flags.b.port_over_current_change = 1;
+ hprt0_modify.b.prtovrcurrchng = 1;
+ retval |= 1;
+ }
+
+ /* Clear Port Interrupts */
+ dwc_write_reg32(_dwc_otg_hcd->core_if->host_if->hprt0,hprt0_modify.d32);
+ return retval;
+}
+
+/** This interrupt indicates that one or more host channels has a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for each
+ * host channel interrupt and handles them appropriately.
+ *
+ * Returns the OR of the per-channel handler return values. */
+int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ int i;
+ int retval = 0;
+ haint_data_t haint;
+
+ /* Clear appropriate bits in HCINTn to clear the interrupt bit in
+ * GINTSTS */
+ haint.d32 = dwc_otg_read_host_all_channels_intr(_dwc_otg_hcd->core_if);
+ /* HAINT has one bit per channel; service each asserted channel. */
+ for (i = 0; i < _dwc_otg_hcd->core_if->core_params->host_channels;i++) {
+ if (haint.b2.chint & (1 << i)) {
+ retval |= dwc_otg_hcd_handle_hc_n_intr(_dwc_otg_hcd, i);
+ }
+ }
+ return retval;
+}
+
+/* Macro used to clear one channel interrupt by writing 1 to the named
+ * bit of HCINTn (write-1-to-clear). _intr_ is a bitfield name pasted
+ * into hcint_data_t, e.g. clear_hc_int(hc_regs, nyet). */
+#define clear_hc_int(_hc_regs_,_intr_) \
+ do { \
+ hcint_data_t hcint_clear = { .d32 = 0}; \
+ hcint_clear.b._intr_ = 1; \
+ dwc_write_reg32(&((_hc_regs_)->hcint), hcint_clear.d32); \
+ } while (0)
+
+/*
+ * Macro used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ *
+ * Clears the named bit in HCINTMSKn via read-modify-write.
+ */
+#define disable_hc_int(_hc_regs_,_intr_) \
+ do { \
+ hcintmsk_data_t hcintmsk = {.d32 = 0}; \
+ hcintmsk.b._intr_ = 1; \
+ dwc_modify_reg32(&((_hc_regs_)->hcintmsk), hcintmsk.d32, 0); \
+ } while (0)
+
+/**
+ * Gets the actual length of a transfer after the transfer halts. _halt_status
+ * holds the reason for the halt.
+ *
+ * For IN transfers where _halt_status is DWC_OTG_HC_XFER_COMPLETE,
+ * *_short_read is set to 1 upon return if less than the requested
+ * number of bytes were transferred. Otherwise, *_short_read is set to 0 upon
+ * return. _short_read may also be NULL on entry, in which case it remains
+ * unchanged.
+ */
+static uint32_t get_actual_xfer_length(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status, int *_short_read)
+{
+ hctsiz_data_t hctsiz;
+ uint32_t length;
+ if (_short_read != NULL) {
+ *_short_read = 0;
+ }
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (_halt_status == DWC_OTG_HC_XFER_COMPLETE) {
+ if (_hc->ep_is_in) {
+ /* Remaining xfersize tells how much of the programmed
+ * length was NOT received. */
+ length = _hc->xfer_len - hctsiz.b.xfersize;
+ if (_short_read != NULL) {
+ *_short_read = (hctsiz.b.xfersize != 0);
+ }
+ } else if (_hc->qh->do_split) {
+ length = _qtd->ssplit_out_xfer_count;
+ } else {
+ length = _hc->xfer_len;
+ }
+ } else {
+ /*
+ * Must use the hctsiz.pktcnt field to determine how much data
+ * has been transferred. This field reflects the number of
+ * packets that have been transferred via the USB. This is
+ * always an integral number of packets if the transfer was
+ * halted before its normal completion. (Can't use the
+ * hctsiz.xfersize field because that reflects the number of
+ * bytes transferred via the AHB, not the USB).
+ */
+ length = (_hc->start_pkt_count - hctsiz.b.pktcnt) * _hc->max_packet;
+ }
+ return length;
+}
+
+/**
+ * Updates the state of the URB after a Transfer Complete interrupt on the
+ * host channel. Updates the actual_length field of the URB based on the
+ * number of bytes transferred via the host channel. Sets the URB status
+ * if the data transfer is finished (0, or -EREMOTEIO on a short read when
+ * the URB requested URB_SHORT_NOT_OK); otherwise *status is left untouched.
+ *
+ * @return 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise.
+ */
+static int update_urb_state_xfer_comp(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb,
+ dwc_otg_qtd_t * _qtd, int *status)
+{
+ int xfer_done = 0;
+ int short_read = 0;
+ _urb->actual_length += get_actual_xfer_length(_hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_COMPLETE, &short_read);
+ /* Done on a short read or when the full requested length arrived. */
+ if (short_read || (_urb->actual_length == _urb->transfer_buffer_length)) {
+ xfer_done = 1;
+ if (short_read && (_urb->transfer_flags & URB_SHORT_NOT_OK)) {
+ *status = -EREMOTEIO;
+ } else {
+ *status = 0;
+ }
+ }
+
+#ifdef CONFIG_DWC_DEBUG
+ {
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
+ __func__, (_hc->ep_is_in ? "IN" : "OUT"), _hc->hc_num);
+ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", _hc->xfer_len);
+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n",hctsiz.b.xfersize);
+ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n",
+ _urb->transfer_buffer_length);
+ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n",
+ _urb->actual_length);
+ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n",
+ short_read, xfer_done);
+ }
+#endif /* */
+ return xfer_done;
+}
+
+/*
+ * Save the starting data toggle for the next transfer. The data toggle is
+ * saved in the QH for non-control transfers and it's saved in the QTD for
+ * control transfers. The current toggle is read back from the channel's
+ * HCTSIZ PID field.
+ */
+static void save_data_toggle(dwc_hc_t * _hc,
+ dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd)
+{
+ hctsiz_data_t hctsiz;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (_hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) {
+ dwc_otg_qh_t * qh = _hc->qh;
+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
+ qh->data_toggle = DWC_OTG_HC_PID_DATA0;
+ } else {
+ qh->data_toggle = DWC_OTG_HC_PID_DATA1;
+ }
+ } else {
+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) {
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA0;
+ } else {
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+ }
+ }
+}
+
+/**
+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
+ * still linked to the QH, the QH is added to the end of the inactive
+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
+ * schedule if no more QTDs are linked to the QH.
+ */
+static void deactivate_qh(dwc_otg_hcd_t * _hcd,
+ dwc_otg_qh_t * _qh, int free_qtd)
+{
+ int continue_split = 0;
+ dwc_otg_qtd_t * qtd;
+ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, _hcd, _qh,
+ free_qtd);
+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+ /* A split transaction still in its complete-split (or mid/end isoc
+ * split) phase must keep its schedule slot. */
+ if (qtd->complete_split) {
+ continue_split = 1;
+ } else if ((qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID) ||
+ (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END)) {
+ continue_split = 1;
+ }
+
+ if (free_qtd) {
+ /*
+ * Note that this was previously a call to
+ * dwc_otg_hcd_qtd_remove_and_free(qtd), which frees the qtd.
+ * However, that call frees the qtd memory, and we continue in the
+ * interrupt logic to access it many more times, including writing
+ * to it. With slub debugging on, it is clear that we were writing
+ * to memory we had freed.
+ * Call this instead, and now I have moved the freeing of the memory to
+ * the end of processing this interrupt.
+ */
+ dwc_otg_hcd_qtd_remove(qtd);
+
+ continue_split = 0;
+ }
+ _qh->channel = NULL;
+ _qh->qtd_in_process = NULL;
+ dwc_otg_hcd_qh_deactivate(_hcd, _qh, continue_split);
+}
+
+/**
+ * Updates the state of an Isochronous URB when the transfer is stopped for
+ * any reason. The fields of the current entry in the frame descriptor array
+ * are set based on the transfer state and the input _halt_status. Completes
+ * the Isochronous URB if all the URB frames have been completed.
+ *
+ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be
+ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE.
+ */
+static dwc_otg_halt_status_e update_isoc_urb_state(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status)
+{
+ struct urb *urb = _qtd->urb;
+ dwc_otg_halt_status_e ret_val = _halt_status;
+ struct usb_iso_packet_descriptor *frame_desc;
+ frame_desc = &urb->iso_frame_desc[_qtd->isoc_frame_index];
+ switch (_halt_status) {
+ case DWC_OTG_HC_XFER_COMPLETE:
+ frame_desc->status = 0;
+ frame_desc->actual_length =
+ get_actual_xfer_length(_hc, _hc_regs, _qtd, _halt_status,NULL);
+ break;
+ case DWC_OTG_HC_XFER_FRAME_OVERRUN:
+ urb->error_count++;
+ if (_hc->ep_is_in) {
+ frame_desc->status = -ENOSR;
+ } else {
+ frame_desc->status = -ECOMM;
+ }
+ frame_desc->actual_length = 0;
+ break;
+ case DWC_OTG_HC_XFER_BABBLE_ERR:
+ urb->error_count++;
+ frame_desc->status = -EOVERFLOW;
+
+ /* Don't need to update actual_length in this case. */
+ break;
+ case DWC_OTG_HC_XFER_XACT_ERR:
+ urb->error_count++;
+ frame_desc->status = -EPROTO;
+ frame_desc->actual_length =
+ get_actual_xfer_length(_hc, _hc_regs, _qtd, _halt_status,NULL);
+ /* Fix: this case previously fell through into the default
+ * branch, so any isochronous transaction error hit the
+ * DWC_ERROR/BUG() path below and crashed the kernel. A
+ * transaction error is a handled condition and must end here. */
+ break;
+ default:
+ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__, _halt_status);
+ BUG();
+ break;
+ }
+ if (++_qtd->isoc_frame_index == urb->number_of_packets) {
+ /*
+ * urb->status is not used for isoc transfers.
+ * The individual frame_desc statuses are used instead.
+ */
+ dwc_otg_hcd_complete_urb(_hcd, urb, 0);
+ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ ret_val = DWC_OTG_HC_XFER_COMPLETE;
+ }
+ return ret_val;
+}
+
+/**
+ * Releases a host channel for use by other transfers. Attempts to select and
+ * queue more transactions since at least one host channel is available.
+ *
+ * @param _hcd The HCD state structure.
+ * @param _hc The host channel to release.
+ * @param _qtd The QTD associated with the host channel. This QTD may be freed
+ * if the transfer is complete or an error has occurred.
+ * @param _halt_status Reason the channel is being released. This status
+ * determines the actions taken by this function.
+ * @param must_free Out: set to the free_qtd decision so the caller frees the
+ * QTD after interrupt processing finishes (see deactivate_qh note).
+ */
+
+static void release_channel(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_qtd_t * _qtd, dwc_otg_halt_status_e _halt_status, int *must_free) {
+ dwc_otg_transaction_type_e tr_type;
+ int free_qtd;
+ dwc_otg_qh_t * _qh;
+ int deact = 1;
+ int retry_delay = 1;
+
+ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", __func__,
+ _hc->hc_num, _halt_status);
+ switch (_halt_status) {
+ case DWC_OTG_HC_XFER_NYET:
+ case DWC_OTG_HC_XFER_NAK:
+ /* NYET/NAK on a split: defer the QH instead of retrying
+ * immediately, using the configured per-status delay. */
+ if (_halt_status == DWC_OTG_HC_XFER_NYET) {
+ retry_delay = nyet_deferral_delay;
+ } else {
+ retry_delay = nak_deferral_delay;
+ }
+ free_qtd = 0;
+ if (deferral_on && _hc->do_split) {
+ _qh = _hc->qh;
+ if (_qh) {
+ deact = dwc_otg_hcd_qh_deferr(_hcd, _qh , retry_delay);
+ }
+ }
+ break;
+
+ case DWC_OTG_HC_XFER_URB_COMPLETE:
+ free_qtd = 1;
+ break;
+ case DWC_OTG_HC_XFER_AHB_ERR:
+ case DWC_OTG_HC_XFER_STALL:
+ case DWC_OTG_HC_XFER_BABBLE_ERR:
+ free_qtd = 1;
+ break;
+ case DWC_OTG_HC_XFER_XACT_ERR:
+ /* Give up after three consecutive transaction errors. */
+ if (_qtd->error_count >= 3) {
+ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n");
+ free_qtd = 1;
+ dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPROTO);
+ } else {
+ free_qtd = 0;
+ }
+ break;
+ case DWC_OTG_HC_XFER_URB_DEQUEUE:
+ /*
+ * The QTD has already been removed and the QH has been
+ * deactivated. Don't want to do anything except release the
+ * host channel and try to queue more transfers.
+ */
+ /* NOTE(review): *must_free is not written on this path; the
+ * caller must initialize it before calling — verify in
+ * dwc_otg_hcd_handle_hc_n_intr. */
+ goto cleanup;
+ case DWC_OTG_HC_XFER_NO_HALT_STATUS:
+ DWC_ERROR("%s: No halt_status, channel %d\n", __func__,
+ _hc->hc_num);
+ free_qtd = 0;
+ break;
+ default:
+ free_qtd = 0;
+ break;
+ }
+ *must_free = free_qtd;
+ if (deact) {
+ deactivate_qh(_hcd, _hc->qh, free_qtd);
+ }
+cleanup:
+ /*
+ * Release the host channel for use by other transfers. The cleanup
+ * function clears the channel interrupt enables and conditions, so
+ * there's no need to clear the Channel Halted interrupt separately.
+ */
+ dwc_otg_hc_cleanup(_hcd->core_if, _hc);
+ list_add_tail(&_hc->hc_list_entry, &_hcd->free_hc_list);
+ switch (_hc->ep_type) {
+ case DWC_OTG_EP_TYPE_CONTROL:
+ case DWC_OTG_EP_TYPE_BULK:
+ _hcd->non_periodic_channels--;
+ break;
+ default:
+ /*
+ * Don't release reservations for periodic channels here.
+ * That's done when a periodic transfer is descheduled (i.e.
+ * when the QH is removed from the periodic schedule).
+ */
+ break;
+ }
+ /* Try to queue more transfers now that there's a free channel, */
+ /* unless erratum_usb09_patched is set */
+ if (!erratum_usb09_patched) {
+ tr_type = dwc_otg_hcd_select_transactions(_hcd);
+ if (tr_type != DWC_OTG_TRANSACTION_NONE) {
+ dwc_otg_hcd_queue_transactions(_hcd, tr_type);
+ }
+ }
+}
+
+/**
+ * Halts a host channel. If the channel cannot be halted immediately because
+ * the request queue is full, this function ensures that the FIFO empty
+ * interrupt for the appropriate queue is enabled so that the halt request can
+ * be queued when there is space in the request queue.
+ *
+ * This function may also be called in DMA mode. In that case, the channel is
+ * simply released since the core always halts the channel automatically in
+ * DMA mode.
+ */
+static void halt_channel(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_qtd_t * _qtd, dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ if (_hcd->core_if->dma_enable) {
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ return;
+ }
+
+ /* Slave mode processing... */
+ dwc_otg_hc_halt(_hcd->core_if, _hc, _halt_status);
+ if (_hc->halt_on_queue) {
+ /* Halt could not be issued yet: arm the matching FIFO-empty
+ * interrupt so it gets queued when space frees up. */
+ gintmsk_data_t gintmsk = {.d32 = 0};
+ dwc_otg_core_global_regs_t * global_regs;
+ global_regs = _hcd->core_if->core_global_regs;
+ if (_hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
+ _hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
+ /*
+ * Make sure the Non-periodic Tx FIFO empty interrupt
+ * is enabled so that the non-periodic schedule will
+ * be processed.
+ */
+ gintmsk.b.nptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32);
+ } else {
+ /*
+ * Move the QH from the periodic queued schedule to
+ * the periodic assigned schedule. This allows the
+ * halt to be queued when the periodic schedule is
+ * processed.
+ */
+ list_move(&_hc->qh->qh_list_entry,
+ &_hcd->periodic_sched_assigned);
+
+ /*
+ * Make sure the Periodic Tx FIFO Empty interrupt is
+ * enabled so that the periodic schedule will be
+ * processed.
+ */
+ gintmsk.b.ptxfempty = 1;
+ dwc_modify_reg32(&global_regs->gintmsk, 0,gintmsk.d32);
+ }
+ }
+}
+
+/**
+ * Performs common cleanup for non-periodic transfers after a Transfer
+ * Complete interrupt. This function should be called after any endpoint type
+ * specific handling is finished to release the host channel. Resets the
+ * QTD error count and, on a NYET, puts the endpoint into PING state.
+ */
+static void complete_non_periodic_xfer(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ hcint_data_t hcint;
+ _qtd->error_count = 0;
+ hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+ if (hcint.b.nyet) {
+ /*
+ * Got a NYET on the last transaction of the transfer. This
+ * means that the endpoint should be in the PING state at the
+ * beginning of the next transfer.
+ */
+ _hc->qh->ping_state = 1;
+ clear_hc_int(_hc_regs, nyet);
+ }
+
+ /*
+ * Always halt and release the host channel to make it available for
+ * more transfers. There may still be more phases for a control
+ * transfer or more data packets for a bulk transfer at this point,
+ * but the host channel is still halted. A channel will be reassigned
+ * to the transfer when the non-periodic schedule is processed after
+ * the channel is released. This allows transactions to be queued
+ * properly via dwc_otg_hcd_queue_transactions, which also enables the
+ * Tx FIFO Empty interrupt if necessary.
+ */
+ if (_hc->ep_is_in) {
+ /*
+ * IN transfers in Slave mode require an explicit disable to
+ * halt the channel. (In DMA mode, this call simply releases
+ * the channel.)
+ */
+ halt_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ } else {
+ /*
+ * The channel is automatically disabled by the core for OUT
+ * transfers in Slave mode.
+ */
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ }
+}
+
+/**
+ * Performs common cleanup for periodic transfers after a Transfer Complete
+ * interrupt. This function should be called after any endpoint type specific
+ * handling is finished to release the host channel. Resets the QTD error
+ * count before releasing or halting the channel.
+ */
+static void complete_periodic_xfer(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd,
+ dwc_otg_halt_status_e _halt_status, int *must_free)
+{
+ hctsiz_data_t hctsiz;
+ _qtd->error_count = 0;
+ hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+ if (!_hc->ep_is_in || hctsiz.b.pktcnt == 0) {
+ /* Core halts channel in these cases. */
+ release_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ } else {
+ /* Flush any outstanding requests from the Tx queue. */
+ halt_channel(_hcd, _hc, _qtd, _halt_status, must_free);
+ }
+}
+
+/**
+ * Handles a host channel Transfer Complete interrupt. This handler may be
+ * called in either DMA mode or Slave mode. Advances the control-transfer
+ * state machine, updates URB/QTD state per pipe type, completes the URB when
+ * finished, then performs the periodic/non-periodic channel cleanup.
+ *
+ * Always returns 1 (interrupt handled).
+ */
+static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t * _hcd,
+ dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+ int urb_xfer_done;
+ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ struct urb *urb = _qtd->urb;
+ int pipe_type = usb_pipetype(urb->pipe);
+ int status = -EINPROGRESS;
+
+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+ "Transfer Complete--\n", _hc->hc_num);
+
+ /*
+ * Handle xfer complete on CSPLIT.
+ */
+ if (_hc->qh->do_split) {
+ _qtd->complete_split = 0;
+ }
+
+ /* Update the QTD and URB states. */
+ switch (pipe_type) {
+ case PIPE_CONTROL:
+ switch (_qtd->control_phase) {
+ case DWC_OTG_CONTROL_SETUP:
+ /* SETUP done: skip the data phase when there is no data. */
+ if (urb->transfer_buffer_length > 0) {
+ _qtd->control_phase = DWC_OTG_CONTROL_DATA;
+ } else {
+ _qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+ }
+ DWC_DEBUGPL(DBG_HCDV,
+ " Control setup transaction done\n");
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ break;
+ case DWC_OTG_CONTROL_DATA:{
+ urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs,urb, _qtd, &status);
+ if (urb_xfer_done) {
+ _qtd->control_phase = DWC_OTG_CONTROL_STATUS;
+ DWC_DEBUGPL(DBG_HCDV," Control data transfer done\n");
+ } else {
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ }
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ break;
+ }
+ case DWC_OTG_CONTROL_STATUS:
+ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n");
+ if (status == -EINPROGRESS) {
+ status = 0;
+ }
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
+ break;
+ }
+ complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,
+ halt_status, must_free);
+ break;
+ case PIPE_BULK:
+ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n");
+ urb_xfer_done = update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
+ if (urb_xfer_done) {
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE;
+ } else {
+ halt_status = DWC_OTG_HC_XFER_COMPLETE;
+ }
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ complete_non_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,halt_status, must_free);
+ break;
+ case PIPE_INTERRUPT:
+ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n");
+ update_urb_state_xfer_comp(_hc, _hc_regs, urb, _qtd, &status);
+ /*
+ * Interrupt URB is done on the first transfer complete
+ * interrupt.
+ */
+ dwc_otg_hcd_complete_urb(_hcd, urb, status);
+ save_data_toggle(_hc, _hc_regs, _qtd);
+ complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_URB_COMPLETE, must_free);
+ break;
+ case PIPE_ISOCHRONOUS:
+ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n");
+ if (_qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) {
+ halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+ DWC_OTG_HC_XFER_COMPLETE);
+ }
+ complete_periodic_xfer(_hcd, _hc, _hc_regs, _qtd, halt_status, must_free);
+ break;
+ }
+ /* No further xfercompl interrupts until the channel is re-assigned. */
+ disable_hc_int(_hc_regs, xfercompl);
+ return 1;
+}
+
+/**
+ * Handles a host channel STALL interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ *
+ * The stalled URB is completed with -EPIPE for control, bulk and interrupt
+ * pipes, then the channel is halted with STALL status.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_stall_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	struct urb *urb = _qtd->urb;
+	int pipe_type = usb_pipetype(urb->pipe);
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"STALL Received--\n", _hc->hc_num);
+	if (pipe_type == PIPE_CONTROL) {
+		dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE);
+	}
+	if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) {
+		dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EPIPE);
+		/*
+		 * USB protocol requires resetting the data toggle for bulk
+		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
+		 * setup command is issued to the endpoint. Anticipate the
+		 * CLEAR_FEATURE command since a STALL has occurred and reset
+		 * the data toggle now.
+		 */
+		_hc->qh->data_toggle = 0;
+	}
+	halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_STALL, must_free);
+	disable_hc_int(_hc_regs, stall);
+	return 1;
+}
+
+/*
+ * Updates the state of the URB when a transfer has been stopped due to an
+ * abnormal condition before the transfer completes. Modifies the
+ * actual_length field of the URB to reflect the number of bytes that have
+ * actually been transferred via the host channel.
+ *
+ * The byte count is derived from the channel registers by
+ * get_actual_xfer_length(), qualified by the halt status that stopped the
+ * transfer.
+ */
+static void update_urb_state_xfer_intr(dwc_hc_t * _hc,
+	dwc_otg_hc_regs_t * _hc_regs, struct urb *_urb, dwc_otg_qtd_t * _qtd,
+	dwc_otg_halt_status_e _halt_status)
+{
+	uint32_t bytes_transferred =
+		get_actual_xfer_length(_hc, _hc_regs, _qtd, _halt_status, NULL);
+	_urb->actual_length += bytes_transferred;
+
+#ifdef CONFIG_DWC_DEBUG
+	/* Debug-only dump of the transfer progress bookkeeping. */
+	{
+		hctsiz_data_t hctsiz;
+		hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+		DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n",
+			__func__, (_hc->ep_is_in ? "IN" : "OUT"),_hc->hc_num);
+		DWC_DEBUGPL(DBG_HCDV, "  _hc->start_pkt_count %d\n",
+			_hc->start_pkt_count);
+		DWC_DEBUGPL(DBG_HCDV, "  hctsiz.pktcnt %d\n", hctsiz.b.pktcnt);
+		DWC_DEBUGPL(DBG_HCDV, "  _hc->max_packet %d\n",_hc->max_packet);
+		DWC_DEBUGPL(DBG_HCDV, "  bytes_transferred %d\n",
+			bytes_transferred);
+		DWC_DEBUGPL(DBG_HCDV, "  _urb->actual_length %d\n",
+			_urb->actual_length);
+		DWC_DEBUGPL(DBG_HCDV, "  _urb->transfer_buffer_length %d\n",
+			_urb->transfer_buffer_length);
+	}
+#endif /* */
+}
+
+/**
+ * Handles a host channel NAK interrupt. This handler may be called in either
+ * DMA mode or Slave mode.
+ *
+ * For split transactions the SSPLIT is simply restarted. For non-split
+ * control/bulk the error count is reset and, for high-speed OUT, the PING
+ * protocol is armed before the channel is halted for re-start.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_nak_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free) {
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"NAK Received--\n", _hc->hc_num);
+	/*
+	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
+	 * interrupt. Re-start the SSPLIT transfer.
+	 */
+	if (_hc->do_split) {
+		if (_hc->complete_split) {
+			/* A NAK on CSPLIT means the device answered; clear the
+			 * accumulated transaction-error count. */
+			_qtd->error_count = 0;
+		}
+		_qtd->complete_split = 0;
+		halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+		goto handle_nak_done;
+	}
+	switch (usb_pipetype(_qtd->urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		if (_hcd->core_if->dma_enable && _hc->ep_is_in) {
+			/*
+			 * NAK interrupts are enabled on bulk/control IN
+			 * transfers in DMA mode for the sole purpose of
+			 * resetting the error count after a transaction error
+			 * occurs. The core will continue transferring data.
+			 */
+			_qtd->error_count = 0;
+			goto handle_nak_done;
+		}
+
+		/*
+		 * NAK interrupts normally occur during OUT transfers in DMA
+		 * or Slave mode. For IN transfers, more requests will be
+		 * queued as request queue space is available.
+		 */
+		_qtd->error_count = 0;
+		if (!_hc->qh->ping_state) {
+			update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
+						_qtd, DWC_OTG_HC_XFER_NAK);
+			save_data_toggle(_hc, _hc_regs, _qtd);
+			/* High-speed devices use PING flow control after NAK. */
+			if (_qtd->urb->dev->speed == USB_SPEED_HIGH) {
+				_hc->qh->ping_state = 1;
+			}
+		}
+
+		/*
+		 * Halt the channel so the transfer can be re-started from
+		 * the appropriate point or the PING protocol will
+		 * start/continue.
+		 */
+		halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+		break;
+	case PIPE_INTERRUPT:
+		_qtd->error_count = 0;
+		halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NAK, must_free);
+		break;
+	case PIPE_ISOCHRONOUS:
+		/* Should never get called for isochronous transfers. */
+		BUG();
+		break;
+	}
+	handle_nak_done:disable_hc_int(_hc_regs, nak);
+	clear_hc_int(_hc_regs, nak);
+	return 1;
+}
+
+/**
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * performing the PING protocol in Slave mode, when errors occur during
+ * either Slave mode or DMA mode, and during Start Split transactions.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_ack_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"ACK Received--\n", _hc->hc_num);
+	if (_hc->do_split) {
+		/*
+		 * Handle ACK on SSPLIT.
+		 * ACK should not occur in CSPLIT.
+		 */
+		if ((!_hc->ep_is_in) && (_hc->data_pid_start != DWC_OTG_HC_PID_SETUP)) {
+			/* Record how much OUT data the SSPLIT delivered. */
+			_qtd->ssplit_out_xfer_count = _hc->xfer_len;
+		}
+		if (!(_hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !_hc->ep_is_in)) {
+			/* Don't need complete for isochronous out transfers. */
+			_qtd->complete_split = 1;
+		}
+
+		/* ISOC OUT */
+		if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && !_hc->ep_is_in) {
+			switch (_hc->xact_pos) {
+			case DWC_HCSPLIT_XACTPOS_ALL:
+				break;
+			case DWC_HCSPLIT_XACTPOS_END:
+				/* Last piece sent; reset for the next frame. */
+				_qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+				_qtd->isoc_split_offset = 0;
+				break;
+			case DWC_HCSPLIT_XACTPOS_BEGIN:
+			case DWC_HCSPLIT_XACTPOS_MID:
+				/*
+				 * For BEGIN or MID, calculate the length for
+				 * the next microframe to determine the correct
+				 * SSPLIT token, either MID or END.
+				 * (188 bytes is the maximum split-transaction
+				 * payload per microframe, per the USB 2.0
+				 * specification.)
+				 */
+				do {
+					struct usb_iso_packet_descriptor *frame_desc;
+					frame_desc = &_qtd->urb->iso_frame_desc[_qtd->isoc_frame_index];
+					_qtd->isoc_split_offset += 188;
+					if ((frame_desc->length - _qtd->isoc_split_offset) <=188) {
+						_qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END;
+					} else {
+						_qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID;
+					}
+				} while (0);
+				break;
+			}
+		} else {
+			halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
+		}
+	} else {
+		/* Non-split: an ACK clears any prior error streak. */
+		_qtd->error_count = 0;
+		if (_hc->qh->ping_state) {
+			_hc->qh->ping_state = 0;
+
+			/*
+			 * Halt the channel so the transfer can be re-started
+			 * from the appropriate point. This only happens in
+			 * Slave mode. In DMA mode, the ping_state is cleared
+			 * when the transfer is started because the core
+			 * automatically executes the PING, then the transfer.
+			 */
+			halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_ACK, must_free);
+		}
+	}
+
+	/*
+	 * If the ACK occurred when _not_ in the PING state, let the channel
+	 * continue transferring data after clearing the error count.
+	 */
+	disable_hc_int(_hc_regs, ack);
+	clear_hc_int(_hc_regs, ack);
+	return 1;
+}
+
+/**
+ * Handles a host channel NYET interrupt. This interrupt should only occur on
+ * Bulk and Control OUT endpoints and for complete split transactions. If a
+ * NYET occurs at the same time as a Transfer Complete interrupt, it is
+ * handled in the xfercomp interrupt handler, not here. This handler may be
+ * called in either DMA mode or Slave mode.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"NYET Received--\n", _hc->hc_num);
+
+	/*
+	 * NYET on CSPLIT
+	 * re-do the CSPLIT immediately on non-periodic
+	 */
+	if ((_hc->do_split) && (_hc->complete_split)) {
+		if ((_hc->ep_type == DWC_OTG_EP_TYPE_INTR) ||
+			(_hc->ep_type == DWC_OTG_EP_TYPE_ISOC)) {
+			int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd
+								(_hcd));
+			if (dwc_full_frame_num(frnum) !=
+				dwc_full_frame_num(_hc->qh->sched_frame)) {
+
+				/*
+				 * No longer in the same full speed frame.
+				 * Treat this as a transaction error.
+				 */
+#if 0
+				/** @todo Fix system performance so this can
+				 * be treated as an error. Right now complete
+				 * splits cannot be scheduled precisely enough
+				 * due to other system activity, so this error
+				 * occurs regularly in Slave mode.
+				 */
+				_qtd->error_count++;
+
+#endif /* */
+				_qtd->complete_split = 0;
+				halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+
+				/** @todo add support for isoc release */
+				goto handle_nyet_done;
+			}
+		}
+		halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free);
+		goto handle_nyet_done;
+	}
+	/* Non-split NYET: device accepted the data but has no room for
+	 * more. Arm the PING protocol and rewind the transfer state. */
+	_hc->qh->ping_state = 1;
+	_qtd->error_count = 0;
+	update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb, _qtd,
+			DWC_OTG_HC_XFER_NYET);
+	save_data_toggle(_hc, _hc_regs, _qtd);
+
+	/*
+	 * Halt the channel and re-start the transfer so the PING
+	 * protocol will start.
+	 */
+	halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_NYET, must_free);
+handle_nyet_done:
+	disable_hc_int(_hc_regs, nyet);
+	clear_hc_int(_hc_regs, nyet);
+
+	return 1;
+}
+
+/**
+ * Handles a host channel babble interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ *
+ * Non-isochronous URBs are completed with -EOVERFLOW; isochronous frames
+ * record the error per-frame via update_isoc_urb_state(). In both cases the
+ * channel is halted.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_babble_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"Babble Error--\n", _hc->hc_num);
+	if (_hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
+		dwc_otg_hcd_complete_urb(_hcd, _qtd->urb, -EOVERFLOW);
+		halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_BABBLE_ERR, must_free);
+	} else {
+		dwc_otg_halt_status_e halt_status;
+		halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+					DWC_OTG_HC_XFER_BABBLE_ERR);
+		halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+	}
+	disable_hc_int(_hc_regs, bblerr);
+	return 1;
+}
+
+/**
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ *
+ * Dumps the channel registers and URB details for diagnosis, completes the
+ * URB with -EIO, and forces a channel halt directly (bypassing
+ * halt_channel(), see comment below).
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd)
+{
+	hcchar_data_t hcchar;
+	hcsplt_data_t hcsplt;
+	hctsiz_data_t hctsiz;
+	uint32_t hcdma;
+	struct urb *urb = _qtd->urb;
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"AHB Error--\n", _hc->hc_num);
+	hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+	hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
+	hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+	hcdma = dwc_read_reg32(&_hc_regs->hcdma);
+	DWC_ERROR("AHB ERROR, Channel %d\n", _hc->hc_num);
+	DWC_ERROR("  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32);
+	DWC_ERROR("  hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma);
+	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n");
+	DWC_ERROR("  Device address: %d\n", usb_pipedevice(urb->pipe));
+	DWC_ERROR("  Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe),
+		(usb_pipein(urb->pipe) ? "IN" : "OUT"));
+	/* GCC statement-expression used to map the pipe type to a string. */
+	DWC_ERROR("  Endpoint type: %s\n", ( {
+		char *pipetype;
+		switch (usb_pipetype(urb->pipe)) {
+		case PIPE_CONTROL:
+			pipetype = "CONTROL"; break;
+		case PIPE_BULK:
+			pipetype = "BULK"; break;
+		case PIPE_INTERRUPT:
+			pipetype = "INTERRUPT"; break;
+		case PIPE_ISOCHRONOUS:
+			pipetype = "ISOCHRONOUS"; break;
+		default:
+			pipetype = "UNKNOWN"; break;
+		};
+		pipetype;
+	} )) ;
+	DWC_ERROR("  Speed: %s\n", ( {
+		char *speed;
+		switch (urb->dev->speed) {
+		case USB_SPEED_HIGH:
+			speed = "HIGH"; break;
+		case USB_SPEED_FULL:
+			speed = "FULL"; break;
+		case USB_SPEED_LOW:
+			speed = "LOW"; break;
+		default:
+			speed = "UNKNOWN"; break;
+		};
+		speed;
+	} )) ;
+	DWC_ERROR("  Max packet size: %d\n",
+		usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+	DWC_ERROR("  Data buffer length: %d\n", urb->transfer_buffer_length);
+	/* NOTE(review): the (u32) casts truncate DMA addresses on 64-bit
+	 * configurations; presumably safe on this 32-bit platform — confirm. */
+	DWC_ERROR("  Transfer buffer: %p, Transfer DMA: %p\n",
+		urb->transfer_buffer, (void *)(u32)urb->transfer_dma);
+	DWC_ERROR("  Setup buffer: %p, Setup DMA: %p\n", urb->setup_packet,
+		(void *)(u32)urb->setup_dma);
+	DWC_ERROR("  Interval: %d\n", urb->interval);
+	dwc_otg_hcd_complete_urb(_hcd, urb, -EIO);
+
+	/*
+	 * Force a channel halt. Don't call halt_channel because that won't
+	 * write to the HCCHARn register in DMA mode to force the halt.
+	 */
+	dwc_otg_hc_halt(_hcd->core_if, _hc, DWC_OTG_HC_XFER_AHB_ERR);
+	disable_hc_int(_hc_regs, ahberr);
+	return 1;
+}
+
+/**
+ * Handles a host channel transaction error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ *
+ * Increments the QTD error count (retry bookkeeping handled elsewhere),
+ * rewinds the URB/toggle state for non-PING control/bulk, and halts the
+ * channel so the transfer can be retried.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"Transaction Error--\n", _hc->hc_num);
+	switch (usb_pipetype(_qtd->urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		_qtd->error_count++;
+		if (!_hc->qh->ping_state) {
+			update_urb_state_xfer_intr(_hc, _hc_regs, _qtd->urb,
+						_qtd, DWC_OTG_HC_XFER_XACT_ERR);
+			save_data_toggle(_hc, _hc_regs, _qtd);
+			/* OUT on a high-speed device retries via PING. */
+			if (!_hc->ep_is_in && _qtd->urb->dev->speed == USB_SPEED_HIGH) {
+				_hc->qh->ping_state = 1;
+			}
+		}
+
+		/*
+		 * Halt the channel so the transfer can be re-started from
+		 * the appropriate point or the PING protocol will start.
+		 */
+		halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+		break;
+	case PIPE_INTERRUPT:
+		_qtd->error_count++;
+		if ((_hc->do_split) && (_hc->complete_split)) {
+			/* Restart the split from the SSPLIT phase. */
+			_qtd->complete_split = 0;
+		}
+		halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_XACT_ERR, must_free);
+		break;
+	case PIPE_ISOCHRONOUS:
+		{
+			dwc_otg_halt_status_e halt_status;
+			halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+					DWC_OTG_HC_XFER_XACT_ERR);
+			halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+		}
+		break;
+	}
+	disable_hc_int(_hc_regs, xacterr);
+	return 1;
+}
+
+/**
+ * Handles a host channel frame overrun interrupt. This handler may be called
+ * in either DMA mode or Slave mode.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"Frame Overrun--\n", _hc->hc_num);
+	switch (usb_pipetype(_qtd->urb->pipe)) {
+	case PIPE_CONTROL:
+	case PIPE_BULK:
+		/* No action for non-periodic pipes. */
+		break;
+	case PIPE_INTERRUPT:
+		halt_channel(_hcd, _hc, _qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN, must_free);
+		break;
+	case PIPE_ISOCHRONOUS:
+		{
+			/* Record the overrun against the current ISOC frame. */
+			dwc_otg_halt_status_e halt_status;
+			halt_status = update_isoc_urb_state(_hcd, _hc, _hc_regs, _qtd,
+					DWC_OTG_HC_XFER_FRAME_OVERRUN);
+			halt_channel(_hcd, _hc, _qtd, halt_status, must_free);
+		}
+		break;
+	}
+	disable_hc_int(_hc_regs, frmovrun);
+	return 1;
+}
+
+/**
+ * Handles a host channel data toggle error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ *
+ * For IN transfers the error count is simply reset; for OUT transfers the
+ * condition is only logged. NOTE(review): no channel halt or URB completion
+ * is performed here — presumably the core recovers on its own; confirm
+ * against the controller databook.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"Data Toggle Error--\n", _hc->hc_num);
+	if (_hc->ep_is_in) {
+		_qtd->error_count = 0;
+	} else {
+		DWC_ERROR("Data Toggle Error on OUT transfer,"
+			"channel %d\n", _hc->hc_num);
+	}
+	disable_hc_int(_hc_regs, datatglerr);
+	return 1;
+}
+
+#ifdef CONFIG_DWC_DEBUG
+/**
+ * This function is for debug only. It checks that a valid halt status is set
+ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is
+ * taken and a warning is issued.
+ * @return 1 if halt status is ok, 0 otherwise.
+ */
+static inline int halt_status_ok(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd)
+{
+	hcchar_data_t hcchar;
+	hctsiz_data_t hctsiz;
+	hcint_data_t hcint;
+	hcintmsk_data_t hcintmsk;
+	hcsplt_data_t hcsplt;
+	if (_hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) {
+		/*
+		 * This code is here only as a check. This condition should
+		 * never happen. Ignore the halt if it does occur.
+		 */
+		hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+		hctsiz.d32 = dwc_read_reg32(&_hc_regs->hctsiz);
+		hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+		hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);
+		hcsplt.d32 = dwc_read_reg32(&_hc_regs->hcsplt);
+		DWC_WARN("%s: _hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS, "
+			"channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
+			"hcint 0x%08x, hcintmsk 0x%08x, "
+			"hcsplt 0x%08x, qtd->complete_split %d\n", __func__,
+			_hc->hc_num, hcchar.d32, hctsiz.d32, hcint.d32,
+			hcintmsk.d32, hcsplt.d32, _qtd->complete_split);
+		DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n",
+			__func__, _hc->hc_num);
+		DWC_WARN("\n");
+		/* Ack the halt so the spurious interrupt is not re-raised. */
+		clear_hc_int(_hc_regs, chhltd);
+		return 0;
+	}
+
+	/*
+	 * This code is here only as a check. hcchar.chdis should
+	 * never be set when the halt interrupt occurs. Halt the
+	 * channel again if it does occur.
+	 */
+	hcchar.d32 = dwc_read_reg32(&_hc_regs->hcchar);
+	if (hcchar.b.chdis) {
+		DWC_WARN("%s: hcchar.chdis set unexpectedly, "
+			"hcchar 0x%08x, trying to halt again\n", __func__,
+			hcchar.d32);
+		clear_hc_int(_hc_regs, chhltd);
+		_hc->halt_pending = 0;
+		halt_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+		return 0;
+	}
+	return 1;
+}
+#endif /* */
+
+/**
+ * Handles a host Channel Halted interrupt in DMA mode. This handler
+ * determines the reason the channel halted and proceeds accordingly.
+ *
+ * In DMA mode only ChHltd (and AHB error) interrupts are delivered, so the
+ * original cause must be recovered from the raw HCINTn bits. The individual
+ * cause handlers are dispatched in priority order (see inline comments).
+ */
+static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	hcint_data_t hcint;
+	hcintmsk_data_t hcintmsk;
+	if (_hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
+		_hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
+		/*
+		 * Just release the channel. A dequeue can happen on a
+		 * transfer timeout. In the case of an AHB Error, the channel
+		 * was forced to halt because there's no way to gracefully
+		 * recover.
+		 */
+		release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+		return;
+	}
+
+	/* Read the HCINTn register to determine the cause for the halt. */
+	hcint.d32 = dwc_read_reg32(&_hc_regs->hcint);
+	hcintmsk.d32 = dwc_read_reg32(&_hc_regs->hcintmsk);
+	if (hcint.b.xfercomp) {
+
+		/** @todo This is here because of a possible hardware bug. Spec
+		 * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT
+		 * interrupt w/ACK bit set should occur, but I only see the
+		 * XFERCOMP bit, even with it masked out. This is a workaround
+		 * for that behavior. Should fix this when hardware is fixed.
+		 */
+		if ((_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) && (!_hc->ep_is_in)) {
+			handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+		}
+		handle_hc_xfercomp_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else if (hcint.b.stall) {
+		handle_hc_stall_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else if (hcint.b.xacterr) {
+		/*
+		 * Must handle xacterr before nak or ack. Could get a xacterr
+		 * at the same time as either of these on a BULK/CONTROL OUT
+		 * that started with a PING. The xacterr takes precedence.
+		 */
+		handle_hc_xacterr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else if (hcint.b.nyet) {
+		/*
+		 * Must handle nyet before nak or ack. Could get a nyet at the
+		 * same time as either of those on a BULK/CONTROL OUT that
+		 * started with a PING. The nyet takes precedence.
+		 */
+		handle_hc_nyet_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else if (hcint.b.bblerr) {
+		handle_hc_babble_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else if (hcint.b.frmovrun) {
+		handle_hc_frmovrun_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else if (hcint.b.datatglerr) {
+		handle_hc_datatglerr_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else if (hcint.b.nak && !hcintmsk.b.nak) {
+		/*
+		 * If nak is not masked, it's because a non-split IN transfer
+		 * is in an error state. In that case, the nak is handled by
+		 * the nak interrupt handler, not here. Handle nak here for
+		 * BULK/CONTROL OUT transfers, which halt on a NAK to allow
+		 * rewinding the buffer pointer.
+		 */
+		handle_hc_nak_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else if (hcint.b.ack && !hcintmsk.b.ack) {
+		/*
+		 * If ack is not masked, it's because a non-split IN transfer
+		 * is in an error state. In that case, the ack is handled by
+		 * the ack interrupt handler, not here. Handle ack here for
+		 * split transfers. Start splits halt on ACK.
+		 */
+		handle_hc_ack_intr(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else {
+		if (_hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
+			_hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
+			/*
+			 * A periodic transfer halted with no other channel
+			 * interrupts set. Assume it was halted by the core
+			 * because it could not be completed in its scheduled
+			 * (micro)frame.
+			 */
+#ifdef CONFIG_DWC_DEBUG
+			DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n",
+				__func__, _hc->hc_num);
+
+#endif /* */
+			halt_channel(_hcd, _hc, _qtd,
+				DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, must_free);
+		} else {
+			/* Unexpected halt cause: log everything we know. */
+			DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason "
+				"for halting is unknown, nyet %d, hcint 0x%08x, intsts 0x%08x\n",
+				__func__, _hc->hc_num, hcint.b.nyet, hcint.d32,
+				dwc_read_reg32(&_hcd->core_if->core_global_regs->gintsts));
+		}
+	}
+}
+
+/**
+ * Handles a host channel Channel Halted interrupt.
+ *
+ * In slave mode, this handler is called only when the driver specifically
+ * requests a halt. This occurs during handling other host channel interrupts
+ * (e.g. nak, xacterr, stall, nyet, etc.).
+ *
+ * In DMA mode, this is the interrupt that occurs when the core has finished
+ * processing a transfer on a channel. Other host channel interrupts (except
+ * ahberr) are disabled in DMA mode.
+ *
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t * _hcd,
+	dwc_hc_t * _hc, dwc_otg_hc_regs_t * _hc_regs, dwc_otg_qtd_t * _qtd, int *must_free)
+{
+	DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: "
+		"Channel Halted--\n", _hc->hc_num);
+	if (_hcd->core_if->dma_enable) {
+		handle_hc_chhltd_intr_dma(_hcd, _hc, _hc_regs, _qtd, must_free);
+	} else {
+#ifdef CONFIG_DWC_DEBUG
+		/* Debug builds sanity-check the recorded halt status first. */
+		if (!halt_status_ok(_hcd, _hc, _hc_regs, _qtd)) {
+			return 1;
+		}
+#endif /* */
+		release_channel(_hcd, _hc, _qtd, _hc->halt_status, must_free);
+	}
+	clear_hc_int(_hc_regs, chhltd);
+	return 1;
+}
+
+/** Handles interrupt for a specific Host Channel.
+ *
+ * Reads HCINTn masked by HCINTMSKn and dispatches each pending cause to its
+ * handler. The QTD examined is the first entry of the channel's QH qtd_list.
+ * A handler may set *must_free, in which case the QTD is freed here once all
+ * handlers are done with it.
+ *
+ * @return Non-zero if any handler processed an interrupt.
+ */
+int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t * _dwc_otg_hcd, uint32_t _num)
+{
+	int must_free = 0;
+	int retval = 0;
+	hcint_data_t hcint;
+	hcintmsk_data_t hcintmsk;
+	dwc_hc_t * hc;
+	dwc_otg_hc_regs_t * hc_regs;
+	dwc_otg_qtd_t * qtd;
+	DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n",_num);
+	hc = _dwc_otg_hcd->hc_ptr_array[_num];
+	hc_regs = _dwc_otg_hcd->core_if->host_if->hc_regs[_num];
+	qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry);
+	hcint.d32 = dwc_read_reg32(&hc_regs->hcint);
+	hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk);
+	DWC_DEBUGPL(DBG_HCDV, "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+		hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32));
+	hcint.d32 = hcint.d32 & hcintmsk.d32;
+	if (!_dwc_otg_hcd->core_if->dma_enable) {
+		/* Slave mode: only honor chhltd when it is the sole pending
+		 * cause (0x2 == the chhltd bit alone); otherwise the halt is
+		 * a side effect of another interrupt handled below. */
+		if ((hcint.b.chhltd) && (hcint.d32 != 0x2)) {
+			hcint.b.chhltd = 0;
+		}
+	}
+	if (hcint.b.xfercomp) {
+		retval |= handle_hc_xfercomp_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+		/*
+		 * If NYET occurred at same time as Xfer Complete, the NYET is
+		 * handled by the Xfer Complete interrupt handler. Don't want
+		 * to call the NYET interrupt handler in this case.
+		 */
+		hcint.b.nyet = 0;
+	}
+	if (hcint.b.chhltd) {
+		retval |= handle_hc_chhltd_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	if (hcint.b.ahberr) {
+		retval |= handle_hc_ahberr_intr(_dwc_otg_hcd, hc, hc_regs, qtd);
+	}
+	if (hcint.b.stall) {
+		retval |= handle_hc_stall_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	if (hcint.b.nak) {
+		retval |= handle_hc_nak_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	if (hcint.b.ack) {
+		retval |= handle_hc_ack_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	if (hcint.b.nyet) {
+		retval |= handle_hc_nyet_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	if (hcint.b.xacterr) {
+		retval |= handle_hc_xacterr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	if (hcint.b.bblerr) {
+		retval |= handle_hc_babble_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	if (hcint.b.frmovrun) {
+		retval |= handle_hc_frmovrun_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	if (hcint.b.datatglerr) {
+		retval |= handle_hc_datatglerr_intr(_dwc_otg_hcd, hc, hc_regs, qtd, &must_free);
+	}
+	/*
+	 * Logic to free the qtd here, at the end of the hc intr
+	 * processing, if the handling of this interrupt determined
+	 * that it needs to be freed.
+	 */
+	if (must_free) {
+		/* Free the qtd here now that we are done using it. */
+		dwc_otg_hcd_qtd_free(qtd);
+	}
+	return retval;
+}
+
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c
new file mode 100644
index 00000000000..b33b32e8468
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_hcd_queue.c
@@ -0,0 +1,696 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_queue.c $
+ * $Revision: #4 $
+ * $Date: 2005/09/15 $
+ * $Change: 537387 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef CONFIG_DWC_DEVICE_ONLY
+
+/**
+ * @file
+ *
+ * This file contains the functions to manage Queue Heads and Queue
+ * Transfer Descriptors.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_hcd.h"
+#include "dwc_otg_regs.h"
+
+/**
+ * This function allocates and initializes a QH.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param[in] _urb Holds the information about the device/endpoint that we need
+ * to initialize the QH.
+ *
+ * @return Returns pointer to the newly allocated QH, or NULL on error. */
+dwc_otg_qh_t * dwc_otg_hcd_qh_create(dwc_otg_hcd_t * _hcd,
+ struct urb * _urb)
+{
+ dwc_otg_qh_t * qh;
+
+ /* Allocate memory */
+ /** @todo add memflags argument */
+ qh = dwc_otg_hcd_qh_alloc();
+ if (qh == NULL) {
+ return NULL;
+ }
+ dwc_otg_hcd_qh_init(_hcd, qh, _urb);
+ return qh;
+}
+
+/** Free each QTD in the QH's QTD-list then free the QH. QH should already be
+ * removed from a list. QTD list should already be empty if called from URB
+ * Dequeue.
+ *
+ * @param[in] _qh The QH to free.
+ */
+void dwc_otg_hcd_qh_free(dwc_otg_qh_t * _qh)
+{
+ dwc_otg_qtd_t * qtd;
+ struct list_head *pos;
+ unsigned long flags;
+
+ /* Free each QTD in the QTD list */
+ local_irq_save(flags);
+ for (pos = _qh->qtd_list.next; pos != &_qh->qtd_list;
+ pos = _qh->qtd_list.next) {
+ list_del(pos);
+ qtd = dwc_list_to_qtd(pos);
+ dwc_otg_hcd_qtd_free(qtd);
+ }
+ local_irq_restore(flags);
+ kfree(_qh);
+ return;
+}
+
/** Initializes a QH structure.
 *
 * @param[in] _hcd The HCD state structure for the DWC OTG controller.
 * @param[in] _qh The QH to init.
 * @param[in] _urb Holds the information about the device/endpoint that we need
 * to initialize the QH. */
#define SCHEDULE_SLOP 10	/* (micro)frames of headroom for the first periodic slot */
void dwc_otg_hcd_qh_init(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh,
			 struct urb *_urb)
{
	memset(_qh, 0, sizeof(dwc_otg_qh_t));

	/* Initialize QH: derive the endpoint type from the URB's pipe. */
	switch (usb_pipetype(_urb->pipe)) {
	case PIPE_CONTROL:
		_qh->ep_type = USB_ENDPOINT_XFER_CONTROL;
		break;
	case PIPE_BULK:
		_qh->ep_type = USB_ENDPOINT_XFER_BULK;
		break;
	case PIPE_ISOCHRONOUS:
		_qh->ep_type = USB_ENDPOINT_XFER_ISOC;
		break;
	case PIPE_INTERRUPT:
		_qh->ep_type = USB_ENDPOINT_XFER_INT;
		break;
	}
	_qh->ep_is_in = usb_pipein(_urb->pipe) ? 1 : 0;
	/* Every new QH starts the data toggle at DATA0. */
	_qh->data_toggle = DWC_OTG_HC_PID_DATA0;
	_qh->maxp = usb_maxpacket(_urb->dev, _urb->pipe, !(usb_pipein(_urb->pipe)));
	INIT_LIST_HEAD(&_qh->qtd_list);
	INIT_LIST_HEAD(&_qh->qh_list_entry);
	_qh->channel = NULL;

	/* FS/LS Endpoint on HS Hub (behind a TT), and NOT the virtual root
	 * hub (devnum 1): such transfers need split transactions. */
	_qh->do_split = 0;
	if (((_urb->dev->speed == USB_SPEED_LOW) ||
	     (_urb->dev->speed == USB_SPEED_FULL)) &&
	    (_urb->dev->tt) && (_urb->dev->tt->hub) && (_urb->dev->tt->hub->devnum != 1)) {
		DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			    usb_pipeendpoint(_urb->pipe), _urb->dev->tt->hub->devnum, _urb->dev->ttport);
		_qh->do_split = 1;
	}
	if (_qh->ep_type == USB_ENDPOINT_XFER_INT
	    || _qh->ep_type == USB_ENDPOINT_XFER_ISOC) {

		/* Compute scheduling parameters once and save them. */
		hprt0_data_t hprt;

		/** @todo Account for split transfers in the bus time. */
		int bytecount = dwc_hb_mult(_qh->maxp) * dwc_max_packet(_qh->maxp);
		_qh->usecs = NS_TO_US(usb_calc_bus_time(_urb->dev->speed,
							usb_pipein(_urb->pipe),
							(_qh->ep_type == USB_ENDPOINT_XFER_ISOC),bytecount));

		/* Start in a slightly future (micro)frame (SCHEDULE_SLOP of
		 * headroom) so the first slot is not already in the past. */
		_qh->sched_frame = dwc_frame_num_inc(_hcd->frame_number, SCHEDULE_SLOP);
		_qh->interval = _urb->interval;

#if 0
		/* Increase interrupt polling rate for debugging. */
		if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
			_qh->interval = 8;
		}

#endif /* */
		hprt.d32 = dwc_read_reg32(_hcd->core_if->host_if->hprt0);
		if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) &&
		    ((_urb->dev->speed == USB_SPEED_LOW) ||
		     (_urb->dev->speed == USB_SPEED_FULL))) {
			/* FS/LS device behind a HS port: the schedule runs in
			 * microframes, so scale the frame-based interval
			 * (8 uframes per frame) and set the low 3 bits so the
			 * slot lands on microframe 7 of its frame. */
			_qh->interval *= 8;
			_qh->sched_frame |= 0x7;
			_qh->start_split_frame = _qh->sched_frame;
		}
	}
	DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n");
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", _qh);
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n",
		    _urb->dev->devnum);
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n",
		    usb_pipeendpoint(_urb->pipe),
		    usb_pipein(_urb->pipe) == USB_DIR_IN ? "IN" : "OUT");
	/* The GCC statement expressions below just pretty-print the speed
	 * and endpoint type for the verbose debug log. */
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", ( {
		    char *speed;
		    switch(_urb->dev->speed) {
		    case USB_SPEED_LOW:
			    speed = "low"; break;
		    case USB_SPEED_FULL:
			    speed = "full"; break;
		    case USB_SPEED_HIGH:
			    speed = "high"; break;
		    default:
			    speed = "?";
			    break;
		    };
		    speed;
		    } )) ;
	DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n", ( {
		    char *type;
		    switch (_qh->ep_type) {
		    case USB_ENDPOINT_XFER_ISOC:
			    type = "isochronous"; break;
		    case USB_ENDPOINT_XFER_INT:
			    type = "interrupt"; break;
		    case USB_ENDPOINT_XFER_CONTROL:
			    type = "control"; break;
		    case USB_ENDPOINT_XFER_BULK:
			    type = "bulk"; break;
		    default:
			    type = "?";break;
		    };
		    type;
		    } )) ;

#ifdef CONFIG_DWC_DEBUG
	if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
		DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n",
			    _qh->usecs);
		DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n",
			    _qh->interval);
	}

#endif /* */
	return;
}
+
+/**
+ * Checks that a channel is available for a periodic transfer.
+ *
+ * @return 0 if successful, negative error code otherise.
+ */
+static int periodic_channel_available(dwc_otg_hcd_t * _hcd)
+{
+ /*
+ * Currently assuming that there is a dedicated host channnel for each
+ * periodic transaction plus at least one host channel for
+ * non-periodic transactions.
+ */
+ int status;
+ int num_channels;
+ num_channels = _hcd->core_if->core_params->host_channels;
+ if ((_hcd->periodic_channels + _hcd->non_periodic_channels <
+ num_channels) && (_hcd->periodic_channels < num_channels - 1)) {
+ status = 0;
+ } else {
+ DWC_NOTICE("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
+ __func__, num_channels, _hcd->periodic_channels,
+ _hcd->non_periodic_channels);
+ status = -ENOSPC;
+ }
+ return status;
+}
+
+/**
+ * Checks that there is sufficient bandwidth for the specified QH in the
+ * periodic schedule. For simplicity, this calculation assumes that all the
+ * transfers in the periodic schedule may occur in the same (micro)frame.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH containing periodic bandwidth required.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int check_periodic_bandwidth(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int status;
+ uint16_t max_claimed_usecs;
+ status = 0;
+ if (_hcd->core_if->core_params->speed == DWC_SPEED_PARAM_HIGH) {
+ /*
+ * High speed mode.
+ * Max periodic usecs is 80% x 125 usec = 100 usec.
+ */
+ max_claimed_usecs = 100 - _qh->usecs;
+ } else {
+ /*
+ * Full speed mode.
+ * Max periodic usecs is 90% x 1000 usec = 900 usec.
+ */
+ max_claimed_usecs = 900 - _qh->usecs;
+ }
+ if (_hcd->periodic_usecs > max_claimed_usecs) {
+#undef USB_DWC_OTG_IGNORE_BANDWIDTH
+#ifndef USB_DWC_OTG_IGNORE_BANDWIDTH
+ DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n",
+ __func__, _hcd->periodic_usecs, _qh->usecs);
+ status = -ENOSPC;
+#else
+ status = 0;
+#endif
+ }
+ return status;
+}
+
+/**
+ * Checks that the max transfer size allowed in a host channel is large enough
+ * to handle the maximum data transfer in a single (micro)frame for a periodic
+ * transfer.
+ *
+ * @param _hcd The HCD state structure for the DWC OTG controller.
+ * @param _qh QH for a periodic endpoint.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+static int check_max_xfer_size(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ int status;
+ uint32_t max_xfer_size;
+ uint32_t max_channel_xfer_size;
+ status = 0;
+ max_xfer_size = dwc_max_packet(_qh->maxp) * dwc_hb_mult(_qh->maxp);
+ max_channel_xfer_size = _hcd->core_if->core_params->max_transfer_size;
+ if (max_xfer_size > max_channel_xfer_size) {
+ DWC_NOTICE("%s: Periodic xfer length %d > "
+ "max xfer length for channel %d\n", __func__,
+ max_xfer_size, max_channel_xfer_size);
+ status = -ENOSPC;
+ }
+ return status;
+}
+
/**
 * Schedules an interrupt or isochronous transfer in the periodic schedule.
 *
 * @param _hcd The HCD state structure for the DWC OTG controller.
 * @param _qh QH for the periodic transfer. The QH should already contain the
 * scheduling information.
 *
 * @return 0 if successful, negative error code otherwise.
 */
static int schedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
{
	int status = 0;
	/* Admission control: a free host channel, enough (micro)frame
	 * bandwidth, and a channel transfer size large enough for this EP. */
	status = periodic_channel_available(_hcd);
	if (status) {
		DWC_NOTICE("%s: No host channel available for periodic "
			   "transfer.\n", __func__);
		return status;
	}
	status = check_periodic_bandwidth(_hcd, _qh);
	if (status) {
		DWC_NOTICE("%s: Insufficient periodic bandwidth for "
			   "periodic transfer.\n", __func__);
		return status;
	}
	status = check_max_xfer_size(_hcd, _qh);
	if (status) {
		DWC_NOTICE("%s: Channel max transfer size too small "
			   "for periodic transfer.\n", __func__);
		return status;
	}

	/* Always start in the inactive schedule. */
	list_add_tail(&_qh->qh_list_entry, &_hcd->periodic_sched_inactive);

	/* Reserve the periodic channel. */
	_hcd->periodic_channels++;

	/* Update claimed usecs per (micro)frame. */
	_hcd->periodic_usecs += _qh->usecs;

	/* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */
	hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated +=
	    _qh->usecs / _qh->interval;

	if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
		hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_int_reqs++;
		DWC_DEBUGPL(DBG_HCD,
			    "Scheduled intr: qh %p, usecs %d, period %d\n",
			    _qh, _qh->usecs, _qh->interval);
	} else {
		hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_isoc_reqs++;
		DWC_DEBUGPL(DBG_HCD,
			    "Scheduled isoc: qh %p, usecs %d, period %d\n",
			    _qh, _qh->usecs, _qh->interval);
	}
	return status;
}
+
+/**
+ * This function adds a QH to either the non periodic or periodic schedule if
+ * it is not already in the schedule. If the QH is already in the schedule, no
+ * action is taken.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qh_add(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ unsigned long flags;
+ int status = 0;
+ local_irq_save(flags);
+ if (!list_empty(&_qh->qh_list_entry)) {
+ /* QH already in a schedule. */
+ goto done;
+ }
+
+ /* Add the new QH to the appropriate schedule */
+ if (dwc_qh_is_non_per(_qh)) {
+ /* Always start in the inactive schedule. */
+ list_add_tail(&_qh->qh_list_entry,
+ &_hcd->non_periodic_sched_inactive);
+ } else {
+ status = schedule_periodic(_hcd, _qh);
+ }
+
+done:local_irq_restore(flags);
+ return status;
+}
+/**
+ * This function adds a QH to the non periodic deferred schedule.
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qh_add_deferred(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (!list_empty(&_qh->qh_list_entry)) {
+ /* QH already in a schedule. */
+ goto done;
+ }
+
+ /* Add the new QH to the non periodic deferred schedule */
+ if (dwc_qh_is_non_per(_qh)) {
+ list_add_tail(&_qh->qh_list_entry,
+ &_hcd->non_periodic_sched_deferred);
+ }
+done:
+ local_irq_restore(flags);
+ return 0;
+}
+
/**
 * Removes an interrupt or isochronous transfer from the periodic schedule.
 *
 * Undoes everything schedule_periodic() claimed: list membership, the
 * channel reservation, the usecs budget and the usbfs counters.
 *
 * @param _hcd The HCD state structure for the DWC OTG controller.
 * @param _qh QH for the periodic transfer.
 */
static void deschedule_periodic(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
{
	list_del_init(&_qh->qh_list_entry);

	/* Release the periodic channel reservation. */
	_hcd->periodic_channels--;

	/* Update claimed usecs per (micro)frame. */
	_hcd->periodic_usecs -= _qh->usecs;

	/* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */
	hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_allocated -=
	    _qh->usecs / _qh->interval;

	if (_qh->ep_type == USB_ENDPOINT_XFER_INT) {
		hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_int_reqs--;
		DWC_DEBUGPL(DBG_HCD,
			    "Descheduled intr: qh %p, usecs %d, period %d\n",
			    _qh, _qh->usecs, _qh->interval);
	} else {
		hcd_to_bus(dwc_otg_hcd_to_hcd(_hcd))->bandwidth_isoc_reqs--;
		DWC_DEBUGPL(DBG_HCD,
			    "Descheduled isoc: qh %p, usecs %d, period %d\n",
			    _qh, _qh->usecs, _qh->interval);
	}
}
+
+/**
+ * Removes a QH from either the non-periodic or periodic schedule. Memory is
+ * not freed.
+ *
+ * @param[in] _hcd The HCD state structure.
+ * @param[in] _qh QH to remove from schedule. */
+void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ if (list_empty(&_qh->qh_list_entry)) {
+ /* QH is not in a schedule. */
+ goto done;
+ }
+ if (dwc_qh_is_non_per(_qh)) {
+ if (_hcd->non_periodic_qh_ptr == &_qh->qh_list_entry) {
+ _hcd->non_periodic_qh_ptr = _hcd->non_periodic_qh_ptr->next;
+ }
+ list_del_init(&_qh->qh_list_entry);
+ } else {
+ deschedule_periodic(_hcd, _qh);
+ }
+
+done:local_irq_restore(flags);
+}
+
+/**
+ * Defers a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the deferred non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ */
+int dwc_otg_hcd_qh_deferr(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh, int delay)
+{
+ int deact = 1;
+ unsigned long flags;
+ local_irq_save(flags);
+ if (dwc_qh_is_non_per(_qh)) {
+ _qh->sched_frame =
+ dwc_frame_num_inc(_hcd->frame_number,
+ delay);
+ _qh->channel = NULL;
+ _qh->qtd_in_process = NULL;
+ deact = 0;
+ dwc_otg_hcd_qh_remove(_hcd, _qh);
+ if (!list_empty(&_qh->qtd_list)) {
+ /* Add back to deferred non-periodic schedule. */
+ dwc_otg_hcd_qh_add_deferred(_hcd, _qh);
+ }
+ }
+ local_irq_restore(flags);
+ return deact;
+}
/**
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t * _hcd, dwc_otg_qh_t * _qh,
			       int sched_next_periodic_split)
{
	unsigned long flags;
	local_irq_save(flags);
	if (dwc_qh_is_non_per(_qh)) {
		dwc_otg_hcd_qh_remove(_hcd, _qh);
		if (!list_empty(&_qh->qtd_list)) {
			/* Add back to inactive non-periodic schedule. */
			dwc_otg_hcd_qh_add(_hcd, _qh);
		}
	} else {
		uint16_t frame_number =
		    dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(_hcd));
		if (_qh->do_split) {
			/* Schedule the next continuing periodic split transfer */
			if (sched_next_periodic_split) {
				/* Continue the split in the current frame by
				 * default... */
				_qh->sched_frame = frame_number;
				if (dwc_frame_num_le(frame_number,
						     dwc_frame_num_inc(_qh->start_split_frame,1))) {
					/*
					 * Allow one frame to elapse after start
					 * split microframe before scheduling
					 * complete split, but DONT if we are
					 * doing the next start split in the
					 * same frame for an ISOC out.
					 */
					if ((_qh->ep_type != USB_ENDPOINT_XFER_ISOC)
					    || (_qh->ep_is_in != 0)) {
						_qh->sched_frame = dwc_frame_num_inc(_qh->sched_frame,1);
					}
				}
			} else {
				/* Next start split: one interval after the
				 * previous one, never in the past, aligned to
				 * microframe 7 (low 3 bits set). */
				_qh->sched_frame = dwc_frame_num_inc(_qh->start_split_frame,
								     _qh->interval);
				if (dwc_frame_num_le(_qh->sched_frame, frame_number)) {
					_qh->sched_frame = frame_number;
				}
				_qh->sched_frame |= 0x7;
				_qh->start_split_frame = _qh->sched_frame;
			}
		} else {
			/* Plain periodic QH: advance by one interval, but
			 * never schedule into the past. */
			_qh->sched_frame =
			    dwc_frame_num_inc(_qh->sched_frame, _qh->interval);
			if (dwc_frame_num_le(_qh->sched_frame, frame_number)) {
				_qh->sched_frame = frame_number;
			}
		}
		if (list_empty(&_qh->qtd_list)) {
			/* Nothing left to transfer; leave the schedule. */
			dwc_otg_hcd_qh_remove(_hcd, _qh);
		} else {
			/*
			 * Remove from periodic_sched_queued and move to
			 * appropriate queue.
			 */
			if (_qh->sched_frame == frame_number) {
				list_move(&_qh->qh_list_entry,
					  &_hcd->periodic_sched_ready);
			} else {
				list_move(&_qh->qh_list_entry,
					  &_hcd->periodic_sched_inactive);
			}
		}
	}
	local_irq_restore(flags);
}
+
+/**
+ * This function allocates and initializes a QTD.
+ *
+ * @param[in] _urb The URB to create a QTD from. Each URB-QTD pair will end up
+ * pointing to each other so each pair should have a unique correlation.
+ *
+ * @return Returns pointer to the newly allocated QTD, or NULL on error. */
+dwc_otg_qtd_t * dwc_otg_hcd_qtd_create(struct urb *_urb)
+{
+ dwc_otg_qtd_t * qtd;
+ qtd = dwc_otg_hcd_qtd_alloc();
+ if (qtd == NULL) {
+ return NULL;
+ }
+ dwc_otg_hcd_qtd_init(qtd, _urb);
+ return qtd;
+}
+
+/**
+ * Initializes a QTD structure.
+ *
+ * @param[in] _qtd The QTD to initialize.
+ * @param[in] _urb The URB to use for initialization. */
+void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t * _qtd, struct urb *_urb)
+{
+ memset(_qtd, 0, sizeof(dwc_otg_qtd_t));
+ _qtd->urb = _urb;
+ if (usb_pipecontrol(_urb->pipe)) {
+ /*
+ * The only time the QTD data toggle is used is on the data
+ * phase of control transfers. This phase always starts with
+ * DATA1.
+ */
+ _qtd->data_toggle = DWC_OTG_HC_PID_DATA1;
+ _qtd->control_phase = DWC_OTG_CONTROL_SETUP;
+ }
+
+ /* start split */
+ _qtd->complete_split = 0;
+ _qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL;
+ _qtd->isoc_split_offset = 0;
+
+ /* Store the qtd ptr in the urb to reference what QTD. */
+ _urb->hcpriv = _qtd;
+ return;
+}
+
+/**
+ * This function adds a QTD to the QTD-list of a QH. It will find the correct
+ * QH to place the QTD into. If it does not find a QH, then it will create a
+ * new QH. If the QH to which the QTD is added is not currently scheduled, it
+ * is placed into the proper schedule based on its EP type.
+ *
+ * @param[in] _qtd The QTD to add
+ * @param[in] _dwc_otg_hcd The DWC HCD structure
+ *
+ * @return 0 if successful, negative error code otherwise.
+ */
+int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t * _qtd, dwc_otg_hcd_t * _dwc_otg_hcd)
+{
+ struct usb_host_endpoint *ep;
+ dwc_otg_qh_t * qh;
+ unsigned long flags;
+ int retval = 0;
+ struct urb *urb = _qtd->urb;
+ local_irq_save(flags);
+
+ /*
+ * Get the QH which holds the QTD-list to insert to. Create QH if it
+ * doesn't exist.
+ */
+ ep = dwc_urb_to_endpoint(urb);
+ qh = (dwc_otg_qh_t *) ep->hcpriv;
+ if (qh == NULL) {
+ qh = dwc_otg_hcd_qh_create(_dwc_otg_hcd, urb);
+ if (qh == NULL) {
+ retval = -1;
+ goto done;
+ }
+ ep->hcpriv = qh;
+ }
+ _qtd->qtd_qh_ptr = qh;
+ retval = dwc_otg_hcd_qh_add(_dwc_otg_hcd, qh);
+ if (retval == 0) {
+ list_add_tail(&_qtd->qtd_list_entry, &qh->qtd_list);
+ }
+
+done:
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+
+#endif /* DWC_DEVICE_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c
new file mode 100644
index 00000000000..4bd17de7939
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.c
@@ -0,0 +1,1408 @@
+ /* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_pcd.c $
+ * $Revision: #18 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef CONFIG_DWC_HOST_ONLY
+
+/** @file
+ * This file implements the Peripheral Controller Driver.
+ *
+ * The Peripheral Controller Driver (PCD) is responsible for
+ * translating requests from the Function Driver into the appropriate
+ * actions on the DWC_otg controller. It isolates the Function Driver
+ * from the specifics of the controller by providing an API to the
+ * Function Driver.
+ *
+ * The Peripheral Controller Driver for Linux will implement the
+ * Gadget API, so that the existing Gadget drivers can be used.
+ * (Gadget Driver is the Linux terminology for a Function Driver.)
+ *
+ * The Linux Gadget API is defined in the header file
+ * <code><linux/usb/gadget.h></code>. The USB EP operations API is
+ * defined in the structure <code>usb_ep_ops</code> and the USB
+ * Controller API is defined in the structure
+ * <code>usb_gadget_ops</code>.
+ *
+ * An important function of the PCD is managing interrupts generated
+ * by the DWC_otg controller. The implementation of the DWC_otg device
+ * mode interrupt service routines is in dwc_otg_pcd_intr.c.
+ *
+ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
+ * @todo Does it work when the request size is greater than DEPTSIZ
+ * transfer size
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_pcd.h"
+
+/**
+ * Static PCD pointer for use in usb_gadget_register_driver and
+ * usb_gadget_unregister_driver. Initialized in dwc_otg_pcd_init.
+ */
+static dwc_otg_pcd_t *s_pcd = 0;
+
+/* Display the contents of the buffer */
+extern void dump_msg(const u8 * buf, unsigned int length);
+
/**
 * This function completes a request. It calls the request call back.
 */
void request_done(dwc_otg_pcd_ep_t * _ep, dwc_otg_pcd_request_t * _req,
		  int _status)
{
	/* Remember whether the EP was already stopped so we can restore it. */
	unsigned stopped = _ep->stopped;
	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);
	if (_req->mapped) {
		/* The PCD created this DMA mapping itself; undo it. */
		dma_unmap_single(_ep->pcd->gadget.dev.parent,
				 _req->req.dma, _req->req.length,
				 _ep->dwc_ep.is_in
				 ? DMA_TO_DEVICE
				 : DMA_FROM_DEVICE);
		_req->req.dma = DMA_ADDR_INVALID;
		_req->mapped = 0;
	} else
		/* Caller-owned mapping: just hand the buffer back to the CPU. */
		dma_sync_single_for_cpu(_ep->pcd->gadget.dev.parent,
					_req->req.dma, _req->req.length,
					_ep->dwc_ep.is_in
					? DMA_TO_DEVICE
					: DMA_FROM_DEVICE);

	list_del_init(&_req->queue);
	/* Only overwrite the status if nobody set one already. */
	if (_req->req.status == -EINPROGRESS) {
		_req->req.status = _status;
	} else {
		_status = _req->req.status;
	}

	/* don't modify queue heads during completion callback */
	_ep->stopped = 1;
	/* Drop the PCD lock across the gadget driver's completion callback
	 * (callbacks commonly re-queue requests, which re-takes the lock). */
	SPIN_UNLOCK(&_ep->pcd->lock);
	_req->req.complete(&_ep->ep, &_req->req);
	SPIN_LOCK(&_ep->pcd->lock);
	if (_ep->pcd->request_pending > 0) {
		--_ep->pcd->request_pending;
	}
	_ep->stopped = stopped;

#ifdef CONFIG_405EZ
	/*
	 * Added-sr: 2007-07-26
	 *
	 * Finally, when the current request is done, mark this endpoint
	 * as not active, so that new requests can be processed.
	 */
	_ep->dwc_ep.active = 0;
#endif
}
+
+/**
+ * This function terminates all the requsts in the EP request queue.
+ */
+void request_nuke(dwc_otg_pcd_ep_t * _ep)
+{
+ dwc_otg_pcd_request_t * req;
+ _ep->stopped = 1;
+ /* called with irqs blocked?? */
+ while (!list_empty(&_ep->queue)) {
+ req = list_entry(_ep->queue.next, dwc_otg_pcd_request_t, queue);
+ request_done(_ep, req, -ESHUTDOWN);
+ }
+}
+
+/* USB Endpoint Operations */
+/*
+ * The following sections briefly describe the behavior of the Gadget
+ * API endpoint operations implemented in the DWC_otg driver
+ * software. Detailed descriptions of the generic behavior of each of
+ * these functions can be found in the Linux header file
+ * include/linux/usb_gadget.h.
+ *
+ * The Gadget API provides wrapper functions for each of the function
+ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper
+ * function, which then calls the underlying PCD function. The
+ * following sections are named according to the wrapper
+ * functions. Within each section, the corresponding DWC_otg PCD
+ * function name is specified.
+ *
+ */
+
+/**
+ * This function assigns periodic Tx FIFO to an periodic EP
+ * in shared Tx FIFO mode
+ */
+static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
+{
+ uint32_t PerTxMsk = 1;
+ int i;
+ for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
+ if ((PerTxMsk & core_if->p_tx_msk) == 0) {
+ core_if->p_tx_msk |= PerTxMsk;
+ return i + 1;
+ }
+ PerTxMsk <<= 1;
+ }
+ return 0;
+}
+
+/**
+ * This function releases periodic Tx FIFO
+ * in shared Tx FIFO mode
+ */
+static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
+ uint32_t fifo_num)
+{
+ core_if->p_tx_msk = (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
+}
+
+/**
+ * This function assigns periodic Tx FIFO to an periodic EP
+ * in shared Tx FIFO mode
+ */
+static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
+{
+ uint32_t TxMsk = 1;
+ int i;
+ for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
+ if ((TxMsk & core_if->tx_msk) == 0) {
+ core_if->tx_msk |= TxMsk;
+ return i + 1;
+ }
+ TxMsk <<= 1;
+ }
+ return 0;
+}
+
+/**
+ * This function releases periodic Tx FIFO
+ * in shared Tx FIFO mode
+ */
+static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
+{
+ core_if->tx_msk = (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
+}
+
/**
 * This function is called by the Gadget Driver for each EP to be
 * configured for the current configuration (SET_CONFIGURATION).
 *
 * This function initializes the dwc_otg_ep_t data structure, and then
 * calls dwc_otg_ep_activate.
 */
static int dwc_otg_pcd_ep_enable(struct usb_ep *_ep,
				 const struct usb_endpoint_descriptor *_desc)
{
	dwc_otg_pcd_ep_t * ep = 0;
	dwc_otg_pcd_t * pcd = 0;
	unsigned long flags;
	DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _desc);
	ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
	/* Reject bad arguments and EPs that are already enabled
	 * (ep->desc != NULL means an earlier enable succeeded). */
	if (!_ep || !_desc || ep->desc
	    || _desc->bDescriptorType != USB_DT_ENDPOINT) {
		DWC_WARN("%s, bad ep or descriptor\n", __func__);
		return -EINVAL;
	}
	/* EP0 belongs to the PCD itself and may not be enabled here. */
	if (ep == &ep->pcd->ep0) {
		DWC_WARN("%s, bad ep(0)\n", __func__);
		return -EINVAL;
	}

	/* Check FIFO size? */
	if (!_desc->wMaxPacketSize) {
		DWC_WARN("%s, bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}
	pcd = ep->pcd;
	/* Without a bound gadget driver or an enumerated speed there is
	 * nothing sensible to configure. */
	if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
		DWC_WARN("%s, bogus device state\n", __func__);
		return -ESHUTDOWN;
	}
	SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
	ep->desc = _desc;
	ep->ep.maxpacket = le16_to_cpu(_desc->wMaxPacketSize);

	/*
	 * Activate the EP
	 */
	ep->stopped = 0;
	ep->dwc_ep.is_in = (USB_DIR_IN & _desc->bEndpointAddress) != 0;
	ep->dwc_ep.maxpacket = ep->ep.maxpacket;
	ep->dwc_ep.type = _desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
	if (ep->dwc_ep.is_in) {
		if (!pcd->otg_dev->core_if->en_multiple_tx_fifo) {
			/* Shared FIFO mode: only ISOC IN EPs get a periodic
			 * Tx FIFO; 0 means "no dedicated FIFO". */
			ep->dwc_ep.tx_fifo_num = 0;
			if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
			    USB_ENDPOINT_XFER_ISOC) {
				/*
				 * if ISOC EP then assign a Periodic Tx FIFO.
				 */
				ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if);
			}
		} else {
			/*
			 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
			 */
			ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if);
		}
	}

	/* Set initial data PID. */
	if ((_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
	    USB_ENDPOINT_XFER_BULK) {
		ep->dwc_ep.data_pid_start = 0;
	}
	DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n",
		    ep->ep.name, (ep->dwc_ep.is_in ? "IN" : "OUT"),
		    ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
	/* Program the controller for this EP. */
	dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
	SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
	return 0;
}
+
+
/**
 * This function is called when an EP is disabled due to disconnect or
 * change in configuration. Any pending requests will terminate with a
 * status of -ESHUTDOWN.
 *
 * This function modifies the dwc_otg_ep_t data structure for this EP,
 * and then calls dwc_otg_ep_deactivate.
 */
static int dwc_otg_pcd_ep_disable(struct usb_ep *_ep)
{
	dwc_otg_pcd_ep_t * ep;
	unsigned long flags;
	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _ep);
	ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
	/* A NULL descriptor means the EP was never enabled. */
	if (!_ep || !ep->desc) {
		DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__,
			    _ep ? ep->ep.name : NULL);
		return -EINVAL;
	}
	SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
	/* Fail every queued request with -ESHUTDOWN before deactivating. */
	request_nuke(ep);
	dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep);
	ep->desc = 0;
	ep->stopped = 1;
	if (ep->dwc_ep.is_in) {
		/* Return any Tx FIFO claimed in ep_enable; both masks are
		 * cleared since only one mode's mask holds the claim. */
		release_perio_tx_fifo(GET_CORE_IF(ep->pcd),ep->dwc_ep.tx_fifo_num);
		release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num);
	}
	SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags);
	DWC_DEBUGPL(DBG_PCD, "%s disabled\n", _ep->name);
	return 0;
}
+
+
+/**
+ * This function allocates a request object to use with the specified
+ * endpoint.
+ *
+ * @param _ep The endpoint to be used with with the request
+ * @param _gfp_flags the GFP_* flags to use.
+ */
+static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *_ep,
+ gfp_t _gfp_flags)
+{
+ dwc_otg_pcd_request_t * req;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%d)\n", __func__, _ep, _gfp_flags);
+ if (0 == _ep) {
+ DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n");
+ return 0;
+ }
+ req = kmalloc(sizeof(dwc_otg_pcd_request_t), _gfp_flags);
+ if (0 == req) {
+ DWC_WARN("%s() %s\n", __func__,"request allocation failed!\n");
+ return 0;
+ }
+ memset(req, 0, sizeof(dwc_otg_pcd_request_t));
+ req->req.dma = DMA_ADDR_INVALID;
+ INIT_LIST_HEAD(&req->queue);
+ return &req->req;
+}
+
+
+/**
+ * This function frees a request object.
+ *
+ * @param _ep The endpoint associated with the request
+ * @param _req The request being freed
+ */
+static void dwc_otg_pcd_free_request(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ dwc_otg_pcd_request_t * req;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _req);
+ if (0 == _ep || 0 == _req) {
+ DWC_WARN("%s() %s\n", __func__,"Invalid ep or req argument!\n");
+ return;
+ }
+ req = container_of(_req, dwc_otg_pcd_request_t, req);
+ kfree(req);
+}
+
+
+/**
+ * This function is used to submit an I/O Request to an EP.
+ *
+ * - When the request completes the request's completion callback
+ * is called to return the request to the driver.
+ * - An EP, except control EPs, may have multiple requests
+ * pending.
+ * - Once submitted the request cannot be examined or modified.
+ * - Each request is turned into one or more packets.
+ * - A BULK EP can queue any amount of data; the transfer is
+ * packetized.
+ * - Zero length Packets are specified with the request 'zero'
+ * flag.
+ */
+static int dwc_otg_pcd_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t _gfp_flags)
+{
+ int prevented = 0;
+ dwc_otg_pcd_request_t * req;
+ dwc_otg_pcd_ep_t * ep;
+ dwc_otg_pcd_t * pcd;
+ unsigned long flags = 0;
+
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p,%d)\n", __func__, _ep, _req,
+ _gfp_flags);
+ req = container_of(_req, dwc_otg_pcd_request_t, req);
+ if (!_req || !_req->complete || !_req->buf
+ || !list_empty(&req->queue)) {
+ DWC_WARN("%s, bad params\n", __func__);
+ return -EINVAL;
+ }
+ ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
+ if (!_ep || (!ep->desc && ep->dwc_ep.num != 0)) {
+ DWC_WARN("%s, bad ep\n", __func__);
+ return -EINVAL;
+ }
+ pcd = ep->pcd;
+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
+ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed);
+ DWC_WARN("%s, bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+ DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", _ep->name,
+ _req, _req->length, _req->buf);
+ if (!GET_CORE_IF(pcd)->core_params->opt) {
+ if (ep->dwc_ep.num != 0) {
+ DWC_ERROR("%s queue req %p, len %d buf %p\n",
+ _ep->name, _req, _req->length, _req->buf);
+ }
+ }
+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
+
+#if defined(CONFIG_DWC_DEBUG) & defined(VERBOSE)
+ dump_msg(_req->buf, _req->length);
+
+#endif /* */
+ _req->status = -EINPROGRESS;
+ _req->actual = 0;
+
+ /*
+ * For EP0 IN without premature status, zlp is required?
+ */
+ if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
+ DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", _ep->name);
+ //_req->zero = 1;
+ }
+
+ /* map virtual address to hardware */
+ if (_req->dma == DMA_ADDR_INVALID) {
+ _req->dma = dma_map_single(ep->pcd->gadget.dev.parent,
+ _req->buf,
+ _req->length,
+ ep->dwc_ep.is_in
+ ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ req->mapped = 1;
+ } else {
+ dma_sync_single_for_device(ep->pcd->gadget.dev.parent,
+ _req->dma, _req->length,
+ ep->dwc_ep.is_in
+ ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ req->mapped = 0;
+ }
+
+ /* Start the transfer */
+ if (list_empty(&ep->queue) && !ep->stopped) {
+ /* EP0 Transfer? */
+ if (ep->dwc_ep.num == 0) {
+ switch (pcd->ep0state) {
+ case EP0_IN_DATA_PHASE:
+ DWC_DEBUGPL(DBG_PCD, "%s ep0: EP0_IN_DATA_PHASE\n",
+ __func__);
+ break;
+ case EP0_OUT_DATA_PHASE:
+ DWC_DEBUGPL(DBG_PCD, "%s ep0: EP0_OUT_DATA_PHASE\n",
+ __func__);
+ if (pcd->request_config) {
+ /* Complete STATUS PHASE */
+ ep->dwc_ep.is_in = 1;
+ pcd->ep0state = EP0_STATUS;
+ }
+ break;
+ default:
+ DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
+ pcd->ep0state);
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return -EL2HLT;
+ }
+ ep->dwc_ep.dma_addr = _req->dma;
+ ep->dwc_ep.start_xfer_buff = _req->buf;
+ ep->dwc_ep.xfer_buff = _req->buf;
+ ep->dwc_ep.xfer_len = _req->length;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
+ &ep->dwc_ep);
+ } else {
+ /* Setup and start the Transfer */
+ ep->dwc_ep.dma_addr = _req->dma;
+ ep->dwc_ep.start_xfer_buff = _req->buf;
+ ep->dwc_ep.xfer_buff = _req->buf;
+ ep->dwc_ep.xfer_len = _req->length;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
+ dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
+ &ep->dwc_ep);
+ }
+ }
+ if ((req != 0) || prevented) {
+ ++pcd->request_pending;
+ list_add_tail(&req->queue, &ep->queue);
+ if (ep->dwc_ep.is_in && ep->stopped
+ && !(GET_CORE_IF(pcd)->dma_enable)) {
+ /** @todo NGS Create a function for this. */
+ diepmsk_data_t diepmsk = {.d32 = 0};
+ diepmsk.b.intktxfemp = 1;
+ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0,
+ diepmsk.d32);
+ }
+ }
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return 0;
+}
+
+/**
+ * This function cancels an I/O request from an EP.
+ */
+static int dwc_otg_pcd_ep_dequeue(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ dwc_otg_pcd_request_t * req;
+ dwc_otg_pcd_ep_t * ep;
+ dwc_otg_pcd_t * pcd;
+ unsigned long flags;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p,%p)\n", __func__, _ep, _req);
+ ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
+ if (!_ep || !_req || (!ep->desc && ep->dwc_ep.num != 0)) {
+ DWC_WARN("%s, bad argument\n", __func__);
+ return -EINVAL;
+ }
+ pcd = ep->pcd;
+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) {
+ DWC_WARN("%s, bogus device state\n", __func__);
+ return -ESHUTDOWN;
+ }
+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
+ DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, _ep->name,
+ ep->dwc_ep.is_in ? "IN" : "OUT", _req);
+
+ /* make sure it's actually queued on this endpoint */
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req) {
+ break;
+ }
+ }
+ if (&req->req != _req) {
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return -EINVAL;
+ }
+ if (!list_empty(&req->queue)) {
+ request_done(ep, req, -ECONNRESET);
+ } else {
+ req = 0;
+ }
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return req ? 0 : -EOPNOTSUPP;
+}
+
+/**
+ * usb_ep_set_halt stalls an endpoint.
+ *
+ * usb_ep_clear_halt clears an endpoint halt and resets its data
+ * toggle.
+ *
+ * Both of these functions are implemented with the same underlying
+ * function. The behavior depends on the value argument.
+ *
+ * @param[in] _ep the Endpoint to halt or clear halt.
+ * @param[in] _value
+ * - 0 means clear_halt.
+ * - 1 means set_halt,
+ * - 2 means clear stall lock flag.
+ * - 3 means set stall lock flag.
+ */
+static int dwc_otg_pcd_ep_set_halt(struct usb_ep *_ep, int _value)
+{
+ int retval = 0;
+ unsigned long flags;
+ dwc_otg_pcd_ep_t * ep = 0;
+ DWC_DEBUGPL(DBG_PCD, "HALT %s %d\n", _ep->name, _value);
+ ep = container_of(_ep, dwc_otg_pcd_ep_t, ep);
+ if (!_ep || (!ep->desc && ep != &ep->pcd->ep0)
+ || ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ DWC_WARN("%s, bad ep\n", __func__);
+ return -EINVAL;
+ }
+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags);
+ if (ep->dwc_ep.is_in && !list_empty(&ep->queue)) {
+ DWC_WARN("%s() %s XFer In process\n", __func__, _ep->name);
+ retval = -EAGAIN;
+ } else if (_value == 0) {
+ dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if,&ep->dwc_ep);
+ } else if (_value == 1) {
+ if (ep->dwc_ep.num == 0) {
+ ep->pcd->ep0state = EP0_STALL;
+ }
+ ep->stopped = 1;
+ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, &ep->dwc_ep);
+ } else if (_value == 2) {
+ ep->dwc_ep.stall_clear_flag = 0;
+ } else if (_value == 3) {
+ ep->dwc_ep.stall_clear_flag = 1;
+ }
+ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags);
+ return retval;
+}
+
+/* Endpoint operations exported to the gadget layer via each usb_ep.
+ * fifo_status and fifo_flush are intentionally not implemented. */
+static struct usb_ep_ops dwc_otg_pcd_ep_ops =
+{
+ .enable = dwc_otg_pcd_ep_enable,
+ .disable = dwc_otg_pcd_ep_disable,
+ .alloc_request = dwc_otg_pcd_alloc_request,
+ .free_request = dwc_otg_pcd_free_request,
+ .queue = dwc_otg_pcd_ep_queue,
+ .dequeue = dwc_otg_pcd_ep_dequeue,
+ .set_halt = dwc_otg_pcd_ep_set_halt,
+ .fifo_status = 0,
+ .fifo_flush = 0,
+};
+
+/* Gadget Operations */
+/**
+ * The following gadget operations will be implemented in the DWC_otg
+ * PCD. Functions in the API that are not described below are not
+ * implemented.
+ *
+ * The Gadget API provides wrapper functions for each of the function
+ * pointers defined in usb_gadget_ops. The Gadget Driver calls the
+ * wrapper function, which then calls the underlying PCD function. The
+ * following sections are named according to the wrapper functions
+ * (except for ioctl, which doesn't have a wrapper function). Within
+ * each section, the corresponding DWC_otg PCD function name is
+ * specified.
+ *
+ */
+
+/**
+ *Gets the USB Frame number of the last SOF.
+ */
+static int dwc_otg_pcd_get_frame(struct usb_gadget *_gadget)
+{
+ dwc_otg_pcd_t * pcd;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _gadget);
+ if (_gadget == 0) {
+ return -ENODEV;
+ } else {
+ pcd = container_of(_gadget, dwc_otg_pcd_t, gadget);
+ dwc_otg_get_frame_number(GET_CORE_IF(pcd));
+ }
+ return 0;
+}
+/**
+ * Initiates the Session Request Protocol: sets GOTGCTL.SesReq and arms
+ * the 6-second SRP timer. Does nothing (with an error message) if a
+ * session request is already active.
+ *
+ * @param _pcd the pcd structure.
+ */
+void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * _pcd)
+{
+	uint32_t *addr = (uint32_t *)&(GET_CORE_IF(_pcd)->core_global_regs->gotgctl);
+	gotgctl_data_t mem;
+	gotgctl_data_t val;
+
+	val.d32 = dwc_read_reg32(addr);
+	if (val.b.sesreq) {
+		DWC_ERROR("Session Request Already active!\n");
+		return;
+	}
+	/* Log message typo fixed ("Initated" -> "Initiated"). */
+	DWC_NOTICE("Session Request Initiated\n");
+	mem.d32 = dwc_read_reg32(addr);
+	mem.b.sesreq = 1;
+	dwc_write_reg32(addr, mem.d32);
+
+	/* Start the SRP timer so a non-responding host is detected. */
+	dwc_otg_pcd_start_srp_timer(_pcd);
+	return;
+}
+/**
+ * Signals remote wakeup to the host by pulsing DCTL.RmtWkUpSig for
+ * ~1 ms, but only in device mode and only when the host has enabled
+ * remote wakeup for this device.
+ *
+ * @param _pcd the pcd structure.
+ * @param set nonzero requests the wakeup pulse; set==0 is currently a
+ *            no-op (the else branch is intentionally empty).
+ */
+void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * _pcd, int set)
+{
+ dctl_data_t dctl = {.d32 = 0};
+ volatile uint32_t *addr = &(GET_CORE_IF(_pcd)->dev_if->dev_global_regs->dctl);
+ if (dwc_otg_is_device_mode(GET_CORE_IF(_pcd))) {
+ if (_pcd->remote_wakeup_enable) {
+ if (set) {
+ dctl.b.rmtwkupsig = 1;
+ dwc_modify_reg32(addr, 0, dctl.d32);
+ DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
+ /* Hold the signal ~1 ms, then clear it again. */
+ mdelay(1);
+ dwc_modify_reg32(addr, dctl.d32, 0);
+ DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
+ } else {
+ }
+ } else {
+ DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
+ }
+ }
+ return;
+}
+
+
+/**
+ * Initiates Session Request Protocol (SRP) to wakeup the host if no
+ * session is in progress. If a session is already in progress, but
+ * the device is suspended, remote wakeup signaling is started.
+ *
+ */
+static int dwc_otg_pcd_wakeup(struct usb_gadget *_gadget)
+{
+ unsigned long flags;
+ dwc_otg_pcd_t * pcd;
+ dsts_data_t dsts;
+ gotgctl_data_t gotgctl;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _gadget);
+ if (_gadget == 0) {
+ return -ENODEV;
+ } else {
+ pcd = container_of(_gadget, dwc_otg_pcd_t, gadget);
+ }
+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags);
+
+ /*
+ * This function starts the Protocol if no session is in progress. If
+ * a session is already in progress, but the device is suspended,
+ * remote wakeup signaling is started.
+ */
+
+ /* Check if valid session */
+ gotgctl.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
+ if (gotgctl.b.bsesvld) {
+
+ /* Check if suspend state */
+ dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts));
+ if (dsts.b.suspsts) {
+ dwc_otg_pcd_remote_wakeup(pcd, 1);
+ }
+ } else {
+ dwc_otg_pcd_initiate_srp(pcd);
+ }
+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags);
+ return 0;
+}
+
+/* Gadget operations table: only get_frame and wakeup are provided;
+ * all other usb_gadget_ops callbacks are unimplemented. */
+static const struct usb_gadget_ops dwc_otg_pcd_ops =
+{
+ .get_frame = dwc_otg_pcd_get_frame,
+ .wakeup = dwc_otg_pcd_wakeup,
+ // current versions must always be self-powered
+};
+
+/**
+ * This function updates the otg values in the gadget structure.
+ */
+void dwc_otg_pcd_update_otg(dwc_otg_pcd_t * _pcd, const unsigned _reset)
+{
+ if (!_pcd->gadget.is_otg)
+ return;
+ if (_reset) {
+ _pcd->b_hnp_enable = 0;
+ _pcd->a_hnp_support = 0;
+ _pcd->a_alt_hnp_support = 0;
+ }
+ _pcd->gadget.b_hnp_enable = _pcd->b_hnp_enable;
+ _pcd->gadget.a_hnp_support = _pcd->a_hnp_support;
+ _pcd->gadget.a_alt_hnp_support = _pcd->a_alt_hnp_support;
+}
+
+/**
+ * This function is the top level PCD interrupt handler.
+ */
+static irqreturn_t dwc_otg_pcd_irq(int _irq, void *_dev)
+{
+ dwc_otg_pcd_t * pcd = _dev;
+ int32_t retval = IRQ_NONE;
+ retval = dwc_otg_pcd_handle_intr(pcd);
+ return IRQ_RETVAL(retval);
+}
+
+/**
+ * PCD Callback function for initializing the PCD when switching to
+ * device mode.
+ *
+ * @param _p void pointer to the <code>dwc_otg_pcd_t</code>
+ */
+static int32_t dwc_otg_pcd_start_cb(void *_p)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _p;
+
+ /*
+ * Initialized the Core for Device mode.
+ */
+ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) {
+ dwc_otg_core_dev_init(GET_CORE_IF(pcd));
+ }
+ return 1;
+}
+
+/**
+ * PCD Callback function for stopping the PCD when switching to Host
+ * mode.
+ *
+ * @param _p void pointer to the <code>dwc_otg_pcd_t</code>
+ */
+static int32_t dwc_otg_pcd_stop_cb(void *_p)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _p;
+ extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
+ dwc_otg_pcd_stop(pcd);
+ return 1;
+}
+
+/**
+ * PCD Callback function for notifying the PCD when resuming from
+ * suspend.
+ *
+ * @param _p void pointer to the <code>dwc_otg_pcd_t</code>
+ */
+static int32_t dwc_otg_pcd_suspend_cb(void *_p)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _p;
+ if (pcd->driver && pcd->driver->resume) {
+ SPIN_UNLOCK(&pcd->lock);
+ pcd->driver->suspend(&pcd->gadget);
+ SPIN_LOCK(&pcd->lock);
+ }
+ return 1;
+}
+
+
+/**
+ * PCD Callback function for notifying the PCD when resuming from
+ * suspend.
+ *
+ * @param _p void pointer to the <code>dwc_otg_pcd_t</code>
+ */
+static int32_t dwc_otg_pcd_resume_cb(void *_p)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _p;
+ if (pcd->driver && pcd->driver->resume) {
+ SPIN_UNLOCK(&pcd->lock);
+ pcd->driver->resume(&pcd->gadget);
+ SPIN_LOCK(&pcd->lock);
+ }
+
+ /* Stop the SRP timeout timer. */
+ if ((GET_CORE_IF(pcd)->core_params->phy_type !=
+ DWC_PHY_TYPE_PARAM_FS) || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
+ if (GET_CORE_IF(pcd)->srp_timer_started) {
+ GET_CORE_IF(pcd)->srp_timer_started = 0;
+ del_timer(&pcd->srp_timer);
+ }
+ }
+ return 1;
+}
+
+/**
+ * PCD Callback structure for handling mode switching.
+ */
+static dwc_otg_cil_callbacks_t pcd_callbacks =
+{
+ .start = dwc_otg_pcd_start_cb,
+ .stop = dwc_otg_pcd_stop_cb,
+ .suspend = dwc_otg_pcd_suspend_cb,
+ .resume_wakeup = dwc_otg_pcd_resume_cb,
+ .p = 0, /* Set at registration */
+};
+
+/**
+ * This function is called when the SRP timer expires. The SRP should
+ * complete within 6 seconds.
+ */
+static void srp_timeout(unsigned long _ptr)
+{
+ gotgctl_data_t gotgctl;
+ dwc_otg_core_if_t * core_if = (dwc_otg_core_if_t *) _ptr;
+ volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
+ gotgctl.d32 = dwc_read_reg32(addr);
+ core_if->srp_timer_started = 0;
+ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
+ (core_if->core_params->i2c_enable)) {
+ DWC_PRINT("SRP Timeout\n");
+ if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
+ core_if->pcd_cb->resume_wakeup(core_if->
+ pcd_cb->p);
+ }
+
+ /* Clear Session Request */
+ gotgctl.d32 = 0;
+ gotgctl.b.sesreq = 1;
+ dwc_modify_reg32(&core_if->core_global_regs->gotgctl,gotgctl.d32, 0);
+ core_if->srp_success = 0;
+ } else {
+ DWC_ERROR("Device not connected/responding\n");
+ gotgctl.b.sesreq = 0;
+ dwc_write_reg32(addr, gotgctl.d32);
+ }
+ } else if (gotgctl.b.sesreq) {
+ DWC_PRINT("SRP Timeout\n");
+ DWC_ERROR("Device not connected/responding\n");
+ gotgctl.b.sesreq = 0;
+ dwc_write_reg32(addr, gotgctl.d32);
+ } else {
+ DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32);
+ }
+}
+
+/**
+ * Start the SRP timer to detect when the SRP does not complete within
+ * 6 seconds.
+ *
+ * @param _pcd the pcd structure.
+ */
+void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t * _pcd)
+{
+ struct timer_list *srp_timer = &_pcd->srp_timer;
+ GET_CORE_IF(_pcd)->srp_timer_started = 1;
+ init_timer(srp_timer);
+ srp_timer->function = srp_timeout;
+ srp_timer->data = (unsigned long)GET_CORE_IF(_pcd);
+ srp_timer->expires = jiffies + (HZ * 6);
+ add_timer(srp_timer);
+}
+
+/**
+ * Tasklet
+ *
+ */
+extern void start_next_request(dwc_otg_pcd_ep_t * _ep);
+
+static void start_xfer_tasklet_func(unsigned long data)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) data;
+ dwc_otg_core_if_t * core_if = pcd->otg_dev->core_if;
+ int i;
+ depctl_data_t diepctl;
+ DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
+ diepctl.d32 =
+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
+ if (pcd->ep0.queue_sof) {
+ pcd->ep0.queue_sof = 0;
+ start_next_request(&pcd->ep0);
+ // break;
+ }
+ for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
+ depctl_data_t diepctl;
+ diepctl.d32 =
+ dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl);
+ if (pcd->in_ep[i].queue_sof) {
+ pcd->in_ep[i].queue_sof = 0;
+ start_next_request(&pcd->in_ep[i]);
+ // break;
+ }
+ }
+ return;
+}
+
+/* Statically initialized tasklet; .data is pointed at the pcd in
+ * dwc_otg_pcd_init(). */
+static struct tasklet_struct start_xfer_tasklet =
+{
+ .next = NULL,
+ .state = 0,
+ .count = ATOMIC_INIT(0),
+ .func = start_xfer_tasklet_func,
+ .data = 0,
+};
+
+/**
+ * This function initialized the pcd Dp structures to there default
+ * state.
+ *
+ * @param _pcd the pcd structure.
+ */
+void dwc_otg_pcd_reinit(dwc_otg_pcd_t * _pcd)
+{
+ static const char *names[] =
+ {
+ "ep0", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
+ "ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in", "ep12in", "ep13in",
+ "ep14in", "ep15in", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
+ "ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out", "ep12out",
+ "ep13out", "ep14out", "ep15out"
+ };
+
+ int i;
+ int in_ep_cntr, out_ep_cntr;
+ uint32_t hwcfg1;
+ uint32_t num_in_eps = (GET_CORE_IF(_pcd))->dev_if->num_in_eps;
+ uint32_t num_out_eps = (GET_CORE_IF(_pcd))->dev_if->num_out_eps;
+ dwc_otg_pcd_ep_t * ep;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
+ INIT_LIST_HEAD(&_pcd->gadget.ep_list);
+ _pcd->gadget.ep0 = &_pcd->ep0.ep;
+ _pcd->gadget.speed = USB_SPEED_UNKNOWN;
+ INIT_LIST_HEAD(&_pcd->gadget.ep0->ep_list);
+
+ /**
+ * Initialize the EP0 structure.
+ */
+ ep = &_pcd->ep0;
+
+ /* Init EP structure */
+ ep->desc = 0;
+ ep->pcd = _pcd;
+ ep->stopped = 1;
+
+ /* Init DWC ep structure */
+ ep->dwc_ep.num = 0;
+ ep->dwc_ep.active = 0;
+ ep->dwc_ep.tx_fifo_num = 0;
+
+ /* Control until ep is actvated */
+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
+ ep->dwc_ep.dma_addr = 0;
+ ep->dwc_ep.start_xfer_buff = 0;
+ ep->dwc_ep.xfer_buff = 0;
+ ep->dwc_ep.xfer_len = 0;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = 0;
+ ep->queue_sof = 0;
+
+ /* Init the usb_ep structure. */
+ ep->ep.name = names[0];
+ ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+ /**
+ * @todo NGS: What should the max packet size be set to
+ * here? Before EP type is set?
+ */
+ ep->ep.maxpacket = MAX_PACKET_SIZE;
+ list_add_tail(&ep->ep.ep_list, &_pcd->gadget.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+
+ /**
+ * Initialize the EP structures.
+ */
+ in_ep_cntr = 0;
+ hwcfg1 = (GET_CORE_IF(_pcd))->hwcfg1.d32 >> 3;
+ for (i = 1; in_ep_cntr < num_in_eps; i++) {
+ if ((hwcfg1 & 0x1) == 0) {
+ dwc_otg_pcd_ep_t * ep = &_pcd->in_ep[in_ep_cntr];
+ in_ep_cntr++;
+
+ /* Init EP structure */
+ ep->desc = 0;
+ ep->pcd = _pcd;
+ ep->stopped = 1;
+
+ /* Init DWC ep structure */
+ ep->dwc_ep.is_in = 1;
+ ep->dwc_ep.num = i;
+ ep->dwc_ep.active = 0;
+ ep->dwc_ep.tx_fifo_num = 0;
+
+ /* Control until ep is actvated */
+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
+ ep->dwc_ep.dma_addr = 0;
+ ep->dwc_ep.start_xfer_buff = 0;
+ ep->dwc_ep.xfer_buff = 0;
+ ep->dwc_ep.xfer_len = 0;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = 0;
+ ep->queue_sof = 0;
+
+ /* Init the usb_ep structure. */
+ /**
+ * @todo NGS: Add direction to EP, based on contents
+ * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
+ * sprintf( ";r
+ */
+ ep->ep.name = names[i];
+ ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+ /**
+ * @todo NGS: What should the max packet size be set to
+ * here? Before EP type is set?
+ */
+ ep->ep.maxpacket = MAX_PACKET_SIZE;
+ list_add_tail(&ep->ep.ep_list, &_pcd->gadget.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+ }
+ hwcfg1 >>= 2;
+ }
+ out_ep_cntr = 0;
+ hwcfg1 = (GET_CORE_IF(_pcd))->hwcfg1.d32 >> 2;
+ for (i = 1; out_ep_cntr < num_out_eps; i++) {
+ if ((hwcfg1 & 0x1) == 0) {
+ dwc_otg_pcd_ep_t * ep = &_pcd->out_ep[out_ep_cntr];
+ out_ep_cntr++;
+
+ /* Init EP structure */
+ ep->desc = 0;
+ ep->pcd = _pcd;
+ ep->stopped = 1;
+
+ /* Init DWC ep structure */
+ ep->dwc_ep.is_in = 0;
+ ep->dwc_ep.num = i;
+ ep->dwc_ep.active = 0;
+ ep->dwc_ep.tx_fifo_num = 0;
+
+ /* Control until ep is actvated */
+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
+ ep->dwc_ep.dma_addr = 0;
+ ep->dwc_ep.start_xfer_buff = 0;
+ ep->dwc_ep.xfer_buff = 0;
+ ep->dwc_ep.xfer_len = 0;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.sent_zlp = 0;
+ ep->dwc_ep.total_len = 0;
+ ep->queue_sof = 0;
+
+ /* Init the usb_ep structure. */
+ /**
+ * @todo NGS: Add direction to EP, based on contents
+ * of HWCFG1. Need a copy of HWCFG1 in pcd structure?
+ * sprintf( ";r
+ */
+ ep->ep.name = names[15 + i];
+ ep->ep.ops = &dwc_otg_pcd_ep_ops;
+
+ /**
+ * @todo NGS: What should the max packet size be set to
+ * here? Before EP type is set?
+ */
+ ep->ep.maxpacket = MAX_PACKET_SIZE;
+ list_add_tail(&ep->ep.ep_list, &_pcd->gadget.ep_list);
+ INIT_LIST_HEAD(&ep->queue);
+ }
+ hwcfg1 >>= 2;
+ }
+
+ /* remove ep0 from the list. There is a ep0 pointer. */
+ list_del_init(&_pcd->ep0.ep.ep_list);
+ _pcd->ep0state = EP0_DISCONNECT;
+ _pcd->ep0.ep.maxpacket = MAX_EP0_SIZE;
+ _pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
+ _pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
+}
+
+/**
+ * This function releases the Gadget device.
+ * required by device_unregister().
+ *
+ * @todo Should this do something? Should it free the PCD?
+ */
+static void dwc_otg_pcd_gadget_release(struct device *_dev)
+{
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _dev);
+}
+
+/**
+ * This function initialized the PCD portion of the driver.
+ *
+ */
+int __init dwc_otg_pcd_init(struct device *_dev)
+{
+ static char pcd_name[] = "dwc_otg_pcd";
+ dwc_otg_pcd_t * pcd;
+ dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev);
+ int retval = 0;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _dev);
+
+ /*
+ * Allocate PCD structure
+ */
+ pcd = kmalloc(sizeof(dwc_otg_pcd_t), GFP_KERNEL);
+ if (pcd == 0) {
+ return -ENOMEM;
+ }
+ memset(pcd, 0, sizeof(dwc_otg_pcd_t));
+ spin_lock_init(&pcd->lock);
+ otg_dev->pcd = pcd;
+ s_pcd = pcd;
+ pcd->gadget.name = pcd_name;
+ dev_set_name(&pcd->gadget.dev, "gadget");
+ pcd->otg_dev = dev_get_drvdata(_dev);
+ pcd->gadget.dev.parent = _dev;
+ pcd->gadget.dev.release = dwc_otg_pcd_gadget_release;
+ pcd->gadget.ops = &dwc_otg_pcd_ops;
+ if (GET_CORE_IF(pcd)->hwcfg4.b.ded_fifo_en) {
+ DWC_PRINT("Dedicated Tx FIFOs mode\n");
+ } else {
+ DWC_PRINT("Shared Tx FIFO mode\n");
+ }
+
+ /* If the module is set to FS or if the PHY_TYPE is FS then the gadget
+ * should not report as dual-speed capable. replace the following line
+ * with the block of code below it once the software is debugged for
+ * this. If is_dualspeed = 0 then the gadget driver should not report
+ * a device qualifier descriptor when queried. */
+ if ((GET_CORE_IF(pcd)->core_params->speed == DWC_SPEED_PARAM_FULL)
+ || ((GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == 2)
+ && (GET_CORE_IF(pcd)->hwcfg2.b.fs_phy_type == 1)
+ && (GET_CORE_IF(pcd)->core_params->ulpi_fs_ls))) {
+ pcd->gadget.is_dualspeed = 0;
+ } else {
+ pcd->gadget.is_dualspeed = 1;
+ }
+ if ((otg_dev->core_if->hwcfg2.b.op_mode ==
+ DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE)
+ || (otg_dev->core_if->hwcfg2.b.op_mode ==
+ DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST)
+ || (otg_dev->core_if->hwcfg2.b.op_mode ==
+ DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE)
+ || (otg_dev->core_if->hwcfg2.b.op_mode ==
+ DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) {
+ pcd->gadget.is_otg = 0;
+ } else {
+ pcd->gadget.is_otg = 1;
+ }
+ pcd->driver = 0;
+
+ /* Register the gadget device */
+ retval = device_register(&pcd->gadget.dev);
+
+ /*
+ * Initialized the Core for Device mode.
+ */
+ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) {
+ dwc_otg_core_dev_init(GET_CORE_IF(pcd));
+ }
+
+ /*
+ * Initialize EP structures
+ */
+ dwc_otg_pcd_reinit(pcd);
+
+ /*
+ * Register the PCD Callbacks.
+ */
+ dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if, &pcd_callbacks,pcd);
+
+ /*
+ * Setup interupt handler
+ */
+ DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n",otg_dev->irq);
+ retval = request_irq(otg_dev->irq, dwc_otg_pcd_irq, IRQF_SHARED,
+ pcd->gadget.name, pcd);
+ if (retval != 0) {
+ DWC_ERROR("request of irq%d failed\n", otg_dev->irq);
+ kfree(pcd);
+ return -EBUSY;
+ }
+
+ /*
+ * Initialize the DMA buffer for SETUP packets
+ */
+ if (GET_CORE_IF(pcd)->dma_enable) {
+ pcd->setup_pkt = dma_alloc_coherent(_dev, sizeof(*pcd->setup_pkt) * 5,
+ &pcd->setup_pkt_dma_handle, 0);
+ pcd->status_buf = dma_alloc_coherent(_dev, sizeof(uint16_t),
+ &pcd->status_buf_dma_handle, 0);
+ } else {
+ pcd->setup_pkt = kmalloc(sizeof(*pcd->setup_pkt) * 5, GFP_KERNEL);
+ pcd->status_buf = kmalloc(sizeof(uint16_t), GFP_KERNEL);
+ }
+ if (pcd->setup_pkt == 0) {
+ kfree(pcd);
+ return -ENOMEM;
+ }
+
+ /* Initialize tasklet */
+ start_xfer_tasklet.data = (unsigned long)pcd;
+ pcd->start_xfer_tasklet = &start_xfer_tasklet;
+ return 0;
+}
+
+/**
+ * Cleanup the PCD.
+ */
+void dwc_otg_pcd_remove(struct device *_dev)
+{
+ dwc_otg_device_t * otg_dev = dev_get_drvdata(_dev);
+ dwc_otg_pcd_t * pcd = otg_dev->pcd;
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _dev);
+
+ /*
+ * Free the IRQ
+ */
+ free_irq(otg_dev->irq, pcd);
+
+ /* start with the driver above us */
+ if (pcd->driver) {
+
+ /* should have been done already by driver model core */
+ DWC_WARN("driver '%s' is still registered\n",pcd->driver->driver.name);
+ usb_gadget_unregister_driver(pcd->driver);
+ }
+ device_unregister(&pcd->gadget.dev);
+ if (GET_CORE_IF(pcd)->dma_enable) {
+ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt) * 5,
+ pcd->setup_pkt, pcd->setup_pkt_dma_handle);
+ dma_free_coherent(NULL, sizeof(uint16_t), pcd->status_buf,
+ pcd->status_buf_dma_handle);
+ } else {
+ kfree(pcd->setup_pkt);
+ kfree(pcd->status_buf);
+ }
+ kfree(pcd);
+ otg_dev->pcd = 0;
+}
+
+
+/**
+ * This function registers a gadget driver with the PCD.
+ *
+ * When a driver is successfully registered, it will receive control
+ * requests including set_configuration(), which enables non-control
+ * requests. then usb traffic follows until a disconnect is reported.
+ * then a host may connect again, or the driver might get unbound.
+ *
+ * @param _driver The driver being registered
+ */
+int usb_gadget_register_driver(struct usb_gadget_driver *_driver)
+{
+ int retval;
+ DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n",
+ _driver->driver.name);
+ if (!_driver || _driver->speed == USB_SPEED_UNKNOWN || !_driver->bind
+ || !_driver->disconnect || !_driver->setup) {
+ DWC_DEBUGPL(DBG_PCDV, "EINVAL\n");
+#if 1
+ printk("_driver=0x%p speed=0x%x bind=0x%p unbind=0x%p disconnect=0x%p setup=0x%p\n", _driver, _driver->speed, _driver->bind, _driver->unbind, _driver->disconnect, _driver->setup);
+#endif
+ return -EINVAL;
+ }
+ if (s_pcd == 0) {
+ DWC_DEBUGPL(DBG_PCDV, "ENODEV\n");
+ return -ENODEV;
+ }
+ if (s_pcd->driver != 0) {
+ DWC_DEBUGPL(DBG_PCDV, "EBUSY (%p)\n", s_pcd->driver);
+ return -EBUSY;
+ }
+
+ /* hook up the driver */
+ s_pcd->driver = _driver;
+ s_pcd->gadget.dev.driver = &_driver->driver;
+ {
+ dwc_otg_core_if_t *_core_if = s_pcd->otg_dev->core_if;
+ if(_core_if) {
+ dwc_otg_disable_global_interrupts(_core_if);
+ dwc_otg_core_init(_core_if);
+ dwc_otg_pcd_reinit(s_pcd);
+ dwc_otg_enable_global_interrupts(_core_if);
+ if (_core_if->pcd_cb)
+ dwc_otg_pcd_start_cb(_core_if->pcd_cb->p);
+ }
+
+ }
+ DWC_DEBUGPL(DBG_PCD, "bind to driver %s\n", _driver->driver.name);
+ retval = _driver->bind(&s_pcd->gadget);
+ if (retval) {
+ DWC_ERROR("bind to driver %s --> error %d\n",
+ _driver->driver.name, retval);
+ s_pcd->driver = 0;
+ s_pcd->gadget.dev.driver = 0;
+ return retval;
+ }
+ DWC_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n",
+ _driver->driver.name);
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_register_driver);
+
+/**
+ * This function unregisters a gadget driver
+ *
+ * @param _driver The driver being unregistered
+ */
+int usb_gadget_unregister_driver(struct usb_gadget_driver *_driver)
+{
+
+ //DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, _driver);
+ if (s_pcd == 0) {
+ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__,-ENODEV);
+ return -ENODEV;
+ }
+ if (_driver == 0 || _driver != s_pcd->driver) {
+ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__,-EINVAL);
+ return -EINVAL;
+ }
+ _driver->unbind(&s_pcd->gadget);
+ s_pcd->driver = 0;
+ DWC_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n",
+ _driver->driver.name);
+ return 0;
+}
+EXPORT_SYMBOL(usb_gadget_unregister_driver);
+
+#endif /* DWC_HOST_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.h b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.h
new file mode 100644
index 00000000000..a252dc01ed1
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd.h
@@ -0,0 +1,209 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_pcd.h $
+ * $Revision: #6 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+#ifndef CONFIG_DWC_HOST_ONLY
+#if !defined(__DWC_PCD_H__)
+#define __DWC_PCD_H__
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+struct lm_device;
+struct dwc_otg_device;
+
+#include "dwc_otg_cil.h"
+
+/**
+ * @file
+ *
+ * This file contains the structures, constants, and interfaces for
+ * the Peripheral Controller Driver (PCD).
+ *
+ * The Peripheral Controller Driver (PCD) for Linux will implement the
+ * Gadget API, so that the existing Gadget drivers can be used. For
+ * the Mass Storage Function driver the File-backed USB Storage Gadget
+ * (FBS) driver will be used. The FBS driver supports the
+ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only
+ * transports.
+ *
+ */
+
+/** Invalid DMA Address */
+#define DMA_ADDR_INVALID (~(dma_addr_t)0)
+/** Maxpacket size for EP0 */
+#define MAX_EP0_SIZE 64
+/** Maxpacket size for any EP */
+#define MAX_PACKET_SIZE 1024
+
+/**
+ * Get the pointer to the core_if from the pcd pointer.
+ */
+#define GET_CORE_IF( _pcd ) (_pcd->otg_dev->core_if)
+
+/**
+ * States of the EP0 control-transfer state machine.
+ */
+typedef enum ep0_state {
+	EP0_DISCONNECT = 0,	/* no host */
+	EP0_IDLE = 1,		/* waiting for the next SETUP packet */
+	EP0_IN_DATA_PHASE = 2,	/* IN data phase of a control transfer */
+	EP0_OUT_DATA_PHASE = 3,	/* OUT data phase of a control transfer */
+	EP0_STATUS = 4,		/* status phase in progress */
+	EP0_STALL = 5,		/* EP0 is stalled */
+} ep0state_e;
+
+/** Forward declaration. */
+struct dwc_otg_pcd;
+
+/** PCD EP structure.
+ * This structure describes an EP, there is an array of EPs in the PCD
+ * structure.
+ */
+typedef struct dwc_otg_pcd_ep {
+	/** USB EP data */
+	struct usb_ep ep;
+	/** USB EP Descriptor */
+	const struct usb_endpoint_descriptor *desc;
+
+	/** queue of dwc_otg_pcd_requests. */
+	struct list_head queue;
+	/** EP is not accepting transfers (halted/disabled). */
+	unsigned stopped : 1;
+	/** EP disable is in progress. */
+	unsigned disabling : 1;
+	/** Requests on this EP use DMA. */
+	unsigned dma : 1;
+	/** Defer queued transfer until the next SOF. */
+	unsigned queue_sof : 1;
+
+	/** DWC_otg ep data. */
+	dwc_ep_t dwc_ep;
+
+	/** Pointer to PCD */
+	struct dwc_otg_pcd *pcd;
+}dwc_otg_pcd_ep_t;
+
+
+
+/** DWC_otg PCD Structure.
+ * This structure encapsulates the data for the dwc_otg PCD.
+ */
+typedef struct dwc_otg_pcd {
+	/** USB gadget */
+	struct usb_gadget gadget;
+	/** USB gadget driver pointer*/
+	struct usb_gadget_driver *driver;
+	/** The DWC otg device pointer. */
+	struct dwc_otg_device *otg_dev;
+
+	/** State of EP0 */
+	ep0state_e ep0state;
+	/** EP0 Request is pending */
+	unsigned ep0_pending : 1;
+	/** Indicates when SET CONFIGURATION Request is in process */
+	unsigned request_config : 1;
+	/** The state of the Remote Wakeup Enable. */
+	unsigned remote_wakeup_enable : 1;
+	/** The state of the B-Device HNP Enable. */
+	unsigned b_hnp_enable : 1;
+	/** The state of A-Device HNP Support. */
+	unsigned a_hnp_support : 1;
+	/** The state of the A-Device Alt HNP support. */
+	unsigned a_alt_hnp_support : 1;
+	/** Count of pending Requests */
+	unsigned request_pending;
+
+	/** SETUP packet for EP0
+	 * This structure is allocated as a DMA buffer on PCD initialization
+	 * with enough space for up to 3 setup packets.
+	 */
+	union {
+		struct usb_ctrlrequest req;
+		uint32_t d32[2];
+	} *setup_pkt;
+
+	/** DMA address of the setup_pkt buffer. */
+	dma_addr_t setup_pkt_dma_handle;
+
+	/** 2-byte dma buffer used to return status from GET_STATUS */
+	uint16_t *status_buf;
+	/** DMA address of the status_buf buffer. */
+	dma_addr_t status_buf_dma_handle;
+
+	/** Array of EPs. */
+	dwc_otg_pcd_ep_t ep0;
+	/** Array of IN EPs. */
+	dwc_otg_pcd_ep_t in_ep[ MAX_EPS_CHANNELS - 1];
+	/** Array of OUT EPs. */
+	dwc_otg_pcd_ep_t out_ep[ MAX_EPS_CHANNELS - 1];
+	/** number of valid EPs in the above array. */
+//	unsigned num_eps : 4;
+	/** Lock serializing access to PCD state (see SPIN_LOCK/SPIN_UNLOCK
+	 * usage around gadget driver callbacks). */
+	spinlock_t lock;
+	/** Timer for SRP. If it expires before SRP is successful
+	 * clear the SRP. */
+	struct timer_list srp_timer;
+
+	/** Tasklet to defer starting of TEST mode transmissions until
+	 * Status Phase has been completed.
+	 */
+	struct tasklet_struct test_mode_tasklet;
+
+	/** Tasklet to delay starting of xfer in DMA mode */
+	struct tasklet_struct *start_xfer_tasklet;
+
+	/** The test mode to enter when the tasklet is executed. */
+	unsigned test_mode;
+
+} dwc_otg_pcd_t;
+
+
+/** DWC_otg request structure.
+ * This structure is a list of requests.
+ */
+typedef struct dwc_otg_pcd_request {
+	struct usb_request req; /**< USB Request. */
+	struct list_head queue; /**< queue of these requests. */
+	/* presumably set when the PCD DMA-mapped req.buf itself and must
+	 * unmap on completion — TODO confirm against dwc_otg_pcd.c */
+	unsigned mapped:1;
+} dwc_otg_pcd_request_t;
+
+
+extern int __init dwc_otg_pcd_init(struct device *_dev);
+
+extern void dwc_otg_pcd_remove( struct device *_dev );
+extern int32_t dwc_otg_pcd_handle_intr( dwc_otg_pcd_t *_pcd );
+extern void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *_pcd );
+
+extern void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *_pcd);
+extern void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *_pcd, int set);
+
+#endif
+#endif /* DWC_HOST_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_pcd_intr.c b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd_intr.c
new file mode 100644
index 00000000000..5d7b2b39239
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_pcd_intr.c
@@ -0,0 +1,2519 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_pcd_intr.c $
+ * $Revision: #18 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef CONFIG_DWC_HOST_ONLY
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+
+#include "dwc_otg_driver.h"
+#include "dwc_otg_pcd.h"
+
+//#define VERBOSE
+#define DEBUG_EP0
+
+/* request functions defined in "dwc_otg_pcd.c" */
+extern void request_done(dwc_otg_pcd_ep_t * _ep, dwc_otg_pcd_request_t * _req,
+ int _status);
+extern void request_nuke(dwc_otg_pcd_ep_t * _ep);
+extern void dwc_otg_pcd_update_otg(dwc_otg_pcd_t * _pcd,
+ const unsigned _reset);
+
+/** @file
+ * This file contains the implementation of the PCD Interrupt handlers.
+ *
+ * The PCD handles the device interrupts. Many conditions can cause a
+ * device interrupt. When an interrupt occurs, the device interrupt
+ * service routine determines the cause of the interrupt and
+ * dispatches handling to the appropriate function. These interrupt
+ * handling functions are described below.
+ * All interrupt registers are processed from LSB to MSB.
+ */
+
+/**
+ * Print the symbolic name of the current EP0 state (debug builds only).
+ */
+static inline void print_ep0_state(dwc_otg_pcd_t * _pcd)
+{
+#ifdef CONFIG_DWC_DEBUG
+	const char *name;
+
+	switch (_pcd->ep0state) {
+	case EP0_DISCONNECT:
+		name = "EP0_DISCONNECT";
+		break;
+	case EP0_IDLE:
+		name = "EP0_IDLE";
+		break;
+	case EP0_IN_DATA_PHASE:
+		name = "EP0_IN_DATA_PHASE";
+		break;
+	case EP0_OUT_DATA_PHASE:
+		name = "EP0_OUT_DATA_PHASE";
+		break;
+	case EP0_STATUS:
+		name = "EP0_STATUS";
+		break;
+	case EP0_STALL:
+		name = "EP0_STALL";
+		break;
+	default:
+		name = "EP0_INVALID";
+		break;
+	}
+	DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", name, _pcd->ep0state);
+#endif /* */
+}
+
+/**
+ * Look up the IN endpoint carrying endpoint number ep_num.
+ * ep_num 0 always maps to EP0; returns 0 when no IN EP matches.
+ */
+static inline dwc_otg_pcd_ep_t *get_in_ep(dwc_otg_pcd_t * _pcd,
+					   uint32_t ep_num)
+{
+	int idx;
+	const int num_in_eps = GET_CORE_IF(_pcd)->dev_if->num_in_eps;
+
+	if (ep_num == 0)
+		return &_pcd->ep0;
+	for (idx = 0; idx < num_in_eps; ++idx) {
+		if (_pcd->in_ep[idx].dwc_ep.num == ep_num)
+			return &_pcd->in_ep[idx];
+	}
+	return 0;
+}
+
+/**
+ * Look up the OUT endpoint carrying endpoint number ep_num.
+ * ep_num 0 always maps to EP0; returns 0 when no OUT EP matches.
+ */
+static inline dwc_otg_pcd_ep_t *get_out_ep(dwc_otg_pcd_t * _pcd,
+					    uint32_t ep_num)
+{
+	int idx;
+	const int num_out_eps = GET_CORE_IF(_pcd)->dev_if->num_out_eps;
+
+	if (ep_num == 0)
+		return &_pcd->ep0;
+	for (idx = 0; idx < num_out_eps; ++idx) {
+		if (_pcd->out_ep[idx].dwc_ep.num == ep_num)
+			return &_pcd->out_ep[idx];
+	}
+	return 0;
+}
+
+/**
+ * This functions gets a pointer to an EP from the wIndex address
+ * value of the control request.
+ */
+static dwc_otg_pcd_ep_t *get_ep_by_addr(dwc_otg_pcd_t * _pcd, u16 _wIndex)
+{
+	dwc_otg_pcd_ep_t * ep;
+	/* Endpoint number 0 always resolves to EP0 regardless of the
+	 * direction bit. */
+	if ((_wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
+		return &_pcd->ep0;
+	list_for_each_entry(ep, &_pcd->gadget.ep_list, ep.ep_list) {
+		u8 bEndpointAddress;
+		/* Skip endpoints that have not been configured yet. */
+		if (!ep->desc)
+			continue;
+		bEndpointAddress = ep->desc->bEndpointAddress;
+		/* Direction bit must match ... */
+		if ((_wIndex ^ bEndpointAddress) & USB_DIR_IN)
+			continue;
+		/* ... and the low 4 bits (endpoint number) must match. */
+		if ((_wIndex & 0x0f) == (bEndpointAddress & 0x0f))
+			return ep;
+	}
+	return NULL;
+}
+
+
+/**
+ * This function checks the EP request queue, if the queue is not
+ * empty the next request is started.
+ */
+void start_next_request(dwc_otg_pcd_ep_t * _ep)
+{
+	dwc_otg_pcd_request_t * req = 0;
+	if (!list_empty(&_ep->queue)) {
+		req = list_entry(_ep->queue.next, dwc_otg_pcd_request_t, queue);
+
+		/* Setup and start the Transfer: seed the hardware EP state
+		 * from the head request before kicking off the transfer. */
+		_ep->dwc_ep.start_xfer_buff = req->req.buf;
+		_ep->dwc_ep.xfer_buff = req->req.buf;
+		_ep->dwc_ep.xfer_len = req->req.length;
+		_ep->dwc_ep.xfer_count = 0;
+		_ep->dwc_ep.dma_addr = req->req.dma;
+		_ep->dwc_ep.sent_zlp = 0;
+		_ep->dwc_ep.total_len = _ep->dwc_ep.xfer_len;
+
+		//DWC_ERROR(" -> starting transfer (start_next_req) %s %s\n",
+		//_ep->ep.name, _ep->dwc_ep.is_in?"IN":"OUT");
+#ifdef CONFIG_405EZ
+		/*
+		 * Added-sr: 2007-07-26
+		 *
+		 * When a new transfer will be started, mark this
+		 * endpoint as active. This way it will be blocked
+		 * for further transfers, until the current transfer
+		 * is finished.
+		 */
+		_ep->dwc_ep.active = 1;
+#endif
+		dwc_otg_ep_start_transfer(GET_CORE_IF(_ep->pcd), &_ep->dwc_ep);
+	}
+}
+
+/**
+ * This function handles the SOF Interrupts. At this time the SOF
+ * Interrupt is disabled; the handler only acknowledges the interrupt.
+ */
+int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t * _pcd)
+{
+	gintsts_data_t ack = {.d32 = 0};
+
+	//DWC_DEBUGPL(DBG_PCD, "SOF\n");
+
+	/* Clear interrupt */
+	ack.b.sofintr = 1;
+	dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+			 ack.d32);
+	return 1;
+}
+
+/**
+ * This function handles the Rx Status Queue Level Interrupt, which
+ * indicates that there is at least one packet in the Rx FIFO.  The
+ * packets are moved from the FIFO to memory, where they will be
+ * processed when the Endpoint Interrupt Register indicates Transfer
+ * Complete or SETUP Phase Done.
+ *
+ * Repeat the following until the Rx Status Queue is empty:
+ *	 -# Read the Receive Status Pop Register (GRXSTSP) to get Packet
+ *		info
+ *	 -# If Receive FIFO is empty then skip to step Clear the interrupt
+ *		and exit
+ *	 -# If SETUP Packet call dwc_otg_read_setup_packet to copy the
+ *		SETUP data to the buffer
+ *	 -# If OUT Data Packet call dwc_otg_read_packet to copy the data
+ *		to the destination buffer
+ */
+int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t * _pcd)
+{
+	dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+	dwc_otg_core_global_regs_t * global_regs = core_if->core_global_regs;
+	gintmsk_data_t gintmask = {.d32 = 0};
+	device_grxsts_data_t status;
+	dwc_otg_pcd_ep_t * ep;
+#ifndef CONFIG_OTG_PLB_DMA_TASKLET
+	gintsts_data_t gintsts;
+#endif
+
+#ifdef CONFIG_DWC_DEBUG
+	static char *dpid_str[] = { "D0", "D2", "D1", "MDATA" };
+
+#endif /* */
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
+
+	/* Disable the Rx Status Queue Level interrupt */
+	gintmask.b.rxstsqlvl = 1;
+	dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0);
+
+	/* Get the Status from the top of the FIFO */
+	status.d32 = dwc_read_reg32(&global_regs->grxstsp);
+	DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s "
+		    "pktsts:%x Frame:%d(0x%0x)\n", status.b.epnum,
+		    status.b.bcnt, dpid_str[status.b.dpid], status.b.pktsts,
+		    status.b.fn, status.b.fn);
+
+	/* Get pointer to EP structure */
+	ep = get_out_ep(_pcd, status.b.epnum);
+
+//	ep = &_pcd->out_ep[ status.b.epnum - 1];
+	/* Dispatch on the packet-status field popped from GRXSTSP. */
+	switch (status.b.pktsts) {
+	case DWC_DSTS_GOUT_NAK:
+		DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n");
+		break;
+	case DWC_STS_DATA_UPDT:
+		DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n");
+		if (status.b.bcnt && ep->dwc_ep.xfer_buff) {
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+			/* Enable the Rx Status Queue Level interrupt */
+			dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32);
+#endif
+			/** @todo NGS Check for buffer overflow? */
+			dwc_otg_read_packet(core_if, ep->dwc_ep.xfer_buff,
+					    status.b.bcnt);
+			ep->dwc_ep.xfer_count += status.b.bcnt;
+			ep->dwc_ep.xfer_buff += status.b.bcnt;
+		}
+		break;
+	case DWC_STS_XFER_COMP:
+		DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n");
+		break;
+	case DWC_DSTS_SETUP_COMP:
+#ifdef DEBUG_EP0
+		DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n");
+#endif /* */
+		break;
+	case DWC_DSTS_SETUP_UPDT:
+		dwc_otg_read_setup_packet(core_if, _pcd->setup_pkt->d32);
+#ifdef DEBUG_EP0
+		DWC_DEBUGPL(DBG_PCD,
+			    "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n",
+			    _pcd->setup_pkt->req.bRequestType,
+			    _pcd->setup_pkt->req.bRequest,
+			    __le16_to_cpu(_pcd->setup_pkt->req.wValue),
+			    __le16_to_cpu(_pcd->setup_pkt->req.wIndex),
+			    __le16_to_cpu(_pcd->setup_pkt->req.wLength));
+
+#endif /* */
+
+		ep->dwc_ep.xfer_count += status.b.bcnt;
+		break;
+	default:
+		DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n",
+			    status.b.pktsts);
+		break;
+	}
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET	/* Can't access registers */
+	if (!atomic_read(&release_later))
+#endif
+	/* Enable the Rx Status Queue Level interrupt */
+	dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32);
+#ifndef CONFIG_OTG_PLB_DMA_TASKLET	/* why do this? */
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.rxstsqlvl = 1;
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+#endif
+	//DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__);
+	return 1;
+}
+
+/**
+ * This function examines the Device IN Token Learning Queue to
+ * determine the EP number of the last IN token received. This
+ * implementation is for the Mass Storage device where there are only
+ * 2 IN EPs (Control-IN and BULK-IN).
+ *
+ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there
+ * are 8 EP Numbers in each of the other possible DTKNQ Registers.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ *
+ */
+static inline int get_ep_of_last_in_token(dwc_otg_core_if_t * _core_if)
+{
+	dwc_otg_device_global_regs_t * dev_global_regs =
+	    _core_if->dev_if->dev_global_regs;
+	const uint32_t TOKEN_Q_DEPTH = _core_if->hwcfg2.b.dev_token_q_depth;
+
+	/* Number of Token Queue Registers */
+	const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8;
+	dtknq1_data_t dtknqr1;
+	uint32_t in_tkn_epnums[4];
+	int ndx = 0;
+	int i = 0;
+	volatile uint32_t *addr = &dev_global_regs->dtknqr1;
+	int epnum = 0;
+
+	//DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH);
+
+	/* Read the DTKNQ Registers */
+	/* NOTE(review): this loop runs DTKNQ_REG_CNT+1 times; if the
+	 * hardware reports TOKEN_Q_DEPTH > 24 it would write past the
+	 * 4-element in_tkn_epnums[] array — confirm the supported queue
+	 * depth cannot exceed that. */
+	for (i = 0; i <= DTKNQ_REG_CNT; i++) {
+		in_tkn_epnums[i] = dwc_read_reg32(addr);
+		DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i + 1,
+			    in_tkn_epnums[i]);
+		/* DTKNQR2 is followed by non-queue registers; skip over
+		 * them to reach DTKNQR3. */
+		if (addr == &dev_global_regs->dvbusdis) {
+			addr = &dev_global_regs->dtknqr3_dthrctl;
+		} else {
+			++addr;
+		}
+	}
+
+	/* Copy the DTKNQR1 data to the bit field. */
+	dtknqr1.d32 = in_tkn_epnums[0];
+
+	/* Get the EP numbers */
+	in_tkn_epnums[0] = dtknqr1.b.epnums0_5;
+	ndx = dtknqr1.b.intknwptr - 1;
+
+	//DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx);
+	/* Write pointer wrapped: the last token is at the end of the
+	 * queue rather than at position intknwptr-1. */
+	if (ndx == -1) {
+		/** @todo Find a simpler way to calculate the max
+		 * queue position.*/
+		int cnt = TOKEN_Q_DEPTH;
+		if (TOKEN_Q_DEPTH <= 6) {
+			cnt = TOKEN_Q_DEPTH - 1;
+		} else if (TOKEN_Q_DEPTH <= 14) {
+			cnt = TOKEN_Q_DEPTH - 7;
+		} else if (TOKEN_Q_DEPTH <= 22) {
+			cnt = TOKEN_Q_DEPTH - 15;
+		} else {
+			cnt = TOKEN_Q_DEPTH - 23;
+		}
+		epnum = (in_tkn_epnums[DTKNQ_REG_CNT - 1] >> (cnt * 4)) & 0xF;
+	} else {
+		/* Each register holds 4-bit EP numbers: 6 in DTKNQR1,
+		 * 8 in each subsequent register. */
+		if (ndx <= 5) {
+			epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF;
+		} else if (ndx <= 13) {
+			ndx -= 6;
+			epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF;
+		} else if (ndx <= 21) {
+			ndx -= 14;
+			epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF;
+		} else if (ndx <= 29) {
+			ndx -= 22;
+			epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF;
+		}
+	}
+	//DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum);
+	return epnum;
+}
+
+/**
+ * This interrupt occurs when the non-periodic Tx FIFO is half-empty.
+ * The active request is checked for the next packet to be loaded into
+ * the non-periodic Tx FIFO.
+ */
+int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t * _pcd)
+{
+	dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+	dwc_otg_core_global_regs_t * global_regs = core_if->core_global_regs;
+	dwc_otg_dev_in_ep_regs_t * ep_regs;
+	volatile gnptxsts_data_t txstatus = {.d32 = 0 };
+#ifndef CONFIG_OTG_PLB_DMA
+	gintsts_data_t gintsts;
+#endif
+	int epnum = 0;
+	dwc_otg_pcd_ep_t * ep = 0;
+	uint32_t len = 0;
+	int dwords;
+
+	/* Get the epnum from the IN Token Learning Queue. */
+	epnum = get_ep_of_last_in_token(core_if);
+	ep = get_in_ep(_pcd, epnum);
+
+/*
+	if(epnum != 0)
+		ep = &_pcd->in_ep[epnum-1];
+	else
+		ep = &_pcd->ep0;
+*/
+	DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name,epnum);
+	ep_regs = core_if->dev_if->in_ep_regs[epnum];
+	/* Size of the next packet: at most one max-packet worth. */
+	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
+	if (len > ep->dwc_ep.maxpacket) {
+		len = ep->dwc_ep.maxpacket;
+	}
+	dwords = (len + 3) / 4;
+
+	/* While there is space in the queue and space in the FIFO and
+	 * More data to transfer, Write packets to the Tx FIFO
+	 */
+	txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+	DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n", txstatus.d32);
+	while (txstatus.b.nptxqspcavail > 0
+	       && txstatus.b.nptxfspcavail > dwords
+	       && ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) {
+
+		/* Write the FIFO */
+#ifdef CONFIG_405EZ
+		/*
+		 * Added-sr: 2007-07-26
+		 *
+		 * When a new transfer will be started, mark this
+		 * endpoint as active. This way it will be blocked
+		 * for further transfers, until the current transfer
+		 * is finished.
+		 */
+		ep->dwc_ep.active = 1;
+#endif
+		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
+		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
+		if (len > ep->dwc_ep.maxpacket) {
+			len = ep->dwc_ep.maxpacket;
+		}
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+		/* The PLB-DMA tasklet owns the FIFO now; stop filling. */
+		if (atomic_read(&release_later)) {
+			break;
+		}
+#endif
+		dwords = (len + 3) / 4;
+		txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts);
+		DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", txstatus.d32);
+	}
+	DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n",
+		    dwc_read_reg32(&global_regs->gnptxsts));
+#ifndef CONFIG_OTG_PLB_DMA	/* why do this? */
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.nptxfempty = 1;
+	dwc_write_reg32(&global_regs->gintsts, gintsts.d32);
+
+	/*
+	 * Re-enable tx-fifo empty interrupt, if packets are still
+	 * pending
+	 */
+	if (len)
+		dwc_modify_reg32(&global_regs->gintmsk, 0, gintsts.d32);
+#endif
+
+	return 1;
+}
+
+/**
+ * This function is called when dedicated Tx FIFO Empty interrupt occurs.
+ * The active request is checked for the next packet to be loaded into
+ * the appropriate Tx FIFO.
+ */
+static int32_t write_empty_tx_fifo(dwc_otg_pcd_t * _pcd, uint32_t epnum)
+{
+	dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+	dwc_otg_dev_if_t * dev_if = core_if->dev_if;
+	dwc_otg_dev_in_ep_regs_t * ep_regs;
+	dtxfsts_data_t txstatus = {.d32 = 0};
+	dwc_otg_pcd_ep_t * ep = 0;
+	uint32_t len = 0;
+	int dwords;
+	ep = get_in_ep(_pcd, epnum);
+
+/*
+	if(epnum != 0)
+		ep = &_pcd->in_ep[epnum-1];
+	else
+		ep = &_pcd->ep0;
+*/
+	DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n",ep->ep.name, epnum);
+	ep_regs = core_if->dev_if->in_ep_regs[epnum];
+	/* Size of the next packet: at most one max-packet worth. */
+	len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
+	if (len > ep->dwc_ep.maxpacket) {
+		len = ep->dwc_ep.maxpacket;
+	}
+	dwords = (len + 3) / 4;
+
+	/* While there is space in the queue and space in the FIFO and
+	 * More data to transfer, Write packets to the Tx FIFO */
+	txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
+	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32);
+	while (txstatus.b.txfspcavail > dwords
+	       && ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len
+	       && ep->dwc_ep.xfer_len != 0) {
+
+		/* Write the FIFO */
+		dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0);
+		len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count;
+		if (len > ep->dwc_ep.maxpacket) {
+			len = ep->dwc_ep.maxpacket;
+		}
+		dwords = (len + 3) / 4;
+		txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts);
+		DWC_DEBUGPL(DBG_PCDV, "dtxfsts[%d]=0x%08x\n", epnum,
+			    txstatus.d32);
+	}
+	DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n", epnum,
+		    dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts));
+	return 1;
+}
+
+/**
+ * This function is called when the Device is disconnected. It stops
+ * any active requests and informs the Gadget driver of the
+ * disconnect.
+ */
+void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd)
+{
+	int i, num_in_eps, num_out_eps;
+	dwc_otg_pcd_ep_t * ep;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	num_in_eps = GET_CORE_IF(_pcd)->dev_if->num_in_eps;
+	num_out_eps = GET_CORE_IF(_pcd)->dev_if->num_out_eps;
+	DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__);
+
+	/* don't disconnect drivers more than once */
+	if (_pcd->ep0state == EP0_DISCONNECT) {
+		DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__);
+		return;
+	}
+	_pcd->ep0state = EP0_DISCONNECT;
+
+	/* Reset the OTG state. */
+	dwc_otg_pcd_update_otg(_pcd, 1);
+
+	/* Disable the NP Tx Fifo Empty Interrupt. */
+	intr_mask.b.nptxfempty = 1;
+	dwc_modify_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Flush the FIFOs */
+	/**@todo NGS Flush Periodic FIFOs */
+	dwc_otg_flush_tx_fifo(GET_CORE_IF(_pcd), 0);
+	dwc_otg_flush_rx_fifo(GET_CORE_IF(_pcd));
+
+	/* prevent new request submissions, kill any outstanding requests */
+	ep = &_pcd->ep0;
+	request_nuke(ep);
+
+	/* prevent new request submissions, kill any outstanding requests */
+	for (i = 0; i < num_in_eps; i++) {
+		dwc_otg_pcd_ep_t * ep = &_pcd->in_ep[i];
+		request_nuke(ep);
+	}
+
+	/* prevent new request submissions, kill any outstanding requests */
+	for (i = 0; i < num_out_eps; i++) {
+		dwc_otg_pcd_ep_t * ep = &_pcd->out_ep[i];
+		request_nuke(ep);
+	}
+
+	/* report disconnect; the driver is already quiesced */
+	if (_pcd->driver && _pcd->driver->disconnect) {
+		/* Drop the lock across the callback: the gadget driver's
+		 * disconnect() may re-enter the PCD. */
+		SPIN_UNLOCK(&_pcd->lock);
+		_pcd->driver->disconnect(&_pcd->gadget);
+		SPIN_LOCK(&_pcd->lock);
+	}
+}
+
+/**
+ * This interrupt indicates an I2C interrupt occurred.  The handler is
+ * not implemented: it masks the interrupt source and acknowledges it.
+ */
+int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t * _pcd)
+{
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	gintsts_data_t gintsts;
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr");
+	/* Mask the interrupt so it does not fire again. */
+	intr_mask.b.i2cintr = 1;
+	dwc_modify_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.i2cintr = 1;
+	dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * This interrupt indicates Early Suspend was detected; the handler
+ * only acknowledges the interrupt.
+ */
+int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t * _pcd)
+{
+	gintsts_data_t ack = {.d32 = 0};
+
+#if defined(VERBOSE)
+	DWC_PRINT("Early Suspend Detected\n");
+#endif /* */
+	/* Clear interrupt */
+	ack.b.erlysuspend = 1;
+	dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+			 ack.d32);
+	return 1;
+}
+
+/**
+ * This function configures EPO to receive SETUP packets.
+ *
+ * @todo NGS: Update the comments from the HW FS.
+ *
+ * -# Program the following fields in the endpoint specific registers
+ * for Control OUT EP 0, in order to receive a setup packet
+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
+ * setup packets)
+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
+ * to back setup packets)
+ * - In DMA mode, DOEPDMA0 Register with a memory address to
+ * store any setup packets received
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @param _pcd	  Programming view of the PCD.
+ */
+static inline void ep0_out_start(dwc_otg_core_if_t * _core_if,
+				 dwc_otg_pcd_t * _pcd)
+{
+	dwc_otg_dev_if_t * dev_if = _core_if->dev_if;
+	deptsiz0_data_t doeptsize0 = {.d32 = 0};
+
+#ifdef VERBOSE
+	DWC_DEBUGPL(DBG_PCDV, "%s() doepctl0=%0x\n", __func__,
+		    dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
+
+#endif /* */
+	/* Allow up to 3 back-to-back SETUP packets (8 bytes each). */
+	doeptsize0.b.supcnt = 3;
+	doeptsize0.b.pktcnt = 1;
+	doeptsize0.b.xfersize = 8 * 3;
+	dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, doeptsize0.d32);
+	if (_core_if->dma_enable) {
+		depctl_data_t doepctl = {.d32 = 0};
+
+		/** @todo dma needs to handle multiple setup packets (up to 3) */
+		dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma,
+				_pcd->setup_pkt_dma_handle);
+
+		// EP enable
+		doepctl.d32 =
+		    dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
+		doepctl.b.epena = 1;
+		/* NOTE(review): the read-modify value above is discarded and
+		 * replaced by a hard-coded constant — confirm this is
+		 * intentional and not a leftover debug hack. */
+		doepctl.d32 = 0x80008000;
+		dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl,
+				doepctl.d32);
+	}
+
+#ifdef VERBOSE
+	DWC_DEBUGPL(DBG_PCDV, "doepctl0=%0x\n",
+		    dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
+	DWC_DEBUGPL(DBG_PCDV, "diepctl0=%0x\n",
+		    dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
+#endif /* */
+}
+
+/**
+ * This interrupt occurs when a USB Reset is detected. When the USB
+ * Reset Interrupt occurs the device state is set to DEFAULT and the
+ * EP0 state is set to IDLE.
+ * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1)
+ * -# Unmask the following interrupt bits
+ * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint)
+ * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint)
+ * - DOEPMSK.SETUP = 1
+ * - DOEPMSK.XferCompl = 1
+ * - DIEPMSK.XferCompl = 1
+ * - DIEPMSK.TimeOut = 1
+ * -# Program the following fields in the endpoint specific registers
+ * for Control OUT EP 0, in order to receive a setup packet
+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back
+ * setup packets)
+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back
+ * to back setup packets)
+ * - In DMA mode, DOEPDMA0 Register with a memory address to
+ * store any setup packets received
+ * At this point, all the required initialization, except for enabling
+ * the control 0 OUT endpoint is done, for receiving SETUP packets.
+ */
+int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * _pcd)
+{
+	dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+	dwc_otg_dev_if_t * dev_if = core_if->dev_if;
+	depctl_data_t doepctl = {.d32 = 0};
+	daint_data_t daintmsk = {.d32 = 0};
+	doepmsk_data_t doepmsk = {.d32 = 0};
+	diepmsk_data_t diepmsk = {.d32 = 0};
+	dcfg_data_t dcfg = {.d32 = 0};
+	grstctl_t resetctl = {.d32 = 0};
+	dctl_data_t dctl = {.d32 = 0};
+	int i = 0;
+	volatile gintsts_data_t gintsts = {.d32 = 0 };
+	DWC_PRINT("USB RESET\n");
+
+	/* reset the HNP settings */
+	dwc_otg_pcd_update_otg(_pcd, 1);
+
+	/* Clear the Remote Wakeup Signalling */
+	dctl.b.rmtwkupsig = 1;
+	dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32,0);
+
+	/* Set NAK for all OUT EPs */
+	doepctl.b.snak = 1;
+	for (i = 0; i <= dev_if->num_out_eps; i++) {
+		dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl,
+				doepctl.d32);
+	}
+
+	/* Flush the NP Tx FIFO */
+	dwc_otg_flush_tx_fifo(core_if, 0);
+
+	/* Flush the Learning Queue */
+	resetctl.b.intknqflsh = 1;
+	dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32);
+	/* Unmask the EP0 IN/OUT endpoint interrupts. */
+	daintmsk.b.inep0 = 1;
+	daintmsk.b.outep0 = 1;
+	dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, daintmsk.d32);
+	/* Unmask the OUT endpoint interrupt causes of interest. */
+	doepmsk.b.setup = 1;
+	doepmsk.b.xfercompl = 1;
+	doepmsk.b.ahberr = 1;
+	doepmsk.b.epdisabled = 1;
+	dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32);
+	/* Unmask the IN endpoint interrupt causes of interest. */
+	diepmsk.b.xfercompl = 1;
+	diepmsk.b.timeout = 1;
+	diepmsk.b.epdisabled = 1;
+	diepmsk.b.ahberr = 1;
+	diepmsk.b.intknepmis = 1;
+	dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32);
+
+	/* Reset Device Address */
+	dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg);
+	dcfg.b.devaddr = 0;
+	dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
+
+	/* setup EP0 to receive SETUP packets */
+	ep0_out_start(core_if, _pcd);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.usbreset = 1;
+	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);
+	return 1;
+}
+
+/**
+ * Get the device speed from the device status register and convert it
+ * to USB speed constant.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @return USB_SPEED_HIGH/FULL/LOW, or USB_SPEED_UNKNOWN for an
+ *         unrecognized enumeration speed.
+ */
+static int get_device_speed(dwc_otg_core_if_t * _core_if)
+{
+	dsts_data_t dsts;
+
+	dsts.d32 = dwc_read_reg32(&_core_if->dev_if->dev_global_regs->dsts);
+	switch (dsts.b.enumspd) {
+	case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
+		return USB_SPEED_HIGH;
+	case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
+	case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
+		return USB_SPEED_FULL;
+	case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
+		return USB_SPEED_LOW;
+	default:
+		return USB_SPEED_UNKNOWN;
+	}
+}
+
+/**
+ * Read the device status register and set the device speed in the
+ * data structure.
+ * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate.
+ *
+ * Also programs GUSBCFG.usbtrdtim (USB turnaround time) from the
+ * negotiated speed and the PHY interface type/width read out of
+ * HWCFG2/HWCFG4, then acknowledges the ENUMDONE interrupt.
+ *
+ * @return 1 (interrupt handled).
+ */
+int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t * _pcd)
+{
+ dwc_otg_pcd_ep_t * ep0 = &_pcd->ep0;
+ gintsts_data_t gintsts;
+ gusbcfg_data_t gusbcfg;
+ dwc_otg_core_global_regs_t * global_regs =
+ GET_CORE_IF(_pcd)->core_global_regs;
+ uint32_t gsnpsid = global_regs->gsnpsid;
+ uint8_t utmi16b, utmi8b;
+ DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n");
+ /*
+  * Pick UTMI+ turnaround values by core release: 0x4f54260a looks
+  * like the "OT2.60a" synopsys ID cutover.  NOTE(review): values
+  * presumably come from the DWC_otg databook — confirm there.
+  */
+ if (gsnpsid >= (uint32_t) 0x4f54260a) {
+ utmi16b = 5;
+ utmi8b = 9;
+ } else {
+ utmi16b = 4;
+ utmi8b = 8;
+ }
+ /* Re-arm EP0 for the control traffic that follows enumeration. */
+ dwc_otg_ep0_activate(GET_CORE_IF(_pcd), &ep0->dwc_ep);
+
+#ifdef DEBUG_EP0
+ print_ep0_state(_pcd);
+#endif /* */
+ _pcd->ep0state = EP0_IDLE;
+ ep0->stopped = 0;
+ _pcd->gadget.speed = get_device_speed(GET_CORE_IF(_pcd));
+
+ /* Set USB turnaround time based on device speed and PHY interface. */
+ gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg);
+ if (_pcd->gadget.speed == USB_SPEED_HIGH) {
+ if (GET_CORE_IF(_pcd)->hwcfg2.b.hs_phy_type ==
+ DWC_HWCFG2_HS_PHY_TYPE_ULPI) {
+
+ /* ULPI interface */
+ gusbcfg.b.usbtrdtim = 9;
+ }
+ if (GET_CORE_IF(_pcd)->hwcfg2.b.hs_phy_type ==
+ DWC_HWCFG2_HS_PHY_TYPE_UTMI) {
+
+/////
+ /* UTMI+ interface: prefer the synthesized data width from
+  * HWCFG4; fall back to the driver parameter otherwise. */
+ if (GET_CORE_IF(_pcd)->hwcfg4.b.
+ utmi_phy_data_width == 0) {
+ gusbcfg.b.usbtrdtim = utmi8b;
+ } else if (GET_CORE_IF(_pcd)->hwcfg4.b.
+ utmi_phy_data_width == 1) {
+ gusbcfg.b.usbtrdtim = utmi16b;
+ } else if (GET_CORE_IF(_pcd)->core_params->
+ phy_utmi_width == 8) {
+ gusbcfg.b.usbtrdtim = utmi8b;
+ } else {
+ gusbcfg.b.usbtrdtim = utmi16b;
+ }
+ }
+ if (GET_CORE_IF(_pcd)->hwcfg2.b.hs_phy_type ==
+ DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) {
+ /* UTMI+ OR ULPI interface */
+ if (gusbcfg.b.ulpi_utmi_sel == 1) {
+ /* ULPI interface */
+ gusbcfg.b.usbtrdtim = 9;
+ } else {
+ /* UTMI+ interface */
+ if (GET_CORE_IF(_pcd)->core_params->
+ phy_utmi_width == 16) {
+ gusbcfg.b.usbtrdtim = utmi16b;
+ } else {
+ gusbcfg.b.usbtrdtim = utmi8b;
+ }
+ }
+ }
+ } else {
+ /* Full or low speed */
+ gusbcfg.b.usbtrdtim = 9;
+ }
+ dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32);
+
+ /* Clear interrupt */
+ gintsts.d32 = 0;
+ gintsts.b.enumdone = 1;
+ dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+ gintsts.d32);
+ return 1;
+}
+
+/**
+ * ISO OUT packet dropped (Rx FIFO full or Rx status queue full).
+ * Real handling (draining the Rx FIFO) is not implemented; the source
+ * is masked so it cannot storm, then the status bit is acknowledged.
+ *
+ * @return 1 (interrupt handled).
+ */
+int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *
+ _pcd)
+{
+ gintmsk_data_t mask = {.d32 = 0};
+ gintsts_data_t ack = {.d32 = 0};
+
+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+ "ISOC Out Dropped");
+
+ /* Mask further occurrences of this interrupt. */
+ mask.b.isooutdrop = 1;
+ dwc_modify_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintmsk,
+ mask.d32, 0);
+
+ /* Acknowledge the pending status bit. */
+ ack.b.isooutdrop = 1;
+ dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+ ack.d32);
+ return 1;
+}
+
+/**
+ * End of the periodic portion of the micro-frame.  Loading the next
+ * frame's periodic packets is not implemented; the source is masked so
+ * it cannot storm, then the status bit is acknowledged.
+ *
+ * @return 1 (interrupt handled).
+ */
+int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t * _pcd)
+{
+ gintmsk_data_t mask = {.d32 = 0};
+ gintsts_data_t ack = {.d32 = 0};
+
+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+ "End of Periodic Portion of Micro-Frame Interrupt");
+
+ /* Mask further occurrences of this interrupt. */
+ mask.b.eopframe = 1;
+ dwc_modify_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintmsk,
+ mask.d32, 0);
+
+ /* Acknowledge the pending status bit. */
+ ack.b.eopframe = 1;
+ dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+ ack.d32);
+ return 1;
+}
+
+/**
+ * EP mismatch: the EP of the packet at the top of the non-periodic Tx
+ * FIFO does not match the EP of the IN token received.  No recovery is
+ * attempted here; the interrupt is simply acknowledged.
+ *
+ * @param _core_if Programming view of DWC_otg controller.
+ * @return 1 (interrupt handled).
+ */
+int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t * _core_if)
+{
+ gintsts_data_t ack = {.d32 = 0};
+
+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _core_if);
+
+ /* Acknowledge the pending status bit. */
+ ack.b.epmismatch = 1;
+ dwc_write_reg32(&_core_if->core_global_regs->gintsts, ack.d32);
+ return 1;
+}
+
+/**
+ * Protocol-STALL EP0 in response to a bad or unsupported SETUP, then
+ * re-arm EP0 OUT so the next SETUP packet can still be received.
+ *
+ * @param err_val Error code reported in the warning (diagnostic only).
+ */
+static inline void ep0_do_stall(dwc_otg_pcd_t * _pcd, const int err_val)
+{
+ struct usb_ctrlrequest *ctrl = &_pcd->setup_pkt->req;
+ dwc_otg_pcd_ep_t * ep = &_pcd->ep0;
+
+ DWC_WARN("req %02x.%02x protocol STALL; err %d\n", ctrl->bRequestType,
+ ctrl->bRequest, err_val);
+
+ /* Stall the IN direction and reset the control state machine. */
+ ep->dwc_ep.is_in = 1;
+ dwc_otg_ep_set_stall(_pcd->otg_dev->core_if, &ep->dwc_ep);
+ _pcd->ep0.stopped = 1;
+ _pcd->ep0state = EP0_IDLE;
+ ep0_out_start(GET_CORE_IF(_pcd), _pcd);
+}
+
+/**
+ * This functions delegates the setup command to the gadget driver.
+ *
+ * The PCD lock is dropped across the gadget callback because setup()
+ * may itself queue requests back into the PCD.  A negative return from
+ * the gadget stalls EP0.
+ */
+static inline void do_gadget_setup(dwc_otg_pcd_t * _pcd,
+ struct usb_ctrlrequest *_ctrl)
+{
+ int ret = 0;
+ if (_pcd->driver && _pcd->driver->setup) {
+ SPIN_UNLOCK(&_pcd->lock);
+ ret = _pcd->driver->setup(&_pcd->gadget, _ctrl);
+ SPIN_LOCK(&_pcd->lock);
+ if (ret < 0) {
+ ep0_do_stall(_pcd, ret);
+ }
+
+ /** @todo This is a g_file_storage gadget driver specific
+ * workaround: a DELAYED_STATUS result from the fsg_setup
+ * routine will result in the gadget queueing a EP0 IN status
+ * phase for a two-stage control transfer. Exactly the same as
+ * a SET_CONFIGURATION/SET_INTERFACE except that this is a class
+ * specific request. Need a generic way to know when the gadget
+ * driver will queue the status phase. Can we assume when we
+ * call the gadget driver setup() function that it will always
+ * queue and require the following flag? Need to look into
+ * this.
+ */
+ /* 256 + 999 is file_storage's DELAYED_STATUS magic value;
+  * NOTE(review): keep in sync with that gadget's definition. */
+ if (ret == 256 + 999) {
+ _pcd->request_config = 1;
+ }
+ }
+}
+
+/**
+ * Queue the zero-length IN packet that forms the status phase of a
+ * two-stage control transfer, then re-arm EP0 OUT for the next SETUP.
+ */
+static inline void do_setup_in_status_phase(dwc_otg_pcd_t * _pcd)
+{
+ dwc_otg_pcd_ep_t * ep = &_pcd->ep0;
+
+ /* A stalled EP0 gets no status phase. */
+ if (_pcd->ep0state == EP0_STALL)
+  return;
+ _pcd->ep0state = EP0_STATUS;
+
+ DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n");
+ ep->dwc_ep.xfer_len = 0;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.is_in = 1;
+ ep->dwc_ep.dma_addr = _pcd->setup_pkt_dma_handle;
+ dwc_otg_ep0_start_transfer(GET_CORE_IF(_pcd), &ep->dwc_ep);
+
+ /* Prepare for more SETUP packets. */
+ ep0_out_start(GET_CORE_IF(_pcd), _pcd);
+}
+
+/**
+ * Queue the zero-length OUT packet that forms the status phase of a
+ * two-stage control transfer, then re-arm EP0 OUT for the next SETUP.
+ */
+static inline void do_setup_out_status_phase(dwc_otg_pcd_t * _pcd)
+{
+ dwc_otg_pcd_ep_t * ep = &_pcd->ep0;
+
+ /* A stalled EP0 gets no status phase. */
+ if (_pcd->ep0state == EP0_STALL) {
+  DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n");
+  return;
+ }
+ _pcd->ep0state = EP0_STATUS;
+
+ DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n");
+ ep->dwc_ep.xfer_len = 0;
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.is_in = 0;
+ /* The setup-packet buffer doubles as the ZLP DMA target. */
+ ep->dwc_ep.dma_addr = _pcd->setup_pkt_dma_handle;
+ dwc_otg_ep0_start_transfer(GET_CORE_IF(_pcd), &ep->dwc_ep);
+
+ /* Prepare for more SETUP packets. */
+ ep0_out_start(GET_CORE_IF(_pcd), _pcd);
+}
+
+/**
+ * Clear the EP halt (STALL) and if pending requests start the
+ * transfer.
+ *
+ * Finishes with the control status phase for the CLEAR_FEATURE
+ * request that triggered it.
+ */
+static inline void pcd_clear_halt(dwc_otg_pcd_t * _pcd,
+ dwc_otg_pcd_ep_t * _ep)
+{
+ /* stall_clear_flag set means the hardware stall bit is left for
+  * later clearing; otherwise clear it now. */
+ if (_ep->dwc_ep.stall_clear_flag == 0)
+ dwc_otg_ep_clear_stall(GET_CORE_IF(_pcd), &_ep->dwc_ep);
+
+ /* Reactive the EP */
+ dwc_otg_ep_activate(GET_CORE_IF(_pcd), &_ep->dwc_ep);
+ if (_ep->stopped) {
+ _ep->stopped = 0;
+
+ /* If there is a request in the EP queue start it */
+
+ /** @todo FIXME: this causes an EP mismatch in DMA mode.
+ * epmismatch not yet implemented. */
+
+ /*
+ * Above fixme is solved by implmenting a tasklet to call the
+ * start_next_request(), outside of interrupt context at some
+ * time after the current time, after a clear-halt setup packet.
+ * Still need to implement ep mismatch in the future if a gadget
+ * ever uses more than one endpoint at once
+ */
+ if (GET_CORE_IF(_pcd)->dma_enable) {
+ /* Defer the restart to the tasklet (outside IRQ context). */
+ _ep->queue_sof = 1;
+ tasklet_schedule(_pcd->start_xfer_tasklet);
+ } else {
+#ifdef CONFIG_405EZ
+ /*
+ * Added-sr: 2007-07-26
+ *
+ * To re-enable this endpoint it's important to
+ * set this next_ep number. Otherwise the endpoint
+ * will not get active again after stalling.
+ */
+//test-only _ep->pcd->next_ep = _ep->dwc_ep.num;
+ start_next_request( _ep );
+#endif
+#if 0
+ _ep->queue_sof = 1;
+ DWC_ERROR("tasklet schedule\n");
+ tasklet_schedule(_pcd->start_xfer_tasklet);
+ if (GET_CORE_IF(_pcd)->core_params->opt)
+ {
+ start_next_request(_ep);
+ }
+
+#endif /* */
+ }
+ }
+
+ /* Start Control Status Phase */
+ do_setup_in_status_phase(_pcd);
+}
+
+/**
+ * This function is called when the SET_FEATURE TEST_MODE Setup packet
+ * is sent from the host. The Device Control register is written with
+ * the Test Mode bits set to the specified Test Mode. This is done as
+ * a tasklet so that the "Status" phase of the control transfer
+ * completes before transmitting the TEST packets.
+ *
+ * @todo This has not been tested since the tasklet struct was put
+ * into the PCD struct!
+ *
+ * @param _data The dwc_otg_pcd_t, cast to unsigned long.
+ */
+static void do_test_mode(unsigned long _data)
+{
+ dwc_otg_pcd_t * pcd = (dwc_otg_pcd_t *) _data;
+ dwc_otg_core_if_t * core_if = GET_CORE_IF(pcd);
+ int test_mode = pcd->test_mode;
+ dctl_data_t dctl;
+
+ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl);
+ /*
+  * USB 2.0 test selectors 1..5 (TEST_J, TEST_K, TEST_SE0_NAK,
+  * TEST_PACKET, TEST_FORCE_ENABLE) map one-to-one onto
+  * DCTL.tstctl; any other selector leaves tstctl untouched and
+  * the register is written back unchanged.
+  */
+ if (test_mode >= 1 && test_mode <= 5)
+  dctl.b.tstctl = test_mode;
+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
+}
+
+/**
+ * This function process the SET_FEATURE Setup Commands.
+ *
+ * DEVICE recipients (remote wakeup, test mode, OTG HNP features) are
+ * handled here and answered with an IN status phase; INTERFACE
+ * recipients are delegated to the gadget driver; ENDPOINT_HALT sets a
+ * STALL on the addressed endpoint.
+ */
+static inline void do_set_feature(dwc_otg_pcd_t * _pcd)
+{
+ dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+ dwc_otg_core_global_regs_t * global_regs = core_if->core_global_regs;
+ struct usb_ctrlrequest ctrl = _pcd->setup_pkt->req;
+ dwc_otg_pcd_ep_t * ep = 0;
+ int32_t otg_cap_param = core_if->core_params->otg_cap;
+ gotgctl_data_t gotgctl = {.d32 = 0};
+ DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bRequestType, ctrl.bRequest,
+ __le16_to_cpu(ctrl.wValue), __le16_to_cpu(ctrl.wIndex),
+ __le16_to_cpu(ctrl.wLength));
+
+ DWC_DEBUGPL(DBG_PCD, "otg_cap=%d\n", otg_cap_param);
+ switch (ctrl.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (__le16_to_cpu(ctrl.wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ _pcd->remote_wakeup_enable = 1;
+ break;
+ case USB_DEVICE_TEST_MODE:
+
+ /* Setup the Test Mode tasklet to do the Test
+ * Packet generation after the SETUP Status
+ * phase has completed. */
+
+ /** @todo This has not been tested since the
+ * tasklet struct was put into the PCD
+ * struct! */
+ _pcd->test_mode_tasklet.next = 0;
+ _pcd->test_mode_tasklet.state = 0;
+ atomic_set(&_pcd->test_mode_tasklet.count, 0);
+ _pcd->test_mode_tasklet.func = do_test_mode;
+ _pcd->test_mode_tasklet.data = (unsigned long)_pcd;
+ /* Test selector is in the high byte of wIndex. */
+ _pcd->test_mode = __le16_to_cpu(ctrl.wIndex) >> 8;
+ tasklet_schedule(&_pcd->test_mode_tasklet);
+ break;
+ case USB_DEVICE_B_HNP_ENABLE:
+ DWC_DEBUGPL(DBG_PCDV,
+ "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n");
+
+ /* dev may initiate HNP */
+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+ _pcd->b_hnp_enable = 1;
+ dwc_otg_pcd_update_otg(_pcd, 0);
+ DWC_DEBUGPL(DBG_PCD, "Request B HNP\n");
+
+ /**@todo Is the gotgctl.devhnpen cleared
+ * by a USB Reset? */
+ gotgctl.b.devhnpen = 1;
+ gotgctl.b.hnpreq = 1;
+ dwc_write_reg32(&global_regs->gotgctl,gotgctl.d32);
+ } else {
+ ep0_do_stall(_pcd, -EOPNOTSUPP);
+ }
+ break;
+ case USB_DEVICE_A_HNP_SUPPORT:
+ /* RH port supports HNP */
+ DWC_DEBUGPL(DBG_PCDV,
+ "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n");
+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+ _pcd->a_hnp_support = 1;
+ dwc_otg_pcd_update_otg(_pcd, 0);
+ } else {
+ ep0_do_stall(_pcd, -EOPNOTSUPP);
+ }
+ break;
+ case USB_DEVICE_A_ALT_HNP_SUPPORT:
+ /* other RH port does */
+ DWC_DEBUGPL(DBG_PCDV,
+ "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n");
+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) {
+ _pcd->a_alt_hnp_support = 1;
+ dwc_otg_pcd_update_otg(_pcd, 0);
+ } else {
+ ep0_do_stall(_pcd, -EOPNOTSUPP);
+ }
+ break;
+ }
+ do_setup_in_status_phase(_pcd);
+ break;
+ case USB_RECIP_INTERFACE:
+ do_gadget_setup(_pcd, &ctrl);
+ break;
+ case USB_RECIP_ENDPOINT:
+ if (__le16_to_cpu(ctrl.wValue) == USB_ENDPOINT_HALT) {
+ ep = get_ep_by_addr(_pcd, __le16_to_cpu(ctrl.wIndex));
+ if (ep == 0) {
+ ep0_do_stall(_pcd, -EOPNOTSUPP);
+ return;
+ }
+ ep->stopped = 1;
+ dwc_otg_ep_set_stall(core_if, &ep->dwc_ep);
+ }
+ do_setup_in_status_phase(_pcd);
+ break;
+ }
+}
+
+/**
+ * This function process the CLEAR_FEATURE Setup Commands.
+ *
+ * DEVICE recipient: disables remote wakeup (test-mode clear is a
+ * TODO) and answers with an IN status phase.  ENDPOINT recipient:
+ * clears the halt on the addressed endpoint via pcd_clear_halt(),
+ * which itself issues the status phase.  Other recipients fall
+ * through unanswered.
+ */
+static inline void do_clear_feature(dwc_otg_pcd_t * _pcd)
+{
+ struct usb_ctrlrequest ctrl = _pcd->setup_pkt->req;
+ dwc_otg_pcd_ep_t * ep = 0;
+ DWC_DEBUGPL(DBG_PCD, "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bRequestType, ctrl.bRequest,
+ __le16_to_cpu(ctrl.wValue), __le16_to_cpu(ctrl.wIndex),
+ __le16_to_cpu(ctrl.wLength));
+
+ switch (ctrl.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ switch (__le16_to_cpu(ctrl.wValue)) {
+ case USB_DEVICE_REMOTE_WAKEUP:
+ _pcd->remote_wakeup_enable = 0;
+ break;
+ case USB_DEVICE_TEST_MODE:
+ /** @todo Add CLEAR_FEATURE for TEST modes. */
+ break;
+ }
+ do_setup_in_status_phase(_pcd);
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = get_ep_by_addr(_pcd,__le16_to_cpu(ctrl.wIndex));
+ if (ep == 0) {
+ ep0_do_stall(_pcd, -EOPNOTSUPP);
+ return;
+ }
+ pcd_clear_halt(_pcd, ep);
+ DWC_DEBUGPL(DBG_PCD, "%s halt cleared by host\n",
+ ep->ep.name);
+ break;
+ }
+}
+
+/**
+ * This function processes SETUP commands. In Linux, the USB Command
+ * processing is done in two places - the first being the PCD and the
+ * second in the Gadget Driver (for example, the File-Backed Storage
+ * Gadget Driver).
+ *
+ * <table>
+ * <tr><td>Command </td><td>Driver </td><td>Description</td></tr>
+ *
+ * <tr><td>GET_STATUS </td><td>PCD </td><td>Command is processed as
+ * defined in chapter 9 of the USB 2.0 Specification chapter 9
+ * </td></tr>
+ *
+ * <tr><td>CLEAR_FEATURE </td><td>PCD </td><td>The Device and Endpoint
+ * requests are the ENDPOINT_HALT feature is procesed, all others the
+ * interface requests are ignored.</td></tr>
+ *
+ * <tr><td>SET_FEATURE </td><td>PCD </td><td>The Device and Endpoint
+ * requests are processed by the PCD. Interface requests are passed
+ * to the Gadget Driver.</td></tr>
+ *
+ * <tr><td>SET_ADDRESS </td><td>PCD </td><td>Program the DCFG reg,
+ * with device address received </td></tr>
+ *
+ * <tr><td>GET_DESCRIPTOR </td><td>Gadget Driver </td><td>Return the
+ * requested descriptor</td></tr>
+ *
+ * <tr><td>SET_DESCRIPTOR </td><td>Gadget Driver </td><td>Optional -
+ * not implemented by any of the existing Gadget Drivers.</td></tr>
+ *
+ * <tr><td>SET_CONFIGURATION </td><td>Gadget Driver </td><td>Disable
+ * all EPs and enable EPs for new configuration.</td></tr>
+ *
+ * <tr><td>GET_CONFIGURATION </td><td>Gadget Driver </td><td>Return
+ * the current configuration</td></tr>
+ *
+ * <tr><td>SET_INTERFACE </td><td>Gadget Driver </td><td>Disable all
+ * EPs and enable EPs for new configuration.</td></tr>
+ *
+ * <tr><td>GET_INTERFACE </td><td>Gadget Driver </td><td>Return the
+ * current interface.</td></tr>
+ *
+ * <tr><td>SYNC_FRAME </td><td>PCD </td><td>Display debug
+ * message.</td></tr>
+ * </table>
+ *
+ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are
+ * processed by pcd_setup. Calling the Function Driver's setup function from
+ * pcd_setup processes the gadget SETUP commands.
+ */
+/* Decode and dispatch one SETUP packet: standard device requests are
+ * handled in the PCD where possible, everything else is delegated to
+ * the gadget driver (see the dispatch table in the comment above). */
+static inline void pcd_setup(dwc_otg_pcd_t * _pcd)
+{
+ dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+ dwc_otg_dev_if_t * dev_if = core_if->dev_if;
+ struct usb_ctrlrequest ctrl = _pcd->setup_pkt->req;
+ dwc_otg_pcd_ep_t * ep;
+ dwc_otg_pcd_ep_t * ep0 = &_pcd->ep0;
+ uint16_t * status = _pcd->status_buf;
+ deptsiz0_data_t doeptsize0 = {.d32 = 0};
+
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bRequestType, ctrl.bRequest, __le16_to_cpu(ctrl.wValue),
+ __le16_to_cpu(ctrl.wIndex), __le16_to_cpu(ctrl.wLength));
+
+#endif /* */
+ doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz);
+
+ /** @todo handle > 1 setup packet , assert error for now */
+ if (core_if->dma_enable && (doeptsize0.b.supcnt < 2)) {
+ DWC_ERROR("\n\n CANNOT handle > 1 setup packet in DMA mode\n\n");
+ }
+
+ /* Clean up the request queue */
+ request_nuke(ep0);
+ ep0->stopped = 0;
+ /* Data-phase direction follows bit 7 of bmRequestType. */
+ if (ctrl.bRequestType & USB_DIR_IN) {
+ ep0->dwc_ep.is_in = 1;
+ _pcd->ep0state = EP0_IN_DATA_PHASE;
+ } else {
+ ep0->dwc_ep.is_in = 0;
+ _pcd->ep0state = EP0_OUT_DATA_PHASE;
+ }
+ if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
+ /* handle non-standard (class/vendor) requests in the gadget driver */
+ do_gadget_setup(_pcd, &ctrl);
+ return;
+ }
+
+ /** @todo NGS: Handle bad setup packet? */
+ switch (ctrl.bRequest) {
+ case USB_REQ_GET_STATUS:
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCD,
+ "GET_STATUS %02x.%02x v%04x i%04x l%04x\n",
+ ctrl.bRequestType, ctrl.bRequest, __le16_to_cpu( ctrl.wValue),
+ __le16_to_cpu(ctrl.wIndex), __le16_to_cpu(ctrl.wLength));
+
+#endif /* */
+ switch (ctrl.bRequestType & USB_RECIP_MASK) {
+ case USB_RECIP_DEVICE:
+ /* Bit 0 = self-powered, bit 1 = remote wakeup. */
+ *status = 0x1; /* Self powered */
+ *status |= _pcd->remote_wakeup_enable << 1;
+ break;
+ case USB_RECIP_INTERFACE:
+ *status = 0;
+ break;
+ case USB_RECIP_ENDPOINT:
+ ep = get_ep_by_addr(_pcd, __le16_to_cpu(ctrl.wIndex));
+ if (ep == 0 || __le16_to_cpu(ctrl.wLength) > 2) {
+ ep0_do_stall(_pcd, -EOPNOTSUPP);
+ return;
+ }
+ /** @todo check for EP stall */
+ *status = ep->stopped;
+ break;
+ }
+ /* GET_STATUS data is little-endian on the wire. */
+ *status = __cpu_to_le16(*status);
+ _pcd->ep0_pending = 1;
+ ep0->dwc_ep.start_xfer_buff = (uint8_t *) status;
+ ep0->dwc_ep.xfer_buff = (uint8_t *) status;
+ ep0->dwc_ep.dma_addr = _pcd->status_buf_dma_handle;
+ ep0->dwc_ep.xfer_len = 2;
+ ep0->dwc_ep.xfer_count = 0;
+ ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len;
+ dwc_otg_ep0_start_transfer(GET_CORE_IF(_pcd), &ep0->dwc_ep);
+ break;
+ case USB_REQ_CLEAR_FEATURE:
+ do_clear_feature(_pcd);
+ break;
+ case USB_REQ_SET_FEATURE:
+ do_set_feature(_pcd);
+ break;
+ case USB_REQ_SET_ADDRESS:
+ if (ctrl.bRequestType == USB_RECIP_DEVICE) {
+ dcfg_data_t dcfg = {
+ .d32 = 0};
+
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n",
+ __le16_to_cpu(ctrl.wValue));
+
+#endif /* */
+ /* Program the new address into DCFG. */
+ dcfg.b.devaddr = __le16_to_cpu(ctrl.wValue);
+ dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0,
+ dcfg.d32);
+#ifdef DEBUG_EP0
+
+ DWC_DEBUGPL(DBG_PCDV, "dcfg(after updating address :0x%x, dcfg.b.devaddr:%d\n",
+ dwc_read_reg32(&dev_if->dev_global_regs->dcfg), dcfg.b.devaddr);
+#endif
+
+ do_setup_in_status_phase(_pcd);
+ return;
+ }
+ break;
+ case USB_REQ_SET_INTERFACE:
+ case USB_REQ_SET_CONFIGURATION:
+
+ //DWC_DEBUGPL(DBG_PCD, "SET_CONFIGURATION\n");
+ _pcd->request_config = 1; /* Configuration changed */
+ do_gadget_setup(_pcd, &ctrl);
+ break;
+ case USB_REQ_SYNCH_FRAME:
+ do_gadget_setup(_pcd, &ctrl);
+ break;
+ default:
+ /* Call the Gadget Driver's setup functions */
+ do_gadget_setup(_pcd, &ctrl);
+ break;
+ }
+}
+
+/**
+ * This function completes the ep0 control transfer.
+ *
+ * Decides, based on the EP0 state machine and the direction of the
+ * just-finished stage, whether to run the opposite-direction status
+ * phase or to complete the queued request.
+ *
+ * @return 1 when a status phase was queued or the request completed,
+ *         0 otherwise.
+ */
+static int32_t ep0_complete_request( dwc_otg_pcd_ep_t *_ep )
+{
+ dwc_otg_core_if_t *core_if = GET_CORE_IF(_ep->pcd);
+ dwc_otg_dev_if_t *dev_if = core_if->dev_if;
+ dwc_otg_dev_in_ep_regs_t *in_ep_regs =
+ dev_if->in_ep_regs[_ep->dwc_ep.num];
+#ifdef DEBUG_EP0
+ dwc_otg_dev_out_ep_regs_t *out_ep_regs =
+ dev_if->out_ep_regs[_ep->dwc_ep.num];
+#endif
+ deptsiz0_data_t deptsiz;
+ dwc_otg_pcd_request_t *req;
+ int is_last = 0;
+ dwc_otg_pcd_t *pcd = _ep->pcd;
+ static int counter = 0; /*DFX added*/
+ counter++;
+ DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, _ep->ep.name)#
+ /*
+ if ( in_set_config == 1 ) {
+ printk(KERN_ERR "DFX ep0_complete_request in_set_config. ep0 pending: %d list empty:"
+ " %d ep.is_in: %d ep0State: %d counter: %d\n",
+ pcd->ep0_pending, list_empty(&_ep->queue), _ep->dwc_ep.is_in,
+ pcd->ep0state, counter);
+ }
+ if ( in_set_config == 2 ) {
+ printk(KERN_ERR "DFX ep0_complete_request in_set_ADDRESS. ep0 pending: %d list empty:"
+ " %d ep.is_in: %d ep0State: %d counter: %d\n",
+ pcd->ep0_pending, list_empty(&_ep->queue), _ep->dwc_ep.is_in,
+ pcd->ep0state, counter);
+ }
+ */
+ /* No-data-stage request: go straight to the status phase in the
+  * direction opposite to the (empty) data stage. */
+ if ((pcd->ep0_pending && list_empty(&_ep->queue)) /*|| counter == 1*/) {
+ if (_ep->dwc_ep.is_in) {
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n");
+#endif
+ do_setup_out_status_phase(pcd);
+ } else {
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n");
+#endif
+ do_setup_in_status_phase(pcd);
+ }
+ pcd->ep0_pending = 0;
+ pcd->ep0state = EP0_STATUS;
+ return 1;
+ }
+
+
+ if (list_empty(&_ep->queue)) {
+ return 0;
+ }
+ req = list_entry(_ep->queue.next, dwc_otg_pcd_request_t, queue);
+ //printk(KERN_ERR "DFX compelete request req.zero: %d\n", req->req.zero);
+
+ if (pcd->ep0state == EP0_STATUS) {
+ is_last = 1;
+ }
+ /* DFX TODO Gadget zero sets req.zero to true when the data it is sending
+ * to the host is shorter than the length specified by the host. In this
+ * case, if we also send a ZLP, we also somehow need to come back and
+ * do_setup_out_status_phase() Which apparently is not done.
+ */
+ /* else if (req->req.zero) {
+ req->req.actual = _ep->dwc_ep.xfer_count;
+ //do_setup_in_status_phase (pcd);
+ req->req.zero = 0;
+ _ep->dwc_ep.xfer_len = 0;
+ _ep->dwc_ep.xfer_count = 0;
+ _ep->dwc_ep.sent_zlp = 1;
+ dwc_otg_ep0_start_transfer( GET_CORE_IF(pcd), &_ep->dwc_ep );
+ return 1;
+ }*/
+ else if (_ep->dwc_ep.is_in) {
+ //printk(KERN_ERR "DFX complete request counter: %d\n", counter);
+ deptsiz.d32 = dwc_read_reg32( &in_ep_regs->dieptsiz);
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n",
+ _ep->ep.name, _ep->dwc_ep.xfer_len,
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+#endif
+ /* xfersize == 0 means the IN data stage fully drained;
+  * follow it with the OUT status phase. */
+ if (deptsiz.b.xfersize == 0) {
+ req->req.actual = _ep->dwc_ep.xfer_count;
+ /* Is a Zero Len Packet needed? */
+ //if (req->req.zero) {
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n");
+#endif
+ do_setup_out_status_phase(pcd);
+ }
+ } else {
+ /* ep0-OUT */
+#ifdef DEBUG_EP0
+ deptsiz.d32 = dwc_read_reg32( &out_ep_regs->doeptsiz);
+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n",
+ _ep->ep.name, _ep->dwc_ep.xfer_len,
+ deptsiz.b.xfersize,
+ deptsiz.b.pktcnt);
+#endif
+ req->req.actual = _ep->dwc_ep.xfer_count;
+
+ /* Is a Zero Len Packet needed? */
+ //if (req->req.zero) {
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n");
+#endif
+ do_setup_in_status_phase(pcd);
+ }
+
+ /* Complete the request */
+ if (is_last) {
+ request_done(_ep, req, 0);
+ _ep->dwc_ep.start_xfer_buff = 0;
+ _ep->dwc_ep.xfer_buff = 0;
+ _ep->dwc_ep.xfer_len = 0;
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * This function completes the request for the EP. If there are
+ * additional requests for the EP in the queue they will be started.
+ *
+ * For IN endpoints the DIEPTSIZ residue is checked to decide whether
+ * the transfer really finished; OUT endpoints are treated as complete
+ * unconditionally.
+ *
+ * Fix: the completion path used to dereference @req unconditionally,
+ * but @req is NULL whenever this runs with an empty request queue
+ * (e.g. a transfer-complete interrupt racing a dequeue) — guard it.
+ */
+static void complete_ep(dwc_otg_pcd_ep_t * _ep)
+{
+ dwc_otg_core_if_t * core_if = GET_CORE_IF(_ep->pcd);
+ dwc_otg_dev_if_t * dev_if = core_if->dev_if;
+ dwc_otg_dev_in_ep_regs_t * in_ep_regs =
+ dev_if->in_ep_regs[_ep->dwc_ep.num];
+ deptsiz_data_t deptsiz;
+ dwc_otg_pcd_request_t * req = 0;
+ int is_last = 0;
+ DWC_DEBUGPL(DBG_PCDV, "%s() %s-%s\n", __func__, _ep->ep.name,
+ (_ep->dwc_ep.is_in ? "IN" : "OUT"));
+
+ /* Get any pending requests */
+ if (!list_empty(&_ep->queue)) {
+ req = list_entry(_ep->queue.next, dwc_otg_pcd_request_t, queue);
+ }
+ DWC_DEBUGPL(DBG_PCD, "Requests %d\n", _ep->pcd->request_pending);
+ if (_ep->dwc_ep.is_in) {
+ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz);
+ if (core_if->dma_enable) {
+ /* In DMA mode xfersize==0 means everything was sent. */
+ if (deptsiz.b.xfersize == 0)
+ _ep->dwc_ep.xfer_count = _ep->dwc_ep.xfer_len;
+ }
+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n",
+ _ep->ep.name, _ep->dwc_ep.xfer_len,
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0
+ && _ep->dwc_ep.xfer_count == _ep->dwc_ep.xfer_len) {
+ is_last = 1;
+ } else {
+ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n",
+ _ep->ep.name, (_ep->dwc_ep.is_in ? "IN" : "OUT"),
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+ }
+ } else {
+ dwc_otg_dev_out_ep_regs_t * out_ep_regs =
+ dev_if->out_ep_regs[_ep->dwc_ep.num];
+ deptsiz.d32 = 0;
+ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz);
+#ifdef CONFIG_DEBUG
+ DWC_DEBUGPL(DBG_PCDV,
+ "addr %p, %s len=%d cnt=%d xsize=%d pktcnt=%d\n",
+ &out_ep_regs->doeptsiz, _ep->ep.name,
+ _ep->dwc_ep.xfer_len,
+ _ep->dwc_ep.xfer_count,
+ deptsiz.b.xfersize, deptsiz.b.pktcnt);
+
+#endif /* */
+ is_last = 1;
+ }
+
+ /* Complete the request */
+ if (is_last) {
+#ifdef CONFIG_405EZ
+ /*
+ * Added-sr: 2007-07-26
+ *
+ * Since the 405EZ (Ultra) only support 2047 bytes as
+ * max transfer size, we have to split up bigger transfers
+ * into multiple transfers of 1024 bytes sized messages.
+ * I happens often, that transfers of 4096 bytes are
+ * required (zero-gadget, file_storage-gadget).
+ */
+ if (_ep->dwc_ep.bytes_pending) {
+ dwc_otg_dev_in_ep_regs_t *in_regs =
+ core_if->dev_if->in_ep_regs[_ep->dwc_ep.num];
+ gintmsk_data_t intr_mask = { .d32 = 0};
+
+ _ep->dwc_ep.xfer_len = _ep->dwc_ep.bytes_pending;
+ if (_ep->dwc_ep.xfer_len > MAX_XFER_LEN) {
+ _ep->dwc_ep.bytes_pending = _ep->dwc_ep.xfer_len -
+ MAX_XFER_LEN;
+ _ep->dwc_ep.xfer_len = MAX_XFER_LEN;
+ } else {
+ _ep->dwc_ep.bytes_pending = 0;
+ }
+
+ /*
+ * Restart the current transfer with the next "chunk"
+ * of data.
+ */
+ _ep->dwc_ep.xfer_count = 0;
+ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz));
+ deptsiz.b.xfersize = _ep->dwc_ep.xfer_len;
+ deptsiz.b.pktcnt = (_ep->dwc_ep.xfer_len - 1 +
+ _ep->dwc_ep.maxpacket) / _ep->dwc_ep.maxpacket;
+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32);
+
+ intr_mask.b.nptxfempty = 1;
+ dwc_modify_reg32( &core_if->core_global_regs->gintsts,
+ intr_mask.d32, 0);
+ dwc_modify_reg32( &core_if->core_global_regs->gintmsk,
+ intr_mask.d32, intr_mask.d32);
+
+ /*
+ * Just return here if message was not completely
+ * transferred.
+ */
+ return;
+ }
+#endif
+ /* Guard: the queue may have been empty (req == NULL). */
+ if (req) {
+ if (core_if->dma_enable) {
+ req->req.actual =
+ _ep->dwc_ep.xfer_len - deptsiz.b.xfersize;
+ } else {
+ req->req.actual = _ep->dwc_ep.xfer_count;
+ }
+ request_done(_ep, req, 0);
+ }
+ _ep->dwc_ep.start_xfer_buff = 0;
+ _ep->dwc_ep.xfer_buff = 0;
+ _ep->dwc_ep.xfer_len = 0;
+
+ /* If there is a request in the queue start it. */
+ start_next_request(_ep);
+ }
+}
+
+/**
+ * This function handles EP0 Control transfers.
+ *
+ * The state of the control tranfers are tracked in
+ * <code>ep0state</code>.
+ */
+static void handle_ep0(dwc_otg_pcd_t * _pcd)
+{
+ dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+ dwc_otg_pcd_ep_t * ep0 = &_pcd->ep0;
+
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
+ print_ep0_state(_pcd);
+
+#endif /* */
+ switch (_pcd->ep0state) {
+ case EP0_DISCONNECT:
+ break;
+ case EP0_IDLE:
+ /* A fresh SETUP packet arrived — decode and dispatch it. */
+ _pcd->request_config = 0;
+ pcd_setup(_pcd);
+ break;
+ case EP0_IN_DATA_PHASE:
+
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n",
+ ep0->dwc_ep.num,
+ (ep0->dwc_ep.is_in ? "IN" : "OUT"),
+ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
+
+#endif /* */
+ if (core_if->dma_enable) {
+ /*
+ * For EP0 we can only program 1 packet at a time so we
+ * need to do the make calculations after each complete.
+ * Call write_packet to make the calculations, as in
+ * slave mode, and use those values to determine if we
+ * can complete.
+ */
+ dwc_otg_ep_write_packet(core_if, &ep0->dwc_ep, 1);
+ }
+#ifdef CONFIG_DWC_SLAVE
+ else {
+ dwc_otg_ep_write_packet(core_if, &ep0->dwc_ep, 0);
+ }
+#endif
+
+ /* Keep feeding packets until the whole stage is sent. */
+ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) {
+ dwc_otg_ep0_continue_transfer(GET_CORE_IF(_pcd),
+ &ep0->dwc_ep);
+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n");
+ } else {
+ ep0_complete_request(ep0);
+ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n");
+ }
+ break;
+ case EP0_OUT_DATA_PHASE:
+#ifdef DEBUG_EP0
+ DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n",
+ ep0->dwc_ep.num,
+ (ep0->dwc_ep.is_in ? "IN" : "OUT"),
+ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket);
+
+#endif /* */
+ ep0_complete_request(ep0);
+ break;
+ case EP0_STATUS:
+ DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n");
+ ep0_complete_request(ep0);
+ _pcd->ep0state = EP0_IDLE;
+ ep0->stopped = 1;
+ ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */
+
+ /* Prepare for more SETUP Packets */
+ if (core_if->dma_enable) {
+ ep0_out_start(core_if, _pcd);
+ }
+ /* Slave mode: kick any transfers deferred via queue_sof. */
+ if (!GET_CORE_IF(_pcd)->dma_enable) {
+ int i;
+ depctl_data_t diepctl;
+ /* NOTE(review): the diepctl read + ep0 queue_sof check
+  * below is duplicated verbatim, and diepctl itself is
+  * never used — looks like leftover debug code; the
+  * second check can never fire.  Confirm and remove. */
+ diepctl.d32 =dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
+ if (_pcd->ep0.queue_sof) {
+ _pcd->ep0.queue_sof = 0;
+ start_next_request(&_pcd->ep0);
+ }
+ diepctl.d32 =dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl);
+ if (_pcd->ep0.queue_sof) {
+ _pcd->ep0.queue_sof = 0;
+ start_next_request(&_pcd->ep0);
+ }
+ for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
+ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[i + 1]->diepctl);
+ if (_pcd->in_ep[i].queue_sof) {
+ _pcd->in_ep[i].queue_sof = 0;
+ start_next_request(&_pcd->in_ep[i]);
+ }
+ }
+ }
+ break;
+ case EP0_STALL:
+ DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n");
+ break;
+ }
+
+#ifdef DEBUG_EP0
+ print_ep0_state(_pcd);
+#endif /* */
+}
+
+/**
+ * Restart transfer
+ *
+ * Re-sends the last packet of an IN transfer that the core NAK'd or
+ * disabled mid-flight: if DIEPTSIZ still shows a pending packet with
+ * no bytes left, the buffer pointer and count are rewound by one
+ * max-packet and the transfer is restarted.
+ */
+static void restart_transfer(dwc_otg_pcd_t * _pcd, const uint32_t _epnum)
+{
+ dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+ dwc_otg_dev_if_t * dev_if = core_if->dev_if;
+ deptsiz_data_t dieptsiz = {.d32 = 0};
+
+ //depctl_data_t diepctl = {.d32=0};
+ dwc_otg_pcd_ep_t * ep;
+ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->dieptsiz);
+ ep = get_in_ep(_pcd, _epnum);
+
+/*
+ if(_epnum != 0)
+ ep = &_pcd->in_ep[ _epnum - 1];
+ else
+ ep = &_pcd->ep0;
+*/
+ DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x xfer_len=%0x"
+ " stopped=%d\n", ep->dwc_ep.xfer_buff,
+ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
+ ep->stopped);
+
+ /*
+ * If xfersize is 0 and pktcnt in not 0, resend the last packet.
+ */
+ if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0
+ && ep->dwc_ep.start_xfer_buff != 0) {
+ if (ep->dwc_ep.xfer_len <= ep->dwc_ep.maxpacket) {
+ /* Single-packet transfer: rewind to the start. */
+ ep->dwc_ep.xfer_count = 0;
+ ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff;
+ } else {
+ /* Multi-packet: back up by exactly one max-packet. */
+ ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket;
+
+ /* convert packet size to dwords. */
+ ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket;
+ }
+ ep->stopped = 0;
+ DWC_DEBUGPL(DBG_PCD, "xfer_buff=%p xfer_count=%0x "
+ "xfer_len=%0x stopped=%d\n", ep->dwc_ep.xfer_buff,
+ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len,
+ ep->stopped );
+ /* EP0 uses its own start-transfer entry point. */
+ if (_epnum == 0) {
+ dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep);
+ } else {
+ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep);
+ }
+ }
+}
+
+/**
+ * handle the IN EP disable interrupt.
+ *
+ * If the endpoint was deliberately stopped, the Tx FIFO is flushed and
+ * the global IN NP NAK is cleared before any restart; either way an
+ * interrupted transfer (non-zero pktcnt/xfersize) is restarted.
+ */
+static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t * _pcd,
+ const uint32_t _epnum)
+{
+ dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+ dwc_otg_dev_if_t * dev_if = core_if->dev_if;
+ deptsiz_data_t dieptsiz = {.d32 = 0};
+ dctl_data_t dctl = {.d32 = 0};
+ dwc_otg_pcd_ep_t * ep;
+ dwc_ep_t * dwc_ep;
+ ep = get_in_ep(_pcd, _epnum);
+ dwc_ep = &ep->dwc_ep;
+/*
+ if(_epnum != 0)
+ {
+ ep = &_pcd->in_ep[ _epnum - 1];
+ dwc_ep = &_pcd->in_ep[ _epnum - 1].dwc_ep;
+ }
+ else
+ {
+ ep = &_pcd->ep0;
+ dwc_ep = &_pcd->ep0.dwc_ep;
+ }
+*/
+ DWC_DEBUGPL(DBG_PCD, "diepctl%d=%0x\n", _epnum,
+ dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->diepctl));
+ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->dieptsiz);
+ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", dieptsiz.b.pktcnt,
+ dieptsiz.b.xfersize);
+ if (ep->stopped) {
+
+ /* Flush the Tx FIFO */
+ /** @todo NGS: This is not the correct FIFO */
+ dwc_otg_flush_tx_fifo(core_if, 0);
+
+ /* Clear the Global IN NP NAK */
+ dctl.d32 = 0;
+ dctl.b.cgnpinnak = 1;
+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, 0);
+
+ /* Restart the transaction */
+ if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
+ restart_transfer(_pcd, _epnum);
+ }
+ } else {
+
+ /* Restart the transaction */
+ if (dieptsiz.b.pktcnt != 0 || dieptsiz.b.xfersize != 0) {
+ restart_transfer(_pcd, _epnum);
+ }
+ /* NOTE(review): this "STOPPED!!!" message sits in the
+  * NOT-stopped branch — looks inverted; confirm intent. */
+ DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n");
+ }
+}
+
+/**
+ * Handler for the IN EP timeout handshake interrupt.
+ *
+ * Quiesces the endpoint that timed out: in slave mode the NP TxFIFO
+ * empty interrupt is masked, the Global IN NAK Effective interrupt is
+ * unmasked, Global IN NAK is set (DCTL.sgnpinnak) and the endpoint is
+ * marked stopped.  Recovery continues in the Global IN NAK Effective /
+ * EP-disable handlers.
+ *
+ * @param _pcd   Programming view of the device controller.
+ * @param _epnum IN endpoint that timed out.
+ */
+static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t * _pcd,
+					      const uint32_t _epnum)
+{
+	dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+	dwc_otg_dev_if_t * dev_if = core_if->dev_if;
+
+#ifdef CONFIG_DWC_DEBUG
+	deptsiz_data_t dieptsiz = {.d32 = 0};
+
+#endif /* */
+	dctl_data_t dctl = {
+	    .d32 = 0};
+	dwc_otg_pcd_ep_t * ep;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	ep = get_in_ep(_pcd, _epnum);
+
+	/* Disable the NP Tx Fifo Empty Interrupt */
+	if (!core_if->dma_enable) {
+		intr_mask.b.nptxfempty = 1;
+		dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
+				  intr_mask.d32, 0);
+	}
+
+	/** @todo NGS Check EP type.
+	 * Implement for Periodic EPs */
+	/*
+	 * Non-periodic EP
+	 */
+	/* Enable the Global IN NAK Effective Interrupt */
+	intr_mask.d32 = 0;	/* Bug fixed - clear mask b4 reusing */
+	intr_mask.b.ginnakeff = 1;
+	dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0,
+			  intr_mask.d32);
+
+	/* Set Global IN NAK */
+	dctl.b.sgnpinnak = 1;
+	dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
+	ep->stopped = 1;
+
+#ifdef CONFIG_DWC_DEBUG
+	/* BUG FIX: previously read in_ep_regs[epnum] with a stale local
+	 * 'epnum = 0', so the dump always showed EP0 regardless of which
+	 * endpoint timed out.  Use the _epnum argument. */
+	dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[_epnum]->dieptsiz);
+	DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", dieptsiz.b.pktcnt,
+		     dieptsiz.b.xfersize);
+
+#endif /* */
+
+#ifdef DISABLE_PERIODIC_EP
+	/*
+	 * Set the NAK bit for this EP to
+	 * start the disable process.
+	 */
+	{
+		/* BUG FIX: diepctl was used without a declaration and the
+		 * stale 'epnum' local was referenced; this branch did not
+		 * compile when DISABLE_PERIODIC_EP was defined. */
+		depctl_data_t diepctl = {.d32 = 0};
+		diepctl.b.snak = 1;
+		dwc_modify_reg32(&dev_if->in_ep_regs[_epnum]->diepctl,
+				  diepctl.d32, diepctl.d32);
+	}
+	ep->disabling = 1;
+	ep->stopped = 1;
+
+#endif /* */
+}
+
+/**
+ * This interrupt indicates that an IN EP has a pending Interrupt.
+ * The sequence for handling the IN EP interrupt is shown below:
+ * -# Read the Device All Endpoint Interrupt register
+ * -# Repeat the following for each IN EP interrupt bit set (from
+ *   LSB to MSB).
+ * -# Read the Device Endpoint Interrupt (DIEPINTn) register
+ * -# If "Transfer Complete" call the request complete function
+ * -# If "Endpoint Disabled" complete the EP disable procedure.
+ * -# If "AHB Error Interrupt" log error
+ * -# If "Time-out Handshake" log error
+ * -# If "IN Token Received when TxFIFO Empty" write packet to Tx
+ *   FIFO.
+ * -# If "IN Token EP Mismatch" (disable, this is handled by EP
+ *   Mismatch Interrupt)
+ *
+ * @param _pcd Programming view of the device controller.
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t * _pcd)
+{
+
+/* Write-1-to-clear the given bit in DIEPINTn for endpoint __epnum. */
+#define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \
+do { \
+	diepint_data_t diepint = {.d32 = 0}; \
+	diepint.b.__intr = 1; \
+	dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \
+			diepint.d32); \
+} while (0)
+
+	dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+	dwc_otg_dev_if_t * dev_if = core_if->dev_if;
+	diepint_data_t diepint = {.d32 = 0};
+	depctl_data_t diepctl = {.d32 = 0};
+	uint32_t ep_intr;
+	uint32_t epnum = 0;
+	dwc_otg_pcd_ep_t * ep;
+	dwc_ep_t * dwc_ep;
+	uint32_t _empty_msk, _diepctl;
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd);
+
+	/* Read in the device interrupt bits */
+	ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if);
+
+	/* Service the Device IN interrupts for each endpoint, LSB first */
+	while (ep_intr) {
+		if (ep_intr & 0x1) {
+			/* Get EP pointer */
+			ep = get_in_ep(_pcd, epnum);
+			dwc_ep = &ep->dwc_ep;
+			_diepctl = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl);
+			_empty_msk = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk);
+			DWC_DEBUGPL(DBG_PCDV,
+				     "IN EP INTERRUPT - %d\nepmty_msk - %8x  diepctl - %8x\n",
+				     epnum, _empty_msk, _diepctl);
+			DWC_DEBUGPL(DBG_PCD, "EP%d-%s: type=%d, mps=%d\n",
+				     dwc_ep->num,(dwc_ep->is_in ? "IN" : "OUT"),
+				     dwc_ep->type, dwc_ep->maxpacket);
+			diepint.d32 = dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep);
+			DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt Register - 0x%x\n",
+				     epnum, diepint.d32);
+
+			/* Transfer complete */
+			if (diepint.b.xfercompl) {
+				DWC_DEBUGPL(DBG_PCD,
+					     "EP%d IN Xfer Complete\n", epnum);
+
+				/* Disable the NP Tx FIFO Empty
+				 * Interrupt */
+				if (core_if->en_multiple_tx_fifo == 0) {
+					intr_mask.b.nptxfempty = 1;
+					dwc_modify_reg32(&core_if->core_global_regs->gintmsk,
+							  intr_mask.d32, 0);
+				} else {
+					/* Disable the Tx FIFO Empty Interrupt for this EP */
+					uint32_t fifoemptymsk = 0x1 << dwc_ep->num;
+					dwc_modify_reg32(&core_if->dev_if->dev_global_regs->
+							  dtknqr4_fifoemptymsk,fifoemptymsk, 0);
+				}
+
+				/* Clear the bit in DIEPINTn for this interrupt */
+				CLEAR_IN_EP_INTR(core_if, epnum, xfercompl);
+
+				/* Complete the transfer */
+				if (epnum == 0) {
+					handle_ep0(_pcd);
+				} else {
+					complete_ep(ep);
+				}
+			}
+
+			/* Endpoint disable */
+			if (diepint.b.epdisabled) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN disabled\n", epnum);
+				handle_in_ep_disable_intr(_pcd, epnum);
+
+				/* Clear the bit in DIEPINTn for this interrupt */
+				CLEAR_IN_EP_INTR(core_if, epnum, epdisabled);
+			}
+
+			/* AHB Error */
+			if (diepint.b.ahberr) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN AHB Error\n", epnum);
+
+				/* Clear the bit in DIEPINTn for this interrupt */
+				CLEAR_IN_EP_INTR(core_if, epnum, ahberr);
+			}
+
+			/* TimeOUT Handshake (non-ISOC IN EPs) */
+			if (diepint.b.timeout) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN Time-out\n", epnum);
+				handle_in_ep_timeout_intr(_pcd, epnum);
+				CLEAR_IN_EP_INTR(core_if, epnum, timeout);
+			}
+
+			/** IN Token received with TxF Empty */
+			if (diepint.b.intktxfemp) {
+				DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN TxFifo Empty\n",epnum);
+				if (!ep->stopped && epnum != 0) {
+					/* One-shot: mask this interrupt until
+					 * re-armed for the next request. */
+					diepmsk_data_t diepmsk = {.d32 = 0};
+					diepmsk.b.intktxfemp = 1;
+					dwc_modify_reg32(&dev_if-> dev_global_regs->
+							  diepmsk, diepmsk.d32, 0);
+#ifdef CONFIG_405EZ
+					/*
+					 * Added-sr: 2007-07-26
+					 *
+					 * Only start the next transfer, when currently
+					 * no other transfer is active on this endpoint.
+					 */
+					if (dwc_ep->active == 0)
+						start_next_request(ep);
+#else
+					start_next_request(ep);
+#endif
+				}
+				CLEAR_IN_EP_INTR(core_if, epnum, intktxfemp);
+			}
+
+			/** IN Token Received with EP mismatch */
+			if (diepint.b.intknepmis) {
+				DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN EP Mismatch\n",
+					     epnum);
+				CLEAR_IN_EP_INTR(core_if, epnum, intknepmis);
+			}
+
+			/** IN Endpoint NAK Effective */
+			if (diepint.b.inepnakeff) {
+				DWC_DEBUGPL(DBG_ANY, "EP%d IN EP NAK Effective\n",epnum);
+
+				/* Periodic EP */
+				if (ep->disabling) {
+					diepctl.d32 = 0;
+					diepctl.b.snak = 1;
+					diepctl.b.epdis = 1;
+					dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->
+							  diepctl, diepctl.d32,diepctl.d32);
+				}
+				CLEAR_IN_EP_INTR(core_if, epnum, inepnakeff);
+			}
+
+			/** IN EP Tx FIFO Empty Intr */
+			if (diepint.b.emptyintr) {
+				DWC_DEBUGPL(DBG_ANY,"EP%d Tx FIFO Empty Intr \n",epnum);
+				write_empty_tx_fifo(_pcd, epnum);
+				/* VJ updated based on v2.65a */
+				/* Deliberately NOT cleared here; the bit is
+				 * level-sensitive and clears when the FIFO
+				 * empty mask is disabled above. */
+				// CLEAR_IN_EP_INTR(core_if, epnum, emptyintr);
+			}
+		}
+		epnum++;
+		ep_intr >>= 1;
+	}
+	return 1;
+#undef CLEAR_IN_EP_INTR
+}
+
+/**
+ * This interrupt indicates that an OUT EP has a pending Interrupt.
+ * The sequence for handling the OUT EP interrupt is shown below:
+ * -# Read the Device All Endpoint Interrupt register
+ * -# Repeat the following for each OUT EP interrupt bit set (from
+ *   LSB to MSB).
+ * -# Read the Device Endpoint Interrupt (DOEPINTn) register
+ * -# If "Transfer Complete" call the request complete function
+ * -# If "Endpoint Disabled" complete the EP disable procedure.
+ * -# If "AHB Error Interrupt" log error
+ * -# If "Setup Phase Done" process Setup Packet (See Standard USB
+ *   Command Processing)
+ *
+ * @param _pcd Programming view of the device controller.
+ * @return Always 1 (interrupt handled).
+ */
+static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t * _pcd)
+{
+
+/* Write-1-to-clear the given bit in DOEPINTn for endpoint __epnum. */
+#define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \
+do { \
+	doepint_data_t doepint = { .d32 = 0}; \
+	doepint.b.__intr = 1; \
+	dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \
+			doepint.d32); \
+} while (0)
+
+	dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+	uint32_t ep_intr;
+	doepint_data_t doepint = {.d32 = 0};
+	uint32_t epnum = 0;
+	dwc_ep_t * dwc_ep;
+	DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__);
+
+	/* Read in the device interrupt bits */
+	ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if);
+	/* Service each pending OUT endpoint, LSB first */
+	while (ep_intr) {
+		if (ep_intr & 0x1) {
+			/* Get EP pointer */
+			dwc_ep = &((get_out_ep(_pcd, epnum))->dwc_ep);
+//			dwc_ep = &_pcd->out_ep[ epnum - 1].dwc_ep;
+#ifdef VERBOSE
+			DWC_DEBUGPL(DBG_PCDV, "EP%d-%s: type=%d, mps=%d\n",
+				     dwc_ep->num,
+				     (dwc_ep->is_in ? "IN" : "OUT"),
+				     dwc_ep->type, dwc_ep->maxpacket);
+
+#endif /* */
+			doepint.d32 = dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep);
+
+			/* Transfer complete */
+			if (doepint.b.xfercompl) {
+				DWC_DEBUGPL(DBG_PCD,"EP%d OUT Xfer Complete\n",epnum);
+
+				/* Clear the bit in DOEPINTn for this interrupt */
+				CLEAR_OUT_EP_INTR(core_if, epnum,xfercompl);
+				if (epnum == 0) {
+					handle_ep0(_pcd);
+				} else {
+					complete_ep(get_out_ep(_pcd, epnum));
+					// complete_ep( &_pcd->out_ep[ epnum - 1] );
+				}
+			}
+
+			/* Endpoint disable */
+			if (doepint.b.epdisabled) {
+				DWC_DEBUGPL(DBG_PCD, "EP%d OUT disabled\n",epnum);
+
+				/* Clear the bit in DOEPINTn for this interrupt */
+				CLEAR_OUT_EP_INTR(core_if, epnum,epdisabled);
+			}
+
+			/* AHB Error */
+			if (doepint.b.ahberr) {
+				DWC_DEBUGPL(DBG_PCD, "EP%d OUT AHB Error\n",
+					     epnum);
+				DWC_DEBUGPL(DBG_PCD, "EP DMA REG	 %d \n",
+					     core_if->dev_if->out_ep_regs[epnum]->doepdma);
+				CLEAR_OUT_EP_INTR(core_if, epnum, ahberr);
+			}
+
+			/* Setup Phase Done (control EPs) */
+			if (doepint.b.setup) {
+#ifdef DEBUG_EP0
+				DWC_DEBUGPL(DBG_PCD, "EP%d SETUP Done\n",epnum);
+
+#endif /* */
+				handle_ep0(_pcd);
+				CLEAR_OUT_EP_INTR(core_if, epnum, setup);
+			}
+		}
+		epnum++;
+		ep_intr >>= 1;
+	}
+	return 1;
+
+#undef CLEAR_OUT_EP_INTR
+}
+
+/**
+ * Incomplete ISO IN Transfer Interrupt.
+ * This interrupt indicates one of the following conditions occurred
+ * while transmitting an ISOC transaction.
+ * - Corrupted IN Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ * The follow actions will be taken:
+ * -# Determine the EP
+ * -# Set incomplete flag in dwc_ep structure
+ * -# Disable EP; when "Endpoint Disabled" interrupt is received
+ *    Flush FIFO
+ *
+ * NOTE: stub implementation -- it only masks and acknowledges the
+ * interrupt so it cannot fire repeatedly; the actions above are
+ * not yet implemented.
+ *
+ * @param _pcd Programming view of the device controller.
+ * @return Always 1 (interrupt acknowledged).
+ */
+int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t * _pcd)
+{
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	gintsts_data_t gintsts;
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		   "IN ISOC Incomplete");
+	/* Mask the interrupt so the unimplemented condition cannot storm */
+	intr_mask.b.incomplisoin = 1;
+	dwc_modify_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.incomplisoin = 1;
+	dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * Incomplete ISO OUT Transfer Interrupt.
+ *
+ * This interrupt indicates that the core has dropped an ISO OUT
+ * packet. The following conditions can be the cause:
+ * - FIFO Full, the entire packet would not fit in the FIFO.
+ * - CRC Error
+ * - Corrupted Token
+ * The follow actions will be taken:
+ * -# Determine the EP
+ * -# Set incomplete flag in dwc_ep structure
+ * -# Read any data from the FIFO
+ * -# Disable EP. when "Endpoint Disabled" interrupt is received
+ *    re-enable EP.
+ *
+ * NOTE: stub implementation -- it only masks and acknowledges the
+ * interrupt; the actions above are not yet implemented.
+ *
+ * @param _pcd Programming view of the device controller.
+ * @return Always 1 (interrupt acknowledged).
+ */
+int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t * _pcd)
+{
+
+	/** @todo implement ISR */
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	gintsts_data_t gintsts;
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		   "OUT ISOC Incomplete");
+	/* Mask the interrupt so the unimplemented condition cannot storm */
+	intr_mask.b.incomplisoout = 1;
+	dwc_modify_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.incomplisoout = 1;
+	dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * This function handles the Global IN NAK Effective interrupt.
+ *
+ * Fires once the Global IN NAK set in the timeout handler has taken
+ * effect.  Every currently-enabled IN endpoint is disabled (epdis +
+ * snak), then the interrupt is masked and acknowledged.  The per-EP
+ * "Endpoint Disabled" interrupts complete the recovery.
+ *
+ * @param _pcd Programming view of the device controller.
+ * @return Always 1 (interrupt handled).
+ */
+int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t * _pcd)
+{
+	dwc_otg_dev_if_t * dev_if = GET_CORE_IF(_pcd)->dev_if;
+	depctl_data_t diepctl = {.d32 = 0};
+	depctl_data_t diepctl_rd = {.d32 = 0};
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	gintsts_data_t gintsts;
+	int i;
+	DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n");
+
+	/* Disable all active IN EPs */
+	diepctl.b.epdis = 1;
+	diepctl.b.snak = 1;
+	for (i = 0; i <= dev_if->num_in_eps; i++) {
+		diepctl_rd.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl);
+		/* Only endpoints that are currently enabled */
+		if (diepctl_rd.b.epena) {
+			dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, diepctl.d32);
+		}
+	}
+
+	/* Disable the Global IN NAK Effective Interrupt */
+	intr_mask.b.ginnakeff = 1;
+	dwc_modify_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.ginnakeff = 1;
+	dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * OUT NAK Effective.
+ *
+ * Stub handler for the Global OUT NAK Effective interrupt: masks and
+ * acknowledges the interrupt so it cannot fire repeatedly.  No OUT
+ * endpoint recovery is implemented yet.
+ *
+ * @param _pcd Programming view of the device controller.
+ * @return Always 1 (interrupt acknowledged).
+ */
+int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t * _pcd)
+{
+	gintmsk_data_t intr_mask = {.d32 = 0};
+	gintsts_data_t gintsts;
+	/* BUG FIX: message previously said "Global IN NAK Effective" (a
+	 * copy/paste from the IN handler) and embedded a stray '\n'. */
+	DWC_PRINT("INTERRUPT Handler not implemented for %s\n",
+		   "Global OUT NAK Effective");
+
+	/* Disable the Global OUT NAK Effective Interrupt */
+	intr_mask.b.goutnakeff = 1;
+	dwc_modify_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintmsk,
+			  intr_mask.d32, 0);
+
+	/* Clear interrupt */
+	gintsts.d32 = 0;
+	gintsts.b.goutnakeff = 1;
+	dwc_write_reg32(&GET_CORE_IF(_pcd)->core_global_regs->gintsts,
+			 gintsts.d32);
+	return 1;
+}
+
+/**
+ * PCD interrupt handler.
+ *
+ * The PCD handles the device interrupts. Many conditions can cause a
+ * device interrupt. When an interrupt occurs, the device interrupt
+ * service routine determines the cause of the interrupt and
+ * dispatches handling to the appropriate function. These interrupt
+ * handling functions are described below.
+ *
+ * All interrupt registers are processed from LSB to MSB.
+ *
+ * @param _pcd Programming view of the device controller.
+ * @return OR of the individual handlers' return values; 0 when the
+ *         core is in host mode or no enabled interrupt is pending.
+ */
+int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t * _pcd)
+{
+	dwc_otg_core_if_t * core_if = GET_CORE_IF(_pcd);
+
+#ifdef VERBOSE
+	dwc_otg_core_global_regs_t * global_regs =
+	    core_if->core_global_regs;
+#endif /* */
+	gintsts_data_t gintr_status;
+	int32_t retval = 0;
+
+	if (dwc_otg_is_device_mode(core_if)) {
+		SPIN_LOCK(&_pcd->lock);
+
+#ifdef VERBOSE
+		DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n",
+			     __func__,
+			     dwc_read_reg32(&global_regs->gintsts),
+			     dwc_read_reg32(&global_regs->gintmsk));
+
+#endif /* */
+		gintr_status.d32 = dwc_otg_read_core_intr(core_if);
+		if (!gintr_status.d32) {
+			/* BUG FIX: the early return previously left
+			 * _pcd->lock held, deadlocking the next entry
+			 * into this handler. */
+			SPIN_UNLOCK(&_pcd->lock);
+			return 0;
+		}
+		DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n", __func__,
+			     gintr_status.d32);
+		if (gintr_status.b.sofintr) {
+			retval |= dwc_otg_pcd_handle_sof_intr(_pcd);
+		}
+#ifndef CONFIG_OTG_PLB_DMA_TASKLET
+		if (gintr_status.b.rxstsqlvl) {
+			retval |= dwc_otg_pcd_handle_rx_status_q_level_intr(_pcd);
+		}
+		if (gintr_status.b.nptxfempty) {
+			retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(_pcd);
+		}
+#endif
+		if (gintr_status.b.ginnakeff) {
+			retval |= dwc_otg_pcd_handle_in_nak_effective(_pcd);
+		}
+		if (gintr_status.b.goutnakeff) {
+			retval |= dwc_otg_pcd_handle_out_nak_effective(_pcd);
+		}
+		if (gintr_status.b.i2cintr) {
+			retval |= dwc_otg_pcd_handle_i2c_intr(_pcd);
+		}
+		if (gintr_status.b.erlysuspend) {
+			retval |= dwc_otg_pcd_handle_early_suspend_intr(_pcd);
+		}
+		if (gintr_status.b.usbreset) {
+			retval |= dwc_otg_pcd_handle_usb_reset_intr(_pcd);
+		}
+		if (gintr_status.b.enumdone) {
+			retval |= dwc_otg_pcd_handle_enum_done_intr(_pcd);
+		}
+		if (gintr_status.b.isooutdrop) {
+			retval |= dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(_pcd);
+		}
+		if (gintr_status.b.eopframe) {
+			retval |= dwc_otg_pcd_handle_end_periodic_frame_intr(_pcd);
+		}
+		if (gintr_status.b.epmismatch) {
+			retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if);
+		}
+		if (gintr_status.b.inepint) {
+			retval |= dwc_otg_pcd_handle_in_ep_intr(_pcd);
+		}
+		if (gintr_status.b.outepintr) {
+			retval |= dwc_otg_pcd_handle_out_ep_intr(_pcd);
+		}
+		if (gintr_status.b.incomplisoin) {
+			retval |= dwc_otg_pcd_handle_incomplete_isoc_in_intr(_pcd);
+		}
+		if (gintr_status.b.incomplisoout) {
+			retval |= dwc_otg_pcd_handle_incomplete_isoc_out_intr(_pcd);
+		}
+#ifdef VERBOSE
+		DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__,
+			     dwc_read_reg32(&global_regs->gintsts));
+
+#endif /* */
+#ifdef CONFIG_OTG_PLB_DMA_TASKLET
+		/* With the PLB DMA tasklet these are serviced last, and the
+		 * NP TxFIFO empty interrupt is deferred while a tasklet
+		 * still owns the FIFO (release_later). */
+		if (gintr_status.b.rxstsqlvl) {
+			retval |= dwc_otg_pcd_handle_rx_status_q_level_intr(_pcd);
+		}
+		if (!atomic_read(&release_later) && gintr_status.b.nptxfempty) {
+			retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(_pcd);
+		}
+#endif
+		SPIN_UNLOCK(&_pcd->lock);
+	}
+	return retval;
+}
+
+
+#endif /* DWC_HOST_ONLY */
diff --git a/drivers/usb/gadget/dwc_otg/dwc_otg_regs.h b/drivers/usb/gadget/dwc_otg/dwc_otg_regs.h
new file mode 100644
index 00000000000..e6fc661d762
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/dwc_otg_regs.h
@@ -0,0 +1,3606 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_regs.h $
+ * $Revision: #8 $
+ * $Date: 2007/02/07 $
+ * $Change: 791271 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#ifndef __DWC_OTG_REGS_H__
+#define __DWC_OTG_REGS_H__
+
+/**
+ * @file
+ *
+ * This file contains the data structures for accessing the DWC_otg core registers.
+ *
+ * The application interfaces with the HS OTG core by reading from and
+ * writing to the Control and Status Register (CSR) space through the
+ * AHB Slave interface. These registers are 32 bits wide, and the
+ * addresses are 32-bit-block aligned.
+ * CSRs are classified as follows:
+ * - Core Global Registers
+ * - Device Mode Registers
+ * - Device Global Registers
+ * - Device Endpoint Specific Registers
+ * - Host Mode Registers
+ * - Host Global Registers
+ * - Host Port CSRs
+ * - Host Channel Specific Registers
+ *
+ * Only the Core Global registers can be accessed in both Device and
+ * Host modes. When the HS OTG core is operating in one mode, either
+ * Device or Host, the application must not access registers from the
+ * other mode. When the core switches from one mode to another, the
+ * registers in the new mode of operation must be reprogrammed as they
+ * would be after a power-on reset.
+ */
+
+/****************************************************************************/
+/** DWC_otg Core registers .
+ * The dwc_otg_core_global_regs structure defines the size
+ * and relative field offsets for the Core Global registers.
+ * The layout must match the hardware register map exactly; do not
+ * reorder, insert, or resize members.
+ */
+typedef struct dwc_otg_core_global_regs
+{
+	/** OTG Control and Status Register. <i>Offset: 000h</i> */
+	volatile uint32_t gotgctl;
+	/** OTG Interrupt Register. <i>Offset: 004h</i> */
+	volatile uint32_t gotgint;
+	/**Core AHB Configuration Register. <i>Offset: 008h</i> */
+	volatile uint32_t gahbcfg;
+
+/* Legacy raw-value masks for gahbcfg (see gahbcfg_data_t bit fields). */
+#define DWC_GLBINTRMASK		0x0001
+#define DWC_DMAENABLE		0x0020
+#define DWC_NPTXEMPTYLVL_EMPTY	0x0080
+#define DWC_NPTXEMPTYLVL_HALFEMPTY	0x0000
+#define DWC_PTXEMPTYLVL_EMPTY	0x0100
+#define DWC_PTXEMPTYLVL_HALFEMPTY	0x0000
+
+	/**Core USB Configuration Register. <i>Offset: 00Ch</i> */
+	volatile uint32_t gusbcfg;
+	/**Core Reset Register. <i>Offset: 010h</i> */
+	volatile uint32_t grstctl;
+	/**Core Interrupt Register. <i>Offset: 014h</i> */
+	volatile uint32_t gintsts;
+	/**Core Interrupt Mask Register. <i>Offset: 018h</i> */
+	volatile uint32_t gintmsk;
+	/**Receive Status Queue Read Register (Read Only). <i>Offset: 01Ch</i> */
+	volatile uint32_t grxstsr;
+	/**Receive Status Queue Read & POP Register (Read Only). <i>Offset: 020h</i>*/
+	volatile uint32_t grxstsp;
+	/**Receive FIFO Size Register. <i>Offset: 024h</i> */
+	volatile uint32_t grxfsiz;
+	/**Non Periodic Transmit FIFO Size Register. <i>Offset: 028h</i> */
+	volatile uint32_t gnptxfsiz;
+	/**Non Periodic Transmit FIFO/Queue Status Register (Read
+	 * Only). <i>Offset: 02Ch</i> */
+	volatile uint32_t gnptxsts;
+	/**I2C Access Register. <i>Offset: 030h</i> */
+	volatile uint32_t gi2cctl;
+	/**PHY Vendor Control Register. <i>Offset: 034h</i> */
+	volatile uint32_t gpvndctl;
+	/**General Purpose Input/Output Register. <i>Offset: 038h</i> */
+	volatile uint32_t ggpio;
+	/**User ID Register. <i>Offset: 03Ch</i> */
+	volatile uint32_t guid;
+	/**Synopsys ID Register (Read Only). <i>Offset: 040h</i> */
+	volatile uint32_t gsnpsid;
+	/**User HW Config1 Register (Read Only). <i>Offset: 044h</i> */
+	volatile uint32_t ghwcfg1;
+	/**User HW Config2 Register (Read Only). <i>Offset: 048h</i> */
+	volatile uint32_t ghwcfg2;
+/* Architecture values reported in ghwcfg2. */
+#define DWC_SLAVE_ONLY_ARCH 0
+#define DWC_EXT_DMA_ARCH 1
+#define DWC_INT_DMA_ARCH 2
+
+/* Operating-mode values reported in ghwcfg2. */
+#define DWC_MODE_HNP_SRP_CAPABLE	0
+#define DWC_MODE_SRP_ONLY_CAPABLE	1
+#define DWC_MODE_NO_HNP_SRP_CAPABLE	2
+#define DWC_MODE_SRP_CAPABLE_DEVICE	3
+#define DWC_MODE_NO_SRP_CAPABLE_DEVICE	4
+#define DWC_MODE_SRP_CAPABLE_HOST	5
+#define DWC_MODE_NO_SRP_CAPABLE_HOST	6
+
+	/**User HW Config3 Register (Read Only). <i>Offset: 04Ch</i> */
+	volatile uint32_t ghwcfg3;
+	/**User HW Config4 Register (Read Only). <i>Offset: 050h</i>*/
+	volatile uint32_t ghwcfg4;
+	/** Reserved  <i>Offset: 054h-0FFh</i> */
+	uint32_t reserved[43];
+	/** Host Periodic Transmit FIFO Size Register. <i>Offset: 100h</i> */
+	volatile uint32_t hptxfsiz;
+	/** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled,
+		otherwise Device Transmit FIFO#n Register.
+	 * <i>Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15).</i> */
+	volatile uint32_t dptxfsiz_dieptxf[15];
+} dwc_otg_core_global_regs_t;
+
+
+#if defined(CONFIG_4xx)
+/**
+ * This union represents the bit fields of the Core OTG Control
+ * and Status Register (GOTGCTL).  Set the bits using the bit
+ * fields then write the <i>d32</i> value to the register.
+ *
+ * NOTE: under CONFIG_4xx the bit fields are declared most-significant
+ * bit first to match the big-endian PowerPC 4xx bitfield layout.
+ */
+typedef union gotgctl_data //*
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct
+	{
+		unsigned reserved31_21 : 11;
+		/** Current Mode of operation */
+		unsigned currmod : 1;
+		/** B-Session Valid */
+		unsigned bsesvld : 1;
+		/** A-Session Valid */
+		unsigned asesvld : 1;
+		unsigned reserved17 : 1;
+		/** Connector ID Status */
+		unsigned conidsts : 1;
+		unsigned reserved1_12 : 4;
+		/** Device HNP Enabled */
+		unsigned devhnpen : 1;
+		/** Host Set HNP Enable */
+		unsigned hstsethnpen : 1;
+		/** HNP Request */
+		unsigned hnpreq : 1;
+		/** Host Negotiation Success */
+		unsigned hstnegscs : 1;
+		unsigned reserved07_02 : 6;
+		/** Session Request */
+		unsigned sesreq : 1;
+		/** Session Request Success */
+		unsigned sesreqscs : 1;
+	} b;
+} gotgctl_data_t;
+
+/**
+ * This union represents the bit fields of the Core OTG Interrupt Register
+ * (GOTGINT).  Set/clear the bits using the bit fields then write the <i>d32</i>
+ * value to the register.
+ *
+ * NOTE: bit fields are declared most-significant bit first (big-endian
+ * PowerPC 4xx layout).  The field name "reserver16_10" is a historical
+ * typo for "reserved"; it is kept to avoid breaking references.
+ */
+typedef union gotgint_data //*
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct
+	{
+		/** Current Mode */
+		unsigned reserved31_20 : 12;
+		/** Debounce Done */
+		unsigned debdone : 1;
+		/** A-Device Timeout Change */
+		unsigned adevtoutchng : 1;
+		/** Host Negotiation Detected */
+		unsigned hstnegdet : 1;
+		unsigned reserver16_10 : 7;
+		/** Host Negotiation Success Status Change */
+		unsigned hstnegsucstschng : 1;
+		/** Session Request Success Status Change */
+		unsigned sesreqsucstschng : 1;
+		unsigned reserved3_7 : 5;
+		/** Session End Detected */
+		unsigned sesenddet : 1;
+		unsigned reserved01_00 : 2;
+	} b;
+} gotgint_data_t;
+
+
+/**
+ * This union represents the bit fields of the Core AHB Configuration
+ * Register (GAHBCFG).  Set/clear the bits using the bit fields then
+ * write the <i>d32</i> value to the register.
+ *
+ * NOTE: bit fields are declared most-significant bit first (big-endian
+ * PowerPC 4xx layout).
+ */
+typedef union gahbcfg_data //*
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct
+	{
+		unsigned reserved9_31 : 23;
+		/** Periodic TxFIFO Empty Level */
+		unsigned ptxfemplvl : 1;
+#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY		1
+#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY	0
+		/** Non-Periodic TxFIFO Empty Level */
+		unsigned nptxfemplvl_txfemplvl : 1;	/*fscz*/
+		unsigned reserved : 1;
+
+		/** DMA Enable */
+		unsigned dmaenable : 1;
+#define DWC_GAHBCFG_DMAENABLE			1
+
+		/** Burst Length/Type for internal DMA mode */
+		unsigned hburstlen : 4;
+#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE	0
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR		1
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR4	3
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR8	5
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR16	7
+
+		/** Global Interrupt Mask */
+		unsigned glblintrmsk : 1;
+#define DWC_GAHBCFG_GLBINT_ENABLE		1
+	} b;
+} gahbcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core USB Configuration
+ * Register (GUSBCFG).  Set the bits using the bit fields then write
+ * the <i>d32</i> value to the register.
+ *
+ * NOTE: bit fields are declared most-significant bit first (big-endian
+ * PowerPC 4xx layout).
+ */
+typedef union gusbcfg_data //*
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct
+	{
+		unsigned corrupt_tx_packet: 1; /*fscz*/
+		unsigned force_device_mode: 1;
+		unsigned force_host_mode: 1;
+		unsigned reserved23_28 : 6;
+		unsigned term_sel_dl_pulse : 1;
+		unsigned ulpi_int_vbus_indicator : 1;
+		unsigned ulpi_ext_vbus_drv : 1;
+		unsigned ulpi_clk_sus_m : 1;
+		unsigned ulpi_auto_res : 1;
+		unsigned ulpi_fsls : 1;
+
+		unsigned otgutmifssel : 1;
+		unsigned phylpwrclksel : 1;
+		unsigned nptxfrwnden : 1;
+		/** USB Turnaround Time */
+		unsigned usbtrdtim : 4;
+		/** HNP-Capable */
+		unsigned hnpcap : 1;
+		/** SRP-Capable */
+		unsigned srpcap : 1;
+		unsigned ddrsel : 1;
+		/** PHY Select (FS serial vs UTMI+/ULPI) */
+		unsigned physel : 1;
+		unsigned fsintf : 1;
+		unsigned ulpi_utmi_sel : 1;
+		/** PHY Interface width */
+		unsigned phyif : 1;
+		unsigned toutcal : 3;
+	} b;
+} gusbcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core Reset Register
+ * (GRSTCTL).  Set/clear the bits using the bit fields then write the
+ * <i>d32</i> value to the register.
+ *
+ * NOTE: bit fields are declared most-significant bit first (big-endian
+ * PowerPC 4xx layout).
+ */
+typedef union grstctl_data //*
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct
+	{
+		/** AHB Master Idle.  Indicates the AHB Master State
+		 * Machine is in IDLE condition. */
+		unsigned ahbidle : 1;
+
+		/** DMA Request Signal.  Indicates a DMA request is in
+		 * progress.  Used for debug purpose. */
+		unsigned dmareq : 1;
+
+		/** Reserved */
+		unsigned reserved29_11 : 19;
+
+		/** TxFIFO Number (TxFNum) (Device and Host).
+		 *
+		 * This is the FIFO number which needs to be flushed,
+		 * using the TxFIFO Flush bit.  This field should not
+		 * be changed until the TxFIFO Flush bit is cleared by
+		 * the core.
+		 *	 - 0x0 : Non Periodic TxFIFO Flush
+		 *	 - 0x1 : Periodic TxFIFO #1 Flush in device mode
+		 *	   or Periodic TxFIFO in host mode
+		 *	 - 0x2 : Periodic TxFIFO #2 Flush in device mode.
+		 *	 - ...
+		 *	 - 0xF : Periodic TxFIFO #15 Flush in device mode
+		 *	 - 0x10: Flush all the Transmit NonPeriodic and
+		 *	   Transmit Periodic FIFOs in the core
+		 */
+		unsigned txfnum : 5;
+
+		/** TxFIFO Flush (TxFFlsh) (Device and Host).
+		 *
+		 * This bit is used to selectively flush a single or
+		 * all transmit FIFOs.  The application must first
+		 * ensure that the core is not in the middle of a
+		 * transaction.  <p>The application should write into
+		 * this bit, only after making sure that neither the
+		 * DMA engine is writing into the TxFIFO nor the MAC
+		 * is reading the data out of the FIFO.  <p>The
+		 * application should wait until the core clears this
+		 * bit, before performing any operations.  This bit
+		 * takes 8 clocks (slowest of PHY or AHB clock)
+		 * to clear.
+		 */
+		unsigned txfflsh : 1;
+
+		/** RxFIFO Flush (RxFFlsh) (Device and Host)
+		 *
+		 * The application can flush the entire Receive FIFO
+		 * using this bit.  <p>The application must first
+		 * ensure that the core is not in the middle of a
+		 * transaction.  <p>The application should write into
+		 * this bit, only after making sure that neither the
+		 * DMA engine is reading from the RxFIFO nor the MAC
+		 * is writing the data in to the FIFO.  <p>The
+		 * application should wait until the bit is cleared
+		 * before performing any other operations.  This bit
+		 * takes 8 clocks (slowest of PHY or AHB clock)
+		 * to clear.
+		 */
+		unsigned rxfflsh : 1;
+
+		/** In Token Sequence Learning Queue Flush
+		 * (INTknQFlsh) (Device Only)
+		 */
+		unsigned intknqflsh : 1;
+
+		/** Host Frame Counter Reset (Host Only)<br>
+		 *
+		 * The application can reset the (micro)frame number
+		 * counter inside the core, using this bit.  When the
+		 * (micro)frame counter is reset, the subsequent SOF
+		 * sent out by the core, will have a (micro)frame
+		 * number of 0.
+		 */
+		unsigned hstfrm : 1;
+
+		/** Hclk Soft Reset
+		 *
+		 * The application uses this bit to reset the control logic in
+		 * the AHB clock domain.  Only AHB clock domain pipelines are
+		 * reset.
+		 */
+		unsigned hsftrst : 1;
+
+		/** Core Soft Reset (CSftRst) (Device and Host)
+		 *
+		 * The application can flush the control logic in the
+		 * entire core using this bit.  This bit resets the
+		 * pipelines in the AHB Clock domain as well as the
+		 * PHY Clock domain.
+		 *
+		 * The state machines are reset to an IDLE state, the
+		 * control bits in the CSRs are cleared, all the
+		 * transmit FIFOs and the receive FIFO are flushed.
+		 *
+		 * The status mask bits that control the generation of
+		 * the interrupt, are cleared, to clear the
+		 * interrupt.  The interrupt status bits are not
+		 * cleared, so the application can get the status of
+		 * any events that occurred in the core after it has
+		 * set this bit.
+		 *
+		 * Any transactions on the AHB are terminated as soon
+		 * as possible following the protocol.  Any
+		 * transactions on the USB are terminated immediately.
+		 *
+		 * The configuration settings in the CSRs are
+		 * unchanged, so the software doesn't have to
+		 * reprogram these registers (Device
+		 * Configuration/Host Configuration/Core System
+		 * Configuration/Core PHY Configuration).
+		 *
+		 * The application can write to this bit, any time it
+		 * wants to reset the core.  This is a self clearing
+		 * bit and the core clears this bit after all the
+		 * necessary logic is reset in the core, which may
+		 * take several clocks, depending on the current state
+		 * of the core.
+		 */
+		unsigned csftrst : 1;
+	} b;
+} grstctl_t;
+
+/**
+ * This union represents the bit fields of the Core Interrupt Mask
+ * Register (GINTMSK). Set/clear the bits using the bit fields then
+ * write the <i>d32</i> value to the register.
+ */
+typedef union gintmsk_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct
+ {
+ unsigned wkupintr : 1;
+ unsigned sessreqintr : 1;
+ unsigned disconnect : 1;
+ unsigned conidstschng : 1;
+ unsigned reserved27 : 1;
+ unsigned ptxfempty : 1;
+ unsigned hcintr : 1;
+ unsigned portintr : 1;
+ unsigned reserved22_23 : 2;
+ unsigned incomplisoout : 1;
+ unsigned incomplisoin : 1;
+ unsigned outepintr : 1;
+ unsigned inepintr : 1;
+ unsigned epmismatch : 1;
+ unsigned reserved16 : 1;
+ unsigned eopframe : 1;
+ unsigned isooutdrop : 1;
+ unsigned enumdone : 1;
+ unsigned usbreset : 1;
+ unsigned usbsuspend : 1;
+ unsigned erlysuspend : 1;
+ unsigned i2cintr : 1;
+ unsigned reserved08 : 1;
+ unsigned goutnakeff : 1;
+ unsigned ginnakeff : 1;
+ unsigned nptxfempty : 1;
+ unsigned rxstsqlvl : 1;
+ unsigned sofintr : 1;
+ unsigned otgintr : 1;
+ unsigned modemismatch : 1;
+ unsigned reserved00 : 1;
+ } b;
+} gintmsk_data_t;
+/**
+ * This union represents the bit fields of the Core Interrupt Register
+ * (GINTSTS). Set/clear the bits using the bit fields then write the
+ * <i>d32</i> value to the register.
+ */
+typedef union gintsts_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+#define DWC_SOF_INTR_MASK 0x0008
+ /** register bits */
+ struct
+ {
+#define DWC_HOST_MODE 1
+ unsigned wkupintr : 1;
+ unsigned sessreqintr : 1;
+ unsigned disconnect : 1;
+ unsigned conidstschng : 1;
+ unsigned reserved27 : 1;
+ unsigned ptxfempty : 1;
+ unsigned hcintr : 1;
+ unsigned portintr : 1;
+ unsigned reserved22_23 : 2;
+ unsigned incomplisoout : 1;
+ unsigned incomplisoin : 1;
+ unsigned outepintr : 1;
+ unsigned inepint: 1;
+ unsigned epmismatch : 1;
+ unsigned intokenrx : 1;
+ unsigned eopframe : 1;
+ unsigned isooutdrop : 1;
+ unsigned enumdone : 1;
+ unsigned usbreset : 1;
+ unsigned usbsuspend : 1;
+ unsigned erlysuspend : 1;
+ unsigned i2cintr : 1;
+ unsigned reserved8 : 1;
+ unsigned goutnakeff : 1;
+ unsigned ginnakeff : 1;
+ unsigned nptxfempty : 1;
+ unsigned rxstsqlvl : 1;
+ unsigned sofintr : 1;
+ unsigned otgintr : 1;
+ unsigned modemismatch : 1;
+ unsigned curmode : 1;
+ } b;
+} gintsts_data_t;
+
+
+/**
+ * This union represents the bit fields in the Device Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i>
+ * element then read out the bits using the <i>b</i>it elements.
+ */
+typedef union device_grxsts_data { //*
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 7;
+ unsigned fn : 4;
+ unsigned pktsts : 4;
+#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet
+#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete
+
+#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK
+#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete
+#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet
+
+ unsigned dpid : 2;
+ unsigned bcnt : 11;
+ unsigned epnum : 4;
+ } b;
+} device_grxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Host Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i>
+ * element then read out the bits using the <i>b</i>it elements.
+ */
+typedef union host_grxsts_data { //*
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31_21 : 11;
+ unsigned pktsts : 4;
+#define DWC_GRXSTS_PKTSTS_IN 0x2
+#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3
+#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5
+#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7
+
+ unsigned dpid : 2;
+ unsigned bcnt : 11;
+ unsigned chnum : 4;
+ } b;
+} host_grxsts_data_t;
+
+/**
+ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ,
+ * GNPTXFSIZ, DPTXFSIZn). Read the register into the <i>d32</i> element then
+ * read out the bits using the <i>b</i>it elements.
+ */
+typedef union fifosize_data { //*
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned depth : 16;
+ unsigned startaddr : 16;
+ } b;
+} fifosize_data_t;
+
+/**
+ * This union represents the bit fields in the Non-Periodic Transmit
+ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the
+ * <i>d32</i> element then read out the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union gnptxsts_data { //*
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 1;
+ /** Top of the Non-Periodic Transmit Request Queue
+ * - bits 30:27 - Channel/EP Number
+ * - bits 26:25 - Token Type
+ * - 2'b00 - IN/OUT
+ * - 2'b01 - Zero Length OUT
+ * - 2'b10 - PING/Complete Split
+ * - 2'b11 - Channel Halt
+ * - bit 24 - Terminate (Last entry for the selected
+ * channel/EP)
+ */
+ unsigned nptxqtop_chnep : 4;
+ unsigned nptxqtop_token : 2;
+ unsigned nptxqtop_terminate : 1;
+ unsigned nptxqspcavail : 8;
+ unsigned nptxfspcavail : 16;
+ } b;
+} gnptxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Transmit
+ * FIFO Status Register (DTXFSTS). Read the register into the
+ * <i>d32</i> element then read out the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union dtxfsts_data /* fscz */ //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 16;
+ unsigned txfspcavail : 16;
+ } b;
+} dtxfsts_data_t;
+
+
+/**
+ * This union represents the bit fields in the I2C Control Register
+ * (I2CCTL). Read the register into the <i>d32</i> element then read out the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union gi2cctl_data { //*
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned bsydne : 1;
+ unsigned rw : 1;
+ unsigned reserved : 2;
+ unsigned i2cdevaddr : 2;
+ unsigned i2csuspctl : 1;
+ unsigned ack : 1;
+ unsigned i2cen : 1;
+ unsigned addr : 7;
+ unsigned regaddr : 8;
+ unsigned rwdata : 8;
+ } b;
+} gi2cctl_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config1
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg1_data { //*
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned ep_dir15 : 2;
+ unsigned ep_dir14 : 2;
+ unsigned ep_dir13 : 2;
+ unsigned ep_dir12 : 2;
+ unsigned ep_dir11 : 2;
+ unsigned ep_dir10 : 2;
+ unsigned ep_dir9 : 2;
+ unsigned ep_dir8 : 2;
+ unsigned ep_dir7 : 2;
+ unsigned ep_dir6 : 2;
+ unsigned ep_dir5 : 2;
+ unsigned ep_dir4 : 2;
+ unsigned ep_dir3 : 2;
+ unsigned ep_dir2 : 2;
+ unsigned ep_dir1 : 2;
+ unsigned ep_dir0 : 2;
+ } b;
+} hwcfg1_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config2
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg2_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /* GHWCFG2 */
+ unsigned reserved31 : 1;
+ unsigned dev_token_q_depth : 5;
+ unsigned host_perio_tx_q_depth : 2;
+ unsigned nonperio_tx_q_depth : 2;
+ unsigned rx_status_q_depth : 2;
+ unsigned dynamic_fifo : 1;
+ unsigned perio_ep_supported : 1;
+ unsigned num_host_chan : 4;
+ unsigned num_dev_ep : 4;
+ unsigned fs_phy_type : 2;
+ unsigned hs_phy_type : 2;
+#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1
+#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2
+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+
+ unsigned point2point : 1;
+ unsigned architecture : 2;
+ unsigned op_mode : 3;
+#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0
+#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1
+#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2
+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+ } b;
+} hwcfg2_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config3
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg3_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /* GHWCFG3 */
+ unsigned dfifo_depth : 16;
+ unsigned reserved15_13 : 3;
+ unsigned ahb_phy_clock_synch : 1;
+ unsigned synch_reset_type : 1;
+ unsigned optional_features : 1;
+ unsigned vendor_ctrl_if : 1;
+ unsigned i2c : 1;
+ unsigned otg_func : 1;
+ unsigned packet_size_cntr_width : 3;
+ unsigned xfer_size_cntr_width : 4;
+ } b;
+} hwcfg3_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config4
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg4_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+
+ unsigned reserved31_30 : 2; /* fscz */
+ unsigned num_in_eps : 4;
+ unsigned ded_fifo_en : 1;
+
+ unsigned session_end_filt_en : 1;
+ unsigned b_valid_filt_en : 1;
+ unsigned a_valid_filt_en : 1;
+ unsigned vbus_valid_filt_en : 1;
+ unsigned iddig_filt_en : 1;
+ unsigned num_dev_mode_ctrl_ep : 4;
+ unsigned utmi_phy_data_width : 2;
+ unsigned min_ahb_freq : 9;
+ unsigned power_optimiz : 1;
+ unsigned num_dev_perio_in_ep : 4;
+ } b;
+} hwcfg4_data_t;
+
+
+////////////////////////////////////////////
+// Device Registers
+/**
+ * Device Global Registers. <i>Offsets 800h-BFFh</i>
+ *
+ * The following structures define the size and relative field offsets
+ * for the Device Mode Registers.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_global_regs
+{
+ /** Device Configuration Register. <i>Offset 800h</i> */
+ volatile uint32_t dcfg;
+ /** Device Control Register. <i>Offset: 804h</i> */
+ volatile uint32_t dctl;
+ /** Device Status Register (Read Only). <i>Offset: 808h</i> */
+ volatile uint32_t dsts;
+ /** Reserved. <i>Offset: 80Ch</i> */
+ uint32_t unused;
+ /** Device IN Endpoint Common Interrupt Mask
+ * Register. <i>Offset: 810h</i> */
+ volatile uint32_t diepmsk;
+ /** Device OUT Endpoint Common Interrupt Mask
+ * Register. <i>Offset: 814h</i> */
+ volatile uint32_t doepmsk;
+ /** Device All Endpoints Interrupt Register. <i>Offset: 818h</i> */
+ volatile uint32_t daint;
+ /** Device All Endpoints Interrupt Mask Register. <i>Offset:
+ * 81Ch</i> */
+ volatile uint32_t daintmsk;
+ /** Device IN Token Queue Read Register-1 (Read Only).
+ * <i>Offset: 820h</i> */
+ volatile uint32_t dtknqr1;
+ /** Device IN Token Queue Read Register-2 (Read Only).
+ * <i>Offset: 824h</i> */
+ volatile uint32_t dtknqr2;
+ /** Device VBUS discharge Register. <i>Offset: 828h</i> */
+ volatile uint32_t dvbusdis;
+ /** Device VBUS Pulse Register. <i>Offset: 82Ch</i> */
+ volatile uint32_t dvbuspulse;
+ /** Device IN Token Queue Read Register-3 (Read Only).
+ * <i>Offset: 830h</i> */
+ /*fscz*/
+ /** Device IN Token Queue Read Register-3 (Read Only). /
+ * Device Thresholding control register (Read/Write)
+ * <i>Offset: 830h</i> */
+ volatile uint32_t dtknqr3_dthrctl;
+ /** Device IN Token Queue Read Register-4 (Read Only). /
+ * Device IN EPs empty Inr. Mask Register (Read/Write)
+ * <i>Offset: 834h</i> */
+ volatile uint32_t dtknqr4_fifoemptymsk;
+} dwc_otg_device_global_regs_t;
+
+/**
+ * This union represents the bit fields in the Device Configuration
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements. Write the
+ * <i>d32</i> member to the dcfg register.
+ */
+typedef union dcfg_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved0_8 : 9; /*fsczzzz correct*/
+ unsigned epmscnt : 5;
+ /** In Endpoint Mis-match count */
+ unsigned reserved17_13 : 5;
+ /** Periodic Frame Interval */
+ unsigned perfrint : 2;
+#define DWC_DCFG_FRAME_INTERVAL_80 0
+#define DWC_DCFG_FRAME_INTERVAL_85 1
+#define DWC_DCFG_FRAME_INTERVAL_90 2
+#define DWC_DCFG_FRAME_INTERVAL_95 3
+
+ /** Device Addresses */
+ unsigned devaddr : 7;
+ unsigned reserved3 : 1;
+ /** Non Zero Length Status OUT Handshake */
+ unsigned nzstsouthshk : 1;
+#define DWC_DCFG_SEND_STALL 1
+
+ /** Device Speed */
+ unsigned devspd : 2;
+ } b;
+} dcfg_data_t;
+
+/**
+ * This union represents the bit fields in the Device Control
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union dctl_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31_12 : 21;
+ /** Clear Global OUT NAK */
+ unsigned cgoutnak : 1;
+ /** Set Global OUT NAK */
+ unsigned sgoutnak : 1;
+ /** Clear Global Non-Periodic IN NAK */
+ unsigned cgnpinnak : 1;
+ /** Set Global Non-Periodic IN NAK */
+ unsigned sgnpinnak : 1;
+ /** Test Control */
+ unsigned tstctl : 3;
+ /** Global OUT NAK Status */
+ unsigned goutnaksts : 1;
+ /** Global Non-Periodic IN NAK Status */
+ unsigned gnpinnaksts : 1;
+ /** Soft Disconnect */
+ unsigned sftdiscon : 1;
+ /** Remote Wakeup */
+ unsigned rmtwkupsig : 1;
+ } b;
+} dctl_data_t;
+
+/**
+ * This union represents the bit fields in the Device Status
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union dsts_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31_22 : 10;
+ /** Frame or Microframe Number of the received SOF */
+ unsigned soffn : 14;
+ unsigned reserved07_04 : 4;
+ /** Erratic Error */
+ unsigned errticerr : 1;
+ /** Enumerated Speed */
+ unsigned enumspd : 2;
+#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0
+#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1
+#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2
+#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3
+ /** Suspend Status */
+ unsigned suspsts : 1;
+ } b;
+} dsts_data_t;
+
+
+/**
+ * This union represents the bit fields in the Device IN EP Interrupt
+ * Register and the Device IN EP Common Mask Register.
+ *
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union diepint_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31_08 : 23;
+ unsigned txfifoundrn : 1;
+ /** IN Endpoint HAK Effective mask */
+ unsigned emptyintr : 1;
+ /** IN Endpoint NAK Effective mask */
+ unsigned inepnakeff : 1;
+ /** IN Token Received with EP mismatch mask */
+ unsigned intknepmis : 1;
+ /** IN Token received with TxF Empty mask */
+ unsigned intktxfemp : 1;
+ /** TimeOUT Handshake mask (non-ISOC EPs) */
+ unsigned timeout : 1;
+ /** AHB Error mask */
+ unsigned ahberr : 1;
+ /** Endpoint disable mask */
+ unsigned epdisabled : 1;
+ /** Transfer complete mask */
+ unsigned xfercompl : 1;
+ } b;
+} diepint_data_t;
+/**
+ * This union represents the bit fields in the Device IN EP Common
+ * Interrupt Mask Register.
+ */
+typedef union diepint_data diepmsk_data_t; //*-*
+
+/**
+ * This union represents the bit fields in the Device OUT EP
+ * Interrupt Register and Device OUT EP Common Interrupt Mask
+ * Register.
+ *
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union doepint_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31_04 : 28; // Docs say reserved is 27 bits
+
+ // There is 1 bit missing here, not used?
+
+ /** Setup Phase Done (control EPs) */
+ unsigned setup : 1;
+ /** AHB Error */
+ unsigned ahberr : 1;
+ /** Endpoint disable */
+ unsigned epdisabled : 1;
+ /** Transfer complete */
+ unsigned xfercompl : 1;
+ } b;
+} doepint_data_t;
+/**
+ * This union represents the bit fields in the Device OUT EP Common
+ * Interrupt Mask Register.
+ */
+typedef union doepint_data doepmsk_data_t; //*-*
+
+
+/**
+ * This union represents the bit fields in the Device All EP Interrupt
+ * and Mask Registers.
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union daint_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** OUT Endpoint bits */
+ unsigned out : 16;
+ /** IN Endpoint bits */
+ unsigned in : 16;
+ } ep;
+ struct {
+ /** OUT Endpoint bits */
+ unsigned outep15 : 1;
+ unsigned outep14 : 1;
+ unsigned outep13 : 1;
+ unsigned outep12 : 1;
+ unsigned outep11 : 1;
+ unsigned outep10 : 1;
+ unsigned outep9 : 1;
+ unsigned outep8 : 1;
+ unsigned outep7 : 1;
+ unsigned outep6 : 1;
+ unsigned outep5 : 1;
+ unsigned outep4 : 1;
+ unsigned outep3 : 1;
+ unsigned outep2 : 1;
+ unsigned outep1 : 1;
+ unsigned outep0 : 1;
+ /** IN Endpoint bits */
+ unsigned inep15 : 1;
+ unsigned inep14 : 1;
+ unsigned inep13 : 1;
+ unsigned inep12 : 1;
+ unsigned inep11 : 1;
+ unsigned inep10 : 1;
+ unsigned inep9 : 1;
+ unsigned inep8 : 1;
+ unsigned inep7 : 1;
+ unsigned inep6 : 1;
+ unsigned inep5 : 1;
+ unsigned inep4 : 1;
+ unsigned inep3 : 1;
+ unsigned inep2 : 1;
+ unsigned inep1 : 1;
+ unsigned inep0 : 1;
+ } b;
+} daint_data_t;
+
+/**
+ * This union represents the bit fields in the Device IN Token Queue
+ * Read Registers.
+ * - Read the register into the <i>d32</i> member.
+ * - READ-ONLY Register
+ */
+typedef union dtknq1_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** EP Numbers of IN Tokens 0 ... 4 */
+ unsigned epnums0_5 : 24;
+ /** write pointer has wrapped. */
+ unsigned wrap_bit : 1;
+ /** Reserved */
+ unsigned reserved05_06 : 2;
+ /** In Token Queue Write Pointer */
+ unsigned intknwptr : 5;
+ }b;
+} dtknq1_data_t;
+
+/**
+ * This union represents Threshold control Register
+ * - Read and write the register into the <i>d32</i> member.
+ * - READ-WRITABLE Register
+ */
+typedef union dthrctl_data //* /*fscz */
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Reserved */
+ unsigned reserved26_31 : 6;
+ /** Rx Thr. Length */
+ unsigned rx_thr_len : 9;
+ /** Rx Thr. Enable */
+ unsigned rx_thr_en : 1;
+ /** Reserved */
+ unsigned reserved11_15 : 5;
+ /** Tx Thr. Length */
+ unsigned tx_thr_len : 9;
+ /** ISO Tx Thr. Enable */
+ unsigned iso_thr_en : 1;
+ /** non ISO Tx Thr. Enable */
+ unsigned non_iso_thr_en : 1;
+
+ }b;
+} dthrctl_data_t;
+
+
+/**
+ * Device Logical IN Endpoint-Specific Registers. <i>Offsets
+ * 900h-AFCh</i>
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_in_ep_regs
+{
+ /** Device IN Endpoint Control Register. <i>Offset:900h +
+ * (ep_num * 20h) + 00h</i> */
+ volatile uint32_t diepctl;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 04h</i> */
+ uint32_t reserved04;
+ /** Device IN Endpoint Interrupt Register. <i>Offset:900h +
+ * (ep_num * 20h) + 08h</i> */
+ volatile uint32_t diepint;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 0Ch</i> */
+ uint32_t reserved0C;
+ /** Device IN Endpoint Transfer Size
+ * Register. <i>Offset:900h + (ep_num * 20h) + 10h</i> */
+ volatile uint32_t dieptsiz;
+ /** Device IN Endpoint DMA Address Register. <i>Offset:900h +
+ * (ep_num * 20h) + 14h</i> */
+ volatile uint32_t diepdma;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 18h - 900h +
+ * (ep_num * 20h) + 1Ch</i>*/
+ volatile uint32_t dtxfsts;
+ /** Reserved. <i>Offset:900h + (ep_num * 20h) + 1Ch - 900h +
+ * (ep_num * 20h) + 1Ch</i>*/
+ uint32_t reserved18;
+} dwc_otg_dev_in_ep_regs_t;
+
+/**
+ * Device Logical OUT Endpoint-Specific Registers. <i>Offsets:
+ * B00h-CFCh</i>
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_out_ep_regs
+{
+ /** Device OUT Endpoint Control Register. <i>Offset:B00h +
+ * (ep_num * 20h) + 00h</i> */
+ volatile uint32_t doepctl;
+ /** Device OUT Endpoint Frame number Register. <i>Offset:
+ * B00h + (ep_num * 20h) + 04h</i> */
+ volatile uint32_t doepfn;
+ /** Device OUT Endpoint Interrupt Register. <i>Offset:B00h +
+ * (ep_num * 20h) + 08h</i> */
+ volatile uint32_t doepint;
+ /** Reserved. <i>Offset:B00h + (ep_num * 20h) + 0Ch</i> */
+ uint32_t reserved0C;
+ /** Device OUT Endpoint Transfer Size Register. <i>Offset:
+ * B00h + (ep_num * 20h) + 10h</i> */
+ volatile uint32_t doeptsiz;
+ /** Device OUT Endpoint DMA Address Register. <i>Offset:B00h
+ * + (ep_num * 20h) + 14h</i> */
+ volatile uint32_t doepdma;
+ /** Reserved. <i>Offset:B00h + (ep_num * 20h) + 18h - B00h +
+ * (ep_num * 20h) + 1Ch</i> */
+ uint32_t unused[2];
+} dwc_otg_dev_out_ep_regs_t;
+
+/**
+ * This union represents the bit fields in the Device EP Control
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union depctl_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Endpoint Enable */
+ unsigned epena : 1;
+ /** Endpoint Disable */
+ unsigned epdis : 1;
+
+ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints)
+ * Writing to this field sets the Endpoint DPID (DPID)
+ * field in this register to DATA1 Set Odd
+ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints)
+ * Writing to this field sets the Even/Odd
+ * (micro)frame (EO_FrNum) field to odd (micro) frame.
+ */
+ unsigned setd1pid : 1;
+ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints)
+ * Writing to this field sets the Endpoint DPID (DPID)
+ * field in this register to DATA0. Set Even
+ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints)
+ * Writing to this field sets the Even/Odd
+ * (micro)frame (EO_FrNum) field to even (micro)
+ * frame.
+ */
+ unsigned setd0pid : 1;
+
+ /** Set NAK */
+ unsigned snak : 1;
+ /** Clear NAK */
+ unsigned cnak : 1;
+
+ /** Tx Fifo Number
+ * IN EPn/IN EP0
+ * OUT EPn/OUT EP0 - reserved */
+ unsigned txfnum : 4;
+
+ /** Stall Handshake */
+ unsigned stall : 1;
+
+ /** Snoop Mode
+ * OUT EPn/OUT EP0
+ * IN EPn/IN EP0 - reserved */
+ unsigned snp : 1;
+
+ /** Endpoint Type
+ * 2'b00: Control
+ * 2'b01: Isochronous
+ * 2'b10: Bulk
+ * 2'b11: Interrupt */
+ unsigned eptype : 2;
+
+ /** NAK Status */
+ unsigned naksts : 1;
+
+ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints)
+ * This field contains the PID of the packet going to
+ * be received or transmitted on this endpoint. The
+ * application should program the PID of the first
+ * packet going to be received or transmitted on this
+ * endpoint , after the endpoint is
+ * activated. Application use the SetD1PID and
+ * SetD0PID fields of this register to program either
+ * D0 or D1 PID.
+ *
+ * The encoding for this field is
+ * - 0: D0
+ * - 1: D1
+ */
+ unsigned dpid : 1;
+
+ /** USB Active Endpoint */
+ unsigned usbactep : 1;
+
+ /** Next Endpoint
+ * IN EPn/IN EP0
+ * OUT EPn/OUT EP0 - reserved */
+ unsigned nextep : 4;
+
+ /** Maximum Packet Size
+ * IN/OUT EPn
+ * IN/OUT EP0 - 2 bits
+ * 2'b00: 64 Bytes
+ * 2'b01: 32
+ * 2'b10: 16
+ * 2'b11: 8 */
+ unsigned mps : 11;
+#define DWC_DEP0CTL_MPS_64 0
+#define DWC_DEP0CTL_MPS_32 1
+#define DWC_DEP0CTL_MPS_16 2
+#define DWC_DEP0CTL_MPS_8 3
+ } b;
+} depctl_data_t;
+
+/**
+ * This union represents the bit fields in the Device EP Transfer
+ * Size Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union deptsiz_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /*
+ * Added-sr: 2007-07-26
+ *
+ * Correct ther register layout for the 405EZ Ultra
+ * USB device implementation.
+ */
+#ifdef CONFIG_405EZ
+ /** register bits */
+ struct {
+ unsigned reserved : 1;
+ /** Multi Count - Periodic IN endpoints */
+ unsigned mc : 2;
+ unsigned reserved1 : 5;
+ /** Packet Count */
+ unsigned pktcnt : 5;
+ unsigned reserved2 : 8;
+ /** Transfer size */
+ unsigned xfersize : 11;
+ } b;
+#else
+ /** register bits */
+ struct {
+ unsigned reserved : 1;
+ /** Multi Count - Periodic IN endpoints */
+ unsigned mc : 2;
+ /** Packet Count */
+ unsigned pktcnt : 10;
+ /** Transfer size */
+ unsigned xfersize : 19;
+ } b;
+#endif
+} deptsiz_data_t;
+
+/**
+ * This union represents the bit fields in the Device EP 0 Transfer
+ * Size Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union deptsiz0_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved31 : 1; /*fsczzzz device*/
+ /**Setup Packet Count (DOEPTSIZ0 Only) */
+ unsigned supcnt : 2;
+ /** Reserved */
+ unsigned reserved28_20 : 9;
+ /** Packet Count */
+ unsigned pktcnt : 1;
+ /** Reserved */
+ unsigned reserved18_7 : 12;
+ /** Transfer size */
+ unsigned xfersize : 7;
+ } b;
+} deptsiz0_data_t;
+
+
+/** Maximum number of Periodic FIFOs */
+#define MAX_PERIO_FIFOS 15
+/** Maximum number of Tx FIFOs */
+#define MAX_TX_FIFOS 15
+
+/** Maximum number of Endpoints/HostChannels */
+#if defined(CONFIG_460EX) || defined(CONFIG_APM82181)
+#define MAX_EPS_CHANNELS 12
+#else
+#define MAX_EPS_CHANNELS 4
+#endif
+
+/**
+ * The dwc_otg_dev_if structure contains information needed to manage
+ * the DWC_otg controller acting in device mode. It represents the
+ * programming view of the device-specific aspects of the controller.
+ */
+typedef struct dwc_otg_dev_if {
+ /** Pointer to device Global registers.
+ * Device Global Registers starting at offset 800h
+ */
+ dwc_otg_device_global_regs_t *dev_global_regs;
+#define DWC_DEV_GLOBAL_REG_OFFSET 0x800
+
+ /**
+ * Device Logical IN Endpoint-Specific Registers 900h-AFCh
+ */
+ dwc_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS]; /*fscz divide 2 */
+#define DWC_DEV_IN_EP_REG_OFFSET 0x900
+/** Stride between consecutive endpoint register sets */
+#define DWC_EP_REG_OFFSET 0x20
+
+ /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */
+ dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS]; /*fscz divide 2 */
+#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00
+
+ /* Device configuration information*/
+ uint8_t speed; /**< Device Speed 0: Unknown, 1: LS, 2:FS, 3: HS */
+ /*fscz */
+ uint8_t num_in_eps; /**< Number of IN (Tx) EPs, range 0-15 except ep0 */
+ uint8_t num_out_eps; /**< Number of OUT (Rx) EPs, range 0-15 except ep0 */
+
+
+ /** Size of periodic FIFOs (Bytes) */
+ uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS];
+
+ /*fscz */
+ /** Size of Tx FIFOs (Bytes) */
+ uint16_t tx_fifo_size[MAX_TX_FIFOS];
+
+ /** Thresholding enable flags and length variables (see dthrctl_data_t) **/
+ uint16_t rx_thr_en;
+ uint16_t iso_tx_thr_en;
+ uint16_t non_iso_tx_thr_en;
+
+ uint16_t rx_thr_length;
+ uint16_t tx_thr_length;
+} dwc_otg_dev_if_t;
+
+/**
+ * This union represents the bit fields in the Power and Clock Gating Control
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union pcgcctl_data /*fsczzzz */
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned reserved31_05 : 27;
+ /** PHY Suspended */
+ unsigned physuspended : 1;
+ /** Reset Power Down Modules */
+ unsigned rstpdwnmodule : 1;
+ /** Power Clamp */
+ unsigned pwrclmp : 1;
+ /** Gate Hclk */
+ unsigned gatehclk : 1;
+ /** Stop Pclk */
+ unsigned stoppclk : 1;
+ } b;
+} pcgcctl_data_t;
+
+/////////////////////////////////////////////////
+// Host Mode Register Structures
+//
+/**
+ * The Host Global Registers structure defines the size and relative
+ * field offsets for the Host Mode Global Registers. Host Global
+ * Registers offsets 400h-7FFh.
+*/
+typedef struct dwc_otg_host_global_regs
+{
+ /** Host Configuration Register. <i>Offset: 400h</i> */
+ volatile uint32_t hcfg;
+ /** Host Frame Interval Register. <i>Offset: 404h</i> */
+ volatile uint32_t hfir;
+ /** Host Frame Number / Frame Remaining Register. <i>Offset: 408h</i> */
+ volatile uint32_t hfnum;
+ /** Reserved. <i>Offset: 40Ch</i> */
+ uint32_t reserved40C;
+ /** Host Periodic Transmit FIFO/ Queue Status Register. <i>Offset: 410h</i> */
+ volatile uint32_t hptxsts;
+ /** Host All Channels Interrupt Register. <i>Offset: 414h</i> */
+ volatile uint32_t haint;
+ /** Host All Channels Interrupt Mask Register. <i>Offset: 418h</i> */
+ volatile uint32_t haintmsk;
+} dwc_otg_host_global_regs_t;
+
+/**
+ * This union represents the bit fields in the Host Configuration Register.
+ * Read the register into the <i>d32</i> member then set/clear the bits using
+ * the <i>b</i>it elements. Write the <i>d32</i> member to the hcfg register.
+ */
+typedef union hcfg_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+
+#define DWC_HCFG_30_60_MHZ 0
+#define DWC_HCFG_48_MHZ 1
+#define DWC_HCFG_6_MHZ 2
+ /** FS/LS Only Support */
+ unsigned fslssupp : 1;
+ /** FS/LS Phy Clock Select */
+ unsigned fslspclksel : 2;
+ } b;
+} hcfg_data_t;
+
+/**
+ * This union represents the bit fields in the Host Frame Remaing/Number
+ * Register.
+ */
+typedef union hfir_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned reserved : 16;
+ unsigned frint : 16;
+ } b;
+} hfir_data_t;
+
+/**
+ * This union represents the bit fields in the Host Frame Remaing/Number
+ * Register.
+ */
+typedef union hfnum_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+#define DWC_HFNUM_MAX_FRNUM 0x3FFF
+ unsigned frrem : 16;
+ unsigned frnum : 16;
+ } b;
+} hfnum_data_t;
+
+typedef union hptxsts_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned ptxqtop_odd : 1;
+ unsigned ptxqtop_chnum : 4;
+ unsigned ptxqtop_token : 2;
+ unsigned ptxqtop_terminate : 1;
+ unsigned ptxqspcavail : 8;
+ unsigned ptxfspcavail : 16;
+ /** Top of the Periodic Transmit Request Queue
+ * - bit 24 - Terminate (last entry for the selected channel)
+ * - bits 26:25 - Token Type
+ * - 2'b00 - Zero length
+ * - 2'b01 - Ping
+ * - 2'b10 - Disable
+ * - bits 30:27 - Channel Number
+ * - bit 31 - Odd/even microframe
+ */
+ } b;
+} hptxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Host Port Control and Status
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hprt0 register.
+ */
+typedef union hprt0_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+
+#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0
+#define DWC_HPRT0_PRTSPD_FULL_SPEED 1
+#define DWC_HPRT0_PRTSPD_LOW_SPEED 2
+ unsigned reserved19_31 : 13;
+ unsigned prtspd : 2;
+ unsigned prttstctl : 4;
+ unsigned prtpwr : 1;
+ unsigned prtlnsts : 2;
+ unsigned reserved9 : 1;
+ unsigned prtrst : 1;
+ unsigned prtsusp : 1;
+ unsigned prtres : 1;
+ unsigned prtovrcurrchng : 1;
+ unsigned prtovrcurract : 1;
+ unsigned prtenchng : 1;
+ unsigned prtena : 1;
+ unsigned prtconndet : 1;
+ unsigned prtconnsts : 1;
+ } b;
+} hprt0_data_t;
+
+/**
+ * This union represents the bit fields in the Host All Channels Interrupt
+ * Register (HAINT). One status bit per host channel; the <i>b</i> view
+ * names each channel bit individually while the <i>b2</i> view exposes the
+ * same 16 bits as a single <i>chint</i> field.
+ */
+typedef union haint_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 16;
+ unsigned ch15 : 1;
+ unsigned ch14 : 1;
+ unsigned ch13 : 1;
+ unsigned ch12 : 1;
+ unsigned ch11 : 1;
+ unsigned ch10 : 1;
+ unsigned ch9 : 1;
+ unsigned ch8 : 1;
+ unsigned ch7 : 1;
+ unsigned ch6 : 1;
+ unsigned ch5 : 1;
+ unsigned ch4 : 1;
+ unsigned ch3 : 1;
+ unsigned ch2 : 1;
+ unsigned ch1 : 1;
+ unsigned ch0 : 1;
+ } b;
+ /** alternate view: all channel bits as one field */
+ struct {
+ unsigned reserved : 16;
+ unsigned chint : 16;
+ } b2;
+} haint_data_t;
+
+/**
+ * This union represents the bit fields in the Host All Channels Interrupt
+ * Mask Register (HAINTMSK). Bit layout matches haint_data_t: one mask bit
+ * per host channel, with a combined <i>chint</i> view in <i>b2</i>.
+ */
+typedef union haintmsk_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved : 16;
+ unsigned ch15 : 1;
+ unsigned ch14 : 1;
+ unsigned ch13 : 1;
+ unsigned ch12 : 1;
+ unsigned ch11 : 1;
+ unsigned ch10 : 1;
+ unsigned ch9 : 1;
+ unsigned ch8 : 1;
+ unsigned ch7 : 1;
+ unsigned ch6 : 1;
+ unsigned ch5 : 1;
+ unsigned ch4 : 1;
+ unsigned ch3 : 1;
+ unsigned ch2 : 1;
+ unsigned ch1 : 1;
+ unsigned ch0 : 1;
+ } b;
+ /** alternate view: all channel mask bits as one field */
+ struct {
+ unsigned reserved : 16;
+ unsigned chint : 16;
+ } b2;
+} haintmsk_data_t;
+
+/**
+ * Host Channel Specific Registers. <i>500h-5FCh</i>
+ *
+ * One instance per host channel; each channel's register block is 0x20
+ * bytes wide (the trailing reserved[2] pads the struct to that stride).
+ */
+typedef struct dwc_otg_hc_regs
+{
+ /** Host Channel 0 Characteristic Register. <i>Offset: 500h + (chan_num * 20h) + 00h</i> */
+ volatile uint32_t hcchar;
+ /** Host Channel 0 Split Control Register. <i>Offset: 500h + (chan_num * 20h) + 04h</i> */
+ volatile uint32_t hcsplt;
+ /** Host Channel 0 Interrupt Register. <i>Offset: 500h + (chan_num * 20h) + 08h</i> */
+ volatile uint32_t hcint;
+ /** Host Channel 0 Interrupt Mask Register. <i>Offset: 500h + (chan_num * 20h) + 0Ch</i> */
+ volatile uint32_t hcintmsk;
+ /** Host Channel 0 Transfer Size Register. <i>Offset: 500h + (chan_num * 20h) + 10h</i> */
+ volatile uint32_t hctsiz;
+ /** Host Channel 0 DMA Address Register. <i>Offset: 500h + (chan_num * 20h) + 14h</i> */
+ volatile uint32_t hcdma;
+ /** Reserved. <i>Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch</i> */
+ uint32_t reserved[2];
+} dwc_otg_hc_regs_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Characteristics
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hcchar register.
+ * <i>Offset: 500h + (chan_num * 20h) + 00h</i> (see dwc_otg_hc_regs).
+ */
+typedef union hcchar_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Channel enable */
+ unsigned chen : 1;
+
+ /** Channel disable */
+ unsigned chdis : 1;
+
+ /**
+ * Frame to transmit periodic transaction.
+ * 0: even, 1: odd
+ */
+ unsigned oddfrm : 1;
+
+ /** Device address */
+ unsigned devaddr : 7;
+
+ /** Packets per frame for periodic transfers. 0 is reserved. */
+ unsigned multicnt : 2;
+
+ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */
+ unsigned eptype : 2;
+
+ /** 0: Full/high speed device, 1: Low speed device */
+ unsigned lspddev : 1;
+
+ unsigned reserved : 1;
+
+ /** 0: OUT, 1: IN */
+ unsigned epdir : 1;
+
+ /** Endpoint number */
+ unsigned epnum : 4;
+
+ /** Maximum packet size in bytes */
+ unsigned mps : 11;
+ } b;
+} hcchar_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Split Control
+ * Register (HCSPLT). <i>Offset: 500h + (chan_num * 20h) + 04h</i>.
+ */
+typedef union hcsplt_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ /** Split Enable */
+ unsigned spltena : 1;
+
+ /** Reserved */
+ unsigned reserved : 14;
+
+ /** Do Complete Split */
+ unsigned compsplt : 1;
+
+
+ /** Transaction Position (see DWC_HCSPLIT_XACTPOS_* below) */
+ unsigned xactpos : 2;
+#define DWC_HCSPLIT_XACTPOS_MID 0
+#define DWC_HCSPLIT_XACTPOS_END 1
+#define DWC_HCSPLIT_XACTPOS_BEGIN 2
+#define DWC_HCSPLIT_XACTPOS_ALL 3
+
+ /** Hub Address */
+ unsigned hubaddr : 7;
+
+ /** Port Address */
+ unsigned prtaddr : 7;
+
+ } b;
+} hcsplt_data_t;
+
+
+/**
+ * This union represents the bit fields in the Host Channel-n Interrupt
+ * Register (HCINTn). <i>Offset: 500h + (chan_num * 20h) + 08h</i>.
+ */
+typedef union hcint_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Reserved */
+ unsigned reserved : 21;
+ /** Data Toggle Error */
+ unsigned datatglerr : 1;
+ /** Frame Overrun */
+ unsigned frmovrun : 1;
+ /** Babble Error */
+ unsigned bblerr : 1;
+ /** Transaction Err */
+ unsigned xacterr : 1;
+ /** NYET Response Received */
+ unsigned nyet : 1;
+ /** ACK Response Received */
+ unsigned ack : 1;
+ /** NAK Response Received */
+ unsigned nak : 1;
+ /** STALL Response Received */
+ unsigned stall : 1;
+ /** AHB Error */
+ unsigned ahberr : 1;
+ /** Channel Halted */
+ unsigned chhltd : 1;
+ /** Transfer Complete */
+ unsigned xfercomp : 1;
+ } b;
+} hcint_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Transfer Size
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hctsiz register.
+ */
+typedef union hctsiz_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+#define DWC_HCTSIZ_DATA0 0
+#define DWC_HCTSIZ_DATA1 2
+#define DWC_HCTSIZ_DATA2 1
+#define DWC_HCTSIZ_MDATA 3
+#define DWC_HCTSIZ_SETUP 3
+
+ /** Do PING protocol when 1 */
+ unsigned dopng : 1;
+
+ /**
+ * Packet ID for next data packet
+ * 0: DATA0
+ * 1: DATA2
+ * 2: DATA1
+ * 3: MDATA (non-Control), SETUP (Control)
+ */
+ unsigned pid : 2;
+
+ /** Data packets to transfer */
+ unsigned pktcnt : 10;
+
+ /** Total transfer size in bytes */
+ unsigned xfersize : 19;
+ } b;
+} hctsiz_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt Mask
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hcintmsk register.
+ *
+ * Fields mirror hcint_data_t bit-for-bit.
+ * NOTE(review): the transfer-complete field is spelled <i>xfercompl</i>
+ * here but <i>xfercomp</i> in hcint_data_t -- callers must use the right
+ * name for each union.
+ */
+typedef union hcintmsk_data //*
+{
+ /** raw register data */
+ uint32_t d32;
+
+ /** register bits */
+ struct {
+ unsigned reserved : 21;
+ unsigned datatglerr : 1;
+ unsigned frmovrun : 1;
+ unsigned bblerr : 1;
+ unsigned xacterr : 1;
+ unsigned nyet : 1;
+ unsigned ack : 1;
+ unsigned nak : 1;
+ unsigned stall : 1;
+ unsigned ahberr : 1;
+ unsigned chhltd : 1;
+ unsigned xfercompl : 1;
+ } b;
+} hcintmsk_data_t;
+
+/** OTG Host Interface Structure.
+ *
+ * The OTG Host Interface Structure structure contains information
+ * needed to manage the DWC_otg controller acting in host mode. It
+ * represents the programming view of the host-specific aspects of the
+ * controller.
+ */
+typedef struct dwc_otg_host_if {
+ /** Host Global Registers starting at offset 400h.*/
+ dwc_otg_host_global_regs_t *host_global_regs;
+#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400
+
+ /** Host Port 0 Control and Status Register */
+ volatile uint32_t *hprt0;
+#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440
+
+
+ /** Host Channel Specific Registers at offsets 500h-5FCh. */
+ dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS];
+#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500
+/* stride between consecutive per-channel register blocks (see dwc_otg_hc_regs) */
+#define DWC_OTG_CHAN_REGS_OFFSET 0x20
+
+
+ /* Host configuration information */
+ /** Number of Host Channels (range: 1-16) */
+ uint8_t num_host_channels;
+ /** Periodic EPs supported (0: no, 1: yes) */
+ uint8_t perio_eps_supported;
+ /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */
+ uint16_t perio_tx_fifo_size;
+
+} dwc_otg_host_if_t;
+
+
+#else /* CONFIG_4xx not defined */
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * This union represents the bit fields of the Core OTG Control
+ * and Status Register (GOTGCTL). Set the bits using the bit
+ * fields then write the <i>d32</i> value to the register.
+ */
+typedef union gotgctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Session Request Success */
+ unsigned sesreqscs : 1;
+ /** Session Request */
+ unsigned sesreq : 1;
+ unsigned reserved2_7 : 6;
+ /** Host Negotiation Success */
+ unsigned hstnegscs : 1;
+ /** HNP Request */
+ unsigned hnpreq : 1;
+ /** Host Set HNP Enable */
+ unsigned hstsethnpen : 1;
+ /** Device HNP Enabled */
+ unsigned devhnpen : 1;
+ unsigned reserved12_15 : 4;
+ /** Connector ID Status */
+ unsigned conidsts : 1;
+ unsigned reserved17 : 1;
+ /** A-session valid */
+ unsigned asesvld : 1;
+ /** B-session valid */
+ unsigned bsesvld : 1;
+ /** Current Mode */
+ unsigned currmod : 1;
+ unsigned reserved21_31 : 11;
+ } b;
+} gotgctl_data_t;
+
+/**
+ * This union represents the bit fields of the Core OTG Interrupt Register
+ * (GOTGINT). Set/clear the bits using the bit fields then write the <i>d32</i>
+ * value to the register.
+ */
+typedef union gotgint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Reserved */
+ unsigned reserved0_1 : 2;
+
+ /** Session End Detected */
+ unsigned sesenddet : 1;
+
+ unsigned reserved3_7 : 5;
+
+ /** Session Request Success Status Change */
+ unsigned sesreqsucstschng : 1;
+ /** Host Negotiation Success Status Change */
+ unsigned hstnegsucstschng : 1;
+
+ /* NOTE(review): identifier is a typo for "reserved10_16"; renaming
+ * would break any existing users, so it is left as-is. */
+ unsigned reserver10_16 : 7;
+
+ /** Host Negotiation Detected */
+ unsigned hstnegdet : 1;
+ /** A-Device Timeout Change */
+ unsigned adevtoutchng : 1;
+ /** Debounce Done */
+ unsigned debdone : 1;
+
+ unsigned reserved31_20 : 12;
+
+ } b;
+} gotgint_data_t;
+
+
+/**
+ * This union represents the bit fields of the Core AHB Configuration
+ * Register (GAHBCFG). Set/clear the bits using the bit fields then
+ * write the <i>d32</i> value to the register.
+ */
+typedef union gahbcfg_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Global interrupt mask (1 = interrupts enabled) */
+ unsigned glblintrmsk : 1;
+#define DWC_GAHBCFG_GLBINT_ENABLE 1
+
+ /** AHB burst length (see DWC_GAHBCFG_INT_DMA_BURST_* below) */
+ unsigned hburstlen : 4;
+#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE 0
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR 1
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR4 3
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR8 5
+#define DWC_GAHBCFG_INT_DMA_BURST_INCR16 7
+
+ /** DMA enable */
+ unsigned dmaenable : 1;
+#define DWC_GAHBCFG_DMAENABLE 1
+ unsigned reserved : 1;
+ /** Non-periodic TxFIFO empty level (see defines below) */
+ unsigned nptxfemplvl_txfemplvl : 1;
+ /** Periodic TxFIFO empty level */
+ unsigned ptxfemplvl : 1;
+#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY 1
+#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0
+ unsigned reserved9_31 : 23;
+ } b;
+} gahbcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core USB Configuration
+ * Register (GUSBCFG). Set the bits using the bit fields then write
+ * the <i>d32</i> value to the register.
+ */
+typedef union gusbcfg_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Timeout calibration */
+ unsigned toutcal : 3;
+ /** PHY interface select */
+ unsigned phyif : 1;
+ /** ULPI or UTMI+ select */
+ unsigned ulpi_utmi_sel : 1;
+ /** Full-speed serial interface select */
+ unsigned fsintf : 1;
+ /** PHY select */
+ unsigned physel : 1;
+ /** DDR select */
+ unsigned ddrsel : 1;
+ /** SRP-capable */
+ unsigned srpcap : 1;
+ /** HNP-capable */
+ unsigned hnpcap : 1;
+ /** USB turnaround time */
+ unsigned usbtrdtim : 4;
+ unsigned nptxfrwnden : 1;
+ unsigned phylpwrclksel : 1;
+ unsigned otgutmifssel : 1;
+ unsigned ulpi_fsls : 1;
+ unsigned ulpi_auto_res : 1;
+ unsigned ulpi_clk_sus_m : 1;
+ unsigned ulpi_ext_vbus_drv : 1;
+ unsigned ulpi_int_vbus_indicator : 1;
+ unsigned term_sel_dl_pulse : 1;
+ unsigned reserved23_28 : 6;
+ unsigned force_host_mode: 1;
+ unsigned force_device_mode: 1;
+ unsigned corrupt_tx_packet: 1;
+ } b;
+} gusbcfg_data_t;
+
+/**
+ * This union represents the bit fields of the Core Reset Register
+ * (GRSTCTL). Set/clear the bits using the bit fields then write the
+ * <i>d32</i> value to the register.
+ */
+typedef union grstctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Core Soft Reset (CSftRst) (Device and Host)
+ *
+ * The application can flush the control logic in the
+ * entire core using this bit. This bit resets the
+ * pipelines in the AHB Clock domain as well as the
+ * PHY Clock domain.
+ *
+ * The state machines are reset to an IDLE state, the
+ * control bits in the CSRs are cleared, all the
+ * transmit FIFOs and the receive FIFO are flushed.
+ *
+ * The status mask bits that control the generation of
+ * the interrupt, are cleared, to clear the
+ * interrupt. The interrupt status bits are not
+ * cleared, so the application can get the status of
+ * any events that occurred in the core after it has
+ * set this bit.
+ *
+ * Any transactions on the AHB are terminated as soon
+ * as possible following the protocol. Any
+ * transactions on the USB are terminated immediately.
+ *
+ * The configuration settings in the CSRs are
+ * unchanged, so the software doesn't have to
+ * reprogram these registers (Device
+ * Configuration/Host Configuration/Core System
+ * Configuration/Core PHY Configuration).
+ *
+ * The application can write to this bit, any time it
+ * wants to reset the core. This is a self clearing
+ * bit and the core clears this bit after all the
+ * necessary logic is reset in the core, which may
+ * take several clocks, depending on the current state
+ * of the core.
+ */
+ unsigned csftrst : 1;
+ /** Hclk Soft Reset
+ *
+ * The application uses this bit to reset the control logic in
+ * the AHB clock domain. Only AHB clock domain pipelines are
+ * reset.
+ */
+ unsigned hsftrst : 1;
+ /** Host Frame Counter Reset (Host Only)<br>
+ *
+ * The application can reset the (micro)frame number
+ * counter inside the core, using this bit. When the
+ * (micro)frame counter is reset, the subsequent SOF
+ * sent out by the core, will have a (micro)frame
+ * number of 0.
+ */
+ unsigned hstfrm : 1;
+ /** In Token Sequence Learning Queue Flush
+ * (INTknQFlsh) (Device Only)
+ */
+ unsigned intknqflsh : 1;
+ /** RxFIFO Flush (RxFFlsh) (Device and Host)
+ *
+ * The application can flush the entire Receive FIFO
+ * using this bit. <p>The application must first
+ * ensure that the core is not in the middle of a
+ * transaction. <p>The application should write into
+ * this bit, only after making sure that neither the
+ * DMA engine is reading from the RxFIFO nor the MAC
+ * is writing the data in to the FIFO. <p>The
+ * application should wait until the bit is cleared
+ * before performing any other operations. This bit
+ * takes 8 clocks (slowest of PHY or AHB clock)
+ * to clear.
+ */
+ unsigned rxfflsh : 1;
+ /** TxFIFO Flush (TxFFlsh) (Device and Host).
+ *
+ * This bit is used to selectively flush a single or
+ * all transmit FIFOs. The application must first
+ * ensure that the core is not in the middle of a
+ * transaction. <p>The application should write into
+ * this bit, only after making sure that neither the
+ * DMA engine is writing into the TxFIFO nor the MAC
+ * is reading the data out of the FIFO. <p>The
+ * application should wait until the core clears this
+ * bit, before performing any operations. This bit
+ * takes 8 clocks (slowest of PHY or AHB clock)
+ * to clear.
+ */
+ unsigned txfflsh : 1;
+
+ /** TxFIFO Number (TxFNum) (Device and Host).
+ *
+ * This is the FIFO number which needs to be flushed,
+ * using the TxFIFO Flush bit. This field should not
+ * be changed until the TxFIFO Flush bit is cleared by
+ * the core.
+ * - 0x0 : Non Periodic TxFIFO Flush
+ * - 0x1 : Periodic TxFIFO #1 Flush in device mode
+ * or Periodic TxFIFO in host mode
+ * - 0x2 : Periodic TxFIFO #2 Flush in device mode.
+ * - ...
+ * - 0xF : Periodic TxFIFO #15 Flush in device mode
+ * - 0x10: Flush all the Transmit NonPeriodic and
+ * Transmit Periodic FIFOs in the core
+ */
+ unsigned txfnum : 5;
+ /** Reserved */
+ unsigned reserved11_29 : 19;
+ /** DMA Request Signal. Indicates a DMA request is in
+ * progress. Used for debug purpose. */
+ unsigned dmareq : 1;
+ /** AHB Master Idle. Indicates the AHB Master State
+ * Machine is in IDLE condition. */
+ unsigned ahbidle : 1;
+ } b;
+} grstctl_t;
+
+
+/**
+ * This union represents the bit fields of the Core Interrupt Mask
+ * Register (GINTMSK). Set/clear the bits using the bit fields then
+ * write the <i>d32</i> value to the register.
+ *
+ * The bit layout matches gintsts_data_t (bit 0, curmode, is reserved
+ * here since the mode bit is not maskable).
+ */
+typedef union gintmsk_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned reserved0 : 1;
+ unsigned modemismatch : 1;
+ unsigned otgintr : 1;
+ unsigned sofintr : 1;
+ unsigned rxstsqlvl : 1;
+ unsigned nptxfempty : 1;
+ unsigned ginnakeff : 1;
+ unsigned goutnakeff : 1;
+ unsigned reserved8 : 1;
+ unsigned i2cintr : 1;
+ unsigned erlysuspend : 1;
+ unsigned usbsuspend : 1;
+ unsigned usbreset : 1;
+ unsigned enumdone : 1;
+ unsigned isooutdrop : 1;
+ unsigned eopframe : 1;
+ unsigned reserved16 : 1;
+ unsigned epmismatch : 1;
+ unsigned inepintr : 1;
+ unsigned outepintr : 1;
+ unsigned incomplisoin : 1;
+ unsigned incomplisoout : 1;
+ unsigned reserved22_23 : 2;
+ unsigned portintr : 1;
+ unsigned hcintr : 1;
+ unsigned ptxfempty : 1;
+ unsigned reserved27 : 1;
+ unsigned conidstschng : 1;
+ unsigned disconnect : 1;
+ unsigned sessreqintr : 1;
+ unsigned wkupintr : 1;
+ } b;
+} gintmsk_data_t;
+/**
+ * This union represents the bit fields of the Core Interrupt Register
+ * (GINTSTS). Set/clear the bits using the bit fields then write the
+ * <i>d32</i> value to the register.
+ */
+typedef union gintsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+#define DWC_SOF_INTR_MASK 0x0008
+ /** register bits */
+ struct {
+#define DWC_HOST_MODE 1
+ /** Current mode of operation (1 = host, see DWC_HOST_MODE) */
+ unsigned curmode : 1;
+ unsigned modemismatch : 1;
+ unsigned otgintr : 1;
+ unsigned sofintr : 1;
+ unsigned rxstsqlvl : 1;
+ unsigned nptxfempty : 1;
+ unsigned ginnakeff : 1;
+ unsigned goutnakeff : 1;
+ unsigned reserved8 : 1;
+ unsigned i2cintr : 1;
+ unsigned erlysuspend : 1;
+ unsigned usbsuspend : 1;
+ unsigned usbreset : 1;
+ unsigned enumdone : 1;
+ unsigned isooutdrop : 1;
+ unsigned eopframe : 1;
+ unsigned intokenrx : 1;
+ unsigned epmismatch : 1;
+ unsigned inepint: 1;
+ unsigned outepintr : 1;
+ unsigned incomplisoin : 1;
+ unsigned incomplisoout : 1;
+ unsigned reserved22_23 : 2;
+ unsigned portintr : 1;
+ unsigned hcintr : 1;
+ unsigned ptxfempty : 1;
+ unsigned reserved27 : 1;
+ unsigned conidstschng : 1;
+ unsigned disconnect : 1;
+ unsigned sessreqintr : 1;
+ unsigned wkupintr : 1;
+ } b;
+} gintsts_data_t;
+
+
+/**
+ * This union represents the bit fields in the Device Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i>
+ * element then read out the bits using the <i>b</i>it elements.
+ */
+typedef union device_grxsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Endpoint number */
+ unsigned epnum : 4;
+ /** Byte count */
+ unsigned bcnt : 11;
+ /** Data PID */
+ unsigned dpid : 2;
+
+#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet
+#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete
+
+#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK
+#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete
+#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet
+ /** Packet status (see DWC_STS_* / DWC_DSTS_* above) */
+ unsigned pktsts : 4;
+ /* presumably the (micro)frame number the packet was received in --
+ * TODO confirm against the core databook */
+ unsigned fn : 4;
+ unsigned reserved : 7;
+ } b;
+} device_grxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Host Receive Status Read and
+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the <i>d32</i>
+ * element then read out the bits using the <i>b</i>it elements.
+ */
+typedef union host_grxsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Channel number */
+ unsigned chnum : 4;
+ /** Byte count */
+ unsigned bcnt : 11;
+ /** Data PID */
+ unsigned dpid : 2;
+
+ /** Packet status (see DWC_GRXSTS_PKTSTS_* below) */
+ unsigned pktsts : 4;
+#define DWC_GRXSTS_PKTSTS_IN 0x2
+#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3
+#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5
+#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7
+
+ unsigned reserved : 11;
+ } b;
+} host_grxsts_data_t;
+
+/**
+ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ,
+ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the <i>d32</i> element then
+ * read out the bits using the <i>b</i>it elements.
+ */
+typedef union fifosize_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** FIFO start address */
+ unsigned startaddr : 16;
+ /** FIFO depth (presumably in 32-bit words -- confirm) */
+ unsigned depth : 16;
+ } b;
+} fifosize_data_t;
+
+/**
+ * This union represents the bit fields in the Non-Periodic Transmit
+ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the
+ * <i>d32</i> element then read out the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union gnptxsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Non-periodic TxFIFO space available */
+ unsigned nptxfspcavail : 16;
+ /** Non-periodic transmit request queue space available */
+ unsigned nptxqspcavail : 8;
+ /** Top of the Non-Periodic Transmit Request Queue
+ * - bit 24 - Terminate (Last entry for the selected
+ * channel/EP)
+ * - bits 26:25 - Token Type
+ * - 2'b00 - IN/OUT
+ * - 2'b01 - Zero Length OUT
+ * - 2'b10 - PING/Complete Split
+ * - 2'b11 - Channel Halt
+ * - bits 30:27 - Channel/EP Number
+ */
+ unsigned nptxqtop_terminate : 1;
+ unsigned nptxqtop_token : 2;
+ unsigned nptxqtop_chnep : 4;
+ unsigned reserved : 1;
+ } b;
+} gnptxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Transmit
+ * FIFO Status Register (DTXFSTS). Read the register into the
+ * <i>d32</i> element then read out the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union dtxfsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** TxFIFO space available */
+ unsigned txfspcavail : 16;
+ unsigned reserved : 16;
+ } b;
+} dtxfsts_data_t;
+
+/**
+ * This union represents the bit fields in the I2C Control Register
+ * (I2CCTL). Read the register into the <i>d32</i> element then read out the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union gi2cctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Read/write data */
+ unsigned rwdata : 8;
+ /** Register address */
+ unsigned regaddr : 8;
+ /** I2C device address */
+ unsigned addr : 7;
+ /** I2C enable */
+ unsigned i2cen : 1;
+ unsigned ack : 1;
+ unsigned i2csuspctl : 1;
+ unsigned i2cdevaddr : 2;
+ unsigned reserved : 2;
+ /** Read (1) / write (0) select */
+ unsigned rw : 1;
+ /** Busy/done */
+ unsigned bsydne : 1;
+ } b;
+} gi2cctl_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config1
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ *
+ * Encodes the direction of each endpoint, two bits per endpoint.
+ */
+typedef union hwcfg1_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ unsigned ep_dir0 : 2;
+ unsigned ep_dir1 : 2;
+ unsigned ep_dir2 : 2;
+ unsigned ep_dir3 : 2;
+ unsigned ep_dir4 : 2;
+ unsigned ep_dir5 : 2;
+ unsigned ep_dir6 : 2;
+ unsigned ep_dir7 : 2;
+ unsigned ep_dir8 : 2;
+ unsigned ep_dir9 : 2;
+ unsigned ep_dir10 : 2;
+ unsigned ep_dir11 : 2;
+ unsigned ep_dir12 : 2;
+ unsigned ep_dir13 : 2;
+ unsigned ep_dir14 : 2;
+ unsigned ep_dir15 : 2;
+ } b;
+} hwcfg1_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config2
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ * (HW Config registers describe the synthesized core configuration and
+ * are presumably read-only -- confirm against the databook.)
+ */
+typedef union hwcfg2_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /* GHWCFG2 */
+ /** Mode of operation (see DWC_HWCFG2_OP_MODE_* below) */
+ unsigned op_mode : 3;
+#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0
+#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1
+#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2
+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+
+ unsigned architecture : 2;
+ unsigned point2point : 1;
+ /** High-speed PHY type (see DWC_HWCFG2_HS_PHY_TYPE_* below) */
+ unsigned hs_phy_type : 2;
+#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1
+#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2
+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
+
+ unsigned fs_phy_type : 2;
+ unsigned num_dev_ep : 4;
+ unsigned num_host_chan : 4;
+ unsigned perio_ep_supported : 1;
+ unsigned dynamic_fifo : 1;
+ unsigned rx_status_q_depth : 2;
+ unsigned nonperio_tx_q_depth : 2;
+ unsigned host_perio_tx_q_depth : 2;
+ unsigned dev_token_q_depth : 5;
+ unsigned reserved31 : 1;
+ } b;
+} hwcfg2_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config3
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg3_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /* GHWCFG3 */
+ unsigned xfer_size_cntr_width : 4;
+ unsigned packet_size_cntr_width : 3;
+ unsigned otg_func : 1;
+ unsigned i2c : 1;
+ unsigned vendor_ctrl_if : 1;
+ unsigned optional_features : 1;
+ unsigned synch_reset_type : 1;
+ unsigned reserved15_12 : 4;
+ /** Total data FIFO depth */
+ unsigned dfifo_depth : 16;
+ } b;
+} hwcfg3_data_t;
+
+/**
+ * This union represents the bit fields in the User HW Config4
+ * Register. Read the register into the <i>d32</i> element then read
+ * out the bits using the <i>b</i>it elements.
+ */
+typedef union hwcfg4_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Number of device-mode periodic IN endpoints */
+ unsigned num_dev_perio_in_ep : 4;
+ unsigned power_optimiz : 1;
+ unsigned min_ahb_freq : 9;
+ unsigned utmi_phy_data_width : 2;
+ unsigned num_dev_mode_ctrl_ep : 4;
+ unsigned iddig_filt_en : 1;
+ unsigned vbus_valid_filt_en : 1;
+ unsigned a_valid_filt_en : 1;
+ unsigned b_valid_filt_en : 1;
+ unsigned session_end_filt_en : 1;
+ /** Dedicated transmit FIFOs enabled */
+ unsigned ded_fifo_en : 1;
+ unsigned num_in_eps : 4;
+ unsigned reserved31_30 : 2;
+ } b;
+} hwcfg4_data_t;
+
+////////////////////////////////////////////
+// Device Registers
+/**
+ * Device Global Registers. <i>Offsets 800h-BFFh</i>
+ *
+ * The following structures define the size and relative field offsets
+ * for the Device Mode Registers.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_global_regs
+{
+ /** Device Configuration Register. <i>Offset 800h</i> */
+ volatile uint32_t dcfg;
+ /** Device Control Register. <i>Offset: 804h</i> */
+ volatile uint32_t dctl;
+ /** Device Status Register (Read Only). <i>Offset: 808h</i> */
+ volatile uint32_t dsts;
+ /** Reserved. <i>Offset: 80Ch</i> */
+ uint32_t unused;
+ /** Device IN Endpoint Common Interrupt Mask
+ * Register. <i>Offset: 810h</i> */
+ volatile uint32_t diepmsk;
+ /** Device OUT Endpoint Common Interrupt Mask
+ * Register. <i>Offset: 814h</i> */
+ volatile uint32_t doepmsk;
+ /** Device All Endpoints Interrupt Register. <i>Offset: 818h</i> */
+ volatile uint32_t daint;
+ /** Device All Endpoints Interrupt Mask Register. <i>Offset:
+ * 81Ch</i> */
+ volatile uint32_t daintmsk;
+ /** Device IN Token Queue Read Register-1 (Read Only).
+ * <i>Offset: 820h</i> */
+ volatile uint32_t dtknqr1;
+ /** Device IN Token Queue Read Register-2 (Read Only).
+ * <i>Offset: 824h</i> */
+ volatile uint32_t dtknqr2;
+ /** Device VBUS discharge Register. <i>Offset: 828h</i> */
+ volatile uint32_t dvbusdis;
+ /** Device VBUS Pulse Register. <i>Offset: 82Ch</i> */
+ volatile uint32_t dvbuspulse;
+ /** Device IN Token Queue Read Register-3 (Read Only). /
+ * Device Thresholding control register (Read/Write)
+ * <i>Offset: 830h</i> */
+ volatile uint32_t dtknqr3_dthrctl;
+ /** Device IN Token Queue Read Register-4 (Read Only). /
+ * Device IN EPs empty Intr. Mask Register (Read/Write)
+ * <i>Offset: 834h</i> */
+ volatile uint32_t dtknqr4_fifoemptymsk;
+} dwc_otg_device_global_regs_t;
+
+/**
+ * This union represents the bit fields in the Device Configuration
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements. Write the
+ * <i>d32</i> member to the dcfg register.
+ */
+typedef union dcfg_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Device Speed */
+ unsigned devspd : 2;
+ /** Non Zero Length Status OUT Handshake */
+ unsigned nzstsouthshk : 1;
+#define DWC_DCFG_SEND_STALL 1
+
+ unsigned reserved3 : 1;
+ /** Device Address */
+ unsigned devaddr : 7;
+ /** Periodic Frame Interval (see DWC_DCFG_FRAME_INTERVAL_* below) */
+ unsigned perfrint : 2;
+#define DWC_DCFG_FRAME_INTERVAL_80 0
+#define DWC_DCFG_FRAME_INTERVAL_85 1
+#define DWC_DCFG_FRAME_INTERVAL_90 2
+#define DWC_DCFG_FRAME_INTERVAL_95 3
+
+ unsigned reserved13_17 : 5;
+ /** In Endpoint Mis-match count */
+ unsigned epmscnt : 4;
+ } b;
+} dcfg_data_t;
+
+/**
+ * This union represents the bit fields in the Device Control
+ * Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union dctl_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Remote Wakeup */
+ unsigned rmtwkupsig : 1;
+ /** Soft Disconnect */
+ unsigned sftdiscon : 1;
+ /** Global Non-Periodic IN NAK Status */
+ unsigned gnpinnaksts : 1;
+ /** Global OUT NAK Status */
+ unsigned goutnaksts : 1;
+ /** Test Control (test mode select) */
+ unsigned tstctl : 3;
+ /** Set Global Non-Periodic IN NAK */
+ unsigned sgnpinnak : 1;
+ /** Clear Global Non-Periodic IN NAK */
+ unsigned cgnpinnak : 1;
+ /** Set Global OUT NAK */
+ unsigned sgoutnak : 1;
+ /** Clear Global OUT NAK */
+ unsigned cgoutnak : 1;
+
+ unsigned reserved : 21;
+ } b;
+} dctl_data_t;
+
+/**
+ * This union represents the bit fields in the Device Status
+ * Register (read-only; see dwc_otg_dev_global_regs.dsts). Read the
+ * register into the <i>d32</i> member then read the bits using the
+ * <i>b</i>it elements.
+ */
+typedef union dsts_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Suspend Status */
+ unsigned suspsts : 1;
+ /** Enumerated Speed */
+ unsigned enumspd : 2;
+#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0
+#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1
+#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2
+#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3
+ /** Erratic Error */
+ unsigned errticerr : 1;
+ unsigned reserved4_7: 4;
+ /** Frame or Microframe Number of the received SOF */
+ unsigned soffn : 14;
+ unsigned reserved22_31 : 10;
+ } b;
+} dsts_data_t;
+
+
+/**
+ * This union represents the bit fields in the Device IN EP Interrupt
+ * Register and the Device IN EP Common Mask Register.
+ *
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union diepint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Transfer complete mask */
+ unsigned xfercompl : 1;
+ /** Endpoint disable mask */
+ unsigned epdisabled : 1;
+ /** AHB Error mask */
+ unsigned ahberr : 1;
+ /** TimeOUT Handshake mask (non-ISOC EPs) */
+ unsigned timeout : 1;
+ /** IN Token received with TxF Empty mask */
+ unsigned intktxfemp : 1;
+ /** IN Token Received with EP mismatch mask */
+ unsigned intknepmis : 1;
+ /** IN Endpoint HAK Effective mask */
+ unsigned inepnakeff : 1;
+ /** IN Endpoint HAK Effective mask */
+ unsigned emptyintr : 1;
+
+ unsigned txfifoundrn : 1;
+
+ unsigned reserved08_31 : 23;
+ } b;
+} diepint_data_t;
+/**
+ * This union represents the bit fields in the Device IN EP Common
+ * Interrupt Mask Register.
+ */
+typedef union diepint_data diepmsk_data_t;
+
+/**
+ * This union represents the bit fields in the Device OUT EP Interrupt
+ * Register and Device OUT EP Common Interrupt Mask Register.
+ *
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union doepint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** Transfer complete */
+ unsigned xfercompl : 1;
+ /** Endpoint disable */
+ unsigned epdisabled : 1;
+ /** AHB Error */
+ unsigned ahberr : 1;
+ /** Setup Phase Done (control EPs) */
+ unsigned setup : 1;
+ unsigned reserved04_31 : 28;
+ } b;
+} doepint_data_t;
+/**
+ * This union represents the bit fields in the Device OUT EP Common
+ * Interrupt Mask Register.
+ */
+typedef union doepint_data doepmsk_data_t;
+
+
+/**
+ * This union represents the bit fields in the Device All EP Interrupt
+ * and Mask Registers.
+ * - Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements.
+ */
+typedef union daint_data
+{
+ /** raw register data */
+ uint32_t d32;
+ /** register bits */
+ struct {
+ /** IN Endpoint bits */
+ unsigned in : 16;
+ /** OUT Endpoint bits */
+ unsigned out : 16;
+ } ep;
+ struct {
+ /** IN Endpoint bits */
+ unsigned inep0 : 1;
+ unsigned inep1 : 1;
+ unsigned inep2 : 1;
+ unsigned inep3 : 1;
+ unsigned inep4 : 1;
+ unsigned inep5 : 1;
+ unsigned inep6 : 1;
+ unsigned inep7 : 1;
+ unsigned inep8 : 1;
+ unsigned inep9 : 1;
+ unsigned inep10 : 1;
+ unsigned inep11 : 1;
+ unsigned inep12 : 1;
+ unsigned inep13 : 1;
+ unsigned inep14 : 1;
+ unsigned inep15 : 1;
+ /** OUT Endpoint bits */
+ unsigned outep0 : 1;
+ unsigned outep1 : 1;
+ unsigned outep2 : 1;
+ unsigned outep3 : 1;
+ unsigned outep4 : 1;
+ unsigned outep5 : 1;
+ unsigned outep6 : 1;
+ unsigned outep7 : 1;
+ unsigned outep8 : 1;
+ unsigned outep9 : 1;
+ unsigned outep10 : 1;
+ unsigned outep11 : 1;
+ unsigned outep12 : 1;
+ unsigned outep13 : 1;
+ unsigned outep14 : 1;
+ unsigned outep15 : 1;
+ } b;
+} daint_data_t;
+
+/**
+ * This union represents the bit fields in the Device IN Token Queue
+ * Read Registers.
+ * - Read the register into the <i>d32</i> member.
+ * - READ-ONLY Register
+ */
+typedef union dtknq1_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** In Token Queue Write Pointer */
+		unsigned intknwptr : 5;
+		/** Reserved */
+		unsigned reserved05_06 : 2;
+		/** write pointer has wrapped. */
+		unsigned wrap_bit : 1;
+		/** EP Numbers of IN Tokens 0 ... 5 (six 4-bit entries) */
+		unsigned epnums0_5 : 24;
+	}b;
+} dtknq1_data_t;
+
+/**
+ * This union represents the Device Threshold Control Register (DTHRCTL).
+ * - Read and write the register into the <i>d32</i> member.
+ * - READ-WRITABLE Register
+ */
+typedef union dthrctl_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** non ISO Tx Thr. Enable */
+		unsigned non_iso_thr_en : 1;
+		/** ISO Tx Thr. Enable */
+		unsigned iso_thr_en : 1;
+		/** Tx Thr. Length */
+		unsigned tx_thr_len : 9;
+		/** Reserved */
+		unsigned reserved11_15 : 5;
+		/** Rx Thr. Enable */
+		unsigned rx_thr_en : 1;
+		/** Rx Thr. Length */
+		unsigned rx_thr_len : 9;
+		/** Reserved */
+		unsigned reserved26_31 : 6;
+	}b;
+} dthrctl_data_t;
+
+
+/**
+ * Device Logical IN Endpoint-Specific Registers. <i>Offsets
+ * 900h-AFCh</i>
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.  Each set occupies a 20h stride.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_in_ep_regs
+{
+	/** Device IN Endpoint Control Register. <i>Offset:900h +
+	 * (ep_num * 20h) + 00h</i> */
+	volatile uint32_t diepctl;
+	/** Reserved. <i>Offset:900h + (ep_num * 20h) + 04h</i> */
+	uint32_t reserved04;
+	/** Device IN Endpoint Interrupt Register. <i>Offset:900h +
+	 * (ep_num * 20h) + 08h</i> */
+	volatile uint32_t diepint;
+	/** Reserved. <i>Offset:900h + (ep_num * 20h) + 0Ch</i> */
+	uint32_t reserved0C;
+	/** Device IN Endpoint Transfer Size
+	 * Register. <i>Offset:900h + (ep_num * 20h) + 10h</i> */
+	volatile uint32_t dieptsiz;
+	/** Device IN Endpoint DMA Address Register. <i>Offset:900h +
+	 * (ep_num * 20h) + 14h</i> */
+	volatile uint32_t diepdma;
+	/** Device IN Endpoint Transmit FIFO Status Register. <i>Offset:900h +
+	 * (ep_num * 20h) + 18h</i> */
+	volatile uint32_t dtxfsts;
+	/** Reserved. <i>Offset:900h + (ep_num * 20h) + 1Ch</i>
+	 * NOTE(review): the member is named reserved18 but it pads
+	 * offset 1Ch of the 20h stride. */
+	uint32_t reserved18;
+} dwc_otg_dev_in_ep_regs_t;
+
+/**
+ * Device Logical OUT Endpoint-Specific Registers. <i>Offsets:
+ * B00h-CFCh</i>
+ *
+ * There will be one set of endpoint registers per logical endpoint
+ * implemented.  Each set occupies a 20h stride.
+ *
+ * <i>These registers are visible only in Device mode and must not be
+ * accessed in Host mode, as the results are unknown.</i>
+ */
+typedef struct dwc_otg_dev_out_ep_regs
+{
+	/** Device OUT Endpoint Control Register. <i>Offset:B00h +
+	 * (ep_num * 20h) + 00h</i> */
+	volatile uint32_t doepctl;
+	/** Device OUT Endpoint Frame number Register. <i>Offset:
+	 * B00h + (ep_num * 20h) + 04h</i> */
+	volatile uint32_t doepfn;
+	/** Device OUT Endpoint Interrupt Register. <i>Offset:B00h +
+	 * (ep_num * 20h) + 08h</i> */
+	volatile uint32_t doepint;
+	/** Reserved. <i>Offset:B00h + (ep_num * 20h) + 0Ch</i> */
+	uint32_t reserved0C;
+	/** Device OUT Endpoint Transfer Size Register. <i>Offset:
+	 * B00h + (ep_num * 20h) + 10h</i> */
+	volatile uint32_t doeptsiz;
+	/** Device OUT Endpoint DMA Address Register. <i>Offset:B00h
+	 * + (ep_num * 20h) + 14h</i> */
+	volatile uint32_t doepdma;
+	/** Reserved padding to the 20h stride. <i>Offset:B00h + (ep_num * 20h) + 18h - B00h +
+	 * (ep_num * 20h) + 1Ch</i> */
+	uint32_t unused[2];
+} dwc_otg_dev_out_ep_regs_t;
+
+/**
+ * This union represents the bit fields in the Device EP Control
+ * Register (DIEPCTLn/DOEPCTLn). Read the register into the <i>d32</i>
+ * member then set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union depctl_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Maximum Packet Size
+		 * IN/OUT EPn
+		 * IN/OUT EP0 - 2 bits
+		 *	 2'b00: 64 Bytes
+		 *	 2'b01: 32
+		 *	 2'b10: 16
+		 *	 2'b11: 8 */
+		unsigned mps : 11;
+/* Encodings below apply to the 2-bit EP0 MPS field only. */
+#define DWC_DEP0CTL_MPS_64	 0
+#define DWC_DEP0CTL_MPS_32	 1
+#define DWC_DEP0CTL_MPS_16	 2
+#define DWC_DEP0CTL_MPS_8	 3
+
+		/** Next Endpoint
+		 * IN EPn/IN EP0
+		 * OUT EPn/OUT EP0 - reserved */
+		unsigned nextep : 4;
+
+		/** USB Active Endpoint */
+		unsigned usbactep : 1;
+
+		/** Endpoint DPID (INTR/Bulk IN and OUT endpoints)
+		 * This field contains the PID of the packet going to
+		 * be received or transmitted on this endpoint. The
+		 * application should program the PID of the first
+		 * packet going to be received or transmitted on this
+		 * endpoint , after the endpoint is
+		 * activated. Application use the SetD1PID and
+		 * SetD0PID fields of this register to program either
+		 * D0 or D1 PID.
+		 *
+		 * The encoding for this field is
+		 *	 - 0: D0
+		 *	 - 1: D1
+		 */
+		unsigned dpid : 1;
+
+		/** NAK Status */
+		unsigned naksts : 1;
+
+		/** Endpoint Type
+		 *	2'b00: Control
+		 *	2'b01: Isochronous
+		 *	2'b10: Bulk
+		 *	2'b11: Interrupt */
+		unsigned eptype : 2;
+
+		/** Snoop Mode
+		 * OUT EPn/OUT EP0
+		 * IN EPn/IN EP0 - reserved */
+		unsigned snp : 1;
+
+		/** Stall Handshake */
+		unsigned stall : 1;
+
+		/** Tx Fifo Number
+		 * IN EPn/IN EP0
+		 * OUT EPn/OUT EP0 - reserved */
+		unsigned txfnum : 4;
+
+		/** Clear NAK */
+		unsigned cnak : 1;
+		/** Set NAK */
+		unsigned snak : 1;
+		/** Set DATA0 PID (INTR/Bulk IN and OUT endpoints)
+		 * Writing to this field sets the Endpoint DPID (DPID)
+		 * field in this register to DATA0. Set Even
+		 * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints)
+		 * Writing to this field sets the Even/Odd
+		 * (micro)frame (EO_FrNum) field to even (micro)
+		 * frame.
+		 */
+		unsigned setd0pid : 1;
+		/** Set DATA1 PID (INTR/Bulk IN and OUT endpoints)
+		 * Writing to this field sets the Endpoint DPID (DPID)
+		 * field in this register to DATA1 Set Odd
+		 * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints)
+		 * Writing to this field sets the Even/Odd
+		 * (micro)frame (EO_FrNum) field to odd (micro) frame.
+		 */
+		unsigned setd1pid : 1;
+
+		/** Endpoint Disable */
+		unsigned epdis : 1;
+		/** Endpoint Enable */
+		unsigned epena : 1;
+	} b;
+} depctl_data_t;
+
+/**
+ * This union represents the bit fields in the Device EP Transfer
+ * Size Register. Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union deptsiz_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Transfer size (bits 18:0) */
+		unsigned xfersize : 19;
+		/** Packet Count (bits 28:19) */
+		unsigned pktcnt : 10;
+		/** Multi Count - Periodic IN endpoints (bits 30:29) */
+		unsigned mc : 2;
+		/** Reserved (bit 31) */
+		unsigned reserved : 1;
+	} b;
+} deptsiz_data_t;
+
+/**
+ * This union represents the bit fields in the Device EP 0 Transfer
+ * Size Register (DIEPTSIZ0/DOEPTSIZ0). Read the register into the
+ * <i>d32</i> member then set/clear the bits using the <i>b</i>it
+ * elements.
+ */
+typedef union deptsiz0_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Transfer size (bits 6:0) */
+		unsigned xfersize : 7;
+		/** Reserved (bits 18:7) */
+		unsigned reserved7_18 : 12;
+		/** Packet Count (bits 20:19) */
+		unsigned pktcnt : 2;
+		/** Reserved (bits 28:21).
+		 * Fixed: this field was declared with width 9, which
+		 * shifted supcnt up to bits 31:30 and, together with
+		 * the width-less reserved31 below, grew the struct
+		 * past 32 bits so it no longer overlaid d32. */
+		unsigned reserved21_28 : 8;
+		/** Setup Packet Count (DOEPTSIZ0 Only, bits 30:29) */
+		unsigned supcnt : 2;
+		/** Reserved (bit 31) */
+		unsigned reserved31 : 1;
+	} b;
+} deptsiz0_data_t;
+
+
+/** Maximum number of Periodic FIFOs */
+#define MAX_PERIO_FIFOS 15
+/** Maximum number of Tx FIFOs */
+#define MAX_TX_FIFOS 15
+
+/** Maximum number of Endpoints/HostChannels */
+#define MAX_EPS_CHANNELS 4
+
+/**
+ * The dwc_otg_dev_if structure contains information needed to manage
+ * the DWC_otg controller acting in device mode. It represents the
+ * programming view of the device-specific aspects of the controller.
+ */
+typedef struct dwc_otg_dev_if
+{
+	/** Pointer to device Global registers.
+	 * Device Global Registers starting at offset 800h
+	 */
+	dwc_otg_device_global_regs_t *dev_global_regs;
+#define DWC_DEV_GLOBAL_REG_OFFSET 0x800
+
+	/**
+	 * Device Logical IN Endpoint-Specific Registers 900h-AFCh
+	 */
+	dwc_otg_dev_in_ep_regs_t	 *in_ep_regs[MAX_EPS_CHANNELS/2];
+#define DWC_DEV_IN_EP_REG_OFFSET 0x900
+#define DWC_EP_REG_OFFSET 0x20
+
+	/** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */
+	dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS/2];
+#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00
+
+	/* Device configuration information*/
+	uint8_t	 speed;			 /**< Device Speed	0: Unknown, 1: LS, 2:FS, 3: HS */
+	uint8_t	 num_in_eps;	 /**< Number of Tx EPs, range: 0-15 except EP0 */
+	uint8_t	 num_out_eps;	 /**< Number of Rx EPs, range: 0-15 except EP0 */
+
+	/** Size of periodic FIFOs (Bytes) */
+	uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS];
+
+	/** Size of Tx FIFOs (Bytes) */
+	uint16_t tx_fifo_size[MAX_TX_FIFOS];
+
+	/** Thresholding enable flags and length variables **/
+	uint16_t rx_thr_en;
+	uint16_t iso_tx_thr_en;
+	uint16_t non_iso_tx_thr_en;
+
+	uint16_t rx_thr_length;
+	uint16_t tx_thr_length;
+
+} dwc_otg_dev_if_t;
+
+
+/////////////////////////////////////////////////
+// Host Mode Register Structures
+//
+/**
+ * The Host Global Registers structure defines the size and relative
+ * field offsets for the Host Mode Global Registers. Host Global
+ * Registers offsets 400h-7FFh.  Only offsets 400h-418h are
+ * represented in this structure.
+*/
+typedef struct dwc_otg_host_global_regs
+{
+	/** Host Configuration Register. <i>Offset: 400h</i> */
+	volatile uint32_t hcfg;
+	/** Host Frame Interval Register. <i>Offset: 404h</i> */
+	volatile uint32_t hfir;
+	/** Host Frame Number / Frame Remaining Register. <i>Offset: 408h</i> */
+	volatile uint32_t hfnum;
+	/** Reserved. <i>Offset: 40Ch</i> */
+	uint32_t reserved40C;
+	/** Host Periodic Transmit FIFO/ Queue Status Register. <i>Offset: 410h</i> */
+	volatile uint32_t hptxsts;
+	/** Host All Channels Interrupt Register. <i>Offset: 414h</i> */
+	volatile uint32_t haint;
+	/** Host All Channels Interrupt Mask Register. <i>Offset: 418h</i> */
+	volatile uint32_t haintmsk;
+} dwc_otg_host_global_regs_t;
+
+/**
+ * This union represents the bit fields in the Host Configuration Register.
+ * Read the register into the <i>d32</i> member then set/clear the bits using
+ * the <i>b</i>it elements. Write the <i>d32</i> member to the hcfg register.
+ */
+typedef union hcfg_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** FS/LS Phy Clock Select */
+		unsigned fslspclksel : 2;
+#define DWC_HCFG_30_60_MHZ 0
+#define DWC_HCFG_48_MHZ	   1
+#define DWC_HCFG_6_MHZ	   2
+
+		/** FS/LS Only Support */
+		unsigned fslssupp : 1;
+		/* Bits 3..31 are not represented in this view. */
+	} b;
+} hcfg_data_t;
+
+/**
+ * This union represents the bit fields in the Host Frame Interval
+ * Register (HFIR).
+ */
+typedef union hfir_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** Frame Interval (bits 15:0) */
+		unsigned frint : 16;
+		/** Reserved (bits 31:16) */
+		unsigned reserved : 16;
+	} b;
+} hfir_data_t;
+
+/**
+ * This union represents the bit fields in the Host Frame Number /
+ * Frame Remaining Register (HFNUM).
+ */
+typedef union hfnum_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** Frame Number (bits 15:0) */
+		unsigned frnum : 16;
+#define DWC_HFNUM_MAX_FRNUM 0x3FFF
+		/** Frame Time Remaining (bits 31:16) */
+		unsigned frrem : 16;
+	} b;
+} hfnum_data_t;
+
+/**
+ * This union represents the bit fields in the Host Periodic Transmit
+ * FIFO/Queue Status Register (HPTXSTS).
+ */
+typedef union hptxsts_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** Periodic Transmit FIFO space available (bits 15:0) */
+		unsigned ptxfspcavail : 16;
+		/** Periodic Transmit request queue space available (bits 23:16) */
+		unsigned ptxqspcavail : 8;
+		/** Top of the Periodic Transmit Request Queue
+		 *	- bit 24 - Terminate (last entry for the selected channel)
+		 *	- bits 26:25 - Token Type
+		 *	  - 2'b00 - Zero length
+		 *	  - 2'b01 - Ping
+		 *	  - 2'b10 - Disable
+		 *	- bits 30:27 - Channel Number
+		 *	- bit 31 - Odd/even microframe
+		 */
+		unsigned ptxqtop_terminate : 1;
+		unsigned ptxqtop_token : 2;
+		unsigned ptxqtop_chnum : 4;
+		unsigned ptxqtop_odd : 1;
+	} b;
+} hptxsts_data_t;
+
+/**
+ * This union represents the bit fields in the Host Port Control and Status
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hprt0 register.
+ */
+typedef union hprt0_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Port Connect Status */
+		unsigned prtconnsts : 1;
+		/** Port Connect Detected */
+		unsigned prtconndet : 1;
+		/** Port Enable */
+		unsigned prtena : 1;
+		/** Port Enable/Disable Change */
+		unsigned prtenchng : 1;
+		/** Port Overcurrent Active */
+		unsigned prtovrcurract : 1;
+		/** Port Overcurrent Change */
+		unsigned prtovrcurrchng : 1;
+		/** Port Resume */
+		unsigned prtres : 1;
+		/** Port Suspend */
+		unsigned prtsusp : 1;
+		/** Port Reset */
+		unsigned prtrst : 1;
+		/** Reserved (bit 9) */
+		unsigned reserved9 : 1;
+		/** Port Line Status */
+		unsigned prtlnsts : 2;
+		/** Port Power */
+		unsigned prtpwr : 1;
+		/** Port Test Control */
+		unsigned prttstctl : 4;
+		/** Port Speed */
+		unsigned prtspd : 2;
+#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0
+#define DWC_HPRT0_PRTSPD_FULL_SPEED 1
+#define DWC_HPRT0_PRTSPD_LOW_SPEED	3
+		unsigned reserved19_31 : 13;
+	} b;
+} hprt0_data_t;
+
+/**
+ * This union represents the bit fields in the Host All Channels
+ * Interrupt Register.  The <i>b</i> and <i>b2</i> members are two
+ * overlay views of the same 16 channel bits.
+ */
+typedef union haint_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits, one per channel */
+	struct {
+		unsigned ch0 : 1;
+		unsigned ch1 : 1;
+		unsigned ch2 : 1;
+		unsigned ch3 : 1;
+		unsigned ch4 : 1;
+		unsigned ch5 : 1;
+		unsigned ch6 : 1;
+		unsigned ch7 : 1;
+		unsigned ch8 : 1;
+		unsigned ch9 : 1;
+		unsigned ch10 : 1;
+		unsigned ch11 : 1;
+		unsigned ch12 : 1;
+		unsigned ch13 : 1;
+		unsigned ch14 : 1;
+		unsigned ch15 : 1;
+		unsigned reserved : 16;
+	} b;
+
+	/** channel bits as one 16-bit field */
+	struct {
+		unsigned chint : 16;
+		unsigned reserved : 16;
+	} b2;
+} haint_data_t;
+
+/**
+ * This union represents the bit fields in the Host All Channels
+ * Interrupt Mask Register; same layout as haint_data.
+ */
+typedef union haintmsk_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits, one per channel */
+	struct {
+		unsigned ch0 : 1;
+		unsigned ch1 : 1;
+		unsigned ch2 : 1;
+		unsigned ch3 : 1;
+		unsigned ch4 : 1;
+		unsigned ch5 : 1;
+		unsigned ch6 : 1;
+		unsigned ch7 : 1;
+		unsigned ch8 : 1;
+		unsigned ch9 : 1;
+		unsigned ch10 : 1;
+		unsigned ch11 : 1;
+		unsigned ch12 : 1;
+		unsigned ch13 : 1;
+		unsigned ch14 : 1;
+		unsigned ch15 : 1;
+		unsigned reserved : 16;
+	} b;
+
+	/** channel bits as one 16-bit field */
+	struct {
+		unsigned chint : 16;
+		unsigned reserved : 16;
+	} b2;
+} haintmsk_data_t;
+
+/**
+ * Host Channel Specific Registers. <i>500h-5FCh</i>
+ * One instance per host channel; each occupies a 20h stride.
+ */
+typedef struct dwc_otg_hc_regs
+{
+	/** Host Channel Characteristic Register. <i>Offset: 500h + (chan_num * 20h) + 00h</i> */
+	volatile uint32_t hcchar;
+	/** Host Channel Split Control Register. <i>Offset: 500h + (chan_num * 20h) + 04h</i> */
+	volatile uint32_t hcsplt;
+	/** Host Channel Interrupt Register. <i>Offset: 500h + (chan_num * 20h) + 08h</i> */
+	volatile uint32_t hcint;
+	/** Host Channel Interrupt Mask Register. <i>Offset: 500h + (chan_num * 20h) + 0Ch</i> */
+	volatile uint32_t hcintmsk;
+	/** Host Channel Transfer Size Register. <i>Offset: 500h + (chan_num * 20h) + 10h</i> */
+	volatile uint32_t hctsiz;
+	/** Host Channel DMA Address Register. <i>Offset: 500h + (chan_num * 20h) + 14h</i> */
+	volatile uint32_t hcdma;
+	/** Reserved.  <i>Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch</i> */
+	uint32_t reserved[2];
+} dwc_otg_hc_regs_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Characteristics
+ * Register (HCCHARn). Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements. Write the <i>d32</i>
+ * member to the hcchar register.
+ */
+typedef union hcchar_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** Maximum packet size in bytes */
+		unsigned mps : 11;
+
+		/** Endpoint number */
+		unsigned epnum : 4;
+
+		/** 0: OUT, 1: IN */
+		unsigned epdir : 1;
+
+		unsigned reserved : 1;
+
+		/** 0: Full/high speed device, 1: Low speed device */
+		unsigned lspddev : 1;
+
+		/** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */
+		unsigned eptype : 2;
+
+		/** Packets per frame for periodic transfers. 0 is reserved. */
+		unsigned multicnt : 2;
+
+		/** Device address */
+		unsigned devaddr : 7;
+
+		/**
+		 * Frame to transmit periodic transaction.
+		 * 0: even, 1: odd
+		 */
+		unsigned oddfrm : 1;
+
+		/** Channel disable */
+		unsigned chdis : 1;
+
+		/** Channel enable */
+		unsigned chen : 1;
+	} b;
+} hcchar_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Split Control
+ * Register (HCSPLTn).
+ */
+typedef union hcsplt_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** Port Address */
+		unsigned prtaddr : 7;
+
+		/** Hub Address */
+		unsigned hubaddr : 7;
+
+		/** Transaction Position */
+		unsigned xactpos : 2;
+#define DWC_HCSPLIT_XACTPOS_MID 0
+#define DWC_HCSPLIT_XACTPOS_END 1
+#define DWC_HCSPLIT_XACTPOS_BEGIN 2
+#define DWC_HCSPLIT_XACTPOS_ALL 3
+
+		/** Do Complete Split */
+		unsigned compsplt : 1;
+
+		/** Reserved */
+		unsigned reserved : 14;
+
+		/** Split Enable */
+		unsigned spltena : 1;
+	} b;
+} hcsplt_data_t;
+
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt
+ * Register (HCINTn).
+ */
+typedef union hcint_data
+{
+	/** raw register data */
+	uint32_t d32;
+	/** register bits */
+	struct {
+		/** Transfer Complete */
+		unsigned xfercomp : 1;
+		/** Channel Halted */
+		unsigned chhltd : 1;
+		/** AHB Error */
+		unsigned ahberr : 1;
+		/** STALL Response Received */
+		unsigned stall : 1;
+		/** NAK Response Received */
+		unsigned nak : 1;
+		/** ACK Response Received */
+		unsigned ack : 1;
+		/** NYET Response Received */
+		unsigned nyet : 1;
+		/** Transaction Err */
+		unsigned xacterr : 1;
+		/** Babble Error */
+		unsigned bblerr : 1;
+		/** Frame Overrun */
+		unsigned frmovrun : 1;
+		/** Data Toggle Error */
+		unsigned datatglerr : 1;
+		/** Reserved */
+		unsigned reserved : 21;
+	} b;
+} hcint_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Transfer Size
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hctsiz register.
+ */
+typedef union hctsiz_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** Total transfer size in bytes */
+		unsigned xfersize : 19;
+
+		/** Data packets to transfer */
+		unsigned pktcnt : 10;
+
+		/**
+		 * Packet ID for next data packet
+		 * 0: DATA0
+		 * 1: DATA2
+		 * 2: DATA1
+		 * 3: MDATA (non-Control), SETUP (Control)
+		 */
+		unsigned pid : 2;
+#define DWC_HCTSIZ_DATA0 0
+#define DWC_HCTSIZ_DATA1 2
+#define DWC_HCTSIZ_DATA2 1
+#define DWC_HCTSIZ_MDATA 3
+#define DWC_HCTSIZ_SETUP 3
+
+		/** Do PING protocol when 1 */
+		unsigned dopng : 1;
+	} b;
+} hctsiz_data_t;
+
+/**
+ * This union represents the bit fields in the Host Channel Interrupt Mask
+ * Register. Read the register into the <i>d32</i> member then set/clear the
+ * bits using the <i>b</i>it elements. Write the <i>d32</i> member to the
+ * hcintmsk register.
+ */
+typedef union hcintmsk_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits; one mask bit per hcint_data interrupt */
+	struct {
+		unsigned xfercompl : 1;
+		unsigned chhltd : 1;
+		unsigned ahberr : 1;
+		unsigned stall : 1;
+		unsigned nak : 1;
+		unsigned ack : 1;
+		unsigned nyet : 1;
+		unsigned xacterr : 1;
+		unsigned bblerr : 1;
+		unsigned frmovrun : 1;
+		unsigned datatglerr : 1;
+		unsigned reserved : 21;
+	} b;
+} hcintmsk_data_t;
+
+/** OTG Host Interface Structure.
+ *
+ * The OTG Host Interface Structure structure contains information
+ * needed to manage the DWC_otg controller acting in host mode. It
+ * represents the programming view of the host-specific aspects of the
+ * controller.
+ */
+typedef struct dwc_otg_host_if
+{
+	/** Host Global Registers starting at offset 400h.*/
+	dwc_otg_host_global_regs_t *host_global_regs;
+#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400
+
+	/** Host Port 0 Control and Status Register */
+	volatile uint32_t *hprt0;
+#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440
+
+
+	/** Host Channel Specific Registers at offsets 500h-5FCh.
+	 * NOTE(review): array is sized by MAX_EPS_CHANNELS (4) while
+	 * num_host_channels below documents a range of 1-16 — confirm
+	 * the sizing against the configured core. */
+	dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS];
+#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500
+#define DWC_OTG_CHAN_REGS_OFFSET 0x20
+
+
+	/* Host configuration information */
+	/** Number of Host Channels (range: 1-16) */
+	uint8_t	 num_host_channels;
+	/** Periodic EPs supported (0: no, 1: yes) */
+	uint8_t	 perio_eps_supported;
+	/** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */
+	uint16_t perio_tx_fifo_size;
+
+} dwc_otg_host_if_t;
+
+/**
+ * This union represents the bit fields in the Power and Clock Gating Control
+ * Register (PCGCCTL). Read the register into the <i>d32</i> member then
+ * set/clear the bits using the <i>b</i>it elements.
+ */
+typedef union pcgcctl_data
+{
+	/** raw register data */
+	uint32_t d32;
+
+	/** register bits */
+	struct {
+		/** Stop Pclk */
+		unsigned stoppclk : 1;
+		/** Gate Hclk */
+		unsigned gatehclk : 1;
+		/** Power Clamp */
+		unsigned pwrclmp : 1;
+		/** Reset Power Down Modules */
+		unsigned rstpdwnmodule : 1;
+		/** PHY Suspended */
+		unsigned physuspended : 1;
+		/** Reserved (bits 5..31) */
+		unsigned reserved : 27;
+	} b;
+} pcgcctl_data_t;
+
+#endif /* CONFIG_4xx */
+#endif
diff --git a/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h b/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h
new file mode 100644
index 00000000000..c2fe8caecf6
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/linux/dwc_otg_plat.h
@@ -0,0 +1,304 @@
+/* ==========================================================================
+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/platform/dwc_otg_plat.h $
+ * $Revision: #1 $
+ * $Date: 2005/07/07 $
+ * $Change: 510301 $
+ *
+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
+ * otherwise expressly agreed to in writing between Synopsys and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product under
+ * any End User Software License Agreement or Agreement for Licensed Product
+ * with Synopsys or any supplement thereto. You are permitted to use and
+ * redistribute this Software in source and binary forms, with or without
+ * modification, provided that redistributions of source code must retain this
+ * notice. You may not view, use, disclose, copy or distribute this file or
+ * any information contained herein except pursuant to this license grant from
+ * Synopsys. If you do not agree with this notice, including the disclaimer
+ * below, then you are not authorized to use the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ * ========================================================================== */
+
+#if !defined(__DWC_OTG_PLAT_H__)
+#define __DWC_OTG_PLAT_H__
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+/**
+ * @file
+ *
+ * This file contains the Platform Specific constants, interfaces
+ * (functions and macros) for Linux.
+ *
+ */
+#if !defined(CONFIG_4xx)
+#error "The contents of this file are AMCC 44x processor specific!!!"
+#endif
+
+#if defined(CONFIG_405EX) || defined(CONFIG_460EX) || \
+ defined(CONFIG_APM82181)
+#define CONFIG_DWC_OTG_REG_LE
+#endif
+
+#if defined(CONFIG_405EZ)
+#define CONFIG_DWC_OTG_FIFO_LE
+#endif
+
+#define SZ_256K 0x00040000
+/**
+ * Reads the content of a register.
+ *
+ * @param _reg address of register to read.
+ * @return contents of the register.
+ *
+ * Usage:<br>
+ * <code>uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl);</code>
+ */
+static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *_reg)
+{
+	/* Register endianness is SoC-dependent; selected by
+	 * CONFIG_DWC_OTG_REG_LE above. */
+#ifdef CONFIG_DWC_OTG_REG_LE
+	return in_le32(_reg);
+#else
+	return in_be32(_reg);
+#endif
+}
+
+/**
+ * Writes a register with a 32 bit value.
+ *
+ * @param _reg address of register to write.
+ * @param _value value to write to _reg.
+ *
+ * Usage:<br>
+ * <code>dwc_write_reg32(&dev_regs->dctl, 0); </code>
+ */
+static __inline__ void dwc_write_reg32( volatile uint32_t *_reg, const uint32_t _value)
+{
+	/* Register endianness is SoC-dependent; selected by
+	 * CONFIG_DWC_OTG_REG_LE above. */
+#ifdef CONFIG_DWC_OTG_REG_LE
+	out_le32(_reg, _value);
+#else
+	out_be32(_reg, _value);
+#endif
+}
+
+/**
+ * This function modifies bit values in a register.  Using the
+ * algorithm: (reg_contents & ~clear_mask) | set_mask.  The
+ * read-modify-write is NOT atomic; callers must provide their own
+ * locking if needed.
+ *
+ * @param _reg address of register to modify.
+ * @param _clear_mask bit mask to be cleared.
+ * @param _set_mask bit mask to be set.
+ *
+ * Usage:<br>
+ * <code> // Clear the SOF Interrupt Mask bit and <br>
+ * // set the OTG Interrupt mask bit, leaving all others as they were.
+ *	dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);</code>
+ */
+static __inline__
+ void dwc_modify_reg32( volatile uint32_t *_reg, const uint32_t _clear_mask, const uint32_t _set_mask)
+{
+#ifdef CONFIG_DWC_OTG_REG_LE
+	out_le32( _reg, (in_le32(_reg) & ~_clear_mask) | _set_mask );
+#else
+	out_be32( _reg, (in_be32(_reg) & ~_clear_mask) | _set_mask );
+#endif
+}
+
+/**
+ * Writes a 32-bit value to a data FIFO location.  FIFO endianness is
+ * selected independently of register endianness (CONFIG_DWC_OTG_FIFO_LE).
+ *
+ * @param _reg FIFO address to write.
+ * @param _value value to write.
+ */
+static __inline__ void dwc_write_datafifo32( volatile uint32_t *_reg, const uint32_t _value)
+{
+#ifdef CONFIG_DWC_OTG_FIFO_LE
+	out_le32(_reg, _value);
+#else
+	out_be32(_reg, _value);
+#endif
+}
+
+/**
+ * Reads a 32-bit value from a data FIFO location.  FIFO endianness is
+ * selected independently of register endianness (CONFIG_DWC_OTG_FIFO_LE).
+ *
+ * @param _reg FIFO address to read.
+ * @return value read from the FIFO.
+ */
+static __inline__ uint32_t dwc_read_datafifo32( volatile uint32_t *_reg)
+{
+#ifdef CONFIG_DWC_OTG_FIFO_LE
+	return in_le32(_reg);
+#else
+	return in_be32(_reg);
+#endif
+}
+
+
+/**
+ * Wrapper for the OS micro-second delay function, so the core code
+ * carries no direct Linux dependency.
+ * @param[in] _usecs Microseconds of delay
+ */
+static __inline__ void UDELAY( const uint32_t _usecs )
+{
+	udelay( _usecs );
+}
+
+/**
+ * Wrapper for the OS milli-second delay function, so the core code
+ * carries no direct Linux dependency.
+ * @param[in] _msecs milliseconds of delay
+ */
+static __inline__ void MDELAY( const uint32_t _msecs )
+{
+	mdelay( _msecs );
+}
+
+/**
+ * Wrapper for the Linux spin_lock. On the ARM (Integrator)
+ * spin_lock() is a nop.
+ *
+ * @param _lock Pointer to the spinlock.
+ */
+static __inline__ void SPIN_LOCK( spinlock_t *_lock )
+{
+	spin_lock(_lock);
+}
+
+/**
+ * Wrapper for the Linux spin_unlock. On the ARM (Integrator)
+ * spin_unlock() is a nop.
+ *
+ * @param _lock Pointer to the spinlock.
+ */
+static __inline__ void SPIN_UNLOCK( spinlock_t *_lock )
+{
+	spin_unlock(_lock);
+}
+
+/**
+ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM
+ * (Integrator) spin_lock() is a nop.
+ *
+ * @param _l Pointer to the spinlock.
+ * @param _f unsigned long for irq flags storage.
+ */
+#define SPIN_LOCK_IRQSAVE( _l, _f ) { \
+ spin_lock_irqsave(_l,_f); \
+ }
+
+/**
+ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM
+ * (Integrator) spin_lock() is a nop.
+ *
+ * @param _l Pointer to the spinlock.
+ * @param _f unsigned long for irq flags storage.
+ */
+#define SPIN_UNLOCK_IRQRESTORE( _l,_f ) {\
+ spin_unlock_irqrestore(_l,_f); \
+ }
+
+
+/*
+ * Debugging support vanishes in non-debug builds.
+ */
+
+
+/**
+ * The Debug Level bit-mask variable.
+ */
+extern uint32_t g_dbg_lvl;
+/**
+ * Set the Debug Level variable.
+ *
+ * @param[in] _new new debug level bit-mask (DBG_* constants below).
+ * @return the previous debug level, so callers can restore it.
+ */
+static inline uint32_t SET_DEBUG_LEVEL( const uint32_t _new )
+{
+	uint32_t old = g_dbg_lvl;
+	g_dbg_lvl = _new;
+	return old;
+}
+
+/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */
+#define DBG_CIL (0x2)
+/** When debug level has the DBG_CILV bit set, display CIL Verbose debug
+ * messages */
+#define DBG_CILV (0x20)
+/** When debug level has the DBG_PCD bit set, display PCD (Device) debug
+ * messages */
+#define DBG_PCD (0x4)
+/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug
+ * messages */
+#define DBG_PCDV (0x40)
+/** When debug level has the DBG_HCD bit set, display Host debug messages */
+#define DBG_HCD (0x8)
+/** When debug level has the DBG_HCDV bit set, display Verbose Host debug
+ * messages */
+#define DBG_HCDV (0x80)
+/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host
+ * mode. */
+#define DBG_HCD_URB (0x800)
+
+/** When debug level has any bit set, display debug messages */
+#define DBG_ANY (0xFF)
+
+/** All debug messages off */
+#define DBG_OFF 0
+
+/** Prefix string for DWC_DEBUG print macros. */
+#define USB_DWC "dwc_otg: "
+
+/**
+ * Print a debug message when the Global debug level variable contains
+ * the bit defined in <code>lvl</code>.
+ *
+ * @param[in] lvl - Debug level, use one of the DBG_ constants above.
+ * @param[in] x - like printf
+ *
+ * Example:<p>
+ * <code>
+ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr);
+ * </code>
+ * <br>
+ * results in:<br>
+ * <code>
+ * usb-DWC_otg: dwc_otg_cil_init(ca867000)
+ * </code>
+ */
+#ifdef DEBUG
+# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_ERR USB_DWC x ); }while(0)
+# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x )
+
+# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl)
+
+#else
+
+# define DWC_DEBUGPL(lvl, x...) do{}while(0)
+# define DWC_DEBUGP(x...)
+
+# define CHK_DEBUG_LEVEL(level) (0)
+
+#endif /*DEBUG*/
+
+/**
+ * Print an Error message.
+ */
+#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x )
+/**
+ * Print a Warning message.
+ */
+#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x )
+/**
+ * Print a notice (normal but significant message).
+ */
+#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x )
+/**
+ * Basic message printing.
+ */
+#define DWC_PRINT(x...) printk( KERN_INFO USB_DWC x )
+
+#endif
+
diff --git a/drivers/usb/gadget/dwc_otg/ppc4xx_dma.c b/drivers/usb/gadget/dwc_otg/ppc4xx_dma.c
new file mode 100644
index 00000000000..844222d21fc
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/ppc4xx_dma.c
@@ -0,0 +1,735 @@
+/*
+ * IBM PPC4xx DMA engine core library
+ *
+ * Copyright 2000-2004 MontaVista Software Inc.
+ *
+ * Cleaned up and converted to new DCR access
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Original code by Armin Kuster <akuster@mvista.com>
+ * and Pete Popov <ppopov@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/dcr.h>
+#include "ppc4xx_dma.h"
+
+ppc_dma_ch_t dma_channels[MAX_PPC4xx_DMA_CHANNELS];
+
+/*
+ * Return the raw contents of the DMA Status Register (DMASR).
+ * Callers decode the per-channel CS/TS/error bits themselves.
+ */
+int
+ppc4xx_get_dma_status(void)
+{
+	return (mfdcr(DCRN_DMASR));
+}
+
+/*
+ * Program the source address registers of channel @dmanr.
+ * On 64-bit capable controllers (PPC4xx_DMA_64BIT) the high word is
+ * written to DMASAHn first, then the low 32 bits to DMASAn.
+ * Each channel's register bank is 8 DCRs apart, hence dmanr*8.
+ */
+void
+ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr)
+{
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("set_src_addr: bad channel: %d\n", dmanr);
+		return;
+	}
+
+#ifdef PPC4xx_DMA_64BIT
+	mtdcr(DCRN_DMASAH0 + dmanr*8, src_addr >> 32);
+#endif
+	mtdcr(DCRN_DMASA0 + dmanr*8, (u32)src_addr);
+}
+
+/*
+ * Program the destination address registers of channel @dmanr.
+ * Mirror of ppc4xx_set_src_addr() using the DMADAHn/DMADAn registers.
+ */
+void
+ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr)
+{
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("set_dst_addr: bad channel: %d\n", dmanr);
+		return;
+	}
+
+#ifdef PPC4xx_DMA_64BIT
+	mtdcr(DCRN_DMADAH0 + dmanr*8, dst_addr >> 32);
+#endif
+	mtdcr(DCRN_DMADA0 + dmanr*8, (u32)dst_addr);
+}
+
+/*
+ * Arm and start a previously configured DMA channel.
+ *
+ * For peripheral transfers (DMA_MODE_READ/DMA_MODE_WRITE) the memory
+ * address saved by ppc4xx_set_dma_addr() is programmed here; for
+ * memory-to-memory modes the addresses must already have been set via
+ * ppc4xx_set_dma_addr2().  Marks the channel in_use on success.
+ */
+void
+ppc4xx_enable_dma(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch;
+	unsigned int status_bits[] = { DMA_CS0 | DMA_TS0 | DMA_CH0_ERR,
+				       DMA_CS1 | DMA_TS1 | DMA_CH1_ERR,
+				       DMA_CS2 | DMA_TS2 | DMA_CH2_ERR,
+				       DMA_CS3 | DMA_TS3 | DMA_CH3_ERR};
+
+	/*
+	 * Validate dmanr BEFORE it is used to index dma_channels[] and
+	 * status_bits[].  (The original code read p_dma_ch->in_use first,
+	 * which is an out-of-bounds access for a bad channel number.)
+	 */
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("enable_dma: bad channel: %d\n", dmanr);
+		return;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+	if (p_dma_ch->in_use) {
+		printk("enable_dma: channel %d in use\n", dmanr);
+		return;
+	}
+
+	if (p_dma_ch->mode == DMA_MODE_READ) {
+		/* peripheral to memory */
+		ppc4xx_set_src_addr(dmanr, 0);
+		ppc4xx_set_dst_addr(dmanr, p_dma_ch->addr);
+	} else if (p_dma_ch->mode == DMA_MODE_WRITE) {
+		/* memory to peripheral */
+		ppc4xx_set_src_addr(dmanr, p_dma_ch->addr);
+		ppc4xx_set_dst_addr(dmanr, 0);
+	}
+
+	/* for other xfer modes, the addresses are already set */
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+
+	control &= ~(DMA_TM_MASK | DMA_TD);	/* clear all mode bits */
+	if (p_dma_ch->mode == DMA_MODE_MM) {
+		/* software initiated memory to memory */
+		control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
+#if defined(CONFIG_405EX) || defined(CONFIG_405EXr) || \
+    defined(CONFIG_APM82181)
+		control |= DMA_MODE_MM;
+		if (p_dma_ch->dai) {
+			control |= DMA_DAI;
+		}
+		if (p_dma_ch->sai) {
+			control |= DMA_SAI;
+		}
+
+#endif
+#if defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
+    defined(CONFIG_APM82181)
+		control |= DMA_MODE_MM | DMA_DAI | DMA_SAI;
+#endif
+	}
+
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	/*
+	 * Clear the CS, TS, RI bits for the channel from DMASR.  This
+	 * has been observed to happen correctly only after the mode and
+	 * ETD/DCE bits in DMACRx are set above.  Must do this before
+	 * enabling the channel.
+	 */
+
+	mtdcr(DCRN_DMASR, status_bits[dmanr]);
+
+	/*
+	 * For device-paced transfers, Terminal Count Enable apparently
+	 * must be on, and this must be turned on after the mode, etc.
+	 * bits are cleared above (at least on Redwood-6).
+	 */
+
+	if ((p_dma_ch->mode == DMA_MODE_MM_DEVATDST) ||
+	    (p_dma_ch->mode == DMA_MODE_MM_DEVATSRC))
+		control |= DMA_TCE_ENABLE;
+
+	/*
+	 * Now enable the channel.
+	 */
+
+	control |= (p_dma_ch->mode | DMA_CE_ENABLE);
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	p_dma_ch->in_use = 1;
+}
+
+/*
+ * Stop a running channel by clearing DMA_CE_ENABLE in its control
+ * register, and mark it free again.
+ */
+void
+ppc4xx_disable_dma(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch;
+
+	/*
+	 * Validate dmanr BEFORE indexing dma_channels[]; the original
+	 * code read p_dma_ch->in_use first, which is an out-of-bounds
+	 * access for a bad channel number.
+	 */
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("disable_dma: bad channel: %d\n", dmanr);
+		return;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+	if (!p_dma_ch->in_use) {
+		printk("disable_dma: channel %d not in use\n", dmanr);
+		return;
+	}
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+	control &= ~DMA_CE_ENABLE;
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	p_dma_ch->in_use = 0;
+}
+
+/*
+ * Sets the dma mode for single DMA transfers only.
+ * For scatter/gather transfers, the mode is passed to the
+ * alloc_dma_handle() function as one of the parameters.
+ *
+ * The mode is simply saved and used later. This allows
+ * the driver to call set_dma_mode() and set_dma_addr() in
+ * any order.
+ *
+ * Valid mode values are:
+ *
+ * DMA_MODE_READ peripheral to memory
+ * DMA_MODE_WRITE memory to peripheral
+ * DMA_MODE_MM memory to memory
+ * DMA_MODE_MM_DEVATSRC device-paced memory to memory, device at src
+ * DMA_MODE_MM_DEVATDST device-paced memory to memory, device at dst
+ */
+/*
+ * Save the transfer mode for channel @dmanr; the mode is applied to
+ * the hardware later, in ppc4xx_enable_dma().  See the comment block
+ * above for the list of valid DMA_MODE_* values.
+ *
+ * Returns DMA_STATUS_GOOD, or DMA_STATUS_BAD_CHANNEL for an invalid
+ * channel number.
+ */
+int
+ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode)
+{
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("set_dma_mode: bad channel 0x%x\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch->mode = mode;
+
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Sets the DMA Count register. Note that 'count' is in bytes.
+ * However, the DMA Count register counts the number of "transfers",
+ * where each transfer is equal to the bus width. Thus, count
+ * MUST be a multiple of the bus width.
+ */
+/*
+ * Program the DMA Count register for channel @dmanr.  @count is in
+ * bytes, while the hardware counts "transfers" of the configured bus
+ * width, so the byte count is shifted right by p_dma_ch->shift
+ * (set from the peripheral width in ppc4xx_init_dma_channel()).
+ * @count therefore MUST be a multiple of the bus width.
+ */
+void
+ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count)
+{
+	ppc_dma_ch_t *p_dma_ch;
+
+	/* Bounds check added for consistency with the other accessors;
+	 * previously a bad dmanr read past dma_channels[] and wrote a
+	 * wrong DCR. */
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_set_dma_count: bad channel: %d\n", dmanr);
+		return;
+	}
+
+	p_dma_ch = &dma_channels[dmanr];
+
+#ifdef DEBUG_4xxDMA
+	{
+		int error = 0;
+		switch (p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if (count & 0x1)
+				error = 1;
+			break;
+		case PW_32:
+			if (count & 0x3)
+				error = 1;
+			break;
+		case PW_64:
+			if (count & 0x7)
+				error = 1;
+			break;
+		default:
+			printk("set_dma_count: invalid bus width: 0x%x\n",
+			       p_dma_ch->pwidth);
+			return;
+		}
+		if (error)
+			printk
+			    ("Warning: set_dma_count count 0x%x bus width %d\n",
+			     count, p_dma_ch->pwidth);
+	}
+#endif
+
+	count = count >> p_dma_ch->shift;
+
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), count);
+}
+
+/*
+ * Returns the number of bytes left to be transfered.
+ * After a DMA transfer, this should return zero.
+ * Reading this while a DMA transfer is still in progress will return
+ * unpredictable results.
+ */
+/*
+ * Returns the number of bytes left to be transfered.
+ * After a DMA transfer, this should return zero.
+ * Reading this while a DMA transfer is still in progress will return
+ * unpredictable results.
+ *
+ * Returns DMA_STATUS_BAD_CHANNEL for an invalid channel number;
+ * otherwise the residue, converted from transfer units back to bytes
+ * via the channel's shift value.
+ */
+int
+ppc4xx_get_dma_residue(unsigned int dmanr)
+{
+	unsigned int count;
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_get_dma_residue: bad channel 0x%x\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	count = mfdcr(DCRN_DMACT0 + (dmanr * 0x8));
+	/* On these SoCs the count register also carries interrupt-enable
+	 * and burst control bits; mask them off to keep only the
+	 * terminal count field. */
+#if defined(CONFIG_405EX) || defined(CONFIG_405EXr) || \
+    defined(CONFIG_460EX) || defined(CONFIG_460GT) || \
+    defined(CONFIG_APM82181)
+	count &= DMA_CTC_TC_MASK;
+#endif
+
+	return (count << p_dma_ch->shift);
+}
+
+/*
+ * Sets the DMA address for a memory to peripheral or peripheral
+ * to memory transfer. The address is just saved in the channel
+ * structure for now and used later in enable_dma().
+ */
+/*
+ * Sets the DMA address for a memory to peripheral or peripheral
+ * to memory transfer.  The address is just saved in the channel
+ * structure for now and used later in enable_dma(), which knows
+ * from the transfer mode whether it is the source or destination.
+ */
+void
+ppc4xx_set_dma_addr(unsigned int dmanr, phys_addr_t addr)
+{
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_set_dma_addr: bad channel: %d\n", dmanr);
+		return;
+	}
+
+#ifdef DEBUG_4xxDMA
+	{
+		/* Warn if the address is not aligned to the bus width. */
+		int error = 0;
+		switch (p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if ((unsigned) addr & 0x1)
+				error = 1;
+			break;
+		case PW_32:
+			if ((unsigned) addr & 0x3)
+				error = 1;
+			break;
+		case PW_64:
+			if ((unsigned) addr & 0x7)
+				error = 1;
+			break;
+		default:
+			printk("ppc4xx_set_dma_addr: invalid bus width: 0x%x\n",
+			       p_dma_ch->pwidth);
+			return;
+		}
+		if (error)
+			printk("Warning: ppc4xx_set_dma_addr addr 0x%x bus width %d\n",
+			       addr, p_dma_ch->pwidth);
+	}
+#endif
+
+	/* save dma address and program it later after we know the xfer mode */
+	p_dma_ch->addr = addr;
+}
+
+/*
+ * Sets both DMA addresses for a memory to memory transfer.
+ * For memory to peripheral or peripheral to memory transfers
+ * the function set_dma_addr() should be used instead.
+ */
+/*
+ * Sets both DMA addresses for a memory to memory transfer.
+ * Unlike ppc4xx_set_dma_addr(), the addresses are written to the
+ * hardware immediately rather than cached in the channel structure.
+ * For memory to peripheral or peripheral to memory transfers
+ * the function set_dma_addr() should be used instead.
+ */
+void
+ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
+		     phys_addr_t dst_dma_addr)
+{
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_set_dma_addr2: bad channel: %d\n", dmanr);
+		return;
+	}
+
+#ifdef DEBUG_4xxDMA
+	{
+		/* Warn if either address is not aligned to the bus width. */
+		ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+		int error = 0;
+		switch (p_dma_ch->pwidth) {
+		case PW_8:
+			break;
+		case PW_16:
+			if (((unsigned) src_dma_addr & 0x1) ||
+			    ((unsigned) dst_dma_addr & 0x1)
+			    )
+				error = 1;
+			break;
+		case PW_32:
+			if (((unsigned) src_dma_addr & 0x3) ||
+			    ((unsigned) dst_dma_addr & 0x3)
+			    )
+				error = 1;
+			break;
+		case PW_64:
+			if (((unsigned) src_dma_addr & 0x7) ||
+			    ((unsigned) dst_dma_addr & 0x7)
+			    )
+				error = 1;
+			break;
+		default:
+			printk("ppc4xx_set_dma_addr2: invalid bus width: 0x%x\n",
+			       p_dma_ch->pwidth);
+			return;
+		}
+		if (error)
+			printk
+			    ("Warning: ppc4xx_set_dma_addr2 src 0x%x dst 0x%x bus width %d\n",
+			     src_dma_addr, dst_dma_addr, p_dma_ch->pwidth);
+	}
+#endif
+
+	ppc4xx_set_src_addr(dmanr, src_dma_addr);
+	ppc4xx_set_dst_addr(dmanr, dst_dma_addr);
+}
+
+/*
+ * Enables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be enabled, if
+ * they were previously disabled.
+ */
+/*
+ * Enables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list.  Otherwise, interrupts will not be enabled, if
+ * they were previously disabled.
+ *
+ * Returns DMA_STATUS_GOOD, or DMA_STATUS_BAD_CHANNEL for an invalid
+ * channel number.
+ */
+int
+ppc4xx_enable_dma_interrupt(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_enable_dma_interrupt: bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch->int_enable = 1;
+
+	/* On these SoCs the terminal-count / EOT / error interrupt enables
+	 * live in the count register (CTC) and must be set as well. */
+#if defined(CONFIG_405EX) || defined (CONFIG_405EXr) || \
+    defined(CONFIG_460EX) || defined (CONFIG_460GT) || \
+    defined(CONFIG_APM82181)
+	control = mfdcr(DCRN_DMACT0 + (dmanr * 0x8));
+	control |= DMA_CTC_TCIE | DMA_CTC_ETIE | DMA_CTC_EIE;
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), control);
+#endif
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+	control |= DMA_CIE_ENABLE;	/* Channel Interrupt Enable */
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ */
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gatter transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list.  Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ *
+ * NOTE(review): unlike ppc4xx_enable_dma_interrupt(), this does not
+ * clear the TCIE/ETIE/EIE bits in the count register on the 405EX/
+ * 460EX/APM82181 variants -- confirm whether that is intentional.
+ *
+ * Returns DMA_STATUS_GOOD, or DMA_STATUS_BAD_CHANNEL for an invalid
+ * channel number.
+ */
+int
+ppc4xx_disable_dma_interrupt(unsigned int dmanr)
+{
+	unsigned int control;
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_disable_dma_interrupt: bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	p_dma_ch->int_enable = 0;
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+	control &= ~DMA_CIE_ENABLE;	/* Channel Interrupt Enable */
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Configures a DMA channel, including the peripheral bus width, if a
+ * peripheral is attached to the channel, the polarity of the DMAReq and
+ * DMAAck signals, etc. This information should really be setup by the boot
+ * code, since most likely the configuration won't change dynamically.
+ * If the kernel has to call this function, it's recommended that it's
+ * called from platform specific init code. The driver should not need to
+ * call this function.
+ */
+/*
+ * Configures a DMA channel, including the peripheral bus width, if a
+ * peripheral is attached to the channel, the polarity of the DMAReq and
+ * DMAAck signals, etc.  This information should really be setup by the boot
+ * code, since most likely the configuration won't change dynamically.
+ * If the kernel has to call this function, it's recommended that it's
+ * called from platform specific init code.  The driver should not need to
+ * call this function.
+ */
+int
+ppc4xx_init_dma_channel(unsigned int dmanr, ppc_dma_ch_t * p_init)
+{
+	unsigned int polarity;
+	uint32_t control = 0;
+	ppc_dma_ch_t *p_dma_ch = &dma_channels[dmanr];
+
+	/* NOTE(review): DMA_MODE_READ/DMA_MODE_WRITE are assigned here,
+	 * so they are writable globals declared elsewhere (not macros on
+	 * this platform) -- every init call resets them. */
+	DMA_MODE_READ = (unsigned long) DMA_TD;	/* Peripheral to Memory */
+	DMA_MODE_WRITE = 0;	/* Memory to Peripheral */
+
+	if (!p_init) {
+		printk("ppc4xx_init_dma_channel: NULL p_init\n");
+		return DMA_STATUS_NULL_POINTER;
+	}
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_init_dma_channel: bad channel %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	/* Not every variant has a polarity register. */
+#if DCRN_POL > 0
+	polarity = mfdcr(DCRN_POL);
+#else
+	polarity = 0;
+#endif
+
+	/* Setup the control register based on the values passed to
+	 * us in p_init.  Then, over-write the control register with this
+	 * new value.
+	 */
+	control |= SET_DMA_CONTROL;
+
+	/* clear all polarity signals and then "or" in new signal levels */
+	polarity &= ~GET_DMA_POLARITY(dmanr);
+	polarity |= p_init->polarity;
+#if DCRN_POL > 0
+	mtdcr(DCRN_POL, polarity);
+#endif
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	/* save these values in our dma channel structure */
+	memcpy(p_dma_ch, p_init, sizeof (ppc_dma_ch_t));
+
+	/*
+	 * The peripheral width values written in the control register are:
+	 *   PW_8                 0
+	 *   PW_16                1
+	 *   PW_32                2
+	 *   PW_64                3
+	 *
+	 * Since the DMA count register takes the number of "transfers",
+	 * we need to divide the count sent to us in certain
+	 * functions by the appropriate number.  It so happens that our
+	 * right shift value is equal to the peripheral width value.
+	 */
+	p_dma_ch->shift = p_init->pwidth;
+
+	/*
+	 * Save the control word for easy access.
+	 */
+	p_dma_ch->control = control;
+	mtdcr(DCRN_DMASR, 0xffffffff);	/* clear status register */
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * This function returns the channel configuration.
+ */
+/*
+ * This function returns the channel configuration: the cached channel
+ * structure is copied into @p_dma_ch, then the fields that mirror
+ * hardware state are refreshed from the polarity and control registers.
+ */
+int
+ppc4xx_get_channel_config(unsigned int dmanr, ppc_dma_ch_t * p_dma_ch)
+{
+	unsigned int polarity;
+	unsigned int control;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_get_channel_config: bad channel %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	memcpy(p_dma_ch, &dma_channels[dmanr], sizeof (ppc_dma_ch_t));
+
+	/* Not every variant has a polarity register. */
+#if DCRN_POL > 0
+	polarity = mfdcr(DCRN_POL);
+#else
+	polarity = 0;
+#endif
+
+	p_dma_ch->polarity = polarity & GET_DMA_POLARITY(dmanr);
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+
+	/* Decode the live control register into the structure fields. */
+	p_dma_ch->cp = GET_DMA_PRIORITY(control);
+	p_dma_ch->pwidth = GET_DMA_PW(control);
+	p_dma_ch->psc = GET_DMA_PSC(control);
+	p_dma_ch->pwc = GET_DMA_PWC(control);
+	p_dma_ch->phc = GET_DMA_PHC(control);
+	p_dma_ch->ce = GET_DMA_CE_ENABLE(control);
+	p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
+	p_dma_ch->shift = GET_DMA_PW(control);
+
+#ifdef CONFIG_PPC4xx_EDMA
+	p_dma_ch->pf = GET_DMA_PREFETCH(control);
+#else
+	p_dma_ch->ch_enable = GET_DMA_CH(control);
+	p_dma_ch->ece_enable = GET_DMA_ECE(control);
+	p_dma_ch->tcd_disable = GET_DMA_TCD(control);
+#endif
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Sets the priority for the DMA channel dmanr.
+ * Since this is setup by the hardware init function, this function
+ * can be used to dynamically change the priority of a channel.
+ *
+ * Acceptable priorities:
+ *
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ *
+ */
+/*
+ * Set the channel priority bits in the control register.
+ *
+ * Acceptable priorities: PRIORITY_LOW, PRIORITY_MID_LOW,
+ * PRIORITY_MID_HIGH, PRIORITY_HIGH.
+ *
+ * NOTE(review): an out-of-range priority is only warned about and is
+ * still OR'ed into the register below -- confirm whether rejecting it
+ * (returning an error) would be safer for callers.
+ */
+int
+ppc4xx_set_channel_priority(unsigned int dmanr, unsigned int priority)
+{
+	unsigned int control;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_set_channel_priority: bad channel %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	if ((priority != PRIORITY_LOW) &&
+	    (priority != PRIORITY_MID_LOW) &&
+	    (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) {
+		printk("ppc4xx_set_channel_priority: bad priority: 0x%x\n", priority);
+	}
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+	control |= SET_DMA_PRIORITY(priority);
+	mtdcr(DCRN_DMACR0 + (dmanr * 0x8), control);
+
+	return DMA_STATUS_GOOD;
+}
+
+/*
+ * Returns the width of the peripheral attached to this channel. This assumes
+ * that someone who knows the hardware configuration, boot code or some other
+ * init code, already set the width.
+ *
+ * The return value is one of:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ *
+ * The function returns 0 on error.
+ */
+/*
+ * Read back the peripheral width field (PW_8..PW_64) from the channel
+ * control register.
+ *
+ * NOTE(review): on a bad channel this returns DMA_STATUS_BAD_CHANNEL,
+ * which equals 1 and is indistinguishable from a valid PW_16 result
+ * (and the header comment above claims 0 is the error value, which is
+ * PW_8).  Callers cannot reliably detect the error case -- flagged
+ * rather than changed, since altering the return could break callers.
+ */
+unsigned int
+ppc4xx_get_peripheral_width(unsigned int dmanr)
+{
+	unsigned int control;
+
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk("ppc4xx_get_peripheral_width: bad channel %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+
+	control = mfdcr(DCRN_DMACR0 + (dmanr * 0x8));
+
+	return (GET_DMA_PW(control));
+}
+
+/*
+ * Clears the channel status bits
+ */
+/*
+ * Clears the channel status bits (write-one-to-clear in DMASR).
+ * The channel-0 CS/TS/error bits are each one bit above the
+ * corresponding channel-N bits, so shifting the channel-0 mask right
+ * by dmanr selects the bits for that channel.
+ */
+int
+ppc4xx_clr_dma_status(unsigned int dmanr)
+{
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_ERR "ppc4xx_clr_dma_status: bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	mtdcr(DCRN_DMASR, ((u32)DMA_CH0_ERR | (u32)DMA_CS0 | (u32)DMA_TS0) >> dmanr);
+	return DMA_STATUS_GOOD;
+}
+
+#ifdef CONFIG_PPC4xx_EDMA
+/*
+ * Enables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+/*
+ * Enables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int
+ppc4xx_enable_burst(unsigned int dmanr)
+{
+	unsigned int ctc;
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_ERR "ppc4xx_enable_burst: bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	/* read-modify-write the count/control register, setting BTEN */
+	ctc = mfdcr(DCRN_DMACT0 + (dmanr * 0x8)) | DMA_CTC_BTEN;
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), ctc);
+	return DMA_STATUS_GOOD;
+}
+/*
+ * Disables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+/*
+ * Disables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int
+ppc4xx_disable_burst(unsigned int dmanr)
+{
+	unsigned int ctc;
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_ERR "ppc4xx_disable_burst: bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	/* read-modify-write the count/control register, clearing BTEN */
+	ctc = mfdcr(DCRN_DMACT0 + (dmanr * 0x8)) &~ DMA_CTC_BTEN;
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), ctc);
+	return DMA_STATUS_GOOD;
+}
+/*
+ * Sets the burst size (number of peripheral widths) for the channel
+ * (BSIZ bits in the control/count register))
+ * must be one of:
+ * DMA_CTC_BSIZ_2
+ * DMA_CTC_BSIZ_4
+ * DMA_CTC_BSIZ_8
+ * DMA_CTC_BSIZ_16
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+/*
+ * Sets the burst size (number of peripheral widths) for the channel
+ * (BSIZ bits in the control/count register))
+ * must be one of:
+ *    DMA_CTC_BSIZ_2
+ *    DMA_CTC_BSIZ_4
+ *    DMA_CTC_BSIZ_8
+ *    DMA_CTC_BSIZ_16
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int
+ppc4xx_set_burst_size(unsigned int dmanr, unsigned int bsize)
+{
+	unsigned int ctc;
+	if (dmanr >= MAX_PPC4xx_DMA_CHANNELS) {
+		printk(KERN_ERR "ppc4xx_set_burst_size: bad channel: %d\n", dmanr);
+		return DMA_STATUS_BAD_CHANNEL;
+	}
+	/* clear the old BSIZ field, then insert the (masked) new size */
+	ctc = mfdcr(DCRN_DMACT0 + (dmanr * 0x8)) &~ DMA_CTC_BSIZ_MSK;
+	ctc |= (bsize & DMA_CTC_BSIZ_MSK);
+	mtdcr(DCRN_DMACT0 + (dmanr * 0x8), ctc);
+	return DMA_STATUS_GOOD;
+}
+
+EXPORT_SYMBOL(ppc4xx_enable_burst);
+EXPORT_SYMBOL(ppc4xx_disable_burst);
+EXPORT_SYMBOL(ppc4xx_set_burst_size);
+#endif /* CONFIG_PPC4xx_EDMA */
+
+EXPORT_SYMBOL(ppc4xx_init_dma_channel);
+EXPORT_SYMBOL(ppc4xx_get_channel_config);
+EXPORT_SYMBOL(ppc4xx_set_channel_priority);
+EXPORT_SYMBOL(ppc4xx_get_peripheral_width);
+EXPORT_SYMBOL(dma_channels);
+EXPORT_SYMBOL(ppc4xx_set_src_addr);
+EXPORT_SYMBOL(ppc4xx_set_dst_addr);
+EXPORT_SYMBOL(ppc4xx_set_dma_addr);
+EXPORT_SYMBOL(ppc4xx_set_dma_addr2);
+EXPORT_SYMBOL(ppc4xx_enable_dma);
+EXPORT_SYMBOL(ppc4xx_disable_dma);
+EXPORT_SYMBOL(ppc4xx_set_dma_mode);
+EXPORT_SYMBOL(ppc4xx_set_dma_count);
+EXPORT_SYMBOL(ppc4xx_get_dma_residue);
+EXPORT_SYMBOL(ppc4xx_enable_dma_interrupt);
+EXPORT_SYMBOL(ppc4xx_disable_dma_interrupt);
+EXPORT_SYMBOL(ppc4xx_get_dma_status);
+EXPORT_SYMBOL(ppc4xx_clr_dma_status);
+
diff --git a/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h b/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h
new file mode 100644
index 00000000000..c4a4f2806f6
--- /dev/null
+++ b/drivers/usb/gadget/dwc_otg/ppc4xx_dma.h
@@ -0,0 +1,620 @@
+/*
+ * include/asm-ppc/ppc4xx_dma.h
+ *
+ * IBM PPC4xx DMA engine library
+ *
+ * Copyright 2000-2004 MontaVista Software Inc.
+ *
+ * Cleaned up a bit more, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Original code by Armin Kuster <akuster@mvista.com>
+ * and Pete Popov <ppopov@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifdef __KERNEL__
+#ifndef __ASMPPC_PPC4xx_DMA_H
+#define __ASMPPC_PPC4xx_DMA_H
+
+#include <linux/types.h>
+#include <asm/mmu.h>
+
+#ifdef CONFIG_405EX
+#define DCRN_DMA0_BASE 0x100
+#define DCRN_DMASR_BASE 0x120
+#endif
+
+/*
+ * `#ifdef` accepts only a single identifier; the original
+ * `#ifdef CONFIG_460EX || defined(CONFIG_APM82181)` tested just
+ * CONFIG_460EX (the rest is stray tokens), so APM82181-only builds
+ * never got DCRN_DMA0_BASE and hit the #error below.  Use #if/defined,
+ * matching the idiom used elsewhere in this driver.
+ */
+#if defined(CONFIG_460EX) || defined(CONFIG_APM82181)
+#define DCRN_DMA0_BASE		0x200
+#define DCRN_DMASR_BASE		0x220
+#endif
+
+#ifndef DCRN_DMA0_BASE
+#error DMA register not defined for this PPC4xx variant!
+#endif
+
+#define DCRN_DMACR0 (DCRN_DMA0_BASE + 0x0) /* DMA Channel Control 0 */
+#define DCRN_DMACT0 (DCRN_DMA0_BASE + 0x1) /* DMA Count 0 */
+#define DCRN_DMASAH0 (DCRN_DMA0_BASE + 0x2) /* DMA Src Addr High 0 */
+#define DCRN_DMASA0 (DCRN_DMA0_BASE + 0x3) /* DMA Src Addr Low 0 */
+#define DCRN_DMADAH0 (DCRN_DMA0_BASE + 0x4) /* DMA Dest Addr High 0 */
+#define DCRN_DMADA0 (DCRN_DMA0_BASE + 0x5) /* DMA Dest Addr Low 0 */
+#define DCRN_ASGH0 (DCRN_DMA0_BASE + 0x6) /* DMA SG Desc Addr High 0 */
+#define DCRN_ASG0 (DCRN_DMA0_BASE + 0x7) /* DMA SG Desc Addr Low 0 */
+
+#define DCRN_DMASR (DCRN_DMASR_BASE + 0x0) /* DMA Status Register */
+#define DCRN_ASGC (DCRN_DMASR_BASE + 0x3) /* DMA Scatter/Gather Command */
+#define DCRN_SLP (DCRN_DMASR_BASE + 0x5) /* DMA Sleep Register */
+#define DCRN_POL (DCRN_DMASR_BASE + 0x6) /* DMA Polarity Register */
+
+#undef DEBUG_4xxDMA
+
+#define MAX_PPC4xx_DMA_CHANNELS 4
+
+#define DMA_CH0 0
+#define DMA_CH1 1
+#define DMA_CH2 2
+#define DMA_CH3 3
+
+/*
+ * Function return status codes
+ * These values are used to indicate whether or not the function
+ * call was successful, or a bad/invalid parameter was passed.
+ */
+#define DMA_STATUS_GOOD 0
+#define DMA_STATUS_BAD_CHANNEL 1
+#define DMA_STATUS_BAD_HANDLE 2
+#define DMA_STATUS_BAD_MODE 3
+#define DMA_STATUS_NULL_POINTER 4
+#define DMA_STATUS_OUT_OF_MEMORY 5
+#define DMA_STATUS_SGL_LIST_EMPTY 6
+#define DMA_STATUS_GENERAL_ERROR 7
+#define DMA_STATUS_CHANNEL_NOTFREE 8
+
+#define DMA_CHANNEL_BUSY 0x80000000
+
+/*
+ * These indicate status as returned from the DMA Status Register.
+ */
+#define DMA_STATUS_NO_ERROR 0
+#define DMA_STATUS_CS 1 /* Count Status */
+#define DMA_STATUS_TS 2 /* Transfer Status */
+#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
+#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
+
+
+/*
+ * DMA Channel Control Registers
+ */
+
+/* The 44x devices have 64bit DMA controllers, where the 405EX/r have 32bit */
+#if defined(CONFIG_44x)
+#define PPC4xx_DMA_64BIT
+#endif
+
+/* The 44x and 405EX/r come up big-endian with last bit reserved */
+#if defined(CONFIG_44x) || defined(CONFIG_405EX) || defined(CONFIG_405EXr)
+#define DMA_CR_OFFSET 1
+#else
+#define DMA_CR_OFFSET 0
+#endif
+
+#define DMA_CE_ENABLE (1<<31) /* DMA Channel Enable */
+#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31)
+#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31)
+
+#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
+#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
+#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
+
+#define DMA_TD (1<<29)
+#define SET_DMA_TD(x) (((x)&0x1)<<29)
+#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
+
+#define DMA_PL (1<<28) /* Peripheral Location */
+#define SET_DMA_PL(x) (((x)&0x1)<<28)
+#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
+
+#define EXTERNAL_PERIPHERAL 0
+#define INTERNAL_PERIPHERAL 1
+
+#define SET_DMA_PW(x) (((x)&0x3)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */
+#define DMA_PW_MASK SET_DMA_PW(3)
+#define PW_8 0
+#define PW_16 1
+#define PW_32 2
+#define PW_64 3
+/* FIXME: Add PW_128 support for 440GP DMA block */
+#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET))
+
+#define DMA_DAI (1<<(25-DMA_CR_OFFSET)) /* Destination Address Increment */
+#define SET_DMA_DAI(x) (((x)&0x1)<<(25-DMA_CR_OFFSET))
+
+#define DMA_SAI (1<<(24-DMA_CR_OFFSET)) /* Source Address Increment */
+#define SET_DMA_SAI(x) (((x)&0x1)<<(24-DMA_CR_OFFSET))
+
+#define DMA_BEN (1<<(23-DMA_CR_OFFSET)) /* Buffer Enable */
+#define SET_DMA_BEN(x) (((x)&0x1)<<(23-DMA_CR_OFFSET))
+
+#define SET_DMA_TM(x) (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */
+#define DMA_TM_MASK SET_DMA_TM(3)
+#define TM_PERIPHERAL 0 /* Peripheral */
+#define TM_RESERVED 1 /* Reserved */
+#define TM_S_MM 2 /* Memory to Memory */
+#define TM_D_MM 3 /* Device Paced Memory to Memory */
+#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET))
+
+#define SET_DMA_PSC(x) (((x)&0x3)<<(19-DMA_CR_OFFSET)) /* Peripheral Setup Cycles */
+#define DMA_PSC_MASK SET_DMA_PSC(3)
+#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET))
+
+#define SET_DMA_PWC(x) (((x)&0x3F)<<(13-DMA_CR_OFFSET)) /* Peripheral Wait Cycles */
+#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
+#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET))
+
+#define SET_DMA_PHC(x) (((x)&0x7)<<(10-DMA_CR_OFFSET)) /* Peripheral Hold Cycles */
+#define DMA_PHC_MASK SET_DMA_PHC(0x7)
+#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET))
+
+#define DMA_ETD_OUTPUT (1<<(9-DMA_CR_OFFSET)) /* EOT pin is a TC output */
+#define SET_DMA_ETD(x) (((x)&0x1)<<(9-DMA_CR_OFFSET))
+
+#define DMA_TCE_ENABLE (1<<(8-DMA_CR_OFFSET))
+#define SET_DMA_TCE(x) (((x)&0x1)<<(8-DMA_CR_OFFSET))
+
+#define DMA_DEC (1<<(2)) /* Address Decrement */
+#define SET_DMA_DEC(x) (((x)&0x1)<<2)
+#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
+
+
+/*
+ * Transfer Modes
+ * These modes are defined in a way that makes it possible to
+ * simply "or" in the value in the control register.
+ */
+
+#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
+
+ /* Device-paced memory to memory, */
+ /* device is at source address */
+#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
+
+ /* Device-paced memory to memory, */
+ /* device is at destination address */
+#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
+
+/* 405gp/440gp */
+#define SET_DMA_PREFETCH(x) (((x)&0x3)<<(4-DMA_CR_OFFSET)) /* Memory Read Prefetch */
+#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
+#define PREFETCH_1 0 /* Prefetch 1 Double Word */
+#define PREFETCH_2 1
+#define PREFETCH_4 2
+#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET))
+
+#define DMA_PCE (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */
+#define SET_DMA_PCE(x) (((x)&0x1)<<(3-DMA_CR_OFFSET))
+#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET))
+
+/* stb3x */
+
+#define DMA_ECE_ENABLE (1<<5)
+#define SET_DMA_ECE(x) (((x)&0x1)<<5)
+#define GET_DMA_ECE(x) (((x)&DMA_ECE_ENABLE)>>5)
+
+#define DMA_TCD_DISABLE (1<<4)
+#define SET_DMA_TCD(x) (((x)&0x1)<<4)
+#define GET_DMA_TCD(x) (((x)&DMA_TCD_DISABLE)>>4)
+
+typedef uint32_t sgl_handle_t;
+
+#ifdef CONFIG_PPC4xx_EDMA
+
+#define SGL_LIST_SIZE 4096
+#define DMA_PPC4xx_SIZE SGL_LIST_SIZE
+
+#define SET_DMA_PRIORITY(x) (((x)&0x3)<<(6-DMA_CR_OFFSET)) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
+#define PRIORITY_LOW 0
+#define PRIORITY_MID_LOW 1
+#define PRIORITY_MID_HIGH 2
+#define PRIORITY_HIGH 3
+#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET))
+
+/*
+ * DMA Polarity Configuration Register
+ */
+#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3)))
+#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3)))
+#define EOT_ActiveLow(chan) (1<<(29-(chan*3))) /* End of Transfer */
+
+/*
+ * DMA Sleep Mode Register
+ */
+#define SLEEP_MODE_ENABLE (1<<21)
+
+/*
+ * DMA Status Register
+ */
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<18)
+#define DMA_IN_DMA_REQ2 (1<<17)
+#define DMA_IN_DMA_REQ3 (1<<16)
+
+#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<14)
+#define DMA_EXT_DMA_REQ2 (1<<13)
+#define DMA_EXT_DMA_REQ3 (1<<12)
+
+#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<10)
+#define DMA_CH2_BUSY (1<<9)
+#define DMA_CH3_BUSY (1<<8)
+
+#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
+#define DMA_SG1 (1<<6)
+#define DMA_SG2 (1<<5)
+#define DMA_SG3 (1<<4)
+
+/* DMA Channel Count Register */
+#define DMA_CTC_TCIE (1<<29) /* Terminal Count Interrupt Enable */
+#define DMA_CTC_ETIE (1<<28) /* EOT Interupt Enable */
+#define DMA_CTC_EIE (1<<27) /* Error Interrupt Enable */
+#define DMA_CTC_BTEN (1<<23) /* Burst Enable/Disable bit */
+#define DMA_CTC_BSIZ_MSK (3<<21) /* Mask of the Burst size bits */
+#define DMA_CTC_BSIZ_2 (0)
+#define DMA_CTC_BSIZ_4 (1<<21)
+#define DMA_CTC_BSIZ_8 (2<<21)
+#define DMA_CTC_BSIZ_16 (3<<21)
+#define DMA_CTC_TC_MASK 0xFFFFF
+
+/*
+ * DMA SG Command Register
+ */
+#define SSG_ENABLE(chan) (1<<(31-chan)) /* Start Scatter Gather */
+#define SSG_MASK_ENABLE(chan) (1<<(15-chan)) /* Enable writing to SSG0 bit */
+
+/*
+ * DMA Scatter/Gather Descriptor Bit fields
+ */
+#define SG_LINK (1<<31) /* Link */
+#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
+#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
+#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
+#define SG_COUNT_MASK 0xFFFF /* Count Field */
+
+#define SET_DMA_CONTROL \
+ (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
+ SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
+ SET_DMA_PL(p_init->pl) | /* peripheral location */ \
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
+ SET_DMA_PREFETCH(p_init->pf) /* read prefetch */)
+
+#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
+
+#elif defined(CONFIG_STB03xxx) /* stb03xxx */
+
+#define DMA_PPC4xx_SIZE 4096
+
+/*
+ * DMA Status Register
+ */
+
+#define SET_DMA_PRIORITY(x) (((x)&0x00800001)) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK 0x00800001
+#define PRIORITY_LOW 0x00000000
+#define PRIORITY_MID_LOW 0x00000001
+#define PRIORITY_MID_HIGH 0x00800000
+#define PRIORITY_HIGH 0x00800001
+#define GET_DMA_PRIORITY(x) (((((x)&DMA_PRIORITY_MASK) &0x00800000) >> 22 ) | (((x)&DMA_PRIORITY_MASK) &0x00000001))
+
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_CT0 (1<<19) /* Chained transfere */
+
+#define DMA_IN_DMA_REQ0 (1<<18) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<17)
+#define DMA_IN_DMA_REQ2 (1<<16)
+#define DMA_IN_DMA_REQ3 (1<<15)
+
+#define DMA_EXT_DMA_REQ0 (1<<14) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<13)
+#define DMA_EXT_DMA_REQ2 (1<<12)
+#define DMA_EXT_DMA_REQ3 (1<<11)
+
+#define DMA_CH0_BUSY (1<<10) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<9)
+#define DMA_CH2_BUSY (1<<8)
+#define DMA_CH3_BUSY (1<<7)
+
+#define DMA_CT1 (1<<6) /* Chained transfer */
+#define DMA_CT2 (1<<5)
+#define DMA_CT3 (1<<4)
+
+#define DMA_CH_ENABLE (1<<7)
+#define SET_DMA_CH(x) (((x)&0x1)<<7)
+#define GET_DMA_CH(x) (((x)&DMA_CH_ENABLE)>>7)
+
+/* STBx25xxx dma unique */
+/* enable device port on a dma channel
+ * example ext 0 on dma 1
+ */
+
+#define SSP0_RECV 15
+#define SSP0_XMIT 14
+#define EXT_DMA_0 12
+#define SC1_XMIT 11
+#define SC1_RECV 10
+#define EXT_DMA_2 9
+#define EXT_DMA_3 8
+#define SERIAL2_XMIT 7
+#define SERIAL2_RECV 6
+#define SC0_XMIT 5
+#define SC0_RECV 4
+#define SERIAL1_XMIT 3
+#define SERIAL1_RECV 2
+#define SERIAL0_XMIT 1
+#define SERIAL0_RECV 0
+
+#define DMA_CHAN_0 1
+#define DMA_CHAN_1 2
+#define DMA_CHAN_2 3
+#define DMA_CHAN_3 4
+
+/* end STBx25xx */
+
+/*
+ * Bit 30 must be one for Redwoods, otherwise transfers may receive errors.
+ */
+#define DMA_CR_MB0 0x2
+
+#define SET_DMA_CONTROL \
+ (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
+ SET_DMA_PL(p_init->pl) | /* peripheral location */ \
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
+ SET_DMA_TCD(p_init->tcd_disable) | /* TC chain mode disable */ \
+ SET_DMA_ECE(p_init->ece_enable) | /* ECE chain mode enable */ \
+ SET_DMA_CH(p_init->ch_enable) | /* Chain enable */ \
+ DMA_CR_MB0 /* must be one */)
+
+#define GET_DMA_POLARITY(chan) chan
+
+#endif
+
+typedef struct {
+ unsigned short in_use; /* set when channel is being used, clr when
+ * available.
+ */
+ /*
+ * Valid polarity settings:
+ * DMAReq_ActiveLow(n)
+ * DMAAck_ActiveLow(n)
+ * EOT_ActiveLow(n)
+ *
+ * n is 0 to max dma chans
+ */
+ unsigned int polarity;
+
+ char buffer_enable; /* Boolean: buffer enable */
+ char tce_enable; /* Boolean: terminal count enable */
+ char etd_output; /* Boolean: eot pin is a tc output */
+ char pce; /* Boolean: parity check enable */
+
+ /*
+ * Peripheral location:
+ * INTERNAL_PERIPHERAL (UART0 on the 405GP)
+ * EXTERNAL_PERIPHERAL
+ */
+ char pl; /* internal/external peripheral */
+
+ /*
+ * Valid pwidth settings:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ */
+ unsigned int pwidth;
+
+ char dai; /* Boolean: dst address increment */
+ char sai; /* Boolean: src address increment */
+
+ /*
+ * Valid psc settings: 0-3
+ */
+ unsigned int psc; /* Peripheral Setup Cycles */
+
+ /*
+ * Valid pwc settings:
+ * 0-63
+ */
+ unsigned int pwc; /* Peripheral Wait Cycles */
+
+ /*
+ * Valid phc settings:
+ * 0-7
+ */
+ unsigned int phc; /* Peripheral Hold Cycles */
+
+ /*
+ * Valid cp (channel priority) settings:
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ */
+ unsigned int cp; /* channel priority */
+
+ /*
+ * Valid pf (memory read prefetch) settings:
+ *
+ * PREFETCH_1
+ * PREFETCH_2
+ * PREFETCH_4
+ */
+ unsigned int pf; /* memory read prefetch */
+
+ /*
+ * Boolean: channel interrupt enable
+ * NOTE: for sgl transfers, only the last descriptor will be setup to
+ * interrupt.
+ */
+ char int_enable;
+
+ char shift; /* easy access to byte_count shift, based on */
+ /* the width of the channel */
+
+ uint32_t control; /* channel control word */
+
+ /* These variables are used ONLY in single dma transfers */
+ unsigned int mode; /* transfer mode */
+ phys_addr_t addr;
+ char ce; /* channel enable */
+#ifdef CONFIG_STB03xxx
+ char ch_enable;
+ char tcd_disable;
+ char ece_enable;
+ char td; /* transfer direction */
+#endif
+
+ char int_on_final_sg;/* for scatter/gather - only interrupt on last sg */
+} ppc_dma_ch_t;
+
+/*
+ * PPC44x DMA implementations have a slightly different
+ * descriptor layout. Probably moved about due to the
+ * change to 64-bit addresses and link pointer. I don't
+ * know why they didn't just leave control_count after
+ * the dst_addr.
+ */
+#ifdef PPC4xx_DMA_64BIT
+typedef struct {
+ uint32_t control;
+ uint32_t control_count;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ phys_addr_t next;
+} ppc_sgl_t;
+#else
+typedef struct {
+ uint32_t control;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ uint32_t control_count;
+ uint32_t next;
+} ppc_sgl_t;
+#endif
+
+typedef struct {
+ unsigned int dmanr;
+ uint32_t control; /* channel ctrl word; loaded from each descriptor */
+ uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */
+ dma_addr_t dma_addr; /* dma (physical) address of this list */
+ ppc_sgl_t *phead;
+ dma_addr_t phead_dma;
+ ppc_sgl_t *ptail;
+ dma_addr_t ptail_dma;
+} sgl_list_info_t;
+
+typedef struct {
+ phys_addr_t *src_addr;
+ phys_addr_t *dst_addr;
+ phys_addr_t dma_src_addr;
+ phys_addr_t dma_dst_addr;
+} pci_alloc_desc_t;
+
+extern ppc_dma_ch_t dma_channels[];
+
+/*
+ * The DMA API are in ppc4xx_dma.c and ppc4xx_sgdma.c
+ */
+extern int ppc4xx_init_dma_channel(unsigned int, ppc_dma_ch_t *);
+extern int ppc4xx_get_channel_config(unsigned int, ppc_dma_ch_t *);
+extern int ppc4xx_set_channel_priority(unsigned int, unsigned int);
+extern unsigned int ppc4xx_get_peripheral_width(unsigned int);
+extern void ppc4xx_set_sg_addr(int, phys_addr_t);
+extern int ppc4xx_add_dma_sgl(sgl_handle_t, phys_addr_t, phys_addr_t, unsigned int);
+extern void ppc4xx_enable_dma_sgl(sgl_handle_t);
+extern void ppc4xx_disable_dma_sgl(sgl_handle_t);
+extern int ppc4xx_get_dma_sgl_residue(sgl_handle_t, phys_addr_t *, phys_addr_t *);
+extern int ppc4xx_delete_dma_sgl_element(sgl_handle_t, phys_addr_t *, phys_addr_t *);
+extern int ppc4xx_alloc_dma_handle(sgl_handle_t *, unsigned int, unsigned int);
+extern void ppc4xx_free_dma_handle(sgl_handle_t);
+extern int ppc4xx_get_dma_status(void);
+extern int ppc4xx_enable_burst(unsigned int);
+extern int ppc4xx_disable_burst(unsigned int);
+extern int ppc4xx_set_burst_size(unsigned int, unsigned int);
+extern void ppc4xx_set_src_addr(int dmanr, phys_addr_t src_addr);
+extern void ppc4xx_set_dst_addr(int dmanr, phys_addr_t dst_addr);
+extern void ppc4xx_enable_dma(unsigned int dmanr);
+extern void ppc4xx_disable_dma(unsigned int dmanr);
+extern void ppc4xx_set_dma_count(unsigned int dmanr, unsigned int count);
+extern int ppc4xx_get_dma_residue(unsigned int dmanr);
+extern void ppc4xx_set_dma_addr2(unsigned int dmanr, phys_addr_t src_dma_addr,
+ phys_addr_t dst_dma_addr);
+extern int ppc4xx_enable_dma_interrupt(unsigned int dmanr);
+extern int ppc4xx_disable_dma_interrupt(unsigned int dmanr);
+extern int ppc4xx_clr_dma_status(unsigned int dmanr);
+extern int ppc4xx_map_dma_port(unsigned int dmanr, unsigned int ocp_dma,short dma_chan);
+extern int ppc4xx_disable_dma_port(unsigned int dmanr, unsigned int ocp_dma,short dma_chan);
+extern int ppc4xx_set_dma_mode(unsigned int dmanr, unsigned int mode);
+
+/* These are in kernel/dma.c: */
+
+/* reserve a DMA channel */
+extern int request_dma(unsigned int dmanr, const char *device_id);
+/* release it again */
+extern void free_dma(unsigned int dmanr);
+#endif
+#endif /* __KERNEL__ */
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index f2d270b202f..ae4b2c33922 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -150,6 +150,12 @@
#define gadget_is_mpc8272(g) 0
#endif
+#ifdef CONFIG_USB_GADGET_DWC_OTG
+#define gadget_is_dwc_otg(g) !strcmp("dwc_otg_pcd", (g)->name)
+#else
+#define gadget_is_dwc_otg(g) 0
+#endif
+
#ifdef CONFIG_USB_GADGET_M66592
#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
#else
@@ -235,6 +241,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x18;
else if (gadget_is_fsl_usb2(gadget))
return 0x19;
+ else if (gadget_is_dwc_otg(gadget))
+ return 0x1a;
else if (gadget_is_amd5536udc(gadget))
return 0x20;
else if (gadget_is_m66592(gadget))
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2fc02bd9584..df2ea5b99bc 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -125,10 +125,13 @@ static inline int qlen(struct usb_gadget *gadget)
#define xprintk(d, level, fmt, args...) \
printk(level "%s: " fmt , (d)->net->name , ## args)
+#define DEBUG
+#define VERBOSE_DEBUG
+
#ifdef DEBUG
#undef DEBUG
#define DBG(dev, fmt, args...) \
- xprintk(dev , KERN_DEBUG , fmt , ## args)
+ printk(fmt , ## args)
#else
#define DBG(dev, fmt, args...) \
do { } while (0)
@@ -249,7 +252,17 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
* but on at least one, checksumming fails otherwise. Note:
* RNDIS headers involve variable numbers of LE32 values.
*/
+#if defined(CONFIG_USB_GADGET_DWC_OTG)
+ {
+ unsigned int off = 0;
+ off = ((unsigned long) skb->data) % 4; /* align=4 */
+ if (off != 0)
+ skb_reserve(skb, 4 - off);
+ }
+#else
skb_reserve(skb, NET_IP_ALIGN);
+#endif
+
req->buf = skb->data;
req->length = size;
@@ -573,6 +586,18 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
length = skb->len;
}
+
+#if defined(CONFIG_USB_GADGET_DWC_OTG)
+ {
+ unsigned int off = 0;
+ off = ((unsigned long) skb->data) % 4; /* align=4 */
+ if (off != 0) {
+ memmove(skb->data + (4 - off), skb->data, skb->len);
+ skb_reserve(skb, 4 - off);
+ }
+ }
+#endif
+
req->buf = skb->data;
req->context = skb;
req->complete = tx_complete;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 9b43b226817..b2b904b6141 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -40,6 +40,7 @@ config USB_XHCI_HCD_DEBUGGING
config USB_EHCI_HCD
tristate "EHCI HCD (USB 2.0) support"
depends on USB && USB_ARCH_HAS_EHCI
+ select DWC_SLAVE if 460EX
---help---
The Enhanced Host Controller Interface (EHCI) is standard for USB 2.0
"high speed" (480 Mbit/sec, 60 Mbyte/sec) host controller hardware.
@@ -175,6 +176,7 @@ config USB_OHCI_HCD
tristate "OHCI HCD support"
depends on USB && USB_ARCH_HAS_OHCI
select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
+ select DWC_SLAVE if 460EX
select USB_OTG_UTILS if ARCH_OMAP
---help---
The Open Host Controller Interface (OHCI) is a standard for accessing