Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/Kconfig | 9
-rw-r--r--  drivers/ata/Makefile | 1
-rw-r--r--  drivers/ata/ahci.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 12
-rw-r--r--  drivers/ata/libata-eh.c | 9
-rw-r--r--  drivers/ata/pata_ep93xx.c | 2
-rw-r--r--  drivers/ata/sata_dwc_pmp.c | 3041
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 6
-rw-r--r--  drivers/block/zram/zram_drv.c | 22
-rw-r--r--  drivers/clk/ti/clk-7xx.c | 7
-rw-r--r--  drivers/dma/Kconfig | 17
-rw-r--r--  drivers/dma/Makefile | 2
-rw-r--r--  drivers/dma/ppc4xx/Makefile | 2
-rw-r--r--  drivers/dma/ppc4xx/apm82181-adma.c | 2201
-rw-r--r--  drivers/dma/ppc4xx/ppc460ex_4chan_dma.c | 1110
-rw-r--r--  drivers/dma/ppc4xx/ppc460ex_4chan_dma.h | 531
-rw-r--r--  drivers/firewire/ohci.c | 4
-rw-r--r--  drivers/gpio/gpio-rcar.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_render_state.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/cik.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 87
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/trinity_dpm.c | 15
-rw-r--r--  drivers/hwmon/smsc47m192.c | 4
-rw-r--r--  drivers/ide/Kconfig | 5
-rw-r--r--  drivers/ide/ide-probe.c | 8
-rw-r--r--  drivers/iio/accel/bma180.c | 8
-rw-r--r--  drivers/iio/industrialio-buffer.c | 2
-rw-r--r--  drivers/input/input.c | 6
-rw-r--r--  drivers/input/keyboard/st-keyscan.c | 2
-rw-r--r--  drivers/input/misc/sirfsoc-onkey.c | 2
-rw-r--r--  drivers/input/mouse/synaptics.c | 5
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 28
-rw-r--r--  drivers/input/touchscreen/ti_am335x_tsc.c | 5
-rw-r--r--  drivers/isdn/gigaset/bas-gigaset.c | 1
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 20
-rw-r--r--  drivers/md/dm-bufio.c | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 13
-rw-r--r--  drivers/media/dvb-frontends/si2168.c | 16
-rw-r--r--  drivers/media/dvb-frontends/si2168_priv.h | 2
-rw-r--r--  drivers/media/dvb-frontends/tda10071.c | 12
-rw-r--r--  drivers/media/dvb-frontends/tda10071_priv.h | 1
-rw-r--r--  drivers/media/pci/saa7134/saa7134-empress.c | 2
-rw-r--r--  drivers/media/platform/davinci/vpif_capture.c | 1
-rw-r--r--  drivers/media/platform/davinci/vpif_display.c | 1
-rw-r--r--  drivers/media/tuners/si2157.c | 2
-rw-r--r--  drivers/media/usb/dvb-usb-v2/af9035.c | 40
-rw-r--r--  drivers/media/usb/gspca/pac7302.c | 1
-rw-r--r--  drivers/media/usb/hdpvr/hdpvr-video.c | 6
-rw-r--r--  drivers/media/v4l2-core/v4l2-dv-timings.c | 4
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 3
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 42
-rw-r--r--  drivers/net/hyperv/netvsc.c | 4
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/phy_device.c | 15
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 22
-rw-r--r--  drivers/net/usb/cdc_ether.c | 16
-rw-r--r--  drivers/net/usb/huawei_cdc_ncm.c | 3
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 2
-rw-r--r--  drivers/net/usb/r8152.c | 7
-rw-r--r--  drivers/net/vxlan.c | 2
-rw-r--r--  drivers/net/wan/x25_asy.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 15
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 7
-rw-r--r--  drivers/net/xen-netback/netback.c | 86
-rw-r--r--  drivers/of/fdt.c | 66
-rw-r--r--  drivers/parport/Kconfig | 12
-rw-r--r--  drivers/pinctrl/pinctrl-st.c | 2
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 3
-rw-r--r--  drivers/rapidio/devices/tsi721_dma.c | 8
-rw-r--r--  drivers/s390/char/raw3270.c | 1
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 9
-rw-r--r--  drivers/scsi/scsi_lib.c | 8
-rw-r--r--  drivers/staging/media/omap4iss/Kconfig | 2
-rw-r--r--  drivers/staging/rtl8723au/os_dep/usb_intf.c | 4
-rw-r--r--  drivers/staging/vt6655/bssdb.c | 2
-rw-r--r--  drivers/staging/vt6655/device_main.c | 7
-rw-r--r--  drivers/xen/grant-table.c | 9
96 files changed, 7557 insertions(+), 262 deletions(-)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 7671dbac601..2664da32d9d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -278,6 +278,15 @@ config SATA_DWC_VDEBUG
help
This option enables the taskfile dumping and NCQ debugging.
+config SATA_DWC_PMP
+ tristate "DesignWare Cores SATA with PMP support"
+ depends on 460EX
+ help
+ This option enables support for the on-chip SATA controller of the
+ AppliedMicro processor 460EX with PMP support.
+
+ If unsure, say N.
+
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
depends on ARCH_HIGHBANK || COMPILE_TEST
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 5a02aeecef5..7e7a77de757 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_SATA_FSL) += sata_fsl.o
obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
+obj-$(CONFIG_SATA_DWC_PMP) += sata_dwc_pmp.o
obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dae5607e111..4cd52a4541a 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* Promise */
{ PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
+ { PCI_VDEVICE(PROMISE, 0x3781), board_ahci }, /* FastTrak TX8660 ahci-mode */
/* Asmedia */
{ PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 18d97d5c7d9..677c0c1b03b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
* ata_qc_new - Request an available ATA command, for queueing
* @ap: target port
*
+ * Some ATA host controllers implement a queue depth that is less than
+ * ATA_MAX_QUEUE, so we must not allocate a tag beyond that hardware
+ * limit.
+ *
* LOCKING:
* None.
*/
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
struct ata_queued_cmd *qc = NULL;
+ unsigned int max_queue = ap->host->n_tags;
unsigned int i, tag;
/* no command while frozen */
if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
return NULL;
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
- tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+ for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+ tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
{
spin_lock_init(&host->lock);
mutex_init(&host->eh_mutex);
+ host->n_tags = ATA_MAX_QUEUE - 1;
host->dev = dev;
host->ops = ops;
}
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
int i, rc;
+ host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
/* host must have been started */
if (!(host->flags & ATA_HOST_STARTED)) {
dev_err(host->dev, "BUG: trying to register unstarted host\n");
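
The reworked allocation loop above is a bounded circular scan: it starts just after the last tag handed out and wraps at the host's n_tags limit rather than at ATA_MAX_QUEUE. A minimal, self-contained sketch of the same scan (the names find_free_tag and TAG_INTERNAL are illustrative, not libata API):

    #include <stdbool.h>

    #define TAG_INTERNAL 31  /* stands in for ATA_TAG_INTERNAL */

    static int find_free_tag(const bool *in_use, unsigned int last_tag,
                             unsigned int max_queue)
    {
        unsigned int i, tag;

        for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
            tag = tag < max_queue ? tag : 0;  /* wrap at the hardware limit */
            if (tag == TAG_INTERNAL)          /* reserved for internal commands */
                continue;
            if (!in_use[tag])
                return tag;
        }
        return -1;  /* all usable tags busy */
    }
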
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 6760fc4e85b..dad83df555c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
case ATA_DEV_ATA:
if (err & ATA_ICRC)
qc->err_mask |= AC_ERR_ATA_BUS;
- if (err & ATA_UNC)
+ if (err & (ATA_UNC | ATA_AMNF))
qc->err_mask |= AC_ERR_MEDIA;
if (err & ATA_IDNF)
qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
}
if (cmd->command != ATA_CMD_PACKET &&
- (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
- ATA_ABORTED)))
- ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
+ (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+ ATA_IDNF | ATA_ABORTED)))
+ ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
res->feature & ATA_ICRC ? "ICRC " : "",
res->feature & ATA_UNC ? "UNC " : "",
+ res->feature & ATA_AMNF ? "AMNF " : "",
res->feature & ATA_IDNF ? "IDNF " : "",
res->feature & ATA_ABORTED ? "ABRT " : "");
#endif
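
With this change AMNF (address mark not found) is treated as a media error alongside UNC, and it gains its own token in the EH report. A small sketch of the decode for an ATA device using the standard error-register bit positions; the ERR_* values are illustrative placeholders for libata's AC_ERR_* flags:

    #include <stdint.h>

    #define ATA_ICRC (1 << 7)  /* interface CRC error */
    #define ATA_UNC  (1 << 6)  /* uncorrectable media error */
    #define ATA_IDNF (1 << 4)  /* ID not found */
    #define ATA_AMNF (1 << 0)  /* address mark not found */

    enum { ERR_MEDIA = 1, ERR_INVALID = 2, ERR_ATA_BUS = 4 };

    static unsigned int decode_err(uint8_t err)
    {
        unsigned int mask = 0;

        if (err & ATA_ICRC)
            mask |= ERR_ATA_BUS;
        if (err & (ATA_UNC | ATA_AMNF))  /* both now count as media errors */
            mask |= ERR_MEDIA;
        if (err & ATA_IDNF)
            mask |= ERR_INVALID;
        return mask;
    }
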
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 6ad5c072ce3..4d37c5415fc 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
struct ep93xx_pata_data *drv_data;
struct ata_host *host;
struct ata_port *ap;
- unsigned int irq;
+ int irq;
struct resource *mem_res;
void __iomem *ide_base;
int err;
diff --git a/drivers/ata/sata_dwc_pmp.c b/drivers/ata/sata_dwc_pmp.c
new file mode 100644
index 00000000000..3ff190af8d2
--- /dev/null
+++ b/drivers/ata/sata_dwc_pmp.c
@@ -0,0 +1,3041 @@
+/*
+ * drivers/ata/sata_dwc_pmp.c
+ *
+ * Synopsys DesignWare Cores (DWC) SATA host driver
+ *
+ * Author: Mark Miesfeld <mmiesfeld@amcc.com>
+ *
+ * Ported from 2.6.19.2 to 2.6.25/26 by Stefan Roese <sr@denx.de>
+ * Copyright 2008 DENX Software Engineering
+ *
+ * Based on versions provided by AMCC and Synopsys which are:
+ * Copyright 2006 Applied Micro Circuits Corporation
+ * COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/libata.h>
+#include <linux/rtc.h>
+
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+
+
+#ifdef CONFIG_SATA_DWC_DEBUG
+#define dwc_dev_dbg(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_dbg(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_dbg(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+#else
+#define dwc_dev_dbg(dev, format, arg...) \
+ ({ 0; })
+#define dwc_port_dbg(ap, format, arg...) \
+ ({ 0; })
+#define dwc_link_dbg(link, format, arg...) \
+ ({ 0; })
+#endif
+
+#ifdef CONFIG_SATA_DWC_VDEBUG
+#define DEBUG_NCQ
+#define dwc_dev_vdbg(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_vdbg(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_vdbg(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+#else
+#define dwc_dev_vdbg(dev, format, arg...) \
+ ({ 0; })
+#define dwc_port_vdbg(ap, format, arg...) \
+ ({ 0; })
+#define dwc_link_vdbg(link, format, arg...) \
+ ({ 0; })
+#endif
+
+#define dwc_dev_info(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_INFO, dev, format, ##arg); 0; })
+#define dwc_port_info(ap, format, arg...) \
+ ata_port_printk(ap, KERN_INFO, format, ##arg)
+#define dwc_link_info(link, format, arg...) \
+ ata_link_printk(link, KERN_INFO, format, ##arg)
+
+/* These two are defined in "libata.h" */
+#undef DRV_NAME
+#undef DRV_VERSION
+#define DRV_NAME "sata-dwc"
+#define DRV_VERSION "2.0"
+
+/* SCR bits used for Port Multiplier discovery */
+#define PSCR_SCONTROL_DET_ENABLE 0x00000001
+#define PSCR_SSTATUS_DET_PRESENT 0x00000001
+#define PSCR_SERROR_DIAG_X 0x04000000
+
+/* Port multiplier port entry in SCONTROL register */
+#define SCONTROL_PMP_MASK 0x000f0000
+#define PMP_TO_SCONTROL(p) ((p << 16) & 0x000f0000)
+#define SCONTROL_TO_PMP(p) (((p) & 0x000f0000) >> 16)
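
The PMP port number lives in bits 19:16 of SControl, and the two macros above pack and unpack it. A quick host-side round-trip check (this sketch parenthesizes p, which the macro above omits):

    #include <assert.h>
    #include <stdint.h>

    #define SCONTROL_PMP_MASK  0x000f0000
    #define PMP_TO_SCONTROL(p) (((p) << 16) & 0x000f0000)
    #define SCONTROL_TO_PMP(p) (((p) & 0x000f0000) >> 16)

    int main(void)
    {
        uint32_t scontrol = 0x00000301;  /* some DET/SPD/IPM bits set */

        scontrol = (scontrol & ~SCONTROL_PMP_MASK) | PMP_TO_SCONTROL(5);
        assert(SCONTROL_TO_PMP(scontrol) == 5);            /* port 5 in bits 19:16 */
        assert((scontrol & ~SCONTROL_PMP_MASK) == 0x301);  /* other fields intact */
        return 0;
    }
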
+
+
+/* SATA DMA driver Globals */
+#if defined(CONFIG_APM821xx)
+#define DMA_NUM_CHANS 2
+#else
+#define DMA_NUM_CHANS 1
+#endif
+
+#define DMA_NUM_CHAN_REGS 8
+
+/* SATA DMA Register definitions */
+#if defined(CONFIG_APM821xx)
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
+#else
+#define AHB_DMA_BRST_DFLT 64 /* 16 data items burst length */
+#endif
+
+#if defined(CONFIG_APOLLO3G)
+extern void signal_hdd_led(int, int);
+#endif
+struct dmareg {
+ u32 low; /* Low bits 0-31 */
+ u32 high; /* High bits 32-63 */
+};
+
+/* DMA Per Channel registers */
+
+struct dma_chan_regs {
+ struct dmareg sar; /* Source Address */
+ struct dmareg dar; /* Destination address */
+ struct dmareg llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+ struct dmareg sstat; /* Source Status not implemented in core */
+ struct dmareg dstat; /* Destination Status not implemented in core */
+ struct dmareg sstatar; /* Source Status Address not impl in core */
+ struct dmareg dstatar; /* Destination Status Address not implemented */
+ struct dmareg cfg; /* Config */
+ struct dmareg sgr; /* Source Gather */
+ struct dmareg dsr; /* Destination Scatter */
+};
+
+/* Generic Interrupt Registers */
+struct dma_interrupt_regs {
+ struct dmareg tfr; /* Transfer Interrupt */
+ struct dmareg block; /* Block Interrupt */
+ struct dmareg srctran; /* Source Transfer Interrupt */
+ struct dmareg dsttran; /* Dest Transfer Interrupt */
+ struct dmareg error; /* Error */
+};
+
+struct ahb_dma_regs {
+ struct dma_chan_regs chan_regs[DMA_NUM_CHAN_REGS];
+ struct dma_interrupt_regs interrupt_raw; /* Raw Interrupt */
+ struct dma_interrupt_regs interrupt_status; /* Interrupt Status */
+ struct dma_interrupt_regs interrupt_mask; /* Interrupt Mask */
+ struct dma_interrupt_regs interrupt_clear; /* Interrupt Clear */
+ struct dmareg statusInt; /* Interrupt combined */
+ struct dmareg rq_srcreg; /* Src Trans Req */
+ struct dmareg rq_dstreg; /* Dst Trans Req */
+ struct dmareg rq_sgl_srcreg; /* Sngl Src Trans Req */
+ struct dmareg rq_sgl_dstreg; /* Sngl Dst Trans Req */
+ struct dmareg rq_lst_srcreg; /* Last Src Trans Req */
+ struct dmareg rq_lst_dstreg; /* Last Dst Trans Req */
+ struct dmareg dma_cfg; /* DMA Config */
+ struct dmareg dma_chan_en; /* DMA Channel Enable */
+ struct dmareg dma_id; /* DMA ID */
+ struct dmareg dma_test; /* DMA Test */
+ struct dmareg res1; /* reserved */
+ struct dmareg res2; /* reserved */
+
+ /* DMA Comp Params
+ * Param 6 = dma_param[0], Param 5 = dma_param[1],
+ * Param 4 = dma_param[2] ...
+ */
+ struct dmareg dma_params[6];
+};
+
+/* Data structure for linked list item */
+struct lli {
+ u32 sar; /* Source Address */
+ u32 dar; /* Destination address */
+ u32 llp; /* Linked List Pointer */
+ struct dmareg ctl; /* Control */
+#if defined(CONFIG_APM821xx)
+ u32 dstat; /* Source status is not supported */
+#else
+ struct dmareg dstat; /* Destination Status */
+#endif
+};
+
+#define SATA_DWC_DMAC_LLI_SZ (sizeof(struct lli))
+#define SATA_DWC_DMAC_LLI_NUM 256
+#define SATA_DWC_DMAC_TWIDTH_BYTES 4
+#define SATA_DWC_DMAC_LLI_TBL_SZ \
+ (SATA_DWC_DMAC_LLI_SZ * SATA_DWC_DMAC_LLI_NUM)
+#if defined(CONFIG_APM821xx)
+#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
+ (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)
+#else
+#define SATA_DWC_DMAC_CTRL_TSIZE_MAX \
+ (0x00000800 * SATA_DWC_DMAC_TWIDTH_BYTES)
+#endif
+/* DMA Register Operation Bits */
+#define DMA_EN 0x00000001 /* Enable AHB DMA */
+#define DMA_CHANNEL(ch) (0x00000001 << (ch)) /* Select channel */
+#define DMA_ENABLE_CHAN(ch) ((0x00000001 << (ch)) | \
+ ((0x000000001 << (ch)) << 8))
+#define DMA_DISABLE_CHAN(ch) (0x00000000 | ((0x000000001 << (ch)) << 8))
+
+/* Channel Control Register */
+#define DMA_CTL_BLK_TS(size) ((size) & 0x000000FFF) /* Blk Transfer size */
+#define DMA_CTL_LLP_SRCEN 0x10000000 /* Blk chain enable Src */
+#define DMA_CTL_LLP_DSTEN 0x08000000 /* Blk chain enable Dst */
+/*
+ * This define is used to set block chaining disabled in the control low
+ * register. It is already in little endian format so it can be &'d directly.
+ * It is essentially: cpu_to_le32(~(DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN))
+ */
+#define DMA_CTL_LLP_DISABLE_LE32 0xffffffe7
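
A quick host-side check that the hard-coded constant matches the comment's formula, assuming big-endian PowerPC where cpu_to_le32() is a byte swap:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t srcen = 0x10000000, dsten = 0x08000000;

        /* cpu_to_le32() on big-endian PowerPC byte-swaps its argument */
        assert(__builtin_bswap32(~(srcen | dsten)) == 0xffffffe7);
        return 0;
    }
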
+#define DMA_CTL_SMS(num) ((num & 0x3) << 25) /*Src Master Select*/
+#define DMA_CTL_DMS(num) ((num & 0x3) << 23) /*Dst Master Select*/
+#define DMA_CTL_TTFC(type) ((type & 0x7) << 20) /*Type&Flow cntr*/
+#define DMA_CTL_TTFC_P2M_DMAC 0x00000002 /*Per mem,DMAC cntr*/
+#define DMA_CTL_TTFC_M2P_PER 0x00000003 /*Mem per,peri cntr*/
+#define DMA_CTL_SRC_MSIZE(size) ((size & 0x7) << 14) /*Src Burst Len*/
+#define DMA_CTL_DST_MSIZE(size) ((size & 0x7) << 11) /*Dst Burst Len*/
+#define DMA_CTL_SINC_INC 0x00000000 /*Src addr incr*/
+#define DMA_CTL_SINC_DEC 0x00000200
+#define DMA_CTL_SINC_NOCHANGE 0x00000400
+#define DMA_CTL_DINC_INC 0x00000000 /*Dst addr incr*/
+#define DMA_CTL_DINC_DEC 0x00000080
+#define DMA_CTL_DINC_NOCHANGE 0x00000100
+#define DMA_CTL_SRC_TRWID(size) ((size & 0x7) << 4) /*Src Trnsfr Width*/
+#define DMA_CTL_DST_TRWID(size) ((size & 0x7) << 1) /*Dst Trnsfr Width*/
+#define DMA_CTL_INT_EN 0x00000001 /*Interrupt Enable*/
+
+/* Channel Configuration Register high bits */
+#define DMA_CFG_FCMOD_REQ 0x00000001 /*Flow cntrl req*/
+#define DMA_CFG_PROTCTL (0x00000003 << 2) /*Protection cntrl*/
+
+/* Channel Configuration Register low bits */
+#define DMA_CFG_RELD_DST 0x80000000 /*Reload Dst/Src Addr*/
+#define DMA_CFG_RELD_SRC 0x40000000
+#define DMA_CFG_HS_SELSRC 0x00000800 /*SW hndshk Src/Dst*/
+#define DMA_CFG_HS_SELDST 0x00000400
+#define DMA_CFG_FIFOEMPTY (0x00000001 << 9) /*FIFO Empty bit*/
+
+/* Assign hardware handshaking interface (x) to dst / src peripheral */
+#define DMA_CFG_HW_HS_DEST(int_num) ((int_num & 0xF) << 11)
+#define DMA_CFG_HW_HS_SRC(int_num) ((int_num & 0xF) << 7)
+
+/* Channel Linked List Pointer Register */
+#define DMA_LLP_LMS(addr, master) (((addr) & 0xfffffffc) | (master))
+#define DMA_LLP_AHBMASTER1 0 /* List Master Select */
+#define DMA_LLP_AHBMASTER2 1
+
+#define SATA_DWC_MAX_PORTS 1
+
+#define SATA_DWC_SCR_OFFSET 0x24
+#define SATA_DWC_REG_OFFSET 0x64
+
+/* DWC SATA Registers */
+struct sata_dwc_regs {
+ u32 fptagr; /* 1st party DMA tag */
+ u32 fpbor; /* 1st party DMA buffer offset */
+ u32 fptcr; /* 1st party DMA Xfr count */
+ u32 dmacr; /* DMA Control */
+ u32 dbtsr; /* DMA Burst Transac size */
+ u32 intpr; /* Interrupt Pending */
+ u32 intmr; /* Interrupt Mask */
+ u32 errmr; /* Error Mask */
+ u32 llcr; /* Link Layer Control */
+ u32 phycr; /* PHY Control */
+ u32 physr; /* PHY Status */
+ u32 rxbistpd; /* Recvd BIST pattern def register */
+ u32 rxbistpd1; /* Recvd BIST data dword1 */
+ u32 rxbistpd2; /* Recvd BIST pattern data dword2 */
+ u32 txbistpd; /* Trans BIST pattern def register */
+ u32 txbistpd1; /* Trans BIST data dword1 */
+ u32 txbistpd2; /* Trans BIST data dword2 */
+ u32 bistcr; /* BIST Control Register */
+ u32 bistfctr; /* BIST FIS Count Register */
+ u32 bistsr; /* BIST Status Register */
+ u32 bistdecr; /* BIST Dword Error count register */
+ u32 res[15]; /* Reserved locations */
+ u32 testr; /* Test Register */
+ u32 versionr; /* Version Register */
+ u32 idr; /* ID Register */
+ u32 unimpl[192]; /* Unimplemented */
+ u32 dmadr[256]; /* FIFO Locations in DMA Mode */
+};
+
+#define SCR_SCONTROL_DET_ENABLE 0x00000001
+#define SCR_SSTATUS_DET_PRESENT 0x00000001
+#define SCR_SERROR_DIAG_X 0x04000000
+
+/* DWC SATA Register Operations */
+#define SATA_DWC_TXFIFO_DEPTH 0x01FF
+#define SATA_DWC_RXFIFO_DEPTH 0x01FF
+
+#define SATA_DWC_DMACR_TMOD_TXCHEN 0x00000004
+#define SATA_DWC_DMACR_TXCHEN (0x00000001 | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RXCHEN (0x00000002 | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_TX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_TXCHEN) | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_RX_CLEAR(v) (((v) & ~SATA_DWC_DMACR_RXCHEN) | \
+ SATA_DWC_DMACR_TMOD_TXCHEN)
+#define SATA_DWC_DMACR_TXRXCH_CLEAR SATA_DWC_DMACR_TMOD_TXCHEN
+
+#define SATA_DWC_DBTSR_MWR(size) ((size/4) & \
+ SATA_DWC_TXFIFO_DEPTH)
+#define SATA_DWC_DBTSR_MRD(size) (((size/4) & \
+ SATA_DWC_RXFIFO_DEPTH) << 16)
+
+// SATA DWC Interrupts
+#define SATA_DWC_INTPR_DMAT 0x00000001
+#define SATA_DWC_INTPR_NEWFP 0x00000002
+#define SATA_DWC_INTPR_PMABRT 0x00000004
+#define SATA_DWC_INTPR_ERR 0x00000008
+#define SATA_DWC_INTPR_NEWBIST 0x00000010
+#define SATA_DWC_INTPR_IPF 0x80000000
+// Interrupt masks
+#define SATA_DWC_INTMR_DMATM 0x00000001
+#define SATA_DWC_INTMR_NEWFPM 0x00000002
+#define SATA_DWC_INTMR_PMABRTM 0x00000004
+#define SATA_DWC_INTMR_ERRM 0x00000008
+#define SATA_DWC_INTMR_NEWBISTM 0x00000010
+#define SATA_DWC_INTMR_PRIMERRM 0x00000020
+#define SATA_DWC_INTPR_CMDGOOD 0x00000080
+#define SATA_DWC_INTPR_CMDABORT 0x00000040
+
+#define SATA_DWC_LLCR_SCRAMEN 0x00000001
+#define SATA_DWC_LLCR_DESCRAMEN 0x00000002
+#define SATA_DWC_LLCR_RPDEN 0x00000004
+
+// Defines for SError register
+#define SATA_DWC_SERR_ERRI 0x00000001 // Recovered data integrity error
+#define SATA_DWC_SERR_ERRM 0x00000002 // Recovered communication error
+#define SATA_DWC_SERR_ERRT 0x00000100 // Non-recovered transient data integrity error
+#define SATA_DWC_SERR_ERRC 0x00000200 // Non-recovered persistent communication or data integrity error
+#define SATA_DWC_SERR_ERRP 0x00000400 // Protocol error
+#define SATA_DWC_SERR_ERRE 0x00000800 // Internal host adapter error
+#define SATA_DWC_SERR_DIAGN 0x00010000 // PHYRdy change
+#define SATA_DWC_SERR_DIAGI 0x00020000 // PHY internal error
+#define SATA_DWC_SERR_DIAGW 0x00040000 // Phy COMWAKE signal is detected
+#define SATA_DWC_SERR_DIAGB 0x00080000 // 10b to 8b decoder err
+#define SATA_DWC_SERR_DIAGT 0x00100000 // Disparity error
+#define SATA_DWC_SERR_DIAGC 0x00200000 // CRC error
+#define SATA_DWC_SERR_DIAGH 0x00400000 // Handshake error
+#define SATA_DWC_SERR_DIAGL 0x00800000 // Link sequence (illegal transition) error
+#define SATA_DWC_SERR_DIAGS 0x01000000 // Transport state transition error
+#define SATA_DWC_SERR_DIAGF 0x02000000 // Unrecognized FIS type
+#define SATA_DWC_SERR_DIAGX 0x04000000 // Exchanged error - Set when PHY COMINIT signal is detected.
+#define SATA_DWC_SERR_DIAGA 0x08000000 // Port Selector Presence detected
+
+/* These are all the error bits; zeros are reserved fields. */
+#define SATA_DWC_SERR_ERR_BITS 0x0FFF0F03
+
+#define SATA_DWC_SCR0_SPD_GET(v) ((v >> 4) & 0x0000000F)
+
+struct sata_dwc_device {
+ struct resource reg; /* Resource for register */
+ struct device *dev; /* generic device struct */
+ struct ata_probe_ent *pe; /* ptr to probe-ent */
+ struct ata_host *host;
+ u8 *reg_base;
+ struct sata_dwc_regs *sata_dwc_regs; /* DW Synopsys SATA specific */
+ u8 *scr_base;
+ int dma_channel; /* DWC SATA DMA channel */
+ int irq_dma;
+ struct timer_list an_timer;
+};
+
+#define SATA_DWC_QCMD_MAX 32
+
+struct sata_dwc_device_port {
+ struct sata_dwc_device *hsdev;
+ int cmd_issued[SATA_DWC_QCMD_MAX];
+ struct lli *llit[SATA_DWC_QCMD_MAX];
+ dma_addr_t llit_dma[SATA_DWC_QCMD_MAX];
+ u32 dma_chan[SATA_DWC_QCMD_MAX];
+ int dma_pending[SATA_DWC_QCMD_MAX];
+ u32 sata_dwc_sactive_issued; /* issued queued ops */
+ u32 sata_dwc_sactive_queued; /* queued ops */
+ u32 dma_interrupt_count;
+
+};
+
+static struct sata_dwc_device* dwc_dev_list[2];
+static int dma_intr_registered = 0;
+/*
+ * Commonly used DWC SATA driver Macros
+ */
+#define HSDEV_FROM_HOST(host) ((struct sata_dwc_device *) \
+ (host)->private_data)
+#define HSDEV_FROM_AP(ap) ((struct sata_dwc_device *) \
+ (ap)->host->private_data)
+#define HSDEVP_FROM_AP(ap) ((struct sata_dwc_device_port *) \
+ (ap)->private_data)
+#define HSDEV_FROM_QC(qc) ((struct sata_dwc_device *) \
+ (qc)->ap->host->private_data)
+#define HSDEV_FROM_HSDEVP(p) ((struct sata_dwc_device *) \
+ (hsdevp)->hsdev)
+
+enum {
+ SATA_DWC_CMD_ISSUED_NOT = 0,
+ SATA_DWC_CMD_ISSUED_PENDING = 1,
+ SATA_DWC_CMD_ISSUED_EXEC = 2,
+ SATA_DWC_CMD_ISSUED_NODATA = 3,
+
+ SATA_DWC_DMA_PENDING_NONE = 0,
+ SATA_DWC_DMA_PENDING_TX = 1,
+ SATA_DWC_DMA_PENDING_RX = 2,
+};
+
+/*
+ * Globals
+ */
+static struct ahb_dma_regs *sata_dma_regs = 0;
+
+/*
+ * Prototypes
+ */
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 check_status);
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
+static void sata_dwc_port_stop(struct ata_port *ap);
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
+
+static int dma_dwc_init(struct sata_dwc_device *hsdev);
+static void dma_dwc_exit(struct sata_dwc_device *hsdev);
+static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
+ struct lli *lli, dma_addr_t dma_lli,
+ void __iomem *addr);
+static void dma_dwc_xfer_start(int dma_ch);
+static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch);
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev);
+static void sata_dwc_init_port ( struct ata_port *ap );
+u8 sata_dwc_check_status(struct ata_port *ap);
+
+
+
+
+static const char *dir_2_txt(enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ return "bi";
+ case DMA_FROM_DEVICE:
+ return "from";
+ case DMA_TO_DEVICE:
+ return "to";
+ case DMA_NONE:
+ return "none";
+ default:
+ return "err";
+ }
+}
+
+static const char *prot_2_txt(enum ata_tf_protocols protocol)
+{
+ switch (protocol) {
+ case ATA_PROT_UNKNOWN:
+ return "unknown";
+ case ATA_PROT_NODATA:
+ return "nodata";
+ case ATA_PROT_PIO:
+ return "pio";
+ case ATA_PROT_DMA:
+ return "dma";
+ case ATA_PROT_NCQ:
+ return "ncq";
+ case ATAPI_PROT_PIO:
+ return "atapi pio";
+ case ATAPI_PROT_NODATA:
+ return "atapi nodata";
+ case ATAPI_PROT_DMA:
+ return "atapi dma";
+ default:
+ return "err";
+ }
+}
+
+inline const char *ata_cmd_2_txt(const struct ata_taskfile *tf)
+{
+ switch (tf->command) {
+ case ATA_CMD_CHK_POWER:
+ return "ATA_CMD_CHK_POWER";
+ case ATA_CMD_EDD:
+ return "ATA_CMD_EDD";
+ case ATA_CMD_FLUSH:
+ return "ATA_CMD_FLUSH";
+ case ATA_CMD_FLUSH_EXT:
+ return "ATA_CMD_FLUSH_EXT";
+ case ATA_CMD_ID_ATA:
+ return "ATA_CMD_ID_ATA";
+ case ATA_CMD_ID_ATAPI:
+ return "ATA_CMD_ID_ATAPI";
+ case ATA_CMD_FPDMA_READ:
+ return "ATA_CMD_FPDMA_READ";
+ case ATA_CMD_FPDMA_WRITE:
+ return "ATA_CMD_FPDMA_WRITE";
+ case ATA_CMD_READ:
+ return "ATA_CMD_READ";
+ case ATA_CMD_READ_EXT:
+ return "ATA_CMD_READ_EXT";
+ case ATA_CMD_READ_NATIVE_MAX_EXT :
+ return "ATA_CMD_READ_NATIVE_MAX_EXT";
+ case ATA_CMD_VERIFY_EXT :
+ return "ATA_CMD_VERIFY_EXT";
+ case ATA_CMD_WRITE:
+ return "ATA_CMD_WRITE";
+ case ATA_CMD_WRITE_EXT:
+ return "ATA_CMD_WRITE_EXT";
+ case ATA_CMD_PIO_READ:
+ return "ATA_CMD_PIO_READ";
+ case ATA_CMD_PIO_READ_EXT:
+ return "ATA_CMD_PIO_READ_EXT";
+ case ATA_CMD_PIO_WRITE:
+ return "ATA_CMD_PIO_WRITE";
+ case ATA_CMD_PIO_WRITE_EXT:
+ return "ATA_CMD_PIO_WRITE_EXT";
+ case ATA_CMD_SET_FEATURES:
+ return "ATA_CMD_SET_FEATURES";
+ case ATA_CMD_PACKET:
+ return "ATA_CMD_PACKET";
+ case ATA_CMD_PMP_READ:
+ return "ATA_CMD_PMP_READ";
+ case ATA_CMD_PMP_WRITE:
+ return "ATA_CMD_PMP_WRITE";
+ default:
+ return "ATA_CMD_???";
+ }
+}
+
+/*
+ * Dump content of the taskfile
+ */
+static void sata_dwc_tf_dump(struct device *dwc_dev, struct ata_taskfile *tf)
+{
+ dwc_dev_vdbg(dwc_dev, "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx"
+ "device: %x\n", tf->command, prot_2_txt(tf->protocol),
+ tf->flags, tf->device);
+ dwc_dev_vdbg(dwc_dev, "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam:"
+ "0x%x lbah: 0x%x\n", tf->feature, tf->nsect, tf->lbal,
+ tf->lbam, tf->lbah);
+ dwc_dev_vdbg(dwc_dev, "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x "
+ "hob_lbam: 0x%x hob_lbah: 0x%x\n", tf->hob_feature,
+ tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
+ tf->hob_lbah);
+}
+
+/*
+ * Function: get_burst_length_encode
+ * arguments: datalength: length in bytes of data
+ * returns the encoding to be programmed into the DMAC burst-size field
+ * This value is effectively log2 of the 32-bit item count, minus one,
+ * clamped to the range [0, 5]
+ */
+static inline int get_burst_length_encode(int datalength)
+{
+ int items = datalength >> 2; /* div by 4 to get lword count */
+
+ if (items >= 64)
+ return 5;
+
+ if (items >= 32)
+ return 4;
+
+ if (items >= 16)
+ return 3;
+
+ if (items >= 8)
+ return 2;
+
+ if (items >= 4)
+ return 1;
+
+ return 0;
+}
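
For example, the default burst AHB_DMA_BRST_DFLT of 64 bytes is 16 32-bit items, which encodes to 3; assuming the usual DW AHB DMAC MSIZE encoding (0 = 1 item, 1 = 4, 2 = 8, 3 = 16, 4 = 32, 5 = 64), that is exactly a 16-item burst. A compact host-side check of the mapping:

    #include <assert.h>

    static int get_burst_length_encode(int datalength)
    {
        int items = datalength >> 2;  /* bytes -> 32-bit items */

        return items >= 64 ? 5 : items >= 32 ? 4 : items >= 16 ? 3 :
               items >= 8 ? 2 : items >= 4 ? 1 : 0;
    }

    int main(void)
    {
        assert(get_burst_length_encode(64) == 3);  /* 16 items -> MSIZE 3 */
        assert(get_burst_length_encode(16) == 1);  /*  4 items -> MSIZE 1 */
        assert(get_burst_length_encode(8)  == 0);  /*  2 items -> MSIZE 0 */
        return 0;
    }
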
+
+/*
+ * Clear Interrupts on a DMA channel
+ */
+static inline void clear_chan_interrupts(int c)
+{
+ out_le32(&(sata_dma_regs->interrupt_clear.tfr.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.block.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.srctran.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.dsttran.low), DMA_CHANNEL(c));
+ out_le32(&(sata_dma_regs->interrupt_clear.error.low), DMA_CHANNEL(c));
+}
+
+/*
+ * Function: dma_request_channel
+ * arguments: ap: the ATA port
+ * returns the channel number if available else -1
+ * This function returns the port's dedicated DMA channel if it is not
+ * currently enabled
+ */
+static int dma_request_channel(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ if (!(in_le32(&(sata_dma_regs->dma_chan_en.low)) & DMA_CHANNEL(hsdev->dma_channel))) {
+ dwc_port_vdbg(ap, "%s Successfully requested DMA channel %d\n",
+ __func__, hsdev->dma_channel);
+ return (hsdev->dma_channel);
+ }
+
+ return -1;
+}
+
+
+
+/*
+ * Function: dma_dwc_interrupt
+ * arguments: irq, hsdev_instance
+ * returns IRQ_HANDLED
+ * Interrupt Handler for DW AHB SATA DMA
+ */
+static int dma_dwc_interrupt(int irq, void *hsdev_instance)
+{
+ volatile u32 tfr_reg, err_reg;
+ unsigned long flags;
+ struct sata_dwc_device *hsdev = hsdev_instance;
+ struct ata_host *host = (struct ata_host *)hsdev->host;
+ struct ata_port *ap;
+ struct sata_dwc_device_port *hsdevp;
+ u8 tag = 0;
+ int chan;
+ unsigned int port = 0;
+ spin_lock_irqsave(&host->lock, flags);
+
+ ap = host->ports[port];
+ hsdevp = HSDEVP_FROM_AP(ap);
+ tag = ap->link.active_tag;
+
+ dwc_port_vdbg(ap, "%s: DMA interrupt in channel %d\n", __func__, hsdev->dma_channel);
+
+ tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low));
+ err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));
+
+ dwc_port_vdbg(ap, "eot=0x%08x err=0x%08x pending=%d active port=%d\n",
+ tfr_reg, err_reg, hsdevp->dma_pending[tag], port);
+ chan = hsdev->dma_channel;
+
+ if (tfr_reg & DMA_CHANNEL(chan)) {
+ /*
+ * Each DMA command produces 2 interrupts. Only
+ * complete the command after both interrupts have been
+ * seen. (See sata_dwc_isr())
+ */
+ hsdevp->dma_interrupt_count++;
+ sata_dwc_clear_dmacr(hsdevp, tag);
+
+ if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) {
+ dev_err(ap->dev, "DMA not pending eot=0x%08x "
+ "err=0x%08x tag=0x%02x pending=%d\n",
+ tfr_reg, err_reg, tag,
+ hsdevp->dma_pending[tag]);
+ }
+
+ // Do the remaining work after the DMA transfer completes
+ if ((hsdevp->dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+
+ /* Clear the interrupt */
+ out_le32(&(sata_dma_regs->interrupt_clear.tfr.low),
+ DMA_CHANNEL(chan));
+ }
+
+ /* Process error interrupt. */
+ // We do not expect errors to happen
+ if (unlikely(err_reg & DMA_CHANNEL(chan))) {
+ /* TODO Need error handler ! */
+ dev_err(ap->dev, "error interrupt err_reg=0x%08x\n",
+ err_reg);
+
+ /* host->lock (which ap->lock points to) is already held here */
+ //if (ata_is_dma(qc->tf.protocol)) {
+ /* disable DMAC */
+ dma_dwc_terminate_dma(ap, chan);
+ //}
+
+ /* Clear the interrupt. */
+ out_le32(&(sata_dma_regs->interrupt_clear.error.low),
+ DMA_CHANNEL(chan));
+ }
+
+ spin_unlock_irqrestore(&host->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dma_dwc_handler(int irq, void *hsdev_instance)
+{
+ volatile u32 tfr_reg, err_reg;
+ int chan;
+
+ tfr_reg = in_le32(&(sata_dma_regs->interrupt_status.tfr.low));
+ err_reg = in_le32(&(sata_dma_regs->interrupt_status.error.low));
+
+ for (chan = 0; chan < DMA_NUM_CHANS; chan++) {
+ /* Check for end-of-transfer interrupt. */
+
+ if (tfr_reg & DMA_CHANNEL(chan)) {
+ dma_dwc_interrupt(0, dwc_dev_list[chan]);
+ } else if (err_reg & DMA_CHANNEL(chan)) {
+ /* Check for error interrupt. */
+ dma_dwc_interrupt(0, dwc_dev_list[chan]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dma_register_interrupt (struct sata_dwc_device *hsdev)
+{
+ int retval = 0;
+ int irq = hsdev->irq_dma;
+ /*
+ * FIXME: 2 SATA controllers share the same DMA engine so
+ * currently, they also share same DMA interrupt
+ */
+ if (!dma_intr_registered) {
+ printk("%s register irq (%d)\n", __func__, irq);
+ retval = request_irq(irq, dma_dwc_handler, IRQF_SHARED, "SATA DMA", hsdev);
+ //retval = request_irq(irq, dma_dwc_handler, IRQF_DISABLED, "SATA DMA", NULL);
+ if (retval) {
+ dev_err(hsdev->dev, "%s: could not get IRQ %d\n", __func__, irq);
+ return -ENODEV;
+ }
+ //dma_intr_registered = 1;
+ }
+ return retval;
+}
+
+/*
+ * Function: dma_request_interrupts
+ * arguments: hsdev
+ * returns status
+ * This function unmasks the error and end-of-transfer interrupts for the
+ * port's DMA channel
+ */
+static int dma_request_interrupts(struct sata_dwc_device *hsdev, int irq)
+{
+ int retval = 0;
+ int dma_chan = hsdev->dma_channel;
+
+ /* Unmask error interrupt */
+ out_le32(&sata_dma_regs->interrupt_mask.error.low,
+ in_le32(&sata_dma_regs->interrupt_mask.error.low) | DMA_ENABLE_CHAN(dma_chan));
+
+ /* Unmask end-of-transfer interrupt */
+ out_le32(&sata_dma_regs->interrupt_mask.tfr.low,
+ in_le32(&sata_dma_regs->interrupt_mask.tfr.low) | DMA_ENABLE_CHAN(dma_chan));
+
+ dwc_dev_vdbg(hsdev->dev, "Current value of interrupt_mask.error=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.error.low));
+ dwc_dev_vdbg(hsdev->dev, "Current value of interrupt_mask.tfr=0x%0x\n", in_le32(&sata_dma_regs->interrupt_mask.tfr.low));
+#if 0
+ out_le32(&sata_dma_regs->interrupt_mask.block.low,
+ DMA_ENABLE_CHAN(dma_chan));
+
+ out_le32(&sata_dma_regs->interrupt_mask.srctran.low,
+ DMA_ENABLE_CHAN(dma_chan));
+
+ out_le32(&sata_dma_regs->interrupt_mask.dsttran.low,
+ DMA_ENABLE_CHAN(dma_chan));
+#endif
+ return retval;
+}
+
+/*
+ * Function: map_sg_to_lli
+ * arguments: sg: scatter/gather list(sg)
+ * num_elems: no of elements in sg list
+ * dma_lli: LLI table
+ * dest: destination address
+ * read: whether the transfer is read or write
+ * returns array of AHB DMA Linked List Items
+ * This function creates a list of LLIs for DMA Xfr and returns the number
+ * of elements in the DMA linked list.
+ *
+ * Note that the Synopsys driver has a comment proposing that better performance
+ * is possible by only enabling interrupts on the last item in the linked list.
+ * However, it seems that could be a problem if an error happened on one of the
+ * first items. The transfer would halt, but no error interrupt would occur.
+ *
+ * Currently this function sets interrupts enabled for each linked list item:
+ * DMA_CTL_INT_EN.
+ */
+static int map_sg_to_lli(struct ata_queued_cmd *qc, struct lli *lli,
+ dma_addr_t dma_lli, void __iomem *dmadr_addr)
+{
+ struct scatterlist *sg = qc->sg;
+ struct device *dwc_dev = qc->ap->dev;
+ int num_elems = qc->n_elem;
+ int dir = qc->dma_dir;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(qc->ap);
+
+ int i, idx = 0;
+ int fis_len = 0;
+ dma_addr_t next_llp;
+ int bl;
+ unsigned int dma_ts = 0;
+
+ dwc_port_vdbg(qc->ap, "%s: sg=%p nelem=%d lli=%p dma_lli=0x%08x "
+ "dmadr=0x%08x\n", __func__, sg, num_elems, lli, (u32)dma_lli,
+ (u32)dmadr_addr);
+
+ bl = get_burst_length_encode(AHB_DMA_BRST_DFLT);
+
+ for (i = 0; i < num_elems; i++, sg++) {
+ u32 addr, offset;
+ u32 sg_len, len;
+
+ addr = (u32) sg_dma_address(sg);
+ sg_len = sg_dma_len(sg);
+
+ dwc_port_vdbg(qc->ap, "%s: elem=%d sg_addr=0x%x sg_len=%d\n",
+ __func__, i, addr, sg_len);
+
+ while (sg_len) {
+
+ if (unlikely(idx >= SATA_DWC_DMAC_LLI_NUM)) {
+ /* The LLI table is not large enough. */
+ dev_err(dwc_dev, "LLI table overrun (idx=%d)\n",
+ idx);
+ break;
+ }
+ len = (sg_len > SATA_DWC_DMAC_CTRL_TSIZE_MAX) ?
+ SATA_DWC_DMAC_CTRL_TSIZE_MAX : sg_len;
+
+ offset = addr & 0xffff;
+ if ((offset + sg_len) > 0x10000)
+ len = 0x10000 - offset;
+
+ /*
+ * Make sure a LLI block is not created that will span a
+ * 8K max FIS boundary. If the block spans such a FIS
+ * boundary, there is a chance that a DMA burst will
+ * cross that boundary -- this results in an error in
+ * the host controller.
+ */
+ if (unlikely(fis_len + len > 8192)) {
+ dwc_port_vdbg(qc->ap, "SPLITTING: fis_len=%d(0x%x) "
+ "len=%d(0x%x)\n", fis_len, fis_len,
+ len, len);
+ len = 8192 - fis_len;
+ fis_len = 0;
+ } else {
+ fis_len += len;
+ }
+ if (fis_len == 8192)
+ fis_len = 0;
+
+ /*
+ * Set DMA addresses and lower half of control register
+ * based on direction.
+ */
+ dwc_port_vdbg(qc->ap, "%s: sg_len = %d, len = %d\n", __func__, sg_len, len);
+
+#if defined(CONFIG_APM821xx)
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+ if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(1) | /* Source: Master 2 */
+ DMA_CTL_DMS(0) | /* Dest: Master 1 */
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(2) | /* Source: Master 3 */
+ DMA_CTL_DMS(0) | /* Dest: Master 1 */
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+ if (hsdevp->hsdev->dma_channel == 0) {/* DMA channel 0 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else if (hsdevp->hsdev->dma_channel == 1) {/* DMA channel 1 */
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(2) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+ }
+#else
+ if (dir == DMA_FROM_DEVICE) {
+ lli[idx].dar = cpu_to_le32(addr);
+ lli[idx].sar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_P2M_DMAC) |
+ DMA_CTL_SMS(0) |
+ DMA_CTL_DMS(1) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_SINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ } else { /* DMA_TO_DEVICE */
+ lli[idx].sar = cpu_to_le32(addr);
+ lli[idx].dar = cpu_to_le32((u32)dmadr_addr);
+
+ lli[idx].ctl.low = cpu_to_le32(
+ DMA_CTL_TTFC(DMA_CTL_TTFC_M2P_PER) |
+ DMA_CTL_SMS(1) |
+ DMA_CTL_DMS(0) |
+ DMA_CTL_SRC_MSIZE(bl) |
+ DMA_CTL_DST_MSIZE(bl) |
+ DMA_CTL_DINC_NOCHANGE |
+ DMA_CTL_SRC_TRWID(2) |
+ DMA_CTL_DST_TRWID(2) |
+ DMA_CTL_INT_EN |
+ DMA_CTL_LLP_SRCEN |
+ DMA_CTL_LLP_DSTEN);
+ }
+#endif
+ dwc_port_vdbg(qc->ap, "%s setting ctl.high len: 0x%08x val: "
+ "0x%08x\n", __func__, len,
+ DMA_CTL_BLK_TS(len / 4));
+
+ /* Program the LLI CTL high register */
+ dma_ts = DMA_CTL_BLK_TS(len / 4);
+ lli[idx].ctl.high = cpu_to_le32(dma_ts);
+
+ /*
+ * Program the next pointer. The next pointer must be
+ * the physical address, not the virtual address.
+ */
+ next_llp = (dma_lli + ((idx + 1) * sizeof(struct lli)));
+
+ /* The last 2 bits encode the list master select. */
+#if defined(CONFIG_APM821xx)
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER1);
+#else
+ next_llp = DMA_LLP_LMS(next_llp, DMA_LLP_AHBMASTER2);
+#endif
+
+ lli[idx].llp = cpu_to_le32(next_llp);
+
+ dwc_port_vdbg(qc->ap, "%s: index %d\n", __func__, idx);
+ dwc_port_vdbg(qc->ap, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx].ctl.high);
+ dwc_port_vdbg(qc->ap, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx].ctl.low);
+ dwc_port_vdbg(qc->ap, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx].dar);
+ dwc_port_vdbg(qc->ap, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx].sar);
+ dwc_port_vdbg(qc->ap, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx].llp);
+
+ idx++;
+ sg_len -= len;
+ addr += len;
+ }
+ }
+
+ /*
+ * The last next ptr has to be zero and the last control low register
+ * has to have LLP_SRC_EN and LLP_DST_EN (linked list pointer source
+ * and destination enable) set back to 0 (disabled). This is what tells
+ * the core that this is the last item in the linked list.
+ */
+ if (likely(idx)) {
+ lli[idx-1].llp = 0x00000000;
+ lli[idx-1].ctl.low &= DMA_CTL_LLP_DISABLE_LE32;
+
+ /* Flush cache to memory */
+ dma_cache_sync(NULL, lli, (sizeof(struct lli) * idx),
+ DMA_BIDIRECTIONAL);
+ }
+
+ dwc_port_vdbg(qc->ap, "%s: Final index %d\n", __func__, idx-1);
+ dwc_port_vdbg(qc->ap, "%s setting ctl.high with val: 0x%08x\n", __func__, lli[idx-1].ctl.high);
+ dwc_port_vdbg(qc->ap, "%s setting ctl.low with val: 0x%08x\n", __func__, lli[idx-1].ctl.low);
+ dwc_port_vdbg(qc->ap, "%s setting lli.dar with val: 0x%08x\n", __func__, lli[idx-1].dar);
+ dwc_port_vdbg(qc->ap, "%s setting lli.sar with val: 0x%08x\n", __func__, lli[idx-1].sar);
+ dwc_port_vdbg(qc->ap, "%s setting next_llp with val: 0x%08x\n", __func__, lli[idx-1].llp);
+
+ return idx;
+}
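
The 8 KiB FIS-boundary trimming inside the loop above can be isolated into a few lines. A worked example with a hypothetical helper that uses the same arithmetic as the driver:

    #include <assert.h>

    /* Trim a block so no LLI spans an 8192-byte FIS boundary. */
    static int trim_to_fis(int fis_len, int len, int *new_fis_len)
    {
        if (fis_len + len > 8192) {
            len = 8192 - fis_len;  /* stop exactly at the boundary */
            *new_fis_len = 0;
        } else {
            *new_fis_len = (fis_len + len == 8192) ? 0 : fis_len + len;
        }
        return len;
    }

    int main(void)
    {
        int fis = 0, len;

        len = trim_to_fis(6000, 4000, &fis);  /* 6000 + 4000 crosses 8192 */
        assert(len == 2192 && fis == 0);
        len = trim_to_fis(0, 4096, &fis);     /* fits within the current FIS */
        assert(len == 4096 && fis == 4096);
        return 0;
    }
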
+
+/*
+ * Function: dma_dwc_xfer_start
+ * arguments: Channel number
+ * Return : None
+ * Enables the DMA channel
+ */
+static void dma_dwc_xfer_start(int dma_ch)
+{
+ /* Enable the DMA channel */
+ out_le32(&(sata_dma_regs->dma_chan_en.low),
+ in_le32(&(sata_dma_regs->dma_chan_en.low)) |
+ DMA_ENABLE_CHAN(dma_ch));
+
+#if defined(CONFIG_SATA_DWC_VDEBUG)
+ printk("DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low)));
+ printk("%s: setting sata_dma_regs->dma_chan_en.low with val: 0x%08x\n",
+ __func__, in_le32(&(sata_dma_regs->dma_chan_en.low)));
+#endif
+
+
+#if defined(CONFIG_APOLLO3G)
+ signal_hdd_led(1 /*blink=yes*/, 2 /* _3G_LED_GREEN */);
+#endif
+
+}
+
+/*
+ * Check if the selected DMA channel is currently enabled.
+ */
+static int dma_dwc_channel_enabled(int ch)
+{
+ u32 dma_chan;
+
+ // Read the DMA channel register
+ dma_chan = in_le32(&(sata_dma_regs->dma_chan_en.low));
+ if (dma_chan & DMA_CHANNEL(ch))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Terminate the current DMA transaction
+ */
+static void dma_dwc_terminate_dma(struct ata_port *ap, int dma_ch)
+{
+ int enabled = dma_dwc_channel_enabled(dma_ch);
+
+ dev_info(ap->dev, "%s terminate DMA on channel=%d enabled=%d\n",
+ __func__, dma_ch, enabled);
+
+ if (enabled) {
+ // Disable the selected channel
+ out_le32(&(sata_dma_regs->dma_chan_en.low),
+ in_le32(&(sata_dma_regs->dma_chan_en.low)) | DMA_DISABLE_CHAN(dma_ch));
+
+ // Wait until the channel is disabled
+ do {
+ enabled = dma_dwc_channel_enabled(dma_ch);
+ msleep(10);
+ } while (enabled);
+ }
+}
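
The disable-wait above loops indefinitely if the channel never drains. A bounded variant one might consider (an assumption for illustration, not what this driver does) gives the channel roughly 100 ms and then reports a timeout:

    /* Hypothetical bounded wait, reusing the driver's own helper. */
    static int dma_dwc_wait_disabled(int dma_ch)
    {
        int tries;

        for (tries = 0; tries < 10; tries++) {
            if (!dma_dwc_channel_enabled(dma_ch))
                return 0;
            msleep(10);  /* ~100 ms total */
        }
        return -ETIMEDOUT;  /* needs <linux/errno.h> */
    }
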
+
+
+/*
+ * Setup data and DMA configuration ready for DMA transfer
+ */
+static int dma_dwc_xfer_setup(struct ata_queued_cmd *qc,
+ struct lli *lli, dma_addr_t dma_lli,
+ void __iomem *addr)
+{
+ int dma_ch;
+ int num_lli;
+
+ /* Acquire DMA channel */
+ dma_ch = dma_request_channel(qc->ap);
+ if (unlikely(dma_ch == -1)) {
+ dev_err(qc->ap->dev, "%s: dma channel unavailable\n", __func__);
+ return -EAGAIN;
+ }
+ dwc_port_vdbg(qc->ap, "%s: Got channel %d\n", __func__, dma_ch);
+
+ /* Convert SG list to linked list of items (LLIs) for AHB DMA */
+ num_lli = map_sg_to_lli(qc, lli, dma_lli, addr);
+
+ dwc_port_vdbg(qc->ap, "%s sg: 0x%p, count: %d lli: %p dma_lli: 0x%0xlx addr:"
+ " %p lli count: %d\n", __func__, qc->sg, qc->n_elem, lli,
+ (u32)dma_lli, addr, num_lli);
+
+ /* Clear channel interrupts */
+ clear_chan_interrupts(dma_ch);
+
+ /* Program the CFG register. */
+#if defined(CONFIG_APM821xx)
+ if (dma_ch == 0) {
+ /* Buffer mode enabled, FIFO_MODE=0 */
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000009);
+ /* Channel 0 bit[7:5] */
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020);
+ } else if (dma_ch == 1) {
+ /* Buffer mode enabled, FIFO_MODE=0 */
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high), 0x0000088d);
+ /* Channel 1 bit[7:5] */
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0x00000020);
+ }
+#else
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high),
+ DMA_CFG_PROTCTL | DMA_CFG_FCMOD_REQ);
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low), 0);
+#endif
+
+ /* Program the address of the linked list */
+#if defined(CONFIG_APM821xx)
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low),
+ DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER1));
+#else
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low),
+ DMA_LLP_LMS(dma_lli, DMA_LLP_AHBMASTER2));
+#endif
+
+ /* Program the CTL register with src enable / dst enable */
+ //out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low),
+ // DMA_CTL_LLP_SRCEN | DMA_CTL_LLP_DSTEN);
+ out_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low), 0x18000000);
+
+ dwc_port_vdbg(qc->ap, "%s DMA channel %d is ready\n", __func__, dma_ch);
+ dwc_port_vdbg(qc->ap, "%s setting cfg.high of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.high)));
+ dwc_port_vdbg(qc->ap, "%s setting cfg.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].cfg.low)));
+ dwc_port_vdbg(qc->ap, "%s setting llp.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].llp.low)));
+ dwc_port_vdbg(qc->ap, "%s setting ctl.low of channel %d with val: 0x%08x\n", __func__, dma_ch, in_le32(&(sata_dma_regs->chan_regs[dma_ch].ctl.low)));
+
+ return dma_ch;
+}
+
+/*
+ * Function: dma_dwc_exit
+ * arguments: None
+ * returns status
+ * This function exits the SATA DMA driver
+ */
+static void dma_dwc_exit(struct sata_dwc_device *hsdev)
+{
+ dwc_dev_vdbg(hsdev->dev, "%s:\n", __func__);
+ if (sata_dma_regs)
+ iounmap(sata_dma_regs);
+
+ if (hsdev->irq_dma)
+ free_irq(hsdev->irq_dma, hsdev);
+}
+
+/*
+ * Function: dma_dwc_init
+ * arguments: hsdev
+ * returns status
+ * This function initializes the SATA DMA driver
+ */
+static int dma_dwc_init(struct sata_dwc_device *hsdev)
+{
+ int err;
+ int irq = hsdev->irq_dma;
+
+ err = dma_request_interrupts(hsdev, irq);
+ if (err) {
+ dev_err(hsdev->dev, "%s: dma_request_interrupts returns %d\n",
+ __func__, err);
+ goto error_out;
+ }
+
+ /* Enable DMA */
+ out_le32(&(sata_dma_regs->dma_cfg.low), DMA_EN);
+
+ dev_notice(hsdev->dev, "DMA initialized\n");
+ dev_notice(hsdev->dev, "DMA CFG = 0x%08x\n", in_le32(&(sata_dma_regs->dma_cfg.low)));
+ dwc_dev_vdbg(hsdev->dev, "SATA DMA registers=0x%p\n", sata_dma_regs);
+
+ return 0;
+
+error_out:
+ dma_dwc_exit(hsdev);
+
+ return err;
+}
+
+
+static void sata_dwc_dev_config(struct ata_device *adev)
+{
+ /*
+ * Does not support NCQ over a port multiplier
+ * (no FIS-based switching).
+ */
+ if (adev->flags & ATA_DFLAG_NCQ) {
+ /*
+ * TODO: debug why enabling NCQ makes Linux crash on hot plug
+ * after the first hot unplug action.
+ * --> needs more investigation
+ */
+ adev->flags &= ~ATA_DFLAG_NCQ;
+ if (sata_pmp_attached(adev->link->ap)) {
+ adev->flags &= ~ATA_DFLAG_NCQ;
+ ata_dev_printk(adev, KERN_INFO,
+ "NCQ disabled for command-based switching\n");
+ }
+ }
+
+ /*
+ * The sata_pmp_error_handler function in libata-pmp disables
+ * ATA_DFLAG_AN the first time the SATA port is configured, so
+ * asynchronous notification is left unconfigured.
+ * Enable the AN feature manually here.
+ */
+ adev->flags |= ATA_DFLAG_AN;
+}
+
+
+static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+ if (unlikely(scr > SCR_NOTIFICATION)) {
+ dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+ __func__, scr);
+ return -EINVAL;
+ }
+
+ *val = in_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4));
+ dwc_dev_vdbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
+ __func__, link->ap->print_id, scr, *val);
+
+ return 0;
+}
+
+static int sata_dwc_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+ dwc_dev_vdbg(link->ap->dev, "%s: id=%d reg=%d val=0x%08x\n",
+ __func__, link->ap->print_id, scr, val);
+ if (unlikely(scr > SCR_NOTIFICATION)) {
+ dev_err(link->ap->dev, "%s: Incorrect SCR offset 0x%02x\n",
+ __func__, scr);
+ return -EINVAL;
+ }
+ out_le32((void *)link->ap->ioaddr.scr_addr + (scr * 4), val);
+
+ return 0;
+}
+
+static inline u32 sata_dwc_core_scr_read ( struct ata_port *ap, unsigned int scr)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ return in_le32((void __iomem *)hsdev->scr_base + (scr * 4));
+}
+
+
+static inline void sata_dwc_core_scr_write ( struct ata_port *ap, unsigned int scr, u32 val)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ out_le32((void __iomem *)hsdev->scr_base + (scr * 4), val);
+}
+
+static inline void clear_serror(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ out_le32( (void __iomem *)hsdev->scr_base + 4,
+ in_le32((void __iomem *)hsdev->scr_base + 4));
+}
+
+static inline void clear_intpr(struct sata_dwc_device *hsdev)
+{
+ out_le32(&hsdev->sata_dwc_regs->intpr,
+ in_le32(&hsdev->sata_dwc_regs->intpr));
+}
+
+static inline void clear_interrupt_bit(struct sata_dwc_device *hsdev, u32 bit)
+{
+ out_le32(&hsdev->sata_dwc_regs->intpr, bit);
+}
+
+
+static inline void enable_err_irq(struct sata_dwc_device *hsdev)
+{
+ out_le32(&hsdev->sata_dwc_regs->intmr,
+ in_le32(&hsdev->sata_dwc_regs->intmr) | SATA_DWC_INTMR_ERRM);
+ out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS);
+}
+
+static inline u32 qcmd_tag_to_mask(u8 tag)
+{
+ return 0x00000001 << (tag & 0x1f);
+}
+
+
+/*
+ * Timer to monitor SCR_NOTIFICATION registers on the
+ * SATA port
+ */
+static void sata_dwc_an_chk(unsigned long arg)
+{
+ struct ata_port *ap = (void *)arg;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ unsigned long flags;
+ int rc = 0x0;
+ u32 sntf = 0x0;
+
+ spin_lock_irqsave(ap->lock, flags);
+ rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
+
+ // If anything changed on SCR4 (SCR_NOTIFICATION), signal asynchronous notification
+ if ((rc == 0) && (sntf != 0)) {
+ dwc_port_dbg(ap, "Call asynchronous notification sntf=0x%08x\n", sntf);
+ sata_async_notification(ap);
+ hsdev->an_timer.expires = jiffies + msecs_to_jiffies(8000);
+ } else {
+ hsdev->an_timer.expires = jiffies + msecs_to_jiffies(3000);
+ }
+ add_timer(&hsdev->an_timer);
+ spin_unlock_irqrestore(ap->lock, flags);
+}
+
+
+/*
+ * sata_dwc_pmp_select - Set the PMP field in SControl to the specified port number.
+ *
+ * @port: The value (port number) to set the PMP field to.
+ *
+ * @return: The old value of the PMP field.
+ */
+static u32 sata_dwc_pmp_select(struct ata_port *ap, u32 port)
+{
+ u32 scontrol, old_port;
+ if (sata_pmp_supported(ap)) {
+ scontrol = sata_dwc_core_scr_read(ap, SCR_CONTROL);
+ old_port = SCONTROL_TO_PMP(scontrol);
+
+ // Select new PMP port
+ if ( port != old_port ) {
+ scontrol &= ~SCONTROL_PMP_MASK;
+ sata_dwc_core_scr_write(ap, SCR_CONTROL, scontrol | PMP_TO_SCONTROL(port));
+ dwc_port_dbg(ap, "%s: old port=%d new port=%d\n", __func__, old_port, port);
+ }
+ return old_port;
+ }
+ else
+ return port;
+}
+
+/*
+ * Get the current PMP port
+ */
+static inline u32 current_pmp(struct ata_port *ap)
+{
+ return SCONTROL_TO_PMP(sata_dwc_core_scr_read(ap, SCR_CONTROL));
+}
+
+
+/*
+ * Process when a PMP card is attached to the SATA port.
+ * Since our SATA port supports command-based switching only,
+ * NCQ will not be available.
+ * We disable the NCQ feature on the SATA port.
+ */
+static void sata_dwc_pmp_attach ( struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ dev_info(ap->dev, "Attach SATA port multiplier with %d ports\n", ap->nr_pmp_links);
+ // Disable NCQ
+ ap->flags &= ~ATA_FLAG_NCQ;
+
+ // Initialize timer for checking AN
+ init_timer(&hsdev->an_timer);
+ hsdev->an_timer.expires = jiffies + msecs_to_jiffies(20000);
+ hsdev->an_timer.function = sata_dwc_an_chk;
+ hsdev->an_timer.data = (unsigned long)(ap);
+ add_timer(&hsdev->an_timer);
+}
+
+/*
+ * Process when the PMP card is removed from the SATA port.
+ * Re-enable NCQ for use by a directly attached SATA drive in the future.
+ */
+static void sata_dwc_pmp_detach ( struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ dev_info(ap->dev, "Detach SATA port\n");
+ // Re-enable NCQ
+ // TODO: remove the below comment out when NCQ problem fixed
+ //ap->flags |= ATA_FLAG_NCQ;
+
+ sata_dwc_pmp_select(ap, 0);
+
+ // Delete timer since PMP card is detached
+ del_timer(&hsdev->an_timer);
+}
+
+
+
+// Check whether the link is ready
+int sata_dwc_check_ready ( struct ata_link *link ) {
+ u8 status;
+ struct ata_port *ap = link->ap;
+ status = ioread8(ap->ioaddr.status_addr);
+ return ata_check_ready(status);
+}
+
+
+/*
+ * Do soft reset on the current SATA link.
+ */
+static int sata_dwc_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ int rc;
+ struct ata_port *ap = link->ap;
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ struct ata_taskfile tf;
+
+ sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));
+
+ /* Issue bus reset */
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+ udelay(20); /* FIXME: flush */
+ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
+ udelay(20); /* FIXME: flush */
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+ ap->last_ctl = ap->ctl;
+
+ /* Always check readiness of the master device */
+ rc = ata_wait_after_reset(link, deadline, sata_dwc_check_ready);
+
+ // Classify the ata_port
+ *classes = ATA_DEV_NONE;
+ /* Verify if SStatus indicates device presence */
+ if (ata_link_online(link)) {
+ memset(&tf, 0, sizeof(tf));
+ ata_sff_tf_read(ap, &tf);
+ *classes = ata_dev_classify(&tf);
+ }
+
+ if ( *classes == ATA_DEV_PMP)
+ dwc_link_dbg(link, "-->found PMP device by sig\n");
+
+ clear_serror(link->ap);
+
+ return rc;
+}
+
+
+
+
+/*
+ * sata_dwc_hardreset - Do a hard reset of the SATA controller
+ */
+static int sata_dwc_hardreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ int rc;
+ const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ bool online;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(link->ap);
+
+ dwc_link_dbg(link, "%s\n", __func__);
+ sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));
+ dwc_port_vdbg(link->ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+
+ // Call standard hard reset
+ rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+
+ // Reconfigure the port after hard reset
+ if ( ata_link_online(link) )
+ sata_dwc_init_port(link->ap);
+
+ return online ? -EAGAIN : rc;
+}
+
+/*
+ * Do hard reset on each PMP link
+ */
+static int sata_dwc_pmp_hardreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ int rc = 0;
+ sata_dwc_pmp_select(link->ap, sata_srst_pmp(link));
+ rc = sata_std_hardreset(link, classes, deadline);
+ return rc;
+}
+
+/* See ahci.c */
+/*
+ * Process error when the SATAn_INTPR's ERR bit is set
+ * The processing is based on SCR_ERROR register content
+ */
+static void sata_dwc_error_intr(struct ata_port *ap,
+ struct sata_dwc_device *hsdev, uint intpr)
+{
+ struct ata_eh_info *ehi;
+ struct ata_link *link;
+ struct ata_queued_cmd *active_qc = NULL;
+ u32 serror;
+ bool freeze = false, abort = false;
+ int pmp, ret;
+ unsigned int err_mask = 0, action = 0;
+#if defined(CONFIG_SATA_DWC_VDEBUG)
+ int dma_chan = hsdev->dma_channel;
+#endif
+
+ link = &ap->link;
+ ehi = &link->eh_info;
+
+ /* Record irq stat */
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "irq_stat 0x%08x", intpr);
+
+ // Record SERROR
+ serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+ dwc_port_dbg(ap, "%s serror = 0x%08x\n", __func__, serror);
+
+ // Clear SERROR and interrupt bit
+ clear_serror(ap);
+ clear_intpr(hsdev);
+
+ /* Decode and log the detected SERROR bits (diagnostic only) */
+ if (serror) {
+ dwc_port_info(ap, "Detected errors:");
+ if (serror & SATA_DWC_SERR_ERRI)
+ printk(" ERRI");
+ if (serror & SATA_DWC_SERR_ERRM)
+ printk(" ERRM");
+ if (serror & SATA_DWC_SERR_ERRT)
+ printk(" ERRT");
+ if (serror & SATA_DWC_SERR_ERRC)
+ printk(" ERRC");
+ if (serror & SATA_DWC_SERR_ERRP)
+ printk(" ERRP");
+ if (serror & SATA_DWC_SERR_ERRE)
+ printk(" ERRE");
+ if (serror & SATA_DWC_SERR_DIAGN)
+ printk(" DIAGN");
+ if (serror & SATA_DWC_SERR_DIAGI)
+ printk(" DIAGI");
+ if (serror & SATA_DWC_SERR_DIAGW)
+ printk(" DIAGW");
+ if (serror & SATA_DWC_SERR_DIAGB)
+ printk(" DIAGB");
+ if (serror & SATA_DWC_SERR_DIAGT)
+ printk(" DIAGT");
+ if (serror & SATA_DWC_SERR_DIAGC)
+ printk(" DIAGC");
+ if (serror & SATA_DWC_SERR_DIAGH)
+ printk(" DIAGH");
+ if (serror & SATA_DWC_SERR_DIAGL)
+ printk(" DIAGL");
+ if (serror & SATA_DWC_SERR_DIAGS)
+ printk(" DIAGS");
+ if (serror & SATA_DWC_SERR_DIAGF)
+ printk(" DIAGF");
+ if (serror & SATA_DWC_SERR_DIAGX)
+ printk(" DIAGX");
+ if (serror & SATA_DWC_SERR_DIAGA)
+ printk(" DIAGA");
+ printk("\n");
+ }
+
+#if defined(CONFIG_SATA_DWC_VDEBUG)
+ printk("%s reading cfg.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.high)));
+ printk("%s reading cfg.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].cfg.low)));
+ printk("%s reading llp.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].llp.low)));
+ printk("%s reading ctl.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].ctl.low)));
+ printk("%s reading sar.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.low)));
+ printk("%s reading sar.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].sar.high)));
+ printk("%s reading dar.low of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.low)));
+ printk("%s reading dar.high of channel %d with val: 0x%08x\n", __func__, dma_chan, in_le32(&(sata_dma_regs->chan_regs[dma_chan].dar.high)));
+#endif
+
+ // Process hotplug events on the SATA port
+ if (serror & (SATA_DWC_SERR_DIAGX | SATA_DWC_SERR_DIAGW)) {
+ dwc_port_info(ap, "Hotplug signal detected\n");
+ ata_ehi_hotplugged(ehi);
+ ata_ehi_push_desc(ehi, serror & SATA_DWC_SERR_DIAGX ? "device exchanged" : "PHY RDY changed");
+ freeze = true;
+ }
+
+ // Process PHY internal error / Link sequence (illegal transition) error
+ if (serror & (SATA_DWC_SERR_DIAGI | SATA_DWC_SERR_DIAGL)) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ freeze = true;
+ }
+
+ // Process internal host adapter error
+ if (serror & SATA_DWC_SERR_ERRE) {
+ dev_err(ap->dev, "Internal host adapter error detected\n");
+ // TODO: review whether AC_ERR_HOST_BUS is the right response
+ ehi->err_mask |= AC_ERR_HOST_BUS;
+ ehi->action |= ATA_EH_RESET;
+ freeze = true;
+ }
+
+ // Process protocol error
+ if (serror & SATA_DWC_SERR_ERRP) {
+ dev_err(ap->dev, "Protocol error detected\n");
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ freeze = true;
+ }
+
+ // Process non-recovered persistent communication error
+ if (serror & SATA_DWC_SERR_ERRC) {
+ dev_err(ap->dev, "Non-recovered persistent communication error detected\n");
+ // TODO: review this error processing path
+ ehi->err_mask |= AC_ERR_ATA_BUS;
+ ehi->action |= ATA_EH_SOFTRESET;
+ }
+
+ // Non-recovered transient data integrity error
+ if (serror & SATA_DWC_SERR_ERRT) {
+ dev_err(ap->dev, "Non-recovered transient data integrity error detected\n");
+ ehi->err_mask |= AC_ERR_ATA_BUS;
+ ehi->action |= ATA_EH_SOFTRESET;
+ }
+
+ // The errors below have already been recovered by hardware,
+ // so they need no further error processing.
+ if (serror & SATA_DWC_SERR_ERRM)
+ dev_warn(ap->dev, "Recovered communication error detected\n");
+ if (serror & SATA_DWC_SERR_ERRI)
+ dev_warn(ap->dev, "Recovered data integrity error detected\n");
+
+ // If any error occurred, process the qc
+ if (serror & (SATA_DWC_SERR_ERRT | SATA_DWC_SERR_ERRC)) {
+ abort = true;
+ /* find out the offending link and qc */
+ if (sata_pmp_attached(ap)) {
+ pmp = current_pmp(ap);
+ // Check whether we are working on a PMP link
+ if (pmp < ap->nr_pmp_links) {
+ link = &ap->pmp_link[pmp];
+ ehi = &link->eh_info;
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ err_mask |= AC_ERR_DEV;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "irq_stat 0x%08x", intpr);
+ } else {
+ err_mask |= AC_ERR_HSM;
+ action |= ATA_EH_RESET;
+ freeze = true;
+ }
+ } else {
+ // Working on the SATA port itself
+ freeze = true;
+ active_qc = ata_qc_from_tag(ap, link->active_tag);
+ }
+
+ if (active_qc)
+ active_qc->err_mask |= err_mask;
+ else
+ ehi->err_mask = err_mask;
+ }
+
+ if (freeze || abort) {
+ // Terminate the DMA channel if it is currently in use
+ if (dma_request_channel(ap) != -1) {
+ dwc_port_dbg(ap, "Terminate DMA channel %d for handling error\n", hsdev->dma_channel);
+ dma_dwc_terminate_dma(ap, hsdev->dma_channel);
+ }
+ }
+
+ if (freeze) {
+ ret = ata_port_freeze(ap);
+ ata_port_printk(ap, KERN_INFO, "Freeze port with %d QCs aborted\n", ret);
+ } else if (abort) {
+ if (active_qc) {
+ ret = ata_link_abort(active_qc->dev->link);
+ ata_link_printk(link, KERN_INFO, "Abort %d QCs\n", ret);
+ } else {
+ ret = ata_port_abort(ap);
+ ata_port_printk(ap, KERN_INFO, "Abort %d QCs on the SATA port\n", ret);
+ }
+ }
+}
+
+
+/*
+ * Function : sata_dwc_isr
+ * arguments : irq, void *dev_instance
+ * Return value : irqreturn_t - status of IRQ
+ * This interrupt handler is registered with libata via ata_host_activate()
+ * and services both error and command-completion interrupts.
+ */
+static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
+{
+ struct ata_host *host = (struct ata_host *)dev_instance;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_HOST(host);
+ struct ata_port *ap;
+ struct ata_queued_cmd *qc;
+ unsigned long flags;
+ u8 status, tag;
+ int handled, num_processed, port = 0;
+ u32 intpr, sactive, sactive2, tag_mask;
+ struct sata_dwc_device_port *hsdevp;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Read the interrupt register */
+ intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+
+ ap = host->ports[port];
+ hsdevp = HSDEVP_FROM_AP(ap);
+
+ dwc_port_dbg(ap, "%s\n", __func__);
+ if (intpr != 0x80000080)
+ dwc_port_dbg(ap, "%s intpr=0x%08x active_tag=%d\n", __func__, intpr, ap->link.active_tag);
+
+ /* Check for error interrupt */
+ if (intpr & SATA_DWC_INTPR_ERR) {
+ sata_dwc_error_intr(ap, hsdev, intpr);
+ handled = 1;
+#if defined(CONFIG_APOLLO3G)
+ signal_hdd_led(0 /*off blink*/, 1 /*red color*/);
+#endif
+ goto done_irqrestore;
+ }
+
+ /* Check for DMA SETUP FIS (FP DMA) interrupt */
+ if (intpr & SATA_DWC_INTPR_NEWFP) {
+ dwc_port_dbg(ap, "%s: NEWFP INTERRUPT in HSDEV with DMA channel %d\n", __func__, hsdev->dma_channel);
+ clear_interrupt_bit(hsdev, SATA_DWC_INTPR_NEWFP);
+
+ tag = (u8)(in_le32(&hsdev->sata_dwc_regs->fptagr));
+ dwc_dev_dbg(ap->dev, "%s: NEWFP tag=%d\n", __func__, tag);
+ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_PENDING)
+ dev_warn(ap->dev, "CMD tag=%d not pending?\n", tag);
+
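+ /* Record this tag as issued; it is cleared again once SActive shows completion */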
+ hsdevp->sata_dwc_sactive_issued |= qcmd_tag_to_mask(tag);
+
+ qc = ata_qc_from_tag(ap, tag);
+ /*
+ * Start FP DMA for NCQ command. At this point the tag is the
+ * active tag. It is the tag that matches the command about to
+ * be completed.
+ */
+ qc->ap->link.active_tag = tag;
+ sata_dwc_bmdma_start_by_tag(qc, tag);
+ qc->ap->hsm_task_state = HSM_ST_LAST;
+
+ handled = 1;
+ goto done_irqrestore;
+ }
+
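+ /*
+ * sata_dwc_sactive_issued tracks tags we have issued; SActive holds
+ * tags the device still considers outstanding. The OR/XOR below
+ * yields exactly the issued tags that have now completed.
+ */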
+ sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE);
+ tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive;
+
+ /* If no sactive issued and tag_mask is zero then this is not NCQ */
+ if (hsdevp->sata_dwc_sactive_issued == 0 && tag_mask == 0) {
+ if (ap->link.active_tag == ATA_TAG_POISON)
+ tag = 0;
+ else
+ tag = ap->link.active_tag;
+ qc = ata_qc_from_tag(ap, tag);
+
+ /* DEV interrupt w/ no active qc? */
+ if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
+ dev_err(ap->dev, "%s intr with no active qc qc=%p\n",
+ __func__, qc);
+ ap->ops->sff_check_status(ap);
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ status = ap->ops->sff_check_status(ap);
+
+ qc->ap->link.active_tag = tag;
+ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+ if (status & ATA_ERR) {
+ dwc_dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
+ sata_dwc_qc_complete(ap, qc, 1);
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ dwc_dev_dbg(ap->dev, "%s non-NCQ cmd interrupt, protocol: %s\n",
+ __func__, prot_2_txt(qc->tf.protocol));
+drv_still_busy:
+ if (ata_is_dma(qc->tf.protocol)) {
+ int dma_flag = hsdevp->dma_pending[tag];
+ /*
+ * Each DMA transaction produces 2 interrupts. The DMAC
+ * transfer complete interrupt and the SATA controller
+ * operation done interrupt. The command should be
+ * completed only after both interrupts are seen.
+ */
+ hsdevp->dma_interrupt_count++;
+ if (unlikely(dma_flag == SATA_DWC_DMA_PENDING_NONE)) {
+ dev_err(ap->dev, "%s: DMA not pending "
+ "intpr=0x%08x status=0x%08x pend=%d\n",
+ __func__, intpr, status, dma_flag);
+ }
+
+ if ((hsdevp->dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+ } else if (ata_is_pio(qc->tf.protocol)) {
+ ata_sff_hsm_move(ap, qc, status, 0);
+ handled = 1;
+ goto done_irqrestore;
+ } else {
+ if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ goto drv_still_busy;
+ }
+
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ /*
+ * This is a NCQ command. At this point we need to figure out for which
+ * tags we have gotten a completion interrupt. One interrupt may serve
+ * as completion for more than one operation when commands are queued
+ * (NCQ). We need to process each completed command.
+ */
+
+process_cmd: /* process completed commands */
+ sactive = sata_dwc_core_scr_read(ap, SCR_ACTIVE);
+ tag_mask = (hsdevp->sata_dwc_sactive_issued | sactive) ^ sactive;
+
+ if (sactive != 0 || hsdevp->sata_dwc_sactive_issued > 1 || tag_mask > 1) {
+ dwc_dev_dbg(ap->dev, "%s NCQ: sactive=0x%08x sactive_issued=0x%08x"
+ " tag_mask=0x%08x\n", __func__, sactive,
+ hsdevp->sata_dwc_sactive_issued, tag_mask);
+ }
+
+ if (unlikely((tag_mask | hsdevp->sata_dwc_sactive_issued) != hsdevp->sata_dwc_sactive_issued)) {
+ dev_warn(ap->dev, "Bad tag mask? sactive=0x%08x "
+ "sata_dwc_sactive_issued=0x%08x tag_mask=0x%08x\n",
+ sactive, hsdevp->sata_dwc_sactive_issued, tag_mask);
+ }
+
+ /* read just to clear ... not bad if currently still busy */
+ status = ap->ops->sff_check_status(ap);
+ dwc_dev_dbg(ap->dev, "%s ATA status register=0x%x, tag_mask=0x%x\n", __func__, status, tag_mask);
+
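+ /* Walk tag_mask bit by bit; each set bit is a completed tag to process */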
+ tag = 0;
+ num_processed = 0;
+ while (tag_mask) {
+ num_processed++;
+ while (!(tag_mask & 0x00000001)) {
+ tag++;
+ tag_mask <<= 1;
+ }
+ tag_mask &= (~0x00000001);
+ qc = ata_qc_from_tag(ap, tag);
+
+ /* To be picked up by completion functions */
+ qc->ap->link.active_tag = tag;
+ hsdevp->cmd_issued[tag] = SATA_DWC_CMD_ISSUED_NOT;
+
+ /* Let libata/scsi layers handle error */
+ if (unlikely(status & ATA_ERR)) {
+ dwc_dev_vdbg(ap->dev, "%s ATA_ERR (0x%x)\n",
+ __func__, status);
+
+ sata_dwc_qc_complete(ap, qc, 1);
+ handled = 1;
+ goto done_irqrestore;
+ }
+
+ /* Process completed command */
+ dwc_dev_dbg(ap->dev, "%s NCQ command, protocol: %s\n", __func__,
+ prot_2_txt(qc->tf.protocol));
+ if (ata_is_dma(qc->tf.protocol)) {
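+ /*
+ * As in the non-NCQ path: each DMA transaction raises two
+ * interrupts (DMAC done and SATA done); complete the command
+ * only on the second one.
+ */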
+ hsdevp->dma_interrupt_count++;
+ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)
+ dev_warn(ap->dev,
+ "%s: DMA not pending?\n", __func__);
+ if ((hsdevp->dma_interrupt_count % 2) == 0)
+ sata_dwc_dma_xfer_complete(ap, 1);
+ } else {
+ if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ goto still_busy;
+ }
+ continue;
+
+still_busy:
+ ap->stats.idle_irq++;
+ dev_warn(ap->dev, "STILL BUSY IRQ ata%d: irq trap\n",
+ ap->print_id);
+ } /* while tag_mask */
+
+ /*
+ * Check to see if any commands completed while we were processing our
+ * initial set of completed commands (reading of status clears
+ * interrupts, so we might miss a completed command interrupt if one
+ * came in while we were processing:
+ * we read status as part of processing a completed command).
+ */
+ sactive2 = sata_dwc_core_scr_read(ap, SCR_ACTIVE);
+ if (sactive2 != sactive) {
+ dwc_dev_dbg(ap->dev, "More finished - sactive=0x%x sactive2=0x%x\n",
+ sactive, sactive2);
+ goto process_cmd;
+ }
+ handled = 1;
+
+done_irqrestore:
+ spin_unlock_irqrestore(&host->lock, flags);
+#if defined(CONFIG_APOLLO3G)
+ signal_hdd_led(0 /*off blink*/, -1 /* no color */);
+#endif
+ return IRQ_RETVAL(handled);
+}
+
+
+/*
+ * Clear DMA Control Register after completing transferring data
+ * using AHB DMA.
+ */
+static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_HSDEVP(hsdevp);
+
+ if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX) {
+ // Clear receive channel enable bit
+ out_le32(&(hsdev->sata_dwc_regs->dmacr),
+ SATA_DWC_DMACR_RX_CLEAR(
+ in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+ } else if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX) {
+ // Clear transmit channel enable bit
+ out_le32(&(hsdev->sata_dwc_regs->dmacr),
+ SATA_DWC_DMACR_TX_CLEAR(
+ in_le32(&(hsdev->sata_dwc_regs->dmacr))));
+ } else {
+ /*
+ * This should not happen, it indicates the driver is out of
+ * sync. If it does happen, clear dmacr anyway.
+ */
+ dev_err(hsdev->dev, "%s DMA protocol RX and TX DMA not pending "
+ "tag=0x%02x pending=%d dmacr: 0x%08x\n",
+ __func__, tag, hsdevp->dma_pending[tag],
+ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+
+ // Clear all transmit and receive bits, but leave TXMOD set to 1
+ out_le32(&(hsdev->sata_dwc_regs->dmacr),
+ SATA_DWC_DMACR_TXRXCH_CLEAR);
+ }
+}
+
+/*
+ * Complete the DMA transfer for the tag currently active on the port.
+ */
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
+{
+ struct ata_queued_cmd *qc;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ u8 tag = 0;
+
+ tag = ap->link.active_tag;
+ qc = ata_qc_from_tag(ap, tag);
+
+#ifdef DEBUG_NCQ
+ if (tag > 0) {
+ dev_info(ap->dev, "%s tag=%u cmd=0x%02x dma dir=%s proto=%s "
+ "dmacr=0x%08x\n", __func__, qc->tag, qc->tf.command,
+ dir_2_txt(qc->dma_dir), prot_2_txt(qc->tf.protocol),
+ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ }
+#endif
+
+ if (ata_is_dma(qc->tf.protocol)) {
+ // DMA out of sync error
+ if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE)) {
+ dev_err(ap->dev, "%s DMA protocol RX and TX DMA not "
+ "pending dmacr: 0x%08x\n", __func__,
+ in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ }
+
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+ sata_dwc_qc_complete(ap, qc, check_status);
+ ap->link.active_tag = ATA_TAG_POISON;
+ } else {
+ sata_dwc_qc_complete(ap, qc, check_status);
+ }
+}
+
+/*
+ * Complete a queued command: optionally poll status and SError, then hand
+ * the qc back to libata.
+ */
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
+ u32 check_status)
+{
+ u8 status = 0;
+ int i = 0;
+ u32 mask = 0x0;
+ u8 tag = qc->tag;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ u32 serror;
+ int dma_ch;
+
+ dwc_dev_vdbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
+
+ if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX))
+ dev_err(ap->dev, "TX DMA PENDING\n");
+ else if (unlikely(hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_RX))
+ dev_err(ap->dev, "RX DMA PENDING\n");
+
+ if (check_status) {
+ i = 0;
+ do {
+ /* check main status, clearing INTRQ */
+ status = ap->ops->sff_check_status(ap);
+ if (status & ATA_BUSY) {
+ dwc_dev_vdbg(ap->dev, "STATUS BUSY (0x%02x) [%d]\n",
+ status, i);
+ }
+ if (++i > 10)
+ break;
+ } while (status & ATA_BUSY);
+
+ status = ap->ops->sff_check_status(ap);
+ if (unlikely(status & ATA_BUSY))
+ dev_err(ap->dev, "QC complete cmd=0x%02x STATUS BUSY "
+ "(0x%02x) [%d]\n", qc->tf.command, status, i);
+
+ // Check SError; on error, request a reset and terminate any active DMA
+ serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+ if (unlikely(serror & SATA_DWC_SERR_ERR_BITS)) {
+ dev_err(ap->dev, "****** SERROR=0x%08x ******\n", serror);
+ ap->link.eh_context.i.action |= ATA_EH_RESET;
+ if (ata_is_dma(qc->tf.protocol))
+ dma_ch = hsdevp->dma_chan[tag];
+ else
+ dma_ch = hsdevp->dma_chan[0];
+ dma_dwc_terminate_dma(ap, dma_ch);
+ }
+ }
+ dwc_dev_vdbg(ap->dev, "QC complete cmd=0x%02x status=0x%02x ata%u: "
+ "protocol=%d\n", qc->tf.command, status, ap->print_id,
+ qc->tf.protocol);
+
+ /* clear active bit */
+ mask = (~(qcmd_tag_to_mask(tag)));
+ hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask;
+ hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask;
+ dwc_port_vdbg(ap, "%s - sata_dwc_sactive_queued=0x%08x, sata_dwc_sactive_issued=0x%08x\n",__func__, hsdevp->sata_dwc_sactive_queued, hsdevp->sata_dwc_sactive_issued);
+ dwc_port_vdbg(ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+
+ /* Complete taskfile transaction (does not read SCR registers) */
+ ata_qc_complete(qc);
+
+ return 0;
+}
+
+/*
+ * Clear interrupt and error flags in DMA status register.
+ */
+void sata_dwc_irq_clear(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ dwc_port_dbg(ap, "%s\n", __func__);
+
+ // Clear DMA interrupts
+ clear_chan_interrupts(hsdev->dma_channel);
+}
+
+/*
+ * Turn on IRQ
+ */
+void sata_dwc_irq_on(struct ata_port *ap)
+{
+ struct ata_ioports *ioaddr = &ap->ioaddr;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ dwc_port_dbg(ap, "%s\n", __func__);
+ ap->ctl &= ~ATA_NIEN;
+ ap->last_ctl = ap->ctl;
+
+ if (ioaddr->ctl_addr)
+ iowrite8(ap->ctl, ioaddr->ctl_addr);
+ ata_wait_idle(ap);
+
+ ap->ops->sff_irq_clear(ap);
+ enable_err_irq(hsdev);
+}
+
+
+/*
+ * Enable interrupts in the INTMR and unmask the error bits in the ERRMR.
+ */
+static void sata_dwc_enable_interrupts(struct sata_dwc_device *hsdev)
+{
+ // Enable interrupts
+ out_le32(&hsdev->sata_dwc_regs->intmr,
+ SATA_DWC_INTMR_ERRM |
+ SATA_DWC_INTMR_NEWFPM |
+ SATA_DWC_INTMR_PMABRTM |
+ SATA_DWC_INTMR_DMATM);
+
+ /*
+ * Unmask the error bits that should trigger an error interrupt by
+ * setting the error mask register.
+ */
+ out_le32(&hsdev->sata_dwc_regs->errmr, SATA_DWC_SERR_ERR_BITS);
+
+ dwc_dev_dbg(hsdev->dev, "%s: INTMR = 0x%08x, ERRMR = 0x%08x\n", __func__,
+ in_le32(&hsdev->sata_dwc_regs->intmr),
+ in_le32(&hsdev->sata_dwc_regs->errmr));
+}
+
+/*
+ * Configure DMA and interrupts on SATA port. This should be called after
+ * hardreset is executed on the SATA port.
+ */
+static void sata_dwc_init_port(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ // Configure DMA
+ if (ap->port_no == 0) {
+ dwc_dev_dbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+ __func__);
+
+ // Clear all transmit/receive bits
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+ dwc_dev_dbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__);
+ out_le32(&hsdev->sata_dwc_regs->dbtsr,
+ (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+ SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+ }
+
+ // Enable interrupts
+ sata_dwc_enable_interrupts(hsdev);
+}
+
+
+/*
+ * Setup SATA ioport with corresponding register addresses
+ */
+static void sata_dwc_setup_port(struct ata_ioports *port, unsigned long base)
+{
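+ /* Shadow taskfile registers are 32-bit aligned here, hence the 4-byte stride */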
+ port->cmd_addr = (void __iomem *)base + 0x00;
+ port->data_addr = (void __iomem *)base + 0x00;
+
+ port->error_addr = (void __iomem *)base + 0x04;
+ port->feature_addr = (void __iomem *)base + 0x04;
+
+ port->nsect_addr = (void __iomem *)base + 0x08;
+
+ port->lbal_addr = (void __iomem *)base + 0x0c;
+ port->lbam_addr = (void __iomem *)base + 0x10;
+ port->lbah_addr = (void __iomem *)base + 0x14;
+
+ port->device_addr = (void __iomem *)base + 0x18;
+ port->command_addr = (void __iomem *)base + 0x1c;
+ port->status_addr = (void __iomem *)base + 0x1c;
+
+ port->altstatus_addr = (void __iomem *)base + 0x20;
+ port->ctl_addr = (void __iomem *)base + 0x20;
+}
+
+
+/*
+ * Function : sata_dwc_port_start
+ * arguments : struct ata_ioports *port
+ * Return value : returns 0 if success, error code otherwise
+ * This function allocates the scatter gather LLI table for AHB DMA
+ */
+static int sata_dwc_port_start(struct ata_port *ap)
+{
+ int err = 0;
+ struct sata_dwc_device *hsdev;
+ struct sata_dwc_device_port *hsdevp = NULL;
+ struct device *pdev;
+ u32 sstatus;
+ int i;
+
+ hsdev = HSDEV_FROM_AP(ap);
+
+ dwc_dev_dbg(ap->dev, "%s: port_no=%d\n", __func__, ap->port_no);
+
+ hsdev->host = ap->host;
+ pdev = ap->host->dev;
+ if (!pdev) {
+ dev_err(ap->dev, "%s: no ap->host->dev\n", __func__);
+ err = -ENODEV;
+ goto cleanup_exit;
+ }
+
+ /* Allocate Port Struct */
+ hsdevp = kzalloc(sizeof(*hsdevp), GFP_KERNEL);
+ if (!hsdevp) {
+ dev_err(ap->dev, "%s: kzalloc failed for hsdevp\n", __func__);
+ err = -ENOMEM;
+ goto cleanup_exit;
+ }
+ hsdevp->hsdev = hsdev;
+
+ for (i = 0; i < SATA_DWC_QCMD_MAX; i++)
+ hsdevp->cmd_issued[i] = SATA_DWC_CMD_ISSUED_NOT;
+
+ ap->bmdma_prd = 0; /* set these so libata doesn't use them */
+ ap->bmdma_prd_dma = 0;
+
+ /*
+ * DMA - Assign scatter gather LLI table. We can't use the libata
+ * version since its PRD is IDE PCI specific.
+ */
+ for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+ hsdevp->llit[i] = dma_alloc_coherent(pdev,
+ SATA_DWC_DMAC_LLI_TBL_SZ,
+ &(hsdevp->llit_dma[i]),
+ GFP_ATOMIC);
+ if (!hsdevp->llit[i]) {
+ dev_err(ap->dev, "%s: dma_alloc_coherent failed size "
+ "0x%x\n", __func__, SATA_DWC_DMAC_LLI_TBL_SZ);
+ err = -ENOMEM;
+ goto cleanup_exit;
+ }
+ }
+
+ if (ap->port_no == 0) {
+ dwc_dev_vdbg(ap->dev, "%s: clearing TXCHEN, RXCHEN in DMAC\n",
+ __func__);
+
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXRXCH_CLEAR);
+
+ dwc_dev_vdbg(ap->dev, "%s: setting burst size in DBTSR\n", __func__);
+ out_le32(&hsdev->sata_dwc_regs->dbtsr,
+ (SATA_DWC_DBTSR_MWR(AHB_DMA_BRST_DFLT) |
+ SATA_DWC_DBTSR_MRD(AHB_DMA_BRST_DFLT)));
+ ata_port_printk(ap, KERN_INFO, "%s: setting burst size in DBTSR: 0x%08x\n",
+ __func__, in_le32(&hsdev->sata_dwc_regs->dbtsr));
+ }
+
+ /* Clear any error bits before libata starts issuing commands */
+ clear_serror(ap);
+
+ ap->private_data = hsdevp;
+
+ /* Are we in Gen I or II */
+ sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS);
+ switch (SATA_DWC_SCR0_SPD_GET(sstatus)) {
+ case 0x0:
+ dev_info(ap->dev, "**** No negotiated speed (nothing attached?)\n");
+ break;
+ case 0x1:
+ dev_info(ap->dev, "**** GEN I speed rate negotiated\n");
+ break;
+ case 0x2:
+ dev_info(ap->dev, "**** GEN II speed rate negotiated\n");
+ break;
+ }
+
+cleanup_exit:
+ if (err) {
+ kfree(hsdevp);
+ sata_dwc_port_stop(ap);
+ dwc_dev_vdbg(ap->dev, "%s: fail\n", __func__);
+ } else {
+ dwc_dev_vdbg(ap->dev, "%s: done\n", __func__);
+ }
+
+ return err;
+}
+
+
+static void sata_dwc_port_stop(struct ata_port *ap)
+{
+ int i;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+ dwc_port_dbg(ap, "%s: stop port\n", __func__);
+
+ if (hsdevp && hsdev) {
+ /* deallocate LLI table */
+ for (i = 0; i < SATA_DWC_QCMD_MAX; i++) {
+ dma_free_coherent(ap->host->dev,
+ SATA_DWC_DMAC_LLI_TBL_SZ,
+ hsdevp->llit[i], hsdevp->llit_dma[i]);
+ }
+
+ kfree(hsdevp);
+ }
+ ap->private_data = NULL;
+}
+
+/*
+ * The SATA DWC controller is master-only, so there is no device to select;
+ * keep only the short settle delay.
+ */
+void sata_dwc_dev_select(struct ata_port *ap, unsigned int device)
+{
+ ndelay(100);
+}
+
+/*
+ * Function : sata_dwc_exec_command_by_tag
+ * arguments : ata_port *ap, ata_taskfile *tf, u8 tag, u32 cmd_issued
+ * Return value : None
+ * This function keeps track of individual command tag ids and calls
+ * ata_sff_exec_command in libata
+ */
+static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
+ struct ata_taskfile *tf,
+ u8 tag, u32 cmd_issued)
+{
+ unsigned long flags;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+ dwc_dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d, ap->link->tag=0x%08x\n", __func__, tf->command,
+ ata_cmd_2_txt(tf), tag, ap->link.active_tag);
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+ hsdevp->cmd_issued[tag] = cmd_issued;
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+
+ /*
+ * Clear SError before executing a new command.
+ *
+ * TODO if we read a PM's registers now, we will throw away the task
+ * file values loaded into the shadow registers for this command.
+ *
+ * sata_dwc_scr_write and read can not be used here. Clearing the PM
+ * managed SError register for the disk needs to be done before the
+ * task file is loaded.
+ */
+ clear_serror(ap);
+ ata_sff_exec_command(ap, tf);
+}
+
+static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+ sata_dwc_exec_command_by_tag(qc->ap, &qc->tf, tag,
+ SATA_DWC_CMD_ISSUED_PENDING);
+}
+
+static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
+{
+ u8 tag = qc->tag;
+
+ dwc_port_dbg(qc->ap, "%s\n", __func__);
+ if (ata_is_ncq(qc->tf.protocol)) {
+ dwc_dev_vdbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+ __func__, qc->ap->link.sactive, tag);
+ } else {
+ tag = 0;
+ }
+
+ sata_dwc_bmdma_setup_by_tag(qc, tag);
+}
+
+static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+ int start_dma;
+ u32 reg, dma_chan;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_QC(qc);
+ struct ata_port *ap = qc->ap;
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ int dir = qc->dma_dir;
+ dma_chan = hsdevp->dma_chan[tag];
+
+ /* Used for ata_bmdma_start(qc) -- we are not BMDMA compatible */
+
+ if (hsdevp->cmd_issued[tag] != SATA_DWC_CMD_ISSUED_NOT) {
+ start_dma = 1;
+ if (dir == DMA_TO_DEVICE)
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_TX;
+ else
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_RX;
+ } else {
+ dev_err(ap->dev, "%s: Command not pending cmd_issued=%d "
+ "(tag=%d) - DMA NOT started\n", __func__,
+ hsdevp->cmd_issued[tag], tag);
+ start_dma = 0;
+ }
+
+ dwc_dev_dbg(ap->dev, "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s "
+ "start_dma? %x\n", __func__, qc, tag, qc->tf.command,
+ dir_2_txt(qc->dma_dir), start_dma);
+ sata_dwc_tf_dump(hsdev->dev, &(qc->tf));
+
+ // Start DMA transfer
+ if (start_dma) {
+ reg = sata_dwc_core_scr_read(ap, SCR_ERROR);
+ if (unlikely(reg & SATA_DWC_SERR_ERR_BITS)) {
+ dev_err(ap->dev, "%s: ****** SError=0x%08x ******\n",
+ __func__, reg);
+ }
+
+ // Set DMA control registers
+ if (dir == DMA_TO_DEVICE)
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_TXCHEN);
+ else
+ out_le32(&hsdev->sata_dwc_regs->dmacr,
+ SATA_DWC_DMACR_RXCHEN);
+
+ dwc_dev_vdbg(ap->dev, "%s: setting DMACR: 0x%08x\n", __func__, in_le32(&hsdev->sata_dwc_regs->dmacr));
+ /* Enable AHB DMA transfer on the specified channel */
+ dma_dwc_xfer_start(dma_chan);
+ }
+}
+
+
+static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
+{
+ u8 tag = qc->tag;
+
+ if (ata_is_ncq(qc->tf.protocol)) {
+ dwc_dev_vdbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
+ __func__, qc->ap->link.sactive, tag);
+ } else {
+ tag = 0;
+ }
+
+ dwc_port_dbg(qc->ap, "%s, tag=0x%08x\n", __func__, tag);
+ sata_dwc_bmdma_start_by_tag(qc, tag);
+}
+
+/*
+ * Function : sata_dwc_qc_prep_by_tag
+ * arguments : ata_queued_cmd *qc, u8 tag
+ * Return value : None
+ * qc_prep for a particular queued command based on tag
+ */
+static void sata_dwc_qc_prep_by_tag(struct ata_queued_cmd *qc, u8 tag)
+{
+ struct ata_port *ap = qc->ap;
+ int dma_chan;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ int dir;
+
+ // DMA direction
+ dir = qc->dma_dir;
+
+ if ((dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+ return;
+
+ dwc_dev_vdbg(ap->dev, "%s: port=%d dma dir=%s n_elem=%d\n",
+ __func__, ap->port_no, dir_2_txt(dir), qc->n_elem);
+
+ // Setup DMA for transfer
+ dma_chan = dma_dwc_xfer_setup(qc, hsdevp->llit[tag],
+ hsdevp->llit_dma[tag],
+ (void __iomem *)&hsdev->sata_dwc_regs->dmadr);
+
+ if (unlikely(dma_chan < 0)) {
+ dev_err(ap->dev, "%s: dma_dwc_xfer_setup returns err %d\n",
+ __func__, dma_chan);
+ return;
+ }
+
+ hsdevp->dma_chan[tag] = dma_chan;
+}
+
+
+
+/**
+ * sata_dwc_exec_command - issue ATA command to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA command, with proper synchronization with interrupt
+ * handler / other threads.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void sata_dwc_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
+{
+ iowrite8(tf->command, ap->ioaddr.command_addr);
+ /* If we have an mmio device with no ctl and no altstatus
+ * method this will fail. No such devices are known to exist.
+ */
+ if (ap->ioaddr.altstatus_addr)
+ ioread8(ap->ioaddr.altstatus_addr);
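+ /* The ATA spec calls for a 400ns delay after writing the command register */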
+ ndelay(400);
+}
+
+/**
+ * sata_dwc_tf_to_host - issue ATA taskfile to host controller
+ * @ap: port to which command is being issued
+ * @tf: ATA taskfile register set
+ *
+ * Issues ATA taskfile register set to ATA host controller,
+ * with proper synchronization with interrupt handler and
+ * other threads.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+static inline void sata_dwc_tf_to_host(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ dwc_port_dbg(ap, "%s\n", __func__);
+ ap->ops->sff_tf_load(ap, tf);
+ sata_dwc_exec_command(ap, tf);
+}
+
+
+/*
+ * Process command queue issue
+ */
+static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ int ret = 0;
+ struct ata_eh_info *ehi;
+ u32 sstatus;
+
+ ehi = &ap->link.eh_info;
+ /*
+ * Handle the case where a PMP card is unplugged from the SATA port:
+ * a QC may still be issued although no device is present. Ignore the
+ * current QC and pass the error to the error handler.
+ */
+ sstatus = sata_dwc_core_scr_read(ap, SCR_STATUS);
+ if (sstatus == 0x0) {
+ ata_port_printk(ap, KERN_INFO, "Connection lost while commands are executing --> ignore current command\n");
+ ata_ehi_hotplugged(ehi);
+ ap->link.eh_context.i.action |= ATA_EH_RESET;
+ return ret;
+ }
+
+ // Set the PMP field in the SCONTROL register
+ if (sata_pmp_attached(ap))
+ sata_dwc_pmp_select(ap, qc->dev->link->pmp);
+
+#ifdef DEBUG_NCQ
+ if (qc->tag > 0 || ap->link.sactive > 1) {
+ dev_info(ap->dev, "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s"
+ " ap active_tag=0x%08x ap sactive=0x%08x\n",
+ __func__, ap->print_id, qc->tf.command,
+ ata_cmd_2_txt(&qc->tf), qc->tag,
+ prot_2_txt(qc->tf.protocol), ap->link.active_tag,
+ ap->link.sactive);
+ }
+#endif
+
+ // Process NCQ
+ if (ata_is_ncq(qc->tf.protocol)) {
+ dwc_link_dbg(qc->dev->link, "%s --> process NCQ, ap->link.active_tag=0x%08x, active_tag=0x%08x\n", __func__, ap->link.active_tag, qc->tag);
+ ap->link.active_tag = qc->tag;
+ ap->ops->sff_tf_load(ap, &qc->tf);
+ sata_dwc_exec_command_by_tag(ap, &qc->tf, qc->tag,
+ SATA_DWC_CMD_ISSUED_PENDING);
+ } else {
+ dwc_link_dbg(qc->dev->link, "%s --> non-NCQ process, ap->link.active_tag=%d, active_tag=0x%08x\n", __func__, ap->link.active_tag, qc->tag);
+ // Sync ata_port with qc->tag
+ ap->link.active_tag = qc->tag;
+ ret = ata_bmdma_qc_issue(qc);
+ }
+
+ return ret;
+}
+
+#if 0
+/*
+ * Function : sata_dwc_eng_timeout
+ * arguments : ata_port *ap
+ * Return value : None
+ * error handler for DMA time out
+ * ata_eng_timeout(ap) -- this does bmdma stuff which can not be done by this
+ * driver. SEE ALSO ata_qc_timeout(ap)
+ */
+static void sata_dwc_eng_timeout(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+ struct ata_queued_cmd *qc;
+ u8 tag;
+ uint mask = 0x0;
+ unsigned long flags;
+ u32 serror, intpr, dma_ch;
+
+ tag = ap->link.active_tag;
+ dma_ch = hsdevp->dma_chan[tag];
+ qc = ata_qc_from_tag(ap, tag);
+
+ dev_err(ap->dev, "%s: id=%d active_tag=%d qc=%p dma_chan=%d\n",
+ __func__, ap->print_id, tag, qc, dma_ch);
+
+ intpr = in_le32(&hsdev->sata_dwc_regs->intpr);
+ serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+
+ dev_err(ap->dev, "intpr=0x%08x serror=0x%08x\n", intpr, serror);
+
+ /* If there are no error bits set, can we just pass this on to eh? */
+ if (!(serror & SATA_DWC_SERR_ERR_BITS) &&
+ !(intpr & SATA_DWC_INTPR_ERR)) {
+
+ spin_lock_irqsave(&ap->host->lock, flags);
+ if (dma_dwc_channel_enabled(dma_ch))
+ dma_dwc_terminate_dma(ap, dma_ch);
+
+ hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
+
+ /* clear active bit */
+ mask = (~(qcmd_tag_to_mask(tag)));
+ hsdevp->sata_dwc_sactive_queued = hsdevp->sata_dwc_sactive_queued & mask;
+ hsdevp->sata_dwc_sactive_issued = hsdevp->sata_dwc_sactive_issued & mask;
+
+ spin_unlock_irqrestore(&ap->host->lock, flags);
+ } else {
+ /* This is wrong, what really needs to be done is a reset. */
+
+ spin_lock_irqsave(ap->lock, flags);
+
+ if (ata_is_dma(qc->tf.protocol)) {
+ /* disable DMAC */
+ dma_dwc_terminate_dma(ap, dma_ch);
+ }
+
+ spin_unlock_irqrestore(ap->lock, flags);
+ }
+ WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
+ if (qc->flags & ATA_QCFLAG_ACTIVE) {
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ /*
+ * test-only: The original code (AMCC: 2.6.19) called
+ * ata_eng_timeout(ap) here. This function is not available
+ * anymore. So what to do now?
+ */
+ }
+}
+#endif
+/*
+ * Function : sata_dwc_qc_prep
+ * arguments : ata_queued_cmd *qc
+ * Return value : None
+ * qc_prep for a particular queued command
+ */
+static void sata_dwc_qc_prep(struct ata_queued_cmd *qc)
+{
+ u32 sactive;
+ u8 tag = qc->tag;
+
+ if ((qc->dma_dir == DMA_NONE) || (qc->tf.protocol == ATA_PROT_PIO))
+ return;
+
+#ifdef DEBUG_NCQ
+ if (qc->tag > 0) {
+ dev_info(qc->ap->dev, "%s: qc->tag=%d ap->active_tag=0x%08x\n",
+ __func__, qc->tag, qc->ap->link.active_tag);
+ }
+#endif
+
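+ /*
+ * For NCQ, the host must set the tag's bit in SActive before issuing
+ * the command so the controller can match the device's completions.
+ */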
+ if (qc->tf.protocol == ATA_PROT_NCQ) {
+ sactive = sata_dwc_core_scr_read(qc->ap, SCR_ACTIVE);
+ sactive |= (0x00000001 << tag);
+ sata_dwc_core_scr_write(qc->ap, SCR_ACTIVE, sactive);
+ dwc_dev_vdbg(qc->ap->dev, "%s: tag=%d ap->link.sactive = 0x%08x "
+ "sactive=0x%08x\n", __func__, tag, qc->ap->link.sactive,
+ sactive);
+ } else {
+ tag = 0;
+ }
+
+ sata_dwc_qc_prep_by_tag(qc, tag);
+}
+
+
+
+static void sata_dwc_post_internal_cmd(struct ata_queued_cmd *qc)
+{
+ if (qc->flags & ATA_QCFLAG_FAILED)
+ ata_eh_freeze_port(qc->ap);
+}
+
+static void sata_dwc_error_handler(struct ata_port *ap)
+{
+ u32 serror;
+ u32 intmr, errmr;
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
+
+ serror = sata_dwc_core_scr_read(ap, SCR_ERROR);
+ intmr = in_le32(&hsdev->sata_dwc_regs->intmr);
+ errmr = in_le32(&hsdev->sata_dwc_regs->errmr);
+
+ dwc_port_dbg(ap, "%s: SERROR=0x%08x, INTMR=0x%08x, ERRMR=0x%08x\n", __func__, serror, intmr, errmr);
+
+ dwc_port_vdbg(ap, "%s - sata_dwc_sactive_queued=0x%08x, sata_dwc_sactive_issued=0x%08x\n",__func__, hsdevp->sata_dwc_sactive_queued, hsdevp->sata_dwc_sactive_issued);
+ dwc_port_vdbg(ap, "dmacr=0x%08x\n",in_le32(&(hsdev->sata_dwc_regs->dmacr)));
+ dwc_port_vdbg(ap, "qc_active=0x%08x, qc_allocated=0x%08x, active_tag=%d\n", ap->qc_active, ap->qc_allocated, ap->link.active_tag);
+
+ sata_pmp_error_handler(ap);
+}
+
+/*
+ * sata_dwc_check_status - Get value of the Status Register
+ * @ap: Port to check
+ *
+ * Returns the content of the status register (CDR7)
+ */
+u8 sata_dwc_check_status(struct ata_port *ap)
+{
+ return ioread8(ap->ioaddr.status_addr);
+}
+
+
+/*
+ * Freeze the port by clearing pending interrupts and masking further ones
+ * @ap: Port to freeze
+ */
+void sata_dwc_freeze(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+ dwc_port_dbg(ap, "call %s ...\n",__func__);
+ // turn IRQ off
+ clear_intpr(hsdev);
+ clear_serror(ap);
+ out_le32(&hsdev->sata_dwc_regs->intmr, 0x0);
+}
+
+/*
+ * Thaw the port by turning IRQ on
+ */
+void sata_dwc_thaw(struct ata_port *ap)
+{
+ struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
+
+ dwc_port_dbg(ap, "call %s ...\n",__func__);
+ // Clear IRQ
+ clear_intpr(hsdev);
+ // Turn IRQ back on
+ sata_dwc_enable_interrupts(hsdev);
+}
+
+
+/*
+ * scsi mid-layer and libata interface structures
+ */
+static struct scsi_host_template sata_dwc_sht = {
+ ATA_NCQ_SHT(DRV_NAME),
+ /*
+ * test-only: Currently this driver doesn't handle NCQ
+ * correctly. We enable NCQ but set the queue depth to a
+ * max of 1. This will get fixed in a future release.
+ */
+// .sg_tablesize = LIBATA_MAX_PRD,
+ .can_queue = ATA_DEF_QUEUE, /* ATA_MAX_QUEUE */
+ .dma_boundary = ATA_DMA_BOUNDARY,
+};
+
+
+static struct ata_port_operations sata_dwc_ops = {
+ .inherits = &sata_pmp_port_ops,
+ .dev_config = sata_dwc_dev_config,
+
+ .error_handler = sata_dwc_error_handler,
+ .softreset = sata_dwc_softreset,
+ .hardreset = sata_dwc_hardreset,
+ .pmp_softreset = sata_dwc_softreset,
+ .pmp_hardreset = sata_dwc_pmp_hardreset,
+
+ .qc_defer = sata_pmp_qc_defer_cmd_switch,
+ .qc_prep = sata_dwc_qc_prep,
+ .qc_issue = sata_dwc_qc_issue,
+ .qc_fill_rtf = ata_sff_qc_fill_rtf,
+
+ .scr_read = sata_dwc_scr_read,
+ .scr_write = sata_dwc_scr_write,
+
+ .port_start = sata_dwc_port_start,
+ .port_stop = sata_dwc_port_stop,
+
+ .bmdma_setup = sata_dwc_bmdma_setup,
+ .bmdma_start = sata_dwc_bmdma_start,
+ // Reuse some SFF functions
+ .sff_check_status = sata_dwc_check_status,
+ .sff_tf_read = ata_sff_tf_read,
+ .sff_data_xfer = ata_sff_data_xfer,
+ .sff_tf_load = ata_sff_tf_load,
+ .sff_dev_select = sata_dwc_dev_select,
+ .sff_exec_command = sata_dwc_exec_command,
+
+ .sff_irq_on = sata_dwc_irq_on,
+/* .sff_irq_clear = sata_dwc_irq_clear,
+ .freeze = sata_dwc_freeze,
+ .thaw = sata_dwc_thaw,
+ .sff_irq_on = ata_sff_irq_on,
+ */
+ .sff_irq_clear = ata_bmdma_irq_clear,
+ .freeze = ata_sff_freeze,
+ .thaw = ata_sff_thaw,
+ .pmp_attach = sata_dwc_pmp_attach,
+ .pmp_detach = sata_dwc_pmp_detach,
+ .post_internal_cmd = sata_dwc_post_internal_cmd,
+
+ /* test-only: really needed? */
+ //.eng_timeout = sata_dwc_eng_timeout,
+};
+
+static const struct ata_port_info sata_dwc_port_info[] = {
+ {
+ /*
+ * test-only: NCQ is enabled via ATA_FLAG_NCQ below, but this
+ * driver's NCQ handling is still considered experimental.
+ */
+ .flags = ATA_FLAG_SATA |
+ ATA_FLAG_NCQ |
+ ATA_FLAG_PMP | ATA_FLAG_AN,
+ .pio_mask = ATA_PIO4, /* pio 0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &sata_dwc_ops,
+ },
+};
+
+static int sata_dwc_probe(struct platform_device *ofdev)
+{
+ struct sata_dwc_device *hsdev;
+ u32 idr, versionr;
+ char *ver = (char *)&versionr;
+ u8 *base = NULL;
+ int err = 0;
+ int irq;
+ struct ata_host *host;
+ struct ata_port_info pi = sata_dwc_port_info[0];
+ const struct ata_port_info *ppi[] = { &pi, NULL };
+ struct device_node *np = ofdev->dev.of_node;
+ const unsigned int *dma_channel;
+ /*
+ * Check if device is enabled
+ */
+ if (!of_device_is_available(np)) {
+ printk(KERN_INFO "%s: Port disabled via device-tree\n",
+ np->full_name);
+ return 0;
+ }
+
+ /* Allocate DWC SATA device */
+ hsdev = kzalloc(sizeof(*hsdev), GFP_KERNEL);
+ if (hsdev == NULL) {
+ dev_err(&ofdev->dev, "kzalloc failed for hsdev\n");
+ err = -ENOMEM;
+ goto error;
+ }
+
+
+ // Identify the SATA DMA channel used by this SATA device
+ dma_channel = of_get_property(np, "dma-channel", NULL);
+ if (dma_channel) {
+ dev_notice(&ofdev->dev, "Using DMA channel %d\n", *dma_channel);
+ hsdev->dma_channel = *dma_channel;
+ } else
+ hsdev->dma_channel = 0;
+
+ /* Ioremap SATA registers */
+ base = of_iomap(np, 0);
+ if (!base) {
+ dev_err(&ofdev->dev, "ioremap failed for SATA register address\n");
+ err = -ENODEV;
+ goto error_kmalloc;
+ }
+ hsdev->reg_base = base;
+ dwc_dev_vdbg(&ofdev->dev, "ioremap done for SATA register address\n");
+
+ /* Synopsys DWC SATA specific Registers */
+ hsdev->sata_dwc_regs = (void __iomem *)(base + SATA_DWC_REG_OFFSET);
+
+ /* Allocate and fill host */
+ host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
+ if (!host) {
+ dev_err(&ofdev->dev, "ata_host_alloc_pinfo failed\n");
+ err = -ENOMEM;
+ goto error_iomap;
+ }
+
+ host->private_data = hsdev;
+
+ /* Setup port */
+ host->ports[0]->ioaddr.cmd_addr = base;
+ host->ports[0]->ioaddr.scr_addr = base + SATA_DWC_SCR_OFFSET;
+ hsdev->scr_base = (u8 *)(base + SATA_DWC_SCR_OFFSET);
+ sata_dwc_setup_port(&host->ports[0]->ioaddr, (unsigned long)base);
+
+ /* Read the ID and Version Registers */
+ idr = in_le32(&hsdev->sata_dwc_regs->idr);
+ versionr = in_le32(&hsdev->sata_dwc_regs->versionr);
+ dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
+ idr, ver[0], ver[1], ver[2]);
+
+ /* Get SATA DMA interrupt number */
+ irq = irq_of_parse_and_map(np, 1);
+ if (irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "no SATA DMA irq\n");
+ err = -ENODEV;
+ goto error_out;
+ }
+
+ /* Get physical SATA DMA register base address */
+ if (!sata_dma_regs) {
+ sata_dma_regs = of_iomap(np, 1);
+ if (!sata_dma_regs) {
+ dev_err(&ofdev->dev, "ioremap failed for AHBDMA register address\n");
+ err = -ENODEV;
+ goto error_out;
+ }
+ }
+ /* Save dev for later use in dev_xxx() routines */
+ hsdev->dev = &ofdev->dev;
+
+ /* Init global dev list */
+ dwc_dev_list[hsdev->dma_channel] = hsdev;
+
+ /* Initialize AHB DMAC */
+ hsdev->irq_dma = irq;
+ dma_dwc_init(hsdev);
+ dma_register_interrupt(hsdev);
+
+
+ /* Enable SATA Interrupts */
+ sata_dwc_enable_interrupts(hsdev);
+
+ /* Get SATA interrupt number */
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq == NO_IRQ) {
+ dev_err(&ofdev->dev, "no SATA irq\n");
+ err = -ENODEV;
+ goto error_out;
+ }
+
+ /*
+ * Now, register with libATA core, this will also initiate the
+ * device discovery process, invoking our port_start() handler &
+ * error_handler() to execute a dummy Softreset EH session
+ */
+ ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+
+ dev_set_drvdata(&ofdev->dev, host);
+
+ /* Everything is fine */
+ return 0;
+
+error_out:
+ /* Free SATA DMA resources */
+ dma_dwc_exit(hsdev);
+
+error_iomap:
+ iounmap(base);
+error_kmalloc:
+ kfree(hsdev);
+error:
+ return err;
+}
+
+static int sata_dwc_remove(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct sata_dwc_device *hsdev = host->private_data;
+
+ ata_host_detach(host);
+
+ dev_set_drvdata(dev, NULL);
+
+ /* Free SATA DMA resources */
+ dma_dwc_exit(hsdev);
+
+ iounmap(hsdev->reg_base);
+ kfree(hsdev);
+ /* The ata_host itself is devres-managed by libata; it must not be freed here */
+
+ dwc_dev_vdbg(&ofdev->dev, "done\n");
+
+ return 0;
+}
+
+static const struct of_device_id sata_dwc_match[] = {
+ { .compatible = "amcc,sata-460ex", },
+ { .compatible = "amcc,sata-apm82181", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sata_dwc_match);
+
+static struct platform_driver sata_dwc_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sata_dwc_match,
+ },
+ .probe = sata_dwc_probe,
+ .remove = sata_dwc_remove,
+};
+
+module_platform_driver(sata_dwc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
+MODULE_DESCRIPTION("DesignWare Cores SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 1b35c45c92b..3f2e1673808 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
struct task_struct *opa;
kref_get(&connection->kref);
+ /* We may just have force_sig()'ed this thread
+ * to get it out of some blocking network function.
+ * Clear signals; otherwise kthread_run(), which internally uses
+ * wait_on_completion_killable(), will mistake our pending signal
+ * for a new fatal signal and fail. */
+ flush_signals(current);
opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) {
drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 089e72cd37b..36e54be402d 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,11 +622,18 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- if (reset_capacity) {
+ if (reset_capacity)
set_capacity(zram->disk, 0);
- revalidate_disk(zram->disk);
- }
+
up_write(&zram->init_lock);
+
+ /*
+ * Revalidate disk out of the init_lock to avoid lockdep splat.
+ * It's okay because disk's capacity is protected by init_lock
+ * so that revalidate_disk always sees up-to-date capacity.
+ */
+ if (reset_capacity)
+ revalidate_disk(zram->disk);
}
static ssize_t disksize_store(struct device *dev,
@@ -666,8 +673,15 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
- revalidate_disk(zram->disk);
up_write(&zram->init_lock);
+
+ /*
+ * Revalidate disk out of the init_lock to avoid lockdep splat.
+ * It's okay because disk's capacity is protected by init_lock
+ * so that revalidate_disk always sees up-to-date capacity.
+ */
+ revalidate_disk(zram->disk);
+
return len;
out_destroy_comp:
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index e1581335937..cb8e6f14e88 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -16,7 +16,7 @@
#include <linux/clkdev.h>
#include <linux/clk/ti.h>
-#define DRA7_DPLL_ABE_DEFFREQ 361267200
+#define DRA7_DPLL_ABE_DEFFREQ 180633600
#define DRA7_DPLL_GMAC_DEFFREQ 1000000000
@@ -322,6 +322,11 @@ int __init dra7xx_dt_clk_init(void)
if (rc)
pr_err("%s: failed to configure ABE DPLL!\n", __func__);
+ dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
+ rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
+ if (rc)
+ pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
+
dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
if (rc)
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 1eca7b9760e..d761ad3ba09 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -195,6 +195,23 @@ config AMCC_PPC440SPE_ADMA
help
Enable support for the AMCC PPC440SPe RAID engines.
+config AMCC_PPC460EX_460GT_4CHAN_DMA
+ tristate "AMCC PPC460EX PPC460GT PLB DMA support"
+ depends on 460EX || 460GT || APM821xx
+ select DMA_ENGINE
+ default y
+
+config APM82181_ADMA
+ tristate "APM82181 Asynchronous DMA support"
+ depends on APM821xx
+ select ASYNC_CORE
+ select ASYNC_TX_DMA
+ select DMA_ENGINE
+ select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+ default y
+ ---help---
+ Enable support for the APM82181 Asynchronous DMA engines.
+
config TIMB_DMA
tristate "Timberdale FPGA DMA support"
depends on MFD_TIMBERDALE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c779e1eb2db..7acb4c437fa 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE_BASE) += sh/
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
-obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-y += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
diff --git a/drivers/dma/ppc4xx/Makefile b/drivers/dma/ppc4xx/Makefile
index b3d259b3e52..e7700985371 100644
--- a/drivers/dma/ppc4xx/Makefile
+++ b/drivers/dma/ppc4xx/Makefile
@@ -1 +1,3 @@
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += adma.o
+obj-$(CONFIG_AMCC_PPC460EX_460GT_4CHAN_DMA) += ppc460ex_4chan_dma.o
+obj-$(CONFIG_APM82181_ADMA) += apm82181-adma.o
diff --git a/drivers/dma/ppc4xx/apm82181-adma.c b/drivers/dma/ppc4xx/apm82181-adma.c
new file mode 100644
index 00000000000..c95e704b1d9
--- /dev/null
+++ b/drivers/dma/ppc4xx/apm82181-adma.c
@@ -0,0 +1,2201 @@
+/*
+ * Copyright(c) 2010 Applied Micro Circuits Corporation(AMCC). All rights reserved.
+ *
+ * Author: Tai Tri Nguyen <ttnguyen@appliedmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This driver supports the asynchronous DMA copy and RAID engines available
+ * on the AppliedMicro APM82181 Processor.
+ * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
+ * ADMA driver written by D.Williams.
+ */
+#define ADMA_DEBUG
+#undef ADMA_DEBUG
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/apm82181-adma.h>
+#include "../dmaengine.h"
+
+#define PPC4XX_EDMA "apm82181-adma: "
+#ifdef ADMA_DEBUG
+#define DBG(string, args...) \
+ printk(PPC4XX_EDMA string ,##args)
+#define INFO DBG("<%s> -- line %d\n",__func__,__LINE__);
+#define ADMA_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+#else
+#define DBG(string, args...) \
+ {if (0) printk(KERN_INFO PPC4XX_EDMA string ,##args); 0; }
+#define INFO DBG("");
+#define ADMA_HEXDUMP(b, l) \
+ {if (0) print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 8, 1, (b), (l), false); 0;}
+#endif
+
+#define MEM_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false);
+
+/* The list of channels exported by apm82181 ADMA */
+struct list_head ppc_adma_chan_list = LIST_HEAD_INIT(ppc_adma_chan_list);
+
+/* This flag is set when we want to refetch the xor chain in the
+ * interrupt handler
+ */
+static u32 do_xor_refetch = 0;
+
+/* Pointers to last submitted to DMA0/1/2/3 and XOR CDBs */
+static apm82181_desc_t *chan_last_sub[5];
+static apm82181_desc_t *chan_first_cdb[5];
+
+/* Pointer to last linked and submitted xor CB */
+static apm82181_desc_t *xor_last_linked = NULL;
+static apm82181_desc_t *xor_last_submit = NULL;
+
+/* /proc interface is used here to verify the h/w RAID 5 capabilities
+ */
+static struct proc_dir_entry *apm82181_proot;
+
+/* These are used in enable & check routines
+ */
+static u32 apm82181_xor_verified;
+static u32 apm82181_memcpy_verified[4];
+static apm82181_ch_t *apm82181_dma_tchan[5];
+static struct completion apm82181_r5_test_comp;
+
+static inline int apm82181_chan_is_busy(apm82181_ch_t *chan);
+#if 0
+static phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size)
+{
+ phys_addr_t page_4gb = 0;
+
+ return (page_4gb | addr);
+}
+#endif
+/**
+ * apm82181_adma_device_estimate - estimate the efficiency of processing
+ * the operation given on this channel. It's assumed that 'chan' is
+ * capable of processing 'cap' type of operation.
+ * @chan: channel to use
+ * @cap: type of transaction
+ * @src_lst: array of source pointers
+ * @src_cnt: number of source operands
+ * @src_sz: size of each source operand
+ */
+int apm82181_adma_estimate(struct dma_chan *chan,
+ enum dma_transaction_type cap, struct page **src_lst,
+ int src_cnt, size_t src_sz)
+{
+ int ef = 1;
+
+ /* channel idleness increases the priority */
+ if (likely(ef) &&
+ !apm82181_chan_is_busy(to_apm82181_adma_chan(chan)))
+ ef++;
+ else {
+ if (chan->chan_id != APM82181_XOR_ID)
+ ef = -1;
+ }
+ return ef;
+}
+
+/******************************************************************************
+ * Command (Descriptor) Blocks low-level routines
+ ******************************************************************************/
+/**
+ * apm82181_desc_init_interrupt - initialize the descriptor for INTERRUPT
+ * pseudo operation
+ */
+static inline void apm82181_desc_init_interrupt (apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ xor_cb_t *p;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ BUG();
+ break;
+ case APM82181_XOR_ID:
+ p = desc->hw_desc;
+ memset(desc->hw_desc, 0, sizeof(xor_cb_t));
+ /* NOP with Command Block Complete Enable */
+ p->cbc = XOR_CBCR_CBCE_BIT;
+ break;
+ default:
+ printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id,
+ __func__);
+ break;
+ }
+}
+
+/**
+ * apm82181_desc_init_xor - initialize the descriptor for XOR operation
+ */
+static inline void apm82181_desc_init_xor(apm82181_desc_t *desc, int src_cnt,
+ unsigned long flags)
+{
+ xor_cb_t *hw_desc = desc->hw_desc;
+
+ memset(desc->hw_desc, 0, sizeof(xor_cb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = src_cnt;
+ desc->dst_cnt = 1;
+
+ hw_desc->cbc = XOR_CBCR_TGT_BIT | src_cnt;
+ if (flags & DMA_PREP_INTERRUPT)
+ /* Enable interrupt on complete */
+ hw_desc->cbc |= XOR_CBCR_CBCE_BIT;
+}
+
+/**
+ * apm82181_desc_init_memcpy - initialize the descriptor for MEMCPY operation
+ */
+static inline void apm82181_desc_init_memcpy(apm82181_desc_t *desc,
+ unsigned long flags)
+{
+ dma_cdb_t *hw_desc = desc->hw_desc;
+
+ memset(hw_desc, 0, sizeof(dma_cdb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(APM82181_DESC_INT, &desc->flags);
+ else
+ clear_bit(APM82181_DESC_INT, &desc->flags);
+ /* dma configuration for running */
+ hw_desc->ctrl.tm = 2; /* soft init mem-mem mode */
+ hw_desc->ctrl.pw = 4; /* transfer width 128 bits (16 bytes) */
+ hw_desc->ctrl.ben = 1; /* buffer enable */
+ hw_desc->ctrl.sai = 1; /* increment source addr */
+ hw_desc->ctrl.dai = 1; /* increment dest addr */
+ hw_desc->ctrl.tce = 1; /* chan stops when TC is reached */
+ hw_desc->ctrl.cp = 3; /* highest priority */
+ hw_desc->ctrl.sl = 0; /* source is in PLB */
+ hw_desc->ctrl.pl = 0; /* dest is in PLB */
+ hw_desc->cnt.tcie = 0; /* no terminal-count interrupt */
+ hw_desc->cnt.etie = 0; /* no end-of-transfer interrupt */
+ hw_desc->cnt.eie = 1; /* enable error interrupt */
+ hw_desc->cnt.link = 0; /* do not link to next cdb */
+ hw_desc->cnt.sgl = 0;
+ hw_desc->ctrl.ce = 1; /* enable channel */
+ hw_desc->ctrl.cie = 1; /* enable channel interrupt */
+}
+
+/**
+ * apm82181_desc_init_memset - initialize the descriptor for MEMSET operation
+ */
+static inline void apm82181_desc_init_memset(apm82181_desc_t *desc, int value,
+ unsigned long flags)
+{
+ memset(desc->hw_desc, 0, sizeof(dma_cdb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = 1;
+ desc->dst_cnt = 1;
+
+ if (flags & DMA_PREP_INTERRUPT)
+ set_bit(APM82181_DESC_INT, &desc->flags);
+ else
+ clear_bit(APM82181_DESC_INT, &desc->flags);
+
+}
+
+/**
+ * apm82181_desc_set_src_addr - set source address into the descriptor
+ */
+static inline void apm82181_desc_set_src_addr( apm82181_desc_t *desc,
+ apm82181_ch_t *chan, int src_idx, dma_addr_t addr)
+{
+ dma_cdb_t *dma_hw_desc;
+ xor_cb_t *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->src_hi = (u32)(addr >> 32);
+ dma_hw_desc->src_lo = (u32)addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->ops[src_idx].h = (u32)(addr >> 32);
+ xor_hw_desc->ops[src_idx].l = (u32)addr;
+ break;
+ }
+}
+
+static void apm82181_adma_set_src(apm82181_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ apm82181_ch_t *chan = to_apm82181_adma_chan(sw_desc->async_tx.chan);
+
+ sw_desc = sw_desc->group_head;
+
+ if (likely(sw_desc))
+ apm82181_desc_set_src_addr(sw_desc, chan, index, addr);
+}
+
+/**
+ * apm82181_desc_set_dest_addr - set destination address into the descriptor
+ */
+static inline void apm82181_desc_set_dest_addr(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, dma_addr_t addr, u32 index)
+{
+ dma_cdb_t *dma_hw_desc;
+ xor_cb_t *xor_hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_hw_desc = desc->hw_desc;
+ dma_hw_desc->dest_hi = (u32)(addr >> 32);
+ dma_hw_desc->dest_lo = (u32)addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbtah = (u32)(addr >> 32);
+ xor_hw_desc->cbtal |= (u32)addr;
+ break;
+ }
+}
+
+static int plbdma_get_transfer_width(dma_cdb_t *dma_hw_desc)
+{
+ switch (dma_hw_desc->ctrl.pw){
+ case 0:
+ return 1; /* unit: bytes */
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 8;
+ case 4:
+ return 16;
+ }
+ return 0;
+}
+
+/**
+ * apm82181_desc_set_byte_count - set the number of data bytes involved
+ * in the operation
+ */
+static inline void apm82181_desc_set_byte_count(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, size_t byte_count)
+{
+ dma_cdb_t *dma_hw_desc;
+ xor_cb_t *xor_hw_desc;
+ int terminal_cnt, transfer_width = 0;
+
+ DBG("<%s> byte_count %08x\n", __func__,byte_count);
+ switch (chan->device->id){
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_hw_desc = desc->hw_desc;
+ transfer_width = plbdma_get_transfer_width(dma_hw_desc);
+ terminal_cnt = byte_count/transfer_width;
+ dma_hw_desc->cnt.tc = terminal_cnt;
+ break;
+ case APM82181_XOR_ID:
+ xor_hw_desc = desc->hw_desc;
+ xor_hw_desc->cbbc = byte_count;
+ break;
+ }
+}
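+
+/*
+ * Worked example (illustrative): with ctrl.pw = 4 the PLB DMA engine moves
+ * data in 16-byte units (see plbdma_get_transfer_width() above), so a
+ * 4096-byte copy programs cnt.tc = 4096 / 16 = 256 transfers. byte_count
+ * is therefore assumed to be a multiple of the configured transfer width.
+ */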
+
+/**
+ * apm82181_xor_set_link - set link address in xor CB
+ */
+static inline void apm82181_xor_set_link (apm82181_desc_t *prev_desc,
+ apm82181_desc_t *next_desc)
+{
+ xor_cb_t *xor_hw_desc = prev_desc->hw_desc;
+
+ if (unlikely(!next_desc || !(next_desc->phys))) {
+ printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%llx\n",
+ __func__, next_desc,
+ next_desc ? next_desc->phys : 0);
+ BUG();
+ }
+ DBG("<%s>:next_desc->phys %llx\n", __func__,next_desc->phys);
+ xor_hw_desc->cbs = 0;
+ xor_hw_desc->cblal = (u32)next_desc->phys;
+ xor_hw_desc->cblah = (u32)(next_desc->phys >> 32);
+ xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT;
+}
+
+/**
+ * apm82181_desc_set_link - set the address of descriptor following this
+ * descriptor in chain
+ */
+static inline void apm82181_desc_set_link(apm82181_ch_t *chan,
+ apm82181_desc_t *prev_desc, apm82181_desc_t *next_desc)
+{
+ unsigned long flags;
+ apm82181_desc_t *tail = next_desc;
+
+ if (unlikely(!prev_desc || !next_desc ||
+ (prev_desc->hw_next && prev_desc->hw_next != next_desc))) {
+ /* If the previous next pointer is being overwritten, something is
+ * wrong, though we may refetch from append to initiate list
+ * processing; in that case it is OK.
+ */
+ printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; "
+ "prev->hw_next=0x%p\n", __func__, prev_desc,
+ next_desc, prev_desc ? prev_desc->hw_next : NULL);
+ BUG();
+ }
+
+ local_irq_save(flags);
+
+ /* do s/w chaining both for DMA and XOR descriptors */
+ prev_desc->hw_next = next_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ break;
+ case APM82181_XOR_ID:
+ /* bind descriptor to the chain */
+ while (tail->hw_next)
+ tail = tail->hw_next;
+ xor_last_linked = tail;
+
+ if (prev_desc == xor_last_submit)
+ /* do not link to the last submitted CB */
+ break;
+ apm82181_xor_set_link (prev_desc, next_desc);
+ break;
+ default:
+ BUG();
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * apm82181_desc_get_src_addr - extract the source address from the descriptor
+ */
+static inline u32 apm82181_desc_get_src_addr(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, int src_idx)
+{
+ dma_cdb_t *dma_hw_desc;
+
+ dma_hw_desc = desc->hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ break;
+ default:
+ return 0;
+ }
+ /* PLB DMA CDBs carry a single source address */
+ return dma_hw_desc->src_lo;
+}
+
+/**
+ * apm82181_desc_get_dest_addr - extract the destination address from the
+ * descriptor
+ */
+static inline u32 apm82181_desc_get_dest_addr(apm82181_desc_t *desc,
+ apm82181_ch_t *chan, int idx)
+{
+ dma_cdb_t *dma_hw_desc;
+
+ dma_hw_desc = desc->hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ break;
+ default:
+ return 0;
+ }
+
+ /* PLB DMA CDBs carry a single destination address */
+ return dma_hw_desc->dest_lo;
+}
+
+/**
+ * apm82181_desc_get_byte_count - extract the byte count from the descriptor
+ */
+static inline u32 apm82181_desc_get_byte_count(apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ dma_cdb_t *dma_hw_desc;
+
+ dma_hw_desc = desc->hw_desc;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ break;
+ default:
+ return 0;
+ }
+ /* the terminal count is kept in transfer-width units, so scale it
+ * back to bytes (the inverse of apm82181_desc_set_byte_count())
+ */
+ return dma_hw_desc->cnt.tc * plbdma_get_transfer_width(dma_hw_desc);
+}
+
+
+/**
+ * apm82181_desc_get_link - get the address of the descriptor that
+ * follows this one
+ */
+static inline u32 apm82181_desc_get_link(apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ if (!desc->hw_next)
+ return 0;
+
+ return desc->hw_next->phys;
+}
+
+/**
+ * apm82181_desc_is_aligned - check alignment
+ */
+static inline int apm82181_desc_is_aligned(apm82181_desc_t *desc,
+ int num_slots)
+{
+ return (desc->idx & (num_slots - 1)) ? 0 : 1;
+}
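+
+/*
+ * Example (illustrative): with num_slots = 4 (num_slots is assumed to be
+ * a power of two), only slot indices 0, 4, 8, ... count as aligned, since
+ * idx & (num_slots - 1) must be zero.
+ */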
+
+
+
+/******************************************************************************
+ * ADMA channel low-level routines
+ ******************************************************************************/
+
+static inline phys_addr_t apm82181_chan_get_current_descriptor(apm82181_ch_t *chan);
+static inline void apm82181_chan_append(apm82181_ch_t *chan);
+
+/*
+ * apm82181_adma_device_clear_eot_status - interrupt ack to XOR or DMA engine
+ */
+static inline void apm82181_adma_device_clear_eot_status (apm82181_ch_t *chan)
+{
+ u32 val;
+ int idx = chan->device->id;
+ volatile xor_regs_t *xor_reg;
+ INFO;
+ switch (idx) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ val = mfdcr(DCR_DMA2P40_SR);
+ if (val & DMA_SR_RI(idx)) {
+ printk(KERN_ERR "DMA%d error, status: 0x%x\n", idx, val);
+ }
+ /* TC reached int, write back to clear */
+ mtdcr(DCR_DMA2P40_SR, val);
+ break;
+ case APM82181_XOR_ID:
+ /* reset status bits to ack*/
+ xor_reg = chan->device->xor_base;
+
+ val = xor_reg->sr;
+ DBG("XOR engine status: 0x%08x\n", val);
+ xor_reg->sr = val;
+
+ if (val & (XOR_IE_ICBIE_BIT|XOR_IE_ICIE_BIT|XOR_IE_RPTIE_BIT)) {
+ if (val & XOR_IE_RPTIE_BIT) {
+ /* Read PLB Timeout Error.
+ * Try to resubmit the CB
+ */
+ INFO;
+ xor_reg->cblalr = xor_reg->ccbalr;
+ xor_reg->crsr |= XOR_CRSR_XAE_BIT;
+ } else
+ printk (KERN_ERR "XOR ERR 0x%x status\n", val);
+ break;
+ }
+
+ /* if the XORcore is idle, but there are unprocessed CBs
+ * then refetch the s/w chain here
+ */
+ if (!(xor_reg->sr & XOR_SR_XCP_BIT) && do_xor_refetch) {
+ apm82181_chan_append(chan);
+ }
+ break;
+ }
+}
+
+/*
+ * apm82181_chan_is_busy - get the channel status
+ */
+
+static inline int apm82181_chan_is_busy(apm82181_ch_t *chan)
+{
+ int busy = 0;
+ volatile xor_regs_t *xor_reg = chan->device->xor_base;
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ if(mfdcr(DCR_DMA2P40_SR) & DMA_SR_CB(chan->device->id))
+ busy = 1;
+ else
+ busy = 0;
+ break;
+ case APM82181_XOR_ID:
+ /* use the special status bit for the XORcore
+ */
+ busy = (xor_reg->sr & XOR_SR_XCP_BIT) ? 1 : 0;
+ break;
+ default:
+ BUG();
+ }
+
+ return busy;
+}
+
+/**
+ * apm82181_dma_put_desc - put PLB DMA 0/1/2/3 descriptor to FIFO
+ */
+static inline void apm82181_dma_put_desc(apm82181_ch_t *chan,
+ apm82181_desc_t *desc)
+{
+ dma_cdb_t *cdb = desc->hw_desc;
+ u32 sg_cmd = 0;
+
+ /* Enable TC interrupt */
+ if(test_bit(APM82181_DESC_INT, &desc->flags))
+ cdb->cnt.tcie = 1;
+ else
+ cdb->cnt.tcie = 0;
+ /* Not link to next cdb */
+ cdb->sg_hi = 0xffffffff;
+ cdb->sg_lo = 0xffffffff;
+
+ chan_last_sub[chan->device->id] = desc;
+
+ /* Update new cdb addr */
+ mtdcr(DCR_DMA2P40_SGHx(chan->device->id), (u32)(desc->phys >> 32));
+ mtdcr(DCR_DMA2P40_SGLx(chan->device->id), (u32)desc->phys);
+
+ INFO;
+ DBG("slot id: %d addr: %llx\n", desc->idx, desc->phys);
+ DBG("S/G addr H: %08x addr L: %08x\n",
+ mfdcr(DCR_DMA2P40_SGHx(chan->device->id)),
+ mfdcr(DCR_DMA2P40_SGLx(chan->device->id)));
+ ADMA_HEXDUMP(cdb, 96);
+ /* Enable S/G */
+ sg_cmd |= (DMA_SGC_SSG(chan->device->id) | DMA_SGC_EM_ALL);
+ sg_cmd |= DMA_SGC_SGL(chan->device->id, 0); /* S/G addr in PLB */
+
+ mtdcr(DCR_DMA2P40_SGC, sg_cmd);
+ DBG("S/G addr H: %08x addr L: %08x\n",
+ mfdcr(DCR_DMA2P40_SGHx(chan->device->id)),
+ mfdcr(DCR_DMA2P40_SGLx(chan->device->id)));
+ /* need to use variable for logging current CDB */
+ chan->current_cdb_addr = desc->phys;
+
+}
+
+/**
+ * apm82181_chan_append - update the h/w chain in the channel
+ */
+static inline void apm82181_chan_append(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *iter;
+ volatile xor_regs_t *xor_reg;
+ phys_addr_t cur_desc;
+ xor_cb_t *xcb;
+ unsigned long flags;
+ INFO;
+
+ local_irq_save(flags);
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ cur_desc = apm82181_chan_get_current_descriptor(chan);
+ DBG("current_desc %llx\n", cur_desc);
+ if (likely(cur_desc)) {
+ INFO;
+ iter = chan_last_sub[chan->device->id];
+ BUG_ON(!iter);
+ } else {
+ INFO;
+ /* first peer */
+ iter = chan_first_cdb[chan->device->id];
+ BUG_ON(!iter);
+ INFO;
+ apm82181_dma_put_desc(chan, iter);
+ chan->hw_chain_inited = 1;
+ }
+
+ /* is there something new to append */
+ if (!iter->hw_next)
+ break;
+
+ /* flush descriptors from the s/w queue to fifo */
+ list_for_each_entry_continue(iter, &chan->chain, chain_node) {
+ apm82181_dma_put_desc(chan, iter);
+ if (!iter->hw_next)
+ break;
+ }
+ break;
+ case APM82181_XOR_ID:
+ /* update h/w links and refetch */
+ if (!xor_last_submit->hw_next)
+ break;
+ xor_reg = chan->device->xor_base;
+ /* the last linked CDB has to generate an interrupt
+ * that we'd be able to append the next lists to h/w
+ * regardless of the XOR engine state at the moment of
+ * appending of these next lists
+ */
+ xcb = xor_last_linked->hw_desc;
+ xcb->cbc |= XOR_CBCR_CBCE_BIT;
+
+ if (!(xor_reg->sr & XOR_SR_XCP_BIT)) {
+ /* XORcore is idle. Refetch now */
+ do_xor_refetch = 0;
+ apm82181_xor_set_link(xor_last_submit,
+ xor_last_submit->hw_next);
+
+ xor_last_submit = xor_last_linked;
+ xor_reg->crsr |= XOR_CRSR_RCBE_BIT | XOR_CRSR_64BA_BIT;
+ } else {
+ /* XORcore is running. Refetch later in the handler */
+ do_xor_refetch = 1;
+ }
+
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
+/**
+ * apm82181_chan_get_current_descriptor - get the currently executed descriptor
+ */
+static inline phys_addr_t apm82181_chan_get_current_descriptor(apm82181_ch_t *chan)
+{
+ phys_addr_t curr_cdb_addr;
+ volatile xor_regs_t *xor_reg;
+ int idx = chan->device->id;
+
+ if (unlikely(!chan->hw_chain_inited))
+ /* h/w descriptor chain is not initialized yet */
+ return 0;
+ switch(idx){
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ curr_cdb_addr = chan->current_cdb_addr;
+ break;
+ case APM82181_XOR_ID:
+ xor_reg = chan->device->xor_base;
+ curr_cdb_addr = (dma_addr_t)xor_reg->ccbahr;
+ curr_cdb_addr = (curr_cdb_addr << 32) | xor_reg->ccbalr;
+ break;
+ default:
+ BUG();
+ }
+ return curr_cdb_addr;
+}
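+
+/*
+ * Note: no register is read back here for the currently executing PLB DMA
+ * CDB; the driver instead mirrors the last scatter/gather address it
+ * programmed in chan->current_cdb_addr (see apm82181_dma_put_desc()).
+ * The XOR core, by contrast, reports the current CB through its
+ * CCBAHR/CCBALR registers and is queried directly.
+ */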
+
+
+/******************************************************************************
+ * ADMA device level
+ ******************************************************************************/
+
+static int apm82181_adma_alloc_chan_resources(struct dma_chan *chan);
+static dma_cookie_t apm82181_adma_tx_submit(
+ struct dma_async_tx_descriptor *tx);
+
+static void apm82181_adma_set_dest(
+ apm82181_desc_t *tx,
+ dma_addr_t addr, int index);
+
+/**
+ * apm82181_get_group_entry - get group entry with index idx
+ * @tdesc: is the last allocated slot in the group.
+ */
+static inline apm82181_desc_t *
+apm82181_get_group_entry ( apm82181_desc_t *tdesc, u32 entry_idx)
+{
+ apm82181_desc_t *iter;
+ int i = 0;
+
+ /* entry_idx is unsigned, so only the upper bound needs checking */
+ if (entry_idx >= (tdesc->src_cnt + tdesc->dst_cnt)) {
+ printk(KERN_ERR "%s: entry_idx %d, src_cnt %d, dst_cnt %d\n",
+ __func__, entry_idx, tdesc->src_cnt, tdesc->dst_cnt);
+ BUG();
+ }
+ list_for_each_entry(iter, &tdesc->group_list, chain_node) {
+ if (i++ == entry_idx)
+ break;
+ }
+ return iter;
+}
+
+/**
+ * apm82181_adma_free_slots - flags descriptor slots for reuse
+ * @slot: Slot to free
+ * Caller must hold &apm82181_chan->lock while calling this function
+ */
+static void apm82181_adma_free_slots(apm82181_desc_t *slot,
+ apm82181_ch_t *chan)
+{
+ int stride = slot->slots_per_op;
+
+ while (stride--) {
+ /* the ack bit is intentionally left set; clearing it is not needed */
+ slot->slots_per_op = 0;
+ slot = list_entry(slot->slot_node.next,
+ apm82181_desc_t,
+ slot_node);
+ }
+}
+
+static void
+apm82181_adma_unmap(apm82181_ch_t *chan, apm82181_desc_t *desc)
+{
+ /*
+ * Each CDB describes a single source and a single destination.
+ * dma_unmap_page() is a no-op on this platform (see the comment in
+ * apm82181_adma_run_tx_complete_actions()), so there is nothing
+ * to undo here.
+ */
+}
+/**
+ * apm82181_adma_run_tx_complete_actions - call functions to be called
+ * upon complete
+ */
+static dma_cookie_t apm82181_adma_run_tx_complete_actions(
+ apm82181_desc_t *desc,
+ apm82181_ch_t *chan,
+ dma_cookie_t cookie)
+{
+ int i;
+ INFO;
+ BUG_ON(desc->async_tx.cookie < 0);
+ if (desc->async_tx.cookie > 0) {
+ cookie = desc->async_tx.cookie;
+ desc->async_tx.cookie = 0;
+
+ /* call the callback (must not sleep or submit new
+ * operations to this channel)
+ */
+ if (desc->async_tx.callback)
+ desc->async_tx.callback(
+ desc->async_tx.callback_param);
+
+ dma_descriptor_unmap(&desc->async_tx);
+ /* unmap dma addresses
+ * (unmap_single vs unmap_page?)
+ *
+ * actually, ppc's dma_unmap_page() functions are empty, so
+ * the following code is just for the sake of completeness
+ */
+ if (chan && chan->needs_unmap && desc->group_head &&
+ desc->unmap_len) {
+ apm82181_desc_t *unmap = desc->group_head;
+ /* assume 1 slot per op always */
+ u32 slot_count = unmap->slot_cnt;
+
+ /* Run through the group list and unmap addresses */
+ for (i = 0; i < slot_count; i++) {
+ BUG_ON(!unmap);
+ apm82181_adma_unmap(chan, unmap);
+ unmap = unmap->hw_next;
+ }
+ desc->group_head = NULL;
+ }
+ }
+
+ /* run dependent operations */
+ dma_run_dependencies(&desc->async_tx);
+
+ return cookie;
+}
+
+/**
+ * apm82181_adma_clean_slot - clean up CDB slot (if ack is set)
+ */
+static int apm82181_adma_clean_slot(apm82181_desc_t *desc,
+ apm82181_ch_t *chan)
+{
+ /* the client is allowed to attach dependent operations
+ * until 'ack' is set
+ */
+ if (!async_tx_test_ack(&desc->async_tx))
+ return 0;
+
+ /* leave the last descriptor in the chain
+ * so we can append to it
+ */
+ if (list_is_last(&desc->chain_node, &chan->chain) ||
+ desc->phys == apm82181_chan_get_current_descriptor(chan))
+ return 1;
+
+ dev_dbg(chan->device->common.dev, "\tfree slot %llx: %d stride: %d\n",
+ desc->phys, desc->idx, desc->slots_per_op);
+
+ list_del(&desc->chain_node);
+ apm82181_adma_free_slots(desc, chan);
+ return 0;
+}
+
+/**
+ * __apm82181_adma_slot_cleanup - the common clean-up routine, which runs
+ * through the channel's CDB list until it reaches the descriptor currently
+ * being processed. When the routine determines that all CDBs of a group
+ * have completed, the corresponding callbacks (if any) are called and the
+ * slots are freed.
+ */
+static void __apm82181_adma_slot_cleanup(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *iter, *_iter, *group_start = NULL;
+ dma_cookie_t cookie = 0;
+ phys_addr_t current_desc = apm82181_chan_get_current_descriptor(chan);
+ int busy = apm82181_chan_is_busy(chan);
+ int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
+
+ DBG("apm82181 adma%d: %s\n",
+ chan->device->id, __FUNCTION__);
+ DBG("current_desc %llx\n", current_desc);
+
+ if (!current_desc) {
+ /* There were no transactions yet, so
+ * nothing to clean
+ */
+ return;
+ }
+
+ /* free completed slots from the chain starting with
+ * the oldest descriptor
+ */
+ list_for_each_entry_safe(iter, _iter, &chan->chain,
+ chain_node) {
+ DBG(" cookie: %d slot: %d "
+ "busy: %d this_desc: %llx next_desc: %x cur: %llx ack: %d\n",
+ iter->async_tx.cookie, iter->idx, busy, iter->phys,
+ apm82181_desc_get_link(iter, chan), current_desc,
+ async_tx_test_ack(&iter->async_tx));
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+
+ /* do not advance past the current descriptor loaded into the
+ * hardware channel; subsequent descriptors are either in process
+ * or have not been submitted
+ */
+ if (seen_current)
+ break;
+
+ /* stop the search if we reach the current descriptor and the
+ * channel is busy, or if it appears that the current descriptor
+ * needs to be re-read (i.e. has been appended to)
+ */
+ if (iter->phys == current_desc) {
+ BUG_ON(seen_current++);
+ if (busy || apm82181_desc_get_link(iter, chan)) {
+ /* not all descriptors of the group have
+ * been completed; exit.
+ */
+ break;
+ }
+ }
+
+ /* detect the start of a group transaction */
+ if (!slot_cnt && !slots_per_op) {
+ slot_cnt = iter->slot_cnt;
+ slots_per_op = iter->slots_per_op;
+ if (slot_cnt <= slots_per_op) {
+ slot_cnt = 0;
+ slots_per_op = 0;
+ }
+ }
+
+ if (slot_cnt) {
+ if (!group_start)
+ group_start = iter;
+ slot_cnt -= slots_per_op;
+ }
+
+ /* all the members of a group are complete */
+ if (slots_per_op != 0 && slot_cnt == 0) {
+ apm82181_desc_t *grp_iter, *_grp_iter;
+ int end_of_chain = 0;
+
+ /* clean up the group */
+ slot_cnt = group_start->slot_cnt;
+ grp_iter = group_start;
+ list_for_each_entry_safe_from(grp_iter, _grp_iter,
+ &chan->chain, chain_node) {
+
+ cookie = apm82181_adma_run_tx_complete_actions(
+ grp_iter, chan, cookie);
+
+ slot_cnt -= slots_per_op;
+ end_of_chain = apm82181_adma_clean_slot(
+ grp_iter, chan);
+ if (end_of_chain && slot_cnt) {
+ /* Should wait for ZeroSum complete */
+ if (cookie > 0)
+ chan->common.completed_cookie = cookie;
+ return;
+ }
+
+ if (slot_cnt == 0 || end_of_chain)
+ break;
+ }
+
+ /* the group should be complete at this point */
+ BUG_ON(slot_cnt);
+
+ slots_per_op = 0;
+ group_start = NULL;
+ if (end_of_chain)
+ break;
+ else
+ continue;
+ } else if (slots_per_op) /* wait for group completion */
+ continue;
+
+ cookie = apm82181_adma_run_tx_complete_actions(iter, chan,
+ cookie);
+
+ if (apm82181_adma_clean_slot(iter, chan))
+ break;
+ }
+
+ BUG_ON(!seen_current);
+
+ if (cookie > 0) {
+ chan->common.completed_cookie = cookie;
+ DBG("completed cookie %d\n", cookie);
+ }
+
+}
+
+/**
+ * apm82181_adma_tasklet - clean up watch-dog initiator
+ */
+static void apm82181_adma_tasklet (unsigned long data)
+{
+ apm82181_ch_t *chan = (apm82181_ch_t *) data;
+ spin_lock(&chan->lock);
+ INFO;
+ __apm82181_adma_slot_cleanup(chan);
+ spin_unlock(&chan->lock);
+}
+
+/**
+ * apm82181_adma_slot_cleanup - clean up scheduled initiator
+ */
+static void apm82181_adma_slot_cleanup (apm82181_ch_t *chan)
+{
+ spin_lock_bh(&chan->lock);
+ __apm82181_adma_slot_cleanup(chan);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * apm82181_adma_alloc_slots - allocate free slots (if any)
+ */
+static apm82181_desc_t *apm82181_adma_alloc_slots(
+ apm82181_ch_t *chan, int num_slots,
+ int slots_per_op)
+{
+ apm82181_desc_t *iter = NULL, *_iter, *alloc_start = NULL;
+ struct list_head chain = LIST_HEAD_INIT(chain);
+ int slots_found, retry = 0;
+
+
+ BUG_ON(!num_slots || !slots_per_op);
+ /* start the search from the last allocated descriptor; if a
+ * contiguous allocation cannot be found, restart the search
+ * from the beginning of the list
+ */
+retry:
+ slots_found = 0;
+ if (retry == 0)
+ iter = chan->last_used;
+ else
+ iter = list_entry(&chan->all_slots, apm82181_desc_t,
+ slot_node);
+ prefetch(iter);
+ DBG("---iter at %p idx %d\n ",iter,iter->idx);
+ list_for_each_entry_safe_continue(iter, _iter, &chan->all_slots,
+ slot_node) {
+ prefetch(_iter);
+ prefetch(&_iter->async_tx);
+ if (iter->slots_per_op) {
+ slots_found = 0;
+ continue;
+ }
+
+ /* start the allocation if the slot is correctly aligned */
+ if (!slots_found++)
+ alloc_start = iter;
+ if (slots_found == num_slots) {
+ apm82181_desc_t *alloc_tail = NULL;
+ apm82181_desc_t *last_used = NULL;
+ iter = alloc_start;
+ while (num_slots) {
+ int i;
+
+ /* pre-ack all but the last descriptor */
+ if (num_slots != slots_per_op) {
+ async_tx_ack(&iter->async_tx);
+ }
+ list_add_tail(&iter->chain_node, &chain);
+ alloc_tail = iter;
+ iter->async_tx.cookie = 0;
+ iter->hw_next = NULL;
+ iter->flags = 0;
+ iter->slot_cnt = num_slots;
+ for (i = 0; i < slots_per_op; i++) {
+ iter->slots_per_op = slots_per_op - i;
+ last_used = iter;
+ iter = list_entry(iter->slot_node.next,
+ apm82181_desc_t,
+ slot_node);
+ }
+ num_slots -= slots_per_op;
+ }
+ alloc_tail->group_head = alloc_start;
+ alloc_tail->async_tx.cookie = -EBUSY;
+ list_splice(&chain, &alloc_tail->group_list);
+ chan->last_used = last_used;
+ DBG("---slot allocated at %llx idx %d, hw_desc %p tx_ack %d\n",
+ alloc_tail->phys, alloc_tail->idx, alloc_tail->hw_desc,
+ async_tx_test_ack(&alloc_tail->async_tx));
+ return alloc_tail;
+ }
+ }
+ if (!retry++)
+ goto retry;
+#ifdef ADMA_DEBUG
+ {
+ static int empty_slot_cnt;
+
+ if (!(empty_slot_cnt++ % 100))
+ printk(KERN_INFO "No empty slots, trying to free some\n");
+ }
+#endif
+ /* try to free some slots if the allocation fails */
+ tasklet_schedule(&chan->irq_tasklet);
+ return NULL;
+}
+
+/**
+ * apm82181_chan_xor_slot_count - get the number of slots necessary for
+ * XOR operation
+ */
+static inline int apm82181_chan_xor_slot_count(size_t len, int src_cnt,
+ int *slots_per_op)
+{
+ int slot_cnt;
+
+ /* each XOR descriptor provides up to 16 source operands */
+ slot_cnt = *slots_per_op = (src_cnt + XOR_MAX_OPS - 1)/XOR_MAX_OPS;
+
+ if (likely(len <= APM82181_ADMA_XOR_MAX_BYTE_COUNT))
+ return slot_cnt;
+
+ printk(KERN_ERR "%s: len %d > max %d !!\n",
+ __func__, len, APM82181_ADMA_XOR_MAX_BYTE_COUNT);
+ BUG();
+ return slot_cnt;
+}
+
+/**
+ * apm82181_desc_init_null_xor - initialize the descriptor for NULL XOR
+ * pseudo operation
+ */
+static inline void apm82181_desc_init_null_xor(apm82181_desc_t *desc)
+{
+ memset(desc->hw_desc, 0, sizeof(xor_cb_t));
+ desc->hw_next = NULL;
+ desc->src_cnt = 0;
+ desc->dst_cnt = 1;
+}
+
+/**
+ * apm82181_chan_set_first_xor_descriptor - init the XORcore chain
+ */
+static inline void apm82181_chan_set_first_xor_descriptor(apm82181_ch_t *chan,
+ apm82181_desc_t *next_desc)
+{
+ volatile xor_regs_t *xor_reg;
+
+ xor_reg = chan->device->xor_base;
+
+ if (xor_reg->sr & XOR_SR_XCP_BIT)
+ printk(KERN_INFO "%s: warning: XORcore is running "
+ "while the first CDB is being set!\n",
+ __func__);
+
+ xor_last_submit = xor_last_linked = next_desc;
+
+ xor_reg->crsr = XOR_CRSR_64BA_BIT;
+
+ xor_reg->cblalr = next_desc->phys;
+ xor_reg->cblahr = 0;
+ xor_reg->cbcr |= XOR_CBCR_LNK_BIT;
+
+ chan->hw_chain_inited = 1;
+}
+
+/**
+ * apm82181_chan_start_null_xor - initiate the first XOR operation (the DMA
+ * engines use FIFOs, as opposed to the linked chains used by the XOR core,
+ * so this is a XOR-specific operation)
+ */
+static void apm82181_chan_start_null_xor(apm82181_ch_t *chan)
+{
+ apm82181_desc_t *sw_desc, *group_start;
+ dma_cookie_t cookie;
+ int slot_cnt, slots_per_op;
+ volatile xor_regs_t *xor_reg = chan->device->xor_base;
+
+ dev_dbg(chan->device->common.dev,
+ "apm82181 adma%d: %s\n", chan->device->id, __func__);
+ INFO;
+ spin_lock_bh(&chan->lock);
+ slot_cnt = apm82181_chan_xor_slot_count(0, 2, &slots_per_op);
+ sw_desc = apm82181_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ INFO;
+ group_start = sw_desc->group_head;
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ async_tx_ack(&sw_desc->async_tx);
+ apm82181_desc_init_null_xor(group_start);
+ INFO;
+
+ cookie = chan->common.cookie;
+ cookie++;
+ if (cookie <= 1)
+ cookie = 2;
+
+ /* initialize the completed cookie to be less than
+ * the most recently used cookie
+ */
+ chan->common.completed_cookie = cookie - 1;
+ chan->common.cookie = sw_desc->async_tx.cookie = cookie;
+
+ /* channel should not be busy */
+ BUG_ON(apm82181_chan_is_busy(chan));
+
+ /* set the descriptor address */
+ apm82181_chan_set_first_xor_descriptor(chan, sw_desc);
+
+ /* run the descriptor */
+ xor_reg->crsr = XOR_CRSR_64BA_BIT | XOR_CRSR_XAE_BIT;
+ } else
+ printk(KERN_ERR "apm82181 adma%d"
+ " failed to allocate null descriptor\n",
+ chan->device->id);
+ spin_unlock_bh(&chan->lock);
+}
+
+/**
+ * apm82181_adma_alloc_chan_resources - allocate pools for CDB slots
+ */
+static int apm82181_adma_alloc_chan_resources(struct dma_chan *chan)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *slot = NULL;
+ char *hw_desc;
+ int i, db_sz;
+ int init = apm82181_chan->slots_allocated ? 0 : 1;
+
+ chan->chan_id = apm82181_chan->device->id;
+
+ /* Allocate descriptor slots */
+ i = apm82181_chan->slots_allocated;
+ if (apm82181_chan->device->id != APM82181_XOR_ID)
+ db_sz = sizeof (dma_cdb_t);
+ else
+ db_sz = sizeof (xor_cb_t);
+
+ for (; i < (apm82181_chan->device->pool_size/db_sz); i++) {
+ slot = kzalloc(sizeof(apm82181_desc_t), GFP_KERNEL);
+ if (!slot) {
+ printk(KERN_INFO "APM82181/GT ADMA Channel only initialized"
+ " %d descriptor slots", i--);
+ break;
+ }
+
+ hw_desc = (char *) apm82181_chan->device->dma_desc_pool_virt;
+ slot->hw_desc = (void *) &hw_desc[i * db_sz];
+ dma_async_tx_descriptor_init(&slot->async_tx, chan);
+ slot->async_tx.tx_submit = apm82181_adma_tx_submit;
+ INIT_LIST_HEAD(&slot->chain_node);
+ INIT_LIST_HEAD(&slot->slot_node);
+ INIT_LIST_HEAD(&slot->group_list);
+ slot->phys = apm82181_chan->device->dma_desc_pool + i * db_sz;
+ slot->idx = i;
+ spin_lock_bh(&apm82181_chan->lock);
+ apm82181_chan->slots_allocated++;
+ list_add_tail(&slot->slot_node, &apm82181_chan->all_slots);
+ spin_unlock_bh(&apm82181_chan->lock);
+ }
+
+ if (i && !apm82181_chan->last_used) {
+ apm82181_chan->last_used =
+ list_entry(apm82181_chan->all_slots.next,
+ apm82181_desc_t,
+ slot_node);
+ }
+
+ printk("apm82181 adma%d: allocated %d descriptor slots\n",
+ apm82181_chan->device->id, i);
+
+ /* initialize the channel and the chain with a null operation */
+ if (init) {
+ /* a statement placed before the first case label of a switch
+ * is never executed, so initialize the flag up front
+ */
+ apm82181_chan->hw_chain_inited = 0;
+ switch (apm82181_chan->device->id) {
+ case APM82181_PDMA0_ID:
+ apm82181_dma_tchan[0] = apm82181_chan;
+ break;
+ case APM82181_PDMA1_ID:
+ apm82181_dma_tchan[1] = apm82181_chan;
+ break;
+ case APM82181_PDMA2_ID:
+ apm82181_dma_tchan[2] = apm82181_chan;
+ break;
+ case APM82181_PDMA3_ID:
+ apm82181_dma_tchan[3] = apm82181_chan;
+ break;
+ case APM82181_XOR_ID:
+ apm82181_dma_tchan[4] = apm82181_chan;
+ apm82181_chan_start_null_xor(apm82181_chan);
+ break;
+ default:
+ BUG();
+ }
+ apm82181_chan->needs_unmap = 1;
+ }
+
+ return (i > 0) ? i : -ENOMEM;
+}
+
+/**
+ * apm82181_desc_assign_cookie - assign a cookie
+ */
+static dma_cookie_t apm82181_desc_assign_cookie(apm82181_ch_t *chan,
+ apm82181_desc_t *desc)
+{
+ dma_cookie_t cookie = chan->common.cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ chan->common.cookie = desc->async_tx.cookie = cookie;
+ return cookie;
+}
+
+
+/**
+ * apm82181_adma_check_threshold - append CDBs to the h/w chain once the
+ * threshold has been reached
+ */
+static void apm82181_adma_check_threshold(apm82181_ch_t *chan)
+{
+ dev_dbg(chan->device->common.dev, "apm82181 adma%d: pending: %d\n",
+ chan->device->id, chan->pending);
+ INFO;
+ if (chan->pending >= APM82181_ADMA_THRESHOLD) {
+ chan->pending = 0;
+ apm82181_chan_append(chan);
+ }
+}
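+
+/*
+ * Note: submitted descriptors are batched in software and only flushed to
+ * the hardware once APM82181_ADMA_THRESHOLD operations are pending, or
+ * when a client calls apm82181_adma_issue_pending(); presumably this
+ * amortizes the cost of reprogramming the engine registers.
+ */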
+
+/**
+ * apm82181_adma_tx_submit - submit a new descriptor group to the channel
+ * (the descriptors are not necessarily flushed to the h/w chain
+ * immediately)
+ */
+static dma_cookie_t apm82181_adma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ apm82181_desc_t *sw_desc = tx_to_apm82181_adma_slot(tx);
+ apm82181_ch_t *chan = to_apm82181_adma_chan(tx->chan);
+ apm82181_desc_t *group_start, *old_chain_tail;
+ int slot_cnt;
+ int slots_per_op;
+ dma_cookie_t cookie;
+ group_start = sw_desc->group_head;
+ slot_cnt = group_start->slot_cnt;
+ slots_per_op = group_start->slots_per_op;
+ INFO;
+ spin_lock_bh(&chan->lock);
+ cookie = apm82181_desc_assign_cookie(chan, sw_desc);
+
+ if (unlikely(list_empty(&chan->chain))) {
+ /* first peer */
+ list_splice_init(&sw_desc->group_list, &chan->chain);
+ chan_first_cdb[chan->device->id] = group_start;
+ } else {
+ /* isn't first peer, bind CDBs to chain */
+ old_chain_tail = list_entry(chan->chain.prev,
+ apm82181_desc_t, chain_node);
+ list_splice_init(&sw_desc->group_list,
+ &old_chain_tail->chain_node);
+ /* fix up the hardware chain */
+ apm82181_desc_set_link(chan, old_chain_tail, group_start);
+ }
+
+ /* increment the pending count by the number of operations */
+ chan->pending += slot_cnt / slots_per_op;
+ apm82181_adma_check_threshold(chan);
+ spin_unlock_bh(&chan->lock);
+
+ DBG("apm82181 adma%d:cookie: %d slot: %d tx %p\n",
+ chan->device->id, sw_desc->async_tx.cookie, sw_desc->idx, sw_desc);
+ return cookie;
+}
+
+/**
+ * apm82181_adma_prep_dma_xor - prepare CDB for an XOR operation
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_xor(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
+ unsigned long flags)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+#ifdef ADMA_DEBUG
+ printk("\n%s(%d):\n\tsrc: ", __func__,
+ apm82181_chan->device->id);
+ for (slot_cnt=0; slot_cnt < src_cnt; slot_cnt++)
+ printk("0x%llx ", dma_src[slot_cnt]);
+ printk("\n\tdst: 0x%llx\n", dma_dest);
+#endif
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > APM82181_ADMA_XOR_MAX_BYTE_COUNT));
+
+ dev_dbg(apm82181_chan->device->common.dev,
+ "apm82181 adma%d: %s src_cnt: %d len: %zu int_en: %d\n",
+ apm82181_chan->device->id, __func__, src_cnt, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ spin_lock_bh(&apm82181_chan->lock);
+ slot_cnt = apm82181_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+ sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ apm82181_desc_init_xor(group_start, src_cnt, flags);
+ apm82181_adma_set_dest(group_start, dma_dest, 0);
+ while (src_cnt--)
+ apm82181_adma_set_src(group_start,
+ dma_src[src_cnt], src_cnt);
+ apm82181_desc_set_byte_count(group_start, apm82181_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&apm82181_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * apm82181_adma_prep_dma_interrupt - prepare CDB for a pseudo DMA operation
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_interrupt(
+ struct dma_chan *chan, unsigned long flags)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+
+ dev_dbg(apm82181_chan->device->common.dev,
+ "apm82181 adma%d: %s\n", apm82181_chan->device->id,
+ __func__);
+ spin_lock_bh(&apm82181_chan->lock);
+ slot_cnt = slots_per_op = 1;
+ sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ apm82181_desc_init_interrupt(group_start, apm82181_chan);
+ group_start->unmap_len = 0;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&apm82181_chan->lock);
+
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
+/**
+ * apm82181_adma_prep_dma_memcpy - prepare CDB for a MEMCPY operation
+ */
+static struct dma_async_tx_descriptor *apm82181_adma_prep_dma_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest,
+ dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *sw_desc, *group_start;
+ int slot_cnt, slots_per_op;
+ if (unlikely(!len))
+ return NULL;
+ BUG_ON(unlikely(len > APM82181_ADMA_DMA_MAX_BYTE_COUNT));
+
+ spin_lock_bh(&apm82181_chan->lock);
+
+ dev_dbg(apm82181_chan->device->common.dev,
+ "apm82181 adma%d: %s len: %zu int_en %d\n",
+ apm82181_chan->device->id, __func__, len,
+ flags & DMA_PREP_INTERRUPT ? 1 : 0);
+
+ slot_cnt = slots_per_op = 1;
+ sw_desc = apm82181_adma_alloc_slots(apm82181_chan, slot_cnt,
+ slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ flags |= DMA_PREP_INTERRUPT;
+ apm82181_desc_init_memcpy(group_start, flags);
+ apm82181_adma_set_dest(group_start, dma_dest, 0);
+ apm82181_adma_set_src(group_start, dma_src, 0);
+ apm82181_desc_set_byte_count(group_start, apm82181_chan, len);
+ sw_desc->unmap_len = len;
+ sw_desc->async_tx.flags = flags;
+ }
+ spin_unlock_bh(&apm82181_chan->lock);
+ return sw_desc ? &sw_desc->async_tx : NULL;
+}
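+
+/*
+ * Illustrative client usage (a minimal sketch, not part of the driver):
+ * a dmaengine consumer would drive the memcpy path above roughly as
+ * follows, assuming dst and src are DMA-mapped addresses of len bytes:
+ *
+ *	struct dma_async_tx_descriptor *tx;
+ *	dma_cookie_t cookie;
+ *
+ *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
+ *						DMA_PREP_INTERRUPT);
+ *	if (!tx)
+ *		return -ENOMEM;
+ *	cookie = dmaengine_submit(tx);		(apm82181_adma_tx_submit)
+ *	dma_async_issue_pending(chan);		(flushes CDBs to the engine)
+ *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ */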
+
+
+/**
+ * apm82181_adma_set_dest - set destination address into descriptor
+ */
+static void apm82181_adma_set_dest(apm82181_desc_t *sw_desc,
+ dma_addr_t addr, int index)
+{
+ apm82181_ch_t *chan = to_apm82181_adma_chan(sw_desc->async_tx.chan);
+ BUG_ON(index >= sw_desc->dst_cnt);
+
+ switch (chan->device->id) {
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ /* to do: support transfer lengths >
+ * APM82181_ADMA_DMA/XOR_MAX_BYTE_COUNT
+ */
+ apm82181_desc_set_dest_addr(sw_desc->group_head,
+ chan, addr, index);
+ break;
+ case APM82181_XOR_ID:
+ sw_desc = apm82181_get_group_entry(sw_desc, index);
+ apm82181_desc_set_dest_addr(sw_desc, chan,
+ addr, index);
+ break;
+ default:
+ BUG();
+ }
+}
+
+
+/**
+ * apm82181_adma_free_chan_resources - free the resources allocated
+ */
+static void apm82181_adma_free_chan_resources(struct dma_chan *chan)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ apm82181_desc_t *iter, *_iter;
+ int in_use_descs = 0;
+
+ apm82181_adma_slot_cleanup(apm82181_chan);
+
+ spin_lock_bh(&apm82181_chan->lock);
+ list_for_each_entry_safe(iter, _iter, &apm82181_chan->chain,
+ chain_node) {
+ in_use_descs++;
+ list_del(&iter->chain_node);
+ }
+ list_for_each_entry_safe_reverse(iter, _iter,
+ &apm82181_chan->all_slots, slot_node) {
+ list_del(&iter->slot_node);
+ kfree(iter);
+ apm82181_chan->slots_allocated--;
+ }
+ apm82181_chan->last_used = NULL;
+
+ dev_dbg(apm82181_chan->device->common.dev,
+ "apm82181 adma%d %s slots_allocated %d\n",
+ apm82181_chan->device->id,
+ __func__, apm82181_chan->slots_allocated);
+ spin_unlock_bh(&apm82181_chan->lock);
+
+ /* one is ok since we left it on there on purpose */
+ if (in_use_descs > 1)
+ printk(KERN_ERR "GT: Freeing %d in use descriptors!\n",
+ in_use_descs - 1);
+}
+
+/**
+ * apm82181_adma_tx_status - poll the status of an ADMA transaction
+ * @chan: ADMA channel handle
+ * @cookie: ADMA transaction identifier
+ */
+static enum dma_status apm82181_adma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+ enum dma_status ret;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE)
+ return ret;
+
+ apm82181_adma_slot_cleanup(apm82181_chan);
+
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+/**
+ * apm82181_adma_eot_handler - end of transfer interrupt handler
+ */
+static irqreturn_t apm82181_adma_eot_handler(int irq, void *data)
+{
+ apm82181_ch_t *chan = data;
+
+ dev_dbg(chan->device->common.dev,
+ "apm82181 adma%d: %s\n", chan->device->id, __FUNCTION__);
+ INFO;
+ if(chan->device->id == APM82181_XOR_ID)
+ tasklet_schedule(&chan->irq_tasklet);
+ apm82181_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * apm82181_adma_err_handler - DMA error interrupt handler;
+ * does the same things as an EOT handler
+ */
+#if 0
+static irqreturn_t apm82181_adma_err_handler(int irq, void *data)
+{
+ apm82181_ch_t *chan = data;
+ dev_dbg(chan->device->common.dev,
+ "apm82181 adma%d: %s\n", chan->device->id, __FUNCTION__);
+ tasklet_schedule(&chan->irq_tasklet);
+ apm82181_adma_device_clear_eot_status(chan);
+
+ return IRQ_HANDLED;
+}
+#endif
+/**
+ * apm82181_test_callback - called when the test operation completes
+ */
+static void apm82181_test_callback (void *unused)
+{
+ complete(&apm82181_r5_test_comp);
+}
+
+/**
+ * apm82181_adma_issue_pending - flush all pending descriptors to h/w
+ */
+static void apm82181_adma_issue_pending(struct dma_chan *chan)
+{
+ apm82181_ch_t *apm82181_chan = to_apm82181_adma_chan(chan);
+
+ DBG("apm82181 adma%d: %s %d \n", apm82181_chan->device->id,
+ __FUNCTION__, apm82181_chan->pending);
+ if (apm82181_chan->pending) {
+ apm82181_chan->pending = 0;
+ apm82181_chan_append(apm82181_chan);
+ }
+}
+
+static inline void xor_hw_init (apm82181_dev_t *adev)
+{
+ volatile xor_regs_t *xor_reg = adev->xor_base;
+ /* Reset XOR */
+ xor_reg->crsr = XOR_CRSR_XASR_BIT;
+ xor_reg->crrr = XOR_CRSR_64BA_BIT;
+
+ /* enable XOR engine interrupts */
+ xor_reg->ier = XOR_IE_CBCIE_BIT |
+ XOR_IE_ICBIE_BIT | XOR_IE_ICIE_BIT | XOR_IE_RPTIE_BIT;
+}
+
+/*
+ * Per channel probe
+ */
+static int apm82181_dma_per_chan_probe(struct platform_device *ofdev)
+{
+ int ret = 0, irq;
+ const u32 *index, *dcr_regs, *pool_size;
+ apm82181_plb_dma_t *pdma;
+ apm82181_dev_t *adev;
+ apm82181_ch_t *chan;
+ struct device_node *np = ofdev->dev.of_node;
+ struct resource res;
+ int len;
+
+ INFO;
+ pdma = dev_get_drvdata(ofdev->dev.parent);
+ BUG_ON(!pdma);
+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ if (!adev) {
+ printk(KERN_ERR "No free memory for allocating dma channels\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ adev->dev = &ofdev->dev;
+ index = of_get_property(np, "cell-index", NULL);
+ if(!index) {
+ printk(KERN_ERR "adma-channel: Device node %s has missing or invalid "
+ "cell-index property\n", np->full_name);
+ goto err;
+ }
+ adev->id = (int)*index;
+ /* The XOR engine/PLB DMA 4 channels have different resources/pool_sizes */
+ if (adev->id != APM82181_XOR_ID){
+ dcr_regs = of_get_property(np, "dcr-reg", &len);
+ if (!dcr_regs || (len != 2 * sizeof(u32))) {
+ printk(KERN_ERR "plb_dma channel%d: Can't get DCR register base !",
+ adev->id);
+ goto err;
+ }
+ adev->dcr_base = dcr_regs[0];
+
+ pool_size = of_get_property(np, "pool_size", NULL);
+ if(!pool_size) {
+ printk(KERN_ERR "plb_dma channel%d: Device node %s has missing or "
+ "invalid pool_size property\n", adev->id, np->full_name);
+ goto err;
+ }
+ adev->pool_size = *pool_size;
+ } else {
+ if (of_address_to_resource(np, 0, &res)) {
+ printk(KERN_ERR "adma_xor channel%d %s: could not get resource address.\n",
+ adev->id,np->full_name);
+ goto err;
+ }
+
+ DBG("XOR resource start = %llx end = %llx\n", res.start, res.end);
+ adev->xor_base = ioremap(res.start, resource_size(&res));
+ if (!adev->xor_base){
+ printk(KERN_ERR "XOR engine registers memory mapping failed.\n");
+ goto err;
+ }
+ adev->pool_size = PAGE_SIZE << 1;
+ }
+
+ adev->pdma = pdma;
+ adev->ofdev = ofdev;
+ dev_set_drvdata(&ofdev->dev, adev);
+
+ switch (adev->id){
+ case APM82181_PDMA0_ID:
+ case APM82181_PDMA1_ID:
+ case APM82181_PDMA2_ID:
+ case APM82181_PDMA3_ID:
+ dma_cap_set(DMA_MEMCPY,adev->cap_mask);
+ break;
+ case APM82181_XOR_ID:
+ dma_cap_set(DMA_XOR,adev->cap_mask);
+ dma_cap_set(DMA_INTERRUPT,adev->cap_mask);
+ break;
+ default:
+ BUG();
+ }
+ /* XOR h/w configuration */
+ if(adev->id == APM82181_XOR_ID)
+ xor_hw_init(adev);
+ /* allocate coherent memory for hardware descriptors
+ * note: writecombine gives slightly better performance, but
+ * requires that we explicitly drain the write buffer
+ */
+ if ((adev->dma_desc_pool_virt = dma_alloc_coherent(&ofdev->dev,
+ adev->pool_size, &adev->dma_desc_pool, GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto err_dma_alloc;
+ }
+
+ adev->common.cap_mask = adev->cap_mask;
+ INIT_LIST_HEAD(&adev->common.channels);
+ /* set base routines */
+ adev->common.device_alloc_chan_resources =
+ apm82181_adma_alloc_chan_resources;
+ adev->common.device_free_chan_resources =
+ apm82181_adma_free_chan_resources;
+ adev->common.device_tx_status = apm82181_adma_tx_status;
+ adev->common.device_issue_pending = apm82181_adma_issue_pending;
+ adev->common.dev = &ofdev->dev;
+
+ /* set prep routines based on capability */
+ if (dma_has_cap(DMA_MEMCPY, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_memcpy =
+ apm82181_adma_prep_dma_memcpy;
+ }
+
+ if (dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask)) {
+ adev->common.device_prep_dma_interrupt =
+ apm82181_adma_prep_dma_interrupt;
+ }
+
+ if (dma_has_cap(DMA_XOR, adev->common.cap_mask)) {
+ adev->common.max_xor = XOR_MAX_OPS;
+ adev->common.device_prep_dma_xor =
+ apm82181_adma_prep_dma_xor;
+ }
+
+ /* create a channel */
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ ret = -ENOMEM;
+ goto err_chan_alloc;
+ }
+ tasklet_init(&chan->irq_tasklet, apm82181_adma_tasklet,
+ (unsigned long)chan);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ printk(KERN_ERR "adma-chan%d: failed to map IRQ\n", adev->id);
+ ret = -ENXIO;
+ goto err_irq;
+ }
+
+ {
+ /* one EOT handler per engine; only the IRQ name differs */
+ static const char * const irq_names[] = {
+ "adma-chan0", "adma-chan1", "adma-chan2",
+ "adma-chan3", "adma-xor",
+ };
+
+ BUG_ON((unsigned int)adev->id >= ARRAY_SIZE(irq_names));
+ ret = request_irq(irq, apm82181_adma_eot_handler,
+ IRQF_DISABLED, irq_names[adev->id], chan);
+ if (ret) {
+ printk(KERN_ERR "Failed to request IRQ %d\n", irq);
+ ret = -EIO;
+ goto err_irq;
+ }
+ }
+
+ spin_lock_init(&chan->lock);
+ chan->device = adev;
+ INIT_LIST_HEAD(&chan->chain);
+ INIT_LIST_HEAD(&chan->all_slots);
+ chan->common.device = &adev->common;
+ list_add_tail(&chan->common.device_node, &adev->common.channels);
+ adev->common.chancnt++;
+
+ printk( "AMCC(R) APM82181 ADMA Engine found [%d]: "
+ "( capabilities: %s%s%s%s%s%s)\n",
+ adev->id,
+ dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq_xor " : "",
+ dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " :
+ "",
+ dma_has_cap(DMA_XOR, adev->common.cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_XOR_VAL, adev->common.cap_mask) ? "xor_val " :
+ "",
+ dma_has_cap(DMA_MEMCPY, adev->common.cap_mask) ? "memcpy " : "",
+ dma_has_cap(DMA_INTERRUPT, adev->common.cap_mask) ? "int " : "");
+ INFO;
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ dev_err(&ofdev->dev, "failed to register dma async device\n");
+ goto err_irq;
+ }
+ INFO;
+
+ goto out;
+err_irq:
+ kfree(chan);
+err_chan_alloc:
+ dma_free_coherent(&ofdev->dev, adev->pool_size,
+ adev->dma_desc_pool_virt, adev->dma_desc_pool);
+err_dma_alloc:
+ if (adev->xor_base)
+ iounmap(adev->xor_base);
+err:
+ /* several error paths jump here without setting ret */
+ if (!ret)
+ ret = -ENXIO;
+ kfree(adev);
+out:
+ return ret;
+}
+
+static const struct of_device_id dma_4chan_match[] = {
+ { .compatible = "amcc,apm82181-adma", },
+ {},
+};
+
+static const struct of_device_id dma_per_chan_match[] = {
+ { .compatible = "amcc,apm82181-dma-4channel", },
+ { .compatible = "amcc,xor", },
+ {},
+};
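+
+/*
+ * Illustrative device-tree snippet (the values are assumptions, not taken
+ * from a real dtsi): the per-channel probe above expects nodes of the form
+ *
+ *	DMA0: dma-chan0 {
+ *		compatible = "amcc,apm82181-dma-4channel";
+ *		cell-index = <0>;
+ *		dcr-reg = <0x100 0x20>;
+ *		pool_size = <0x4000>;
+ *	};
+ *
+ * while the XOR engine node ("amcc,xor") carries a memory-mapped reg
+ * range instead of dcr-reg/pool_size.
+ */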
+
+/*
+ * apm82181_pdma_probe - probe the top-level ADMA device and its child
+ * channels
+ */
+static int apm82181_pdma_probe(struct platform_device *ofdev)
+{
+ int ret = 0;
+ apm82181_plb_dma_t *pdma;
+
+ if ((pdma = kzalloc(sizeof(*pdma), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pdma->dev = &ofdev->dev;
+ pdma->ofdev = ofdev;
+ printk(PPC4XX_EDMA "Probing AMCC APM82181 ADMA engines...\n");
+
+ dev_set_drvdata(&ofdev->dev, pdma);
+ of_platform_bus_probe(ofdev->dev.of_node, dma_per_chan_match, &ofdev->dev);
+
+out:
+ return ret;
+}
+
+/*
+ * apm82181_test_xor - test whether the RAID-5 XOR capability works.
+ * For this we just perform one DMA XOR operation with three sources
+ * and one destination.
+ */
+static int apm82181_test_xor (apm82181_ch_t *chan)
+{
+ apm82181_desc_t *sw_desc, *group_start;
+ struct page *pg_src[3], *pg_dest;
+ char *a;
+ dma_addr_t dma_src_addr[3];
+ dma_addr_t dma_dst_addr;
+ int rval = -EFAULT, i;
+ int len = PAGE_SIZE, src_cnt = 3;
+ int slot_cnt, slots_per_op;
+ INFO;
+ printk(KERN_INFO "ADMA channel %d XOR testing\n", chan->device->id);
+ for (i = 0; i < 3; i++) {
+ pg_src[i] = alloc_page(GFP_KERNEL);
+ if (!pg_src[i]) {
+ while (--i >= 0)
+ __free_page(pg_src[i]);
+ return -ENOMEM;
+ }
+ }
+ pg_dest = alloc_page(GFP_KERNEL);
+ if (!pg_dest) {
+ for (i = 0; i < 3; i++)
+ __free_page(pg_src[i]);
+ return -ENOMEM;
+ }
+ /* Fill the source and destination pages with test patterns */
+ memset(page_address(pg_src[0]), 0xDA, len);
+ memset(page_address(pg_src[1]), 0xDA, len);
+ memset(page_address(pg_src[2]), 0x00, len);
+ memset(page_address(pg_dest), 0xA5, len);
+ for (i = 0; i < 3; i++) {
+ a = page_address(pg_src[i]);
+ printk("The virtual addr of src %d = %p\n", i, a);
+ MEM_HEXDUMP(a, 50);
+ }
+ a = page_address(pg_dest);
+ printk("The virtual addr of dest = %p\n", a);
+ MEM_HEXDUMP(a, 50);
+
+ for(i = 0; i < 3; i++){
+ dma_src_addr[i] = dma_map_page(chan->device->dev, pg_src[i], 0, len,
+ DMA_BIDIRECTIONAL);
+ }
+ dma_dst_addr = dma_map_page(chan->device->dev, pg_dest, 0, len,
+ DMA_BIDIRECTIONAL);
+ printk("dma_src_addr[0]: %llx; dma_src_addr[1]: %llx;\n "
+ "dma_src_addr[2]: %llx; dma_dst_addr %llx, len: %x\n", dma_src_addr[0],
+ dma_src_addr[1], dma_src_addr[2], dma_dst_addr, len);
+
+ spin_lock_bh(&chan->lock);
+ slot_cnt = apm82181_chan_xor_slot_count(len, src_cnt, &slots_per_op);
+ sw_desc = apm82181_adma_alloc_slots(chan, slot_cnt, slots_per_op);
+ if (sw_desc) {
+ group_start = sw_desc->group_head;
+ apm82181_desc_init_xor(group_start, src_cnt, DMA_PREP_INTERRUPT);
+ /* Setup addresses */
+ while (src_cnt--)
+ apm82181_adma_set_src(group_start,
+ dma_src_addr[src_cnt], src_cnt);
+ apm82181_adma_set_dest(group_start, dma_dst_addr, 0);
+ apm82181_desc_set_byte_count(group_start, chan, len);
+ sw_desc->unmap_len = PAGE_SIZE;
+ } else {
+ rval = -EFAULT;
+ spin_unlock_bh(&chan->lock);
+ goto exit;
+ }
+ spin_unlock_bh(&chan->lock);
+
+ printk("Submit CDB...\n");
+ MEM_HEXDUMP(sw_desc->hw_desc, 96);
+ async_tx_ack(&sw_desc->async_tx);
+ sw_desc->async_tx.callback = apm82181_test_callback;
+ sw_desc->async_tx.callback_param = NULL;
+
+ init_completion(&apm82181_r5_test_comp);
+ apm82181_adma_tx_submit(&sw_desc->async_tx);
+ apm82181_adma_issue_pending(&chan->common);
+ //wait_for_completion(&apm82181_r5_test_comp);
+ /* wait for a while so that dma transaction finishes */
+ mdelay(100);
+ /* Now check whether the destination page was zeroed:
+ * 0xDA ^ 0xDA ^ 0x00 == 0x00, so every byte should be zero
+ */
+ a = page_address(pg_dest);
+ /* XOR result at destination */
+ MEM_HEXDUMP(a,50);
+ if ((*(u32 *)a) == 0x00000000 && memcmp(a, a + 4, PAGE_SIZE - 4) == 0) {
+ /* dest page XOR result is correct as expected - RAID-5 enabled */
+ rval = 0;
+ } else {
+ /* RAID-5 was not enabled */
+ rval = -EINVAL;
+ }
+
+exit:
+ dma_unmap_page(chan->device->dev, dma_src_addr[0], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(chan->device->dev, dma_src_addr[1], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(chan->device->dev, dma_src_addr[2], PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(chan->device->dev, dma_dst_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(pg_src[0]);
+ __free_page(pg_src[1]);
+ __free_page(pg_src[2]);
+ __free_page(pg_dest);
+ return rval;
+}
+
+
+/*
+ * apm82181_test_dma - test whether the DMA memcpy capability works.
+ * For this we just perform one memcpy between two freshly allocated
+ * pages and compare source and destination afterwards.
+ */
+static int apm82181_test_dma (apm82181_ch_t *chan)
+{
+ apm82181_desc_t *sw_desc;
+ struct page *pg_src, *pg_dest;
+ char *a, *d;
+ dma_addr_t dma_src_addr;
+ dma_addr_t dma_dst_addr;
+ int rval = -EFAULT;
+ int len = PAGE_SIZE;
+
+ printk("PLB DMA channel %d memcpy testing\n",chan->device->id);
+ pg_src = alloc_page(GFP_KERNEL);
+ if (!pg_src)
+ return -ENOMEM;
+ pg_dest = alloc_page(GFP_KERNEL);
+ if (!pg_dest) {
+ __free_page(pg_src);
+ return -ENOMEM;
+ }
+ /* Fill the source and destination pages with test patterns */
+ memset(page_address(pg_src), 0x77, len);
+ memset(page_address(pg_dest), 0xa5, len);
+ a = page_address(pg_src);
+ printk("The virtual addr of src = %p\n", a);
+ MEM_HEXDUMP(a, 50);
+ a = page_address(pg_dest);
+ printk("The virtual addr of dest = %p\n", a);
+ MEM_HEXDUMP(a, 50);
+ dma_src_addr = dma_map_page(chan->device->dev, pg_src, 0, len,
+ DMA_BIDIRECTIONAL);
+ dma_dst_addr = dma_map_page(chan->device->dev, pg_dest, 0, len,
+ DMA_BIDIRECTIONAL);
+ printk("dma_src_addr: %llx; dma_dst_addr %llx\n", dma_src_addr, dma_dst_addr);
+
+ spin_lock_bh(&chan->lock);
+ sw_desc = apm82181_adma_alloc_slots(chan, 1, 1);
+ if (sw_desc) {
+ /* 1 src, 1 dst, int_ena */
+ apm82181_desc_init_memcpy(sw_desc, DMA_PREP_INTERRUPT);
+ /* Setup addresses */
+ apm82181_adma_set_src(sw_desc, dma_src_addr, 0);
+ apm82181_adma_set_dest(sw_desc, dma_dst_addr, 0);
+ apm82181_desc_set_byte_count(sw_desc, chan, len);
+ sw_desc->unmap_len = PAGE_SIZE;
+ } else {
+ rval = -EFAULT;
+ spin_unlock_bh(&chan->lock);
+ goto exit;
+ }
+ spin_unlock_bh(&chan->lock);
+
+ printk("Submit CDB...\n");
+ MEM_HEXDUMP(sw_desc->hw_desc, 96);
+ async_tx_ack(&sw_desc->async_tx);
+ sw_desc->async_tx.callback = apm82181_test_callback;
+ sw_desc->async_tx.callback_param = NULL;
+
+ init_completion(&apm82181_r5_test_comp);
+ apm82181_adma_tx_submit(&sw_desc->async_tx);
+ apm82181_adma_issue_pending(&chan->common);
+ //wait_for_completion(&apm82181_r5_test_comp);
+ /* wait for a while so that the dma transaction completes */
+ mdelay(100);
+
+ a = page_address(pg_src);
+ d = page_address(pg_dest);
+ if (!memcmp(a, d, len)) {
+ rval = 0;
+ } else {
+ rval = -EINVAL;
+ }
+
+ a = page_address(pg_src);
+ printk("\nAfter DMA done:");
+ printk("\nsrc %p value:\n", a);
+ MEM_HEXDUMP(a, 96);
+ a = page_address(pg_dest);
+ printk("\ndest %p value:\n", a);
+ MEM_HEXDUMP(a, 96);
+
+exit:
+ dma_unmap_page(chan->device->dev, dma_src_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(chan->device->dev, dma_dst_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ __free_page(pg_src);
+ __free_page(pg_dest);
+ return rval;
+}
+
+static struct platform_driver apm82181_pdma_driver = {
+ .driver = {
+ .name = "apm82181_plb_dma",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_4chan_match,
+ },
+ .probe = apm82181_pdma_probe,
+};
+static struct platform_driver apm82181_dma_per_chan_driver = {
+ .driver = {
+ .name = "apm82181-dma-4channel",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_per_chan_match,
+ },
+ .probe = apm82181_dma_per_chan_probe,
+};
+
+static int __init apm82181_adma_per_chan_init(void)
+{
+	return platform_driver_register(&apm82181_dma_per_chan_driver);
+}
+
+static int __init apm82181_adma_init(void)
+{
+	return platform_driver_register(&apm82181_pdma_driver);
+}
+
+#if 0
+static void __exit apm82181_adma_exit (void)
+{
+ of_unregister_platform_driver(&apm82181_pdma_driver);
+ return;
+}
+module_exit(apm82181_adma_exit);
+#endif
+
+module_init(apm82181_adma_per_chan_init);
+module_init(apm82181_adma_init);
+
+MODULE_AUTHOR("Tai Tri Nguyen<ttnguyen@appliedmicro.com>");
+MODULE_DESCRIPTION("APM82181 ADMA Engine Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c
new file mode 100644
index 00000000000..821e279e0b7
--- /dev/null
+++ b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright(c) 2008 Applied Micro Circuits Corporation(AMCC). All rights reserved.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/async_tx.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/dcr-regs.h>
+#include <asm/dcr.h>
+#include "ppc460ex_4chan_dma.h"
+
+
+
+#ifdef DEBUG_TEST
+#define dma_pr printk
+#else
+#define dma_pr(fmt...) do { } while (0)
+#endif
+#define TEST_SIZE 12
+
+
+ppc460ex_plb_dma_dev_t *adev;
+
+
+
+int ppc460ex_get_dma_channel(void)
+{
+ int i;
+ unsigned int status = 0;
+ status = mfdcr(DCR_DMA2P40_SR);
+
+ for (i = 0; i < MAX_PPC460EX_DMA_CHANNELS; i++) {
+ /* a channel is free when its busy bit (assumed at
+ * bit 20 + i of the status word) is clear
+ */
+ if ((status & (1 << (20 + i))) == 0)
+ return i;
+ }
+ return -ENODEV;
+}
+
+
+int ppc460ex_get_dma_status(void)
+{
+ return (mfdcr(DCR_DMA2P40_SR));
+
+}
+
+
+int ppc460ex_set_src_addr(int ch_id, phys_addr_t src_addr)
+{
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __func__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+
+#ifdef PPC4xx_DMA_64BIT
+ mtdcr(DCR_DMA2P40_SAH0 + ch_id*8, src_addr >> 32);
+#endif
+ mtdcr(DCR_DMA2P40_SAL0 + ch_id*8, (u32)src_addr);
+
+ return DMA_STATUS_GOOD;
+}
+
+int ppc460ex_set_dst_addr(int ch_id, phys_addr_t dst_addr)
+{
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+#ifdef PPC4xx_DMA_64BIT
+ mtdcr(DCR_DMA2P40_DAH0 + ch_id*8, dst_addr >> 32);
+#endif
+ mtdcr(DCR_DMA2P40_DAL0 + ch_id*8, (u32)dst_addr);
+
+ return DMA_STATUS_GOOD;
+}
+
+
+
+/*
+ * Sets the dma mode for single DMA transfers only.
+ * For scatter/gather transfers, the mode is passed to the
+ * alloc_dma_handle() function as one of the parameters.
+ *
+ * The mode is simply saved and used later. This allows
+ * the driver to call set_dma_mode() and set_dma_addr() in
+ * any order.
+ *
+ * Valid mode values are:
+ *
+ * DMA_MODE_READ peripheral to memory
+ * DMA_MODE_WRITE memory to peripheral
+ * DMA_MODE_MM memory to memory
+ * DMA_MODE_MM_DEVATSRC device-paced memory to memory, device at src
+ * DMA_MODE_MM_DEVATDST device-paced memory to memory, device at dst
+ */
+int ppc460ex_set_dma_mode(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int mode)
+{
+ ppc460ex_plb_dma_ch_t *dma_chan;
+
+ /* validate ch_id before indexing adev->chan[] with it */
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __func__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ dma_chan = adev->chan[ch_id];
+ dma_chan->mode = mode;
+ return DMA_STATUS_GOOD;
+}
+
+
+
+
+/*
+ * Sets the DMA Count register. Note that 'count' is in bytes.
+ * However, the DMA Count register counts the number of "transfers",
+ * where each transfer is equal to the bus width. Thus, count
+ * MUST be a multiple of the bus width.
+ */
+void ppc460ex_set_dma_count(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int count)
+{
+ ppc460ex_plb_dma_ch_t *dma_chan = adev->chan[ch_id];
+
+ {
+ int error = 0;
+ switch (dma_chan->pwidth) {
+ case PW_8:
+ break;
+ case PW_16:
+ if (count & 0x1)
+ error = 1;
+ break;
+ case PW_32:
+ if (count & 0x3)
+ error = 1;
+ break;
+ case PW_64:
+ if (count & 0x7)
+ error = 1;
+ break;
+
+ case PW_128:
+ if (count & 0xf)
+ error = 1;
+ break;
+ default:
+ printk(KERN_ERR "set_dma_count: invalid bus width: 0x%x\n",
+ dma_chan->pwidth);
+ return;
+ }
+ if (error)
+ printk(KERN_WARNING
+ "set_dma_count: count 0x%x is not a multiple of bus width %d\n",
+ count, dma_chan->pwidth);
+ }
+ count = count >> dma_chan->shift;
+ mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), count);
+
+}
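+
+/*
+ * Illustrative sequence (a sketch, assuming the caller already owns an
+ * idle channel ch and DMA-mapped src/dst buffers):
+ *
+ *	ppc460ex_set_dma_mode(adev, ch, DMA_MODE_MM);
+ *	ppc460ex_set_src_addr(ch, src);
+ *	ppc460ex_set_dst_addr(ch, dst);
+ *	ppc460ex_set_dma_count(adev, ch, len);	(len: multiple of bus width)
+ *	ppc460ex_enable_dma_interrupt(adev, ch);
+ *
+ * The transfer is then started by setting the channel-enable (CE) bit in
+ * the channel's control register (not shown in this excerpt).
+ */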
+
+
+
+
+/*
+ * Enables the channel interrupt.
+ *
+ * If performing a scatter/gather transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be enabled, if
+ * they were previously disabled.
+ */
+int ppc460ex_enable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ dma_chan = adev->chan[ch_id];
+
+ dma_chan->int_enable = 1;
+
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control |= DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ control = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8));
+ control |= DMA_CTC_TCIE | DMA_CTC_ETIE | DMA_CTC_EIE;
+ mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), control);
+
+ return DMA_STATUS_GOOD;
+
+}
+
+
+/*
+ * Disables the channel interrupt.
+ *
+ * If performing a scatter/gather transfer, this function
+ * MUST be called before calling alloc_dma_handle() and building
+ * the sgl list. Otherwise, interrupts will not be disabled, if
+ * they were previously enabled.
+ */
+int ppc460ex_disable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ dma_chan = adev->chan[ch_id];
+ dma_chan->int_enable = 0;
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control &= ~DMA_CIE_ENABLE; /* Channel Interrupt Enable */
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * This function returns the channel configuration.
+ */
+int ppc460ex_get_channel_config(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+ ppc460ex_plb_dma_ch_t *p_dma_ch)
+{
+ unsigned int polarity;
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ dma_chan = adev->chan[ch_id];
+ memcpy(p_dma_ch, dma_chan, sizeof(ppc460ex_plb_dma_ch_t));
+
+ polarity = mfdcr(DCR_DMA2P40_POL);
+
+ p_dma_ch->polarity = polarity & GET_DMA_POLARITY(ch_id);
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+
+ p_dma_ch->cp = GET_DMA_PRIORITY(control);
+ p_dma_ch->pwidth = GET_DMA_PW(control);
+ p_dma_ch->psc = GET_DMA_PSC(control);
+ p_dma_ch->pwc = GET_DMA_PWC(control);
+ p_dma_ch->phc = GET_DMA_PHC(control);
+ p_dma_ch->ce = GET_DMA_CE_ENABLE(control);
+ p_dma_ch->int_enable = GET_DMA_CIE_ENABLE(control);
+ p_dma_ch->shift = GET_DMA_PW(control);
+ p_dma_ch->pf = GET_DMA_PREFETCH(control);
+
+ return DMA_STATUS_GOOD;
+
+}
+
+/*
+ * Sets the priority for the DMA channel dmanr.
+ * Since this is setup by the hardware init function, this function
+ * can be used to dynamically change the priority of a channel.
+ *
+ * Acceptable priorities:
+ *
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ *
+ */
+int ppc460ex_set_channel_priority(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+ unsigned int priority)
+{
+ unsigned int control;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ if ((priority != PRIORITY_LOW) &&
+ (priority != PRIORITY_MID_LOW) &&
+ (priority != PRIORITY_MID_HIGH) && (priority != PRIORITY_HIGH)) {
+ printk("%s:bad priority: 0x%x\n", __FUNCTION__, priority);
+ }
+
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control |= SET_DMA_PRIORITY(priority);
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ return DMA_STATUS_GOOD;
+}
+
+/*
+ * Returns the width of the peripheral attached to this channel. This assumes
+ * that someone who knows the hardware configuration, boot code or some other
+ * init code, already set the width.
+ *
+ * The return value is one of:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ *
+ * On an invalid channel it returns DMA_STATUS_BAD_CHANNEL.
+ */
+unsigned int ppc460ex_get_peripheral_width(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ return (GET_DMA_PW(control));
+}
+
+/*
+ * Enables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int ppc460ex_enable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int ctc;
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) | DMA_CTC_BTEN;
+ mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc);
+ return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Disables the burst on the channel (BTEN bit in the control/count register)
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int ppc460ex_disable_burst(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int ctc;
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) &~ DMA_CTC_BTEN;
+ mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc);
+ return DMA_STATUS_GOOD;
+}
+
+
+/*
+ * Sets the burst size (number of peripheral widths) for the channel
+ * (BSIZ bits in the control/count register))
+ * must be one of:
+ * DMA_CTC_BSIZ_2
+ * DMA_CTC_BSIZ_4
+ * DMA_CTC_BSIZ_8
+ * DMA_CTC_BSIZ_16
+ * Note:
+ * For scatter/gather dma, this function MUST be called before the
+ * ppc4xx_alloc_dma_handle() func as the chan count register is copied into the
+ * sgl list and used as each sgl element is added.
+ */
+int ppc460ex_set_burst_size(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+ unsigned int bsize)
+{
+ unsigned int ctc;
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ ctc = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8)) &~ DMA_CTC_BSIZ_MSK;
+ ctc |= (bsize & DMA_CTC_BSIZ_MSK);
+ mtdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8), ctc);
+ return DMA_STATUS_GOOD;
+}
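+
+/*
+ * Illustrative sketch (compiled out): enabling 16-beat bursts on channel 0.
+ * Per the notes above, both calls must happen before any scatter/gather
+ * list is built. The global 'adev' is assumed.
+ */
+#if 0
+static void example_enable_burst16(void)
+{
+ ppc460ex_enable_burst(adev, 0);
+ ppc460ex_set_burst_size(adev, 0, DMA_CTC_BSIZ_16);
+}
+#endif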
+
+/*
+ * Returns the number of bytes left to be transferred.
+ * After a DMA transfer, this should return zero.
+ * Reading this while a DMA transfer is still in progress will return
+ * unpredictable results.
+ */
+int ppc460ex_get_dma_residue(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int count;
+ ppc460ex_plb_dma_ch_t *dma_chan;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ dma_chan = adev->chan[ch_id];
+
+ count = mfdcr(DCR_DMA2P40_CTC0 + (ch_id * 0x8));
+ count &= DMA_CTC_TC_MASK ;
+
+ return (count << dma_chan->shift);
+
+}
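+
+/*
+ * Illustrative sketch (compiled out): busy-polling channel 0 to completion
+ * and then reading the residue, which should be zero after a full transfer.
+ * The global 'adev' is assumed.
+ */
+#if 0
+static int example_wait_and_check(void)
+{
+ while (mfdcr(DCR_DMA2P40_SR) & DMA_CH0_BUSY)
+ ; /* spin until the channel-busy bit clears */
+ return ppc460ex_get_dma_residue(adev, 0); /* 0 == all bytes moved */
+}
+#endif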
+
+
+/*
+ * Configures a DMA channel, including the peripheral bus width, if a
+ * peripheral is attached to the channel, the polarity of the DMAReq and
+ * DMAAck signals, etc. This information should really be setup by the boot
+ * code, since most likely the configuration won't change dynamically.
+ * If the kernel has to call this function, it's recommended that it's
+ * called from platform specific init code. The driver should not need to
+ * call this function.
+ */
+int ppc460ex_init_dma_channel(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id,
+ ppc460ex_plb_dma_ch_t *p_init)
+{
+ unsigned int polarity;
+ uint32_t control;
+ ppc460ex_plb_dma_ch_t *dma_chan;
+
+ DMA_MODE_READ = (unsigned long) DMA_TD; /* Peripheral to Memory */
+ DMA_MODE_WRITE = 0; /* Memory to Peripheral */
+
+ if (!p_init) {
+ printk(KERN_ERR "%s: NULL p_init\n", __FUNCTION__);
+ return DMA_STATUS_NULL_POINTER;
+ }
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ dma_chan = adev->chan[ch_id];
+#if DCR_DMA2P40_POL > 0
+ polarity = mfdcr(DCR_DMA2P40_POL);
+#else
+ polarity = 0;
+#endif
+
+ p_init->int_enable = 0;
+ p_init->buffer_enable = 1;
+ p_init->etd_output = 1;
+ p_init->tce_enable = 1;
+ p_init->pl = 0;
+ p_init->dai = 1;
+ p_init->sai = 1;
+ /* Duc Dang: use channel priority 2 instead of the original 3 */
+ p_init->cp = 2;
+ p_init->pwidth = PW_8;
+ p_init->psc = 0;
+ p_init->pwc = 0;
+ p_init->phc = 0;
+ p_init->pf = 1;
+
+
+ /* Build the control word from the values passed in p_init and
+ * write it to the channel control register.
+ */
+ control = SET_DMA_CONTROL;
+
+ /* clear all polarity signals and then "or" in new signal levels */
+
+
+ polarity &= ~GET_DMA_POLARITY(ch_id);
+ polarity |= p_init->polarity;
+
+#if DCR_DMA2P40_POL > 0
+ mtdcr(DCR_DMA2P40_POL, polarity);
+#endif
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ /* save these values in our dma channel structure */
+ /*
+ * The peripheral width values written in the control register are:
+ * PW_8 0
+ * PW_16 1
+ * PW_32 2
+ * PW_64 3
+ * PW_128 4
+ *
+ * Since the DMA count register takes the number of "transfers",
+ * we need to divide the count sent to us in certain
+ * functions by the appropriate number. It so happens that our
+ * right shift value is equal to the peripheral width value.
+ */
+ dma_chan->shift = p_init->pwidth;
+ dma_chan->sai = p_init->sai;
+ dma_chan->dai = p_init->dai;
+ dma_chan->tce_enable = p_init->tce_enable;
+ dma_chan->mode = DMA_MODE_MM;
+ /*
+ * Save the control word for easy access.
+ */
+ dma_chan->control = control;
+ mtdcr(DCR_DMA2P40_SR, 0xffffffff);
+
+
+ return DMA_STATUS_GOOD;
+}
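+
+/*
+ * Illustrative sketch (compiled out): the expected call sequence for a
+ * one-shot memory-to-memory copy, mirroring the self-test further below.
+ * DMA mapping of the buffers and completion handling are elided; channel 0
+ * and the global 'adev' are assumed.
+ */
+#if 0
+static int example_mm_copy(phys_addr_t src, phys_addr_t dst, unsigned int len)
+{
+ ppc460ex_plb_dma_ch_t init = { .polarity = 0, .pwidth = PW_32, .sai = 1, .dai = 1 };
+ int r;
+
+ r = ppc460ex_init_dma_channel(adev, 0, &init);
+ if (r != DMA_STATUS_GOOD)
+ return r;
+ ppc460ex_clear_dma_status(0);
+ ppc460ex_set_src_addr(0, src);
+ ppc460ex_set_dst_addr(0, dst);
+ ppc460ex_set_dma_mode(adev, 0, DMA_MODE_MM);
+ ppc460ex_set_dma_count(adev, 0, len);
+ return ppc460ex_enable_dma(adev, 0);
+}
+#endif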
+
+
+int ppc460ex_enable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan;
+ unsigned int status_bits[] = { DMA_CS0 | DMA_TS0 | DMA_CH0_ERR,
+ DMA_CS1 | DMA_TS1 | DMA_CH1_ERR,
+ DMA_CS2 | DMA_TS2 | DMA_CH2_ERR,
+ DMA_CS3 | DMA_TS3 | DMA_CH3_ERR};
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "%s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ dma_chan = adev->chan[ch_id];
+ if (dma_chan->in_use) {
+ printk(KERN_ERR "%s: channel %d in use\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_CHANNEL_NOTFREE;
+ }
+
+ /* For peripheral-paced modes the caller programs one address and the
+ * peripheral supplies the other; for memory-to-memory both addresses
+ * were already set via ppc460ex_set_src_addr()/ppc460ex_set_dst_addr().
+ */
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control &= ~(DMA_TM_MASK | DMA_TD); /* clear all mode bits */
+ if (dma_chan->mode == DMA_MODE_MM) {
+ /* software initiated memory to memory */
+ control |= DMA_ETD_OUTPUT | DMA_TCE_ENABLE;
+ control |= DMA_MODE_MM;
+ if (dma_chan->dai) {
+ control |= DMA_DAI;
+ }
+ if (dma_chan->sai) {
+ control |= DMA_SAI;
+ }
+ }
+
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+ /*
+ * Clear the CS, TS, RI bits for the channel from DMASR. This
+ * has been observed to happen correctly only after the mode and
+ * ETD/DCE bits in DMACRx are set above. Must do this before
+ * enabling the channel.
+ */
+ mtdcr(DCR_DMA2P40_SR, status_bits[ch_id]);
+ /*
+ * For device-paced transfers, Terminal Count Enable apparently
+ * must be on, and this must be turned on after the mode, etc.
+ * bits are cleared above (at least on Redwood-6).
+ */
+
+ if ((dma_chan->mode == DMA_MODE_MM_DEVATDST) ||
+ (dma_chan->mode == DMA_MODE_MM_DEVATSRC))
+ control |= DMA_TCE_ENABLE;
+
+ /*
+ * Now enable the channel.
+ */
+
+ control |= (dma_chan->mode | DMA_CE_ENABLE);
+ control |= DMA_BEN;
+
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+ dma_chan->in_use = 1;
+ return 0;
+
+}
+
+
+void
+ppc460ex_disable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id)
+{
+ unsigned int control;
+ ppc460ex_plb_dma_ch_t *dma_chan;
+
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk(KERN_ERR "disable_dma: bad channel: %d\n", ch_id);
+ return;
+ }
+
+ dma_chan = adev->chan[ch_id];
+ if (!dma_chan->in_use) {
+ printk(KERN_ERR "disable_dma: channel %d not in use\n", ch_id);
+ return;
+ }
+
+ control = mfdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8));
+ control &= ~DMA_CE_ENABLE;
+ mtdcr(DCR_DMA2P40_CR0 + (ch_id * 0x8), control);
+
+ dma_chan->in_use = 0;
+}
+
+
+
+
+/*
+ * Clears the channel status bits
+ */
+int ppc460ex_clear_dma_status(unsigned int ch_id)
+{
+ if (ch_id >= MAX_PPC460EX_DMA_CHANNELS) {
+ printk("KERN_ERR %s: bad channel %d\n", __FUNCTION__, ch_id);
+ return DMA_STATUS_BAD_CHANNEL;
+ }
+
+ mtdcr(DCR_DMA2P40_SR, ((u32)DMA_CH0_ERR | (u32)DMA_CS0 | (u32)DMA_TS0) >> ch_id);
+ return DMA_STATUS_GOOD;
+
+}
+
+
+/**
+ * ppc460ex_dma_eot_handler - end of transfer interrupt handler
+ */
+irqreturn_t ppc460ex_4chan_dma_eot_handler(int irq, void *data)
+{
+ unsigned int data_read = 0;
+ unsigned int try_cnt = 0;
+
+ /* wait (bounded) for channel 0's Terminal Count bit, reporting any
+ * error flagged in the status register */
+ do {
+ data_read = mfdcr(DCR_DMA2P40_SR);
+ if (data_read & DMA_CH0_ERR)
+ printk(KERN_ERR "%s: DMA error, status 0x%08x\n", __FUNCTION__, data_read);
+ } while (((data_read & DMA_CS0) != DMA_CS0) && ++try_cnt <= 10);
+
+ /* wait for the channel-busy bit to clear */
+ data_read = mfdcr(DCR_DMA2P40_SR);
+ while (data_read & DMA_CH0_BUSY) {
+ data_read = mfdcr(DCR_DMA2P40_SR);
+ printk("%s: status for busy 0x%08x\n", __FUNCTION__, data_read);
+ }
+ mtdcr(DCR_DMA2P40_SR, 0xffffffff);
+
+
+
+ return IRQ_HANDLED;
+}
+
+
+
+static struct of_device_id dma_per_chan_match[] = {
+ {
+ .compatible = "amcc,dma-4channel",
+ },
+ {},
+};
+
+
+
+
+#if 0
+/*** test code ***/
+static int ppc460ex_dma_memcpy_self_test(ppc460ex_plb_dma_dev_t *device, unsigned int dma_ch_id)
+{
+ ppc460ex_plb_dma_ch_t p_init;
+ int res = 0, i;
+ unsigned int control;
+ phys_addr_t *src;
+ phys_addr_t *dest;
+
+ phys_addr_t *gap;
+
+ phys_addr_t dma_dest, dma_src;
+
+ src = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+ gap = kzalloc(200, GFP_KERNEL);
+ if (!gap) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+
+
+ dest = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ printk("src = 0x%08x\n", (unsigned int)src);
+ printk("gap = 0x%08x\n", (unsigned int)gap);
+ printk("dest = 0x%08x\n", (unsigned int)dest);
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src)[i] = (u8)i;
+
+ printk("dump src\n");
+ DMA_HEXDUMP(src, TEST_SIZE);
+ DMA_HEXDUMP(dest, TEST_SIZE);
+#if 1
+ dma_src = dma_map_single(p_init.device->dev, src, TEST_SIZE,
+ DMA_TO_DEVICE);
+ dma_dest = dma_map_single(p_init.device->dev, dest, TEST_SIZE,
+ DMA_FROM_DEVICE);
+#endif
+ printk("%s:channel = %d chan 0x%08x\n", __FUNCTION__, device->chan[dma_ch_id]->chan_id,
+ (unsigned int)(device->chan));
+
+ p_init.polarity = 0;
+ p_init.pwidth = PW_32;
+ p_init.in_use = 0;
+ p_init.sai = 1;
+ p_init.dai = 1;
+ res = ppc460ex_init_dma_channel(device, dma_ch_id, &p_init);
+
+ if (res) {
+ printk("%32s: init_dma_channel return %d\n",
+ __FUNCTION__, res);
+ }
+ ppc460ex_clear_dma_status(dma_ch_id);
+
+ ppc460ex_set_src_addr(dma_ch_id, dma_src);
+ ppc460ex_set_dst_addr(dma_ch_id, dma_dest);
+
+ ppc460ex_set_dma_mode(device, dma_ch_id, DMA_MODE_MM);
+ ppc460ex_set_dma_count(device, dma_ch_id, TEST_SIZE);
+
+ res = ppc460ex_enable_dma_interrupt(device, dma_ch_id);
+ if (res) {
+ printk("%32s: en/disable_dma_interrupt\n",
+ __FUNCTION__);
+ }
+
+
+ control = mfdcr(DCR_DMA2P40_CR0 + (dma_ch_id * 0x8));
+
+
+ control &= ~(SET_DMA_BEN(1));
+ control &= ~(SET_DMA_PSC(3));
+ control &= ~(SET_DMA_PWC(0x3f));
+ control &= ~(SET_DMA_PHC(0x7));
+ control &= ~(SET_DMA_PL(1));
+
+
+
+ mtdcr(DCR_DMA2P40_CR0 + (dma_ch_id * 0x8), control);
+
+
+ ppc460ex_enable_dma(device, dma_ch_id);
+
+
+ if (memcmp(src, dest, TEST_SIZE)) {
+ printk("Self-test copy failed compare, disabling\n");
+ res = -ENODEV;
+ goto out;
+ }
+
+
+ return 0;
+
+ out: kfree(src);
+ kfree(dest);
+ kfree(gap);
+ return res;
+
+}
+
+
+
+static int test1(void)
+{
+ void *src, *dest;
+ void *src1, *dest1;
+ int i;
+ unsigned int chan;
+
+ src = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src)
+ return -ENOMEM;
+
+ dest = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest) {
+ kfree(src);
+ return -ENOMEM;
+ }
+
+ src1 = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!src1) {
+ kfree(src);
+ kfree(dest);
+ return -ENOMEM;
+ }
+
+ dest1 = kzalloc(TEST_SIZE, GFP_KERNEL);
+ if (!dest1) {
+ kfree(src);
+ kfree(dest);
+ kfree(src1);
+ return -ENOMEM;
+ }
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src)[i] = (u8)i;
+
+ /* Fill in src buffer */
+ for (i = 0; i < TEST_SIZE; i++)
+ ((u8*)src1)[i] = (u8)0xaa;
+
+#ifdef DEBUG_TEST
+ DMA_HEXDUMP(src, TEST_SIZE);
+ DMA_HEXDUMP(dest, TEST_SIZE);
+ DMA_HEXDUMP(src1, TEST_SIZE);
+ DMA_HEXDUMP(dest1, TEST_SIZE);
+#endif
+ chan = ppc460ex_get_dma_channel();
+
+#ifdef ENABLE_SGL
+ test_sgdma_memcpy(src, dest, src1, dest1, TEST_SIZE, chan);
+#endif
+ test_dma_memcpy(src, dest, TEST_SIZE, chan);
+
+
+ kfree(src);
+ kfree(dest);
+ kfree(src1);
+ kfree(dest1);
+
+ return 0;
+
+}
+#endif
+
+
+
+/*******************************************************************************
+ * Module Initialization Routine
+ *******************************************************************************
+ */
+int ppc460ex_dma_per_chan_probe(struct platform_device *ofdev)
+{
+ int ret=0;
+ ppc460ex_plb_dma_ch_t *new_chan;
+ int err;
+
+
+
+ adev = dev_get_drvdata(ofdev->dev.parent);
+ BUG_ON(!adev);
+ /* create a device */
+ if ((new_chan = kzalloc(sizeof(*new_chan), GFP_KERNEL)) == NULL) {
+ printk(KERN_ERR "%s: no memory for DMA channel\n", __FUNCTION__);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ err = of_address_to_resource(ofdev->dev.of_node,0,&new_chan->reg);
+ if (err) {
+ printk("ERROR:Can't get %s property reg\n", __FUNCTION__);
+ goto err;
+ }
+ new_chan->device = adev;
+ new_chan->reg_base = ioremap(new_chan->reg.start,new_chan->reg.end - new_chan->reg.start + 1);
+ printk("PPC460ex PLB DMA engine @0x%02X_%08X size %d\n",
+ (u32)(new_chan->reg.start >> 32),
+ (u32)new_chan->reg.start,
+ (u32)(new_chan->reg.end - new_chan->reg.start + 1));
+
+ /* channel registers start at 0x100 and are 8 bytes apart, so the
+ * channel id follows directly from the register offset */
+ new_chan->chan_id = ((new_chan->reg.start - 0x100) & 0xfff) >> 3;
+ printk("new_chan->chan_id 0x%x\n",new_chan->chan_id);
+ adev->chan[new_chan->chan_id] = new_chan;
+ printk("new_chan->chan->chan_id 0x%x\n",adev->chan[new_chan->chan_id]->chan_id);
+ //adev->chan[new_chan->chan_id]->reg_base = new_chan->reg_base;
+
+ return 0;
+
+ err:
+ return ret;
+
+}
+
+int ppc460ex_dma_4chan_probe(struct platform_device *ofdev)
+{
+ int ret=0, irq = 0;
+ ppc460ex_plb_dma_ch_t *chan = NULL;
+ struct device_node *np = ofdev->dev.of_node;
+
+ /* create a device */
+ if ((adev = kzalloc(sizeof(*adev), GFP_KERNEL)) == NULL) {
+ ret = -ENOMEM;
+ goto err_adev_alloc;
+ }
+ adev->dev = &ofdev->dev;
+#if !defined(CONFIG_APM821xx)
+ {
+ int err = of_address_to_resource(np, 0, &adev->reg);
+ if (err)
+ printk(KERN_ERR "Can't get %s property 'reg'\n", np->full_name);
+ }
+#endif
+ printk(KERN_INFO"Probing AMCC DMA driver\n");
+#if !defined(CONFIG_APM821xx)
+ adev->reg_base = ioremap(adev->reg.start, adev->reg.end - adev->reg.start + 1);
+#endif
+
+ irq = of_irq_to_resource(np, 0, NULL);
+ if (irq >= 0) {
+ ret = request_irq(irq, ppc460ex_4chan_dma_eot_handler,
+ IRQF_DISABLED, "Peripheral DMA0-1", chan);
+ if (ret) {
+ ret = -EIO;
+ goto err_irq;
+ }
+ } else {
+ ret = -ENXIO;
+ goto err_irq;
+ }
+
+#if !defined(CONFIG_APM821xx)
+ printk("PPC4xx PLB DMA engine @0x%02X_%08X size %d IRQ %d \n",
+ (u32)(adev->reg.start >> 32),
+ (u32)adev->reg.start,
+ (u32)(adev->reg.end - adev->reg.start + 1),
+ irq);
+#else
+ printk("PPC4xx PLB DMA engine IRQ %d\n", irq);
+#endif
+ dev_set_drvdata(&(ofdev->dev),adev);
+ of_platform_bus_probe(np,dma_per_chan_match,&ofdev->dev);
+
+
+
+
+ return 0;
+
+
+err_irq:
+ kfree(adev);
+err_adev_alloc:
+
+ return ret;
+}
+
+
+static struct of_device_id dma_4chan_match[] = {
+ {
+ .compatible = "amcc,dma",
+ },
+ {},
+};
+
+struct platform_driver ppc460ex_dma_4chan_driver = {
+ .driver = {
+ .name = "plb_dma",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_4chan_match,
+ },
+ .probe = ppc460ex_dma_4chan_probe,
+};
+
+struct platform_driver ppc460ex_dma_per_chan_driver = {
+ .driver = {
+ .name = "dma-4channel",
+ .owner = THIS_MODULE,
+ .of_match_table = dma_per_chan_match,
+ },
+ .probe = ppc460ex_dma_per_chan_probe,
+};
+
+
+static int __init mod_init (void)
+{
+ printk("%s:%d\n", __FUNCTION__, __LINE__);
+ return platform_driver_register(&ppc460ex_dma_4chan_driver);
+ printk("here 2\n");
+}
+
+static void __exit mod_exit(void)
+{
+ platform_driver_unregister(&ppc460ex_dma_4chan_driver);
+}
+
+static int __init ppc460ex_dma_per_chan_init (void)
+{
+ printk("%s:%d\n", __FUNCTION__, __LINE__);
+ return platform_driver_register(&ppc460ex_dma_per_chan_driver);
+ printk("here 3\n");
+}
+
+static void __exit ppc460ex_dma_per_chan_exit(void)
+{
+ platform_driver_unregister(&ppc460ex_dma_per_chan_driver);
+}
+
+subsys_initcall(ppc460ex_dma_per_chan_init);
+subsys_initcall(mod_init);
+
+
+MODULE_DESCRIPTION("AMCC PPC460EX 4 channel Engine Driver");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(ppc460ex_get_dma_status);
+EXPORT_SYMBOL_GPL(ppc460ex_set_src_addr);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dst_addr);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dma_mode);
+EXPORT_SYMBOL_GPL(ppc460ex_set_dma_count);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_dma_interrupt);
+EXPORT_SYMBOL_GPL(ppc460ex_init_dma_channel);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_dma);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_dma);
+EXPORT_SYMBOL_GPL(ppc460ex_clear_dma_status);
+EXPORT_SYMBOL_GPL(ppc460ex_get_dma_residue);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_dma_interrupt);
+EXPORT_SYMBOL_GPL(ppc460ex_get_channel_config);
+EXPORT_SYMBOL_GPL(ppc460ex_set_channel_priority);
+EXPORT_SYMBOL_GPL(ppc460ex_get_peripheral_width);
+EXPORT_SYMBOL_GPL(ppc460ex_enable_burst);
+EXPORT_SYMBOL_GPL(ppc460ex_disable_burst);
+EXPORT_SYMBOL_GPL(ppc460ex_set_burst_size);
+
+/************************************************************************/
diff --git a/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h
new file mode 100644
index 00000000000..c9448f34de4
--- /dev/null
+++ b/drivers/dma/ppc4xx/ppc460ex_4chan_dma.h
@@ -0,0 +1,531 @@
+
+
+#include <linux/types.h>
+
+
+
+
+#define DMA_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false)
+
+
+#define MAX_PPC460EX_DMA_CHANNELS 4
+
+
+#define DCR_DMA0_BASE 0x200
+#define DCR_DMA1_BASE 0x208
+#define DCR_DMA2_BASE 0x210
+#define DCR_DMA3_BASE 0x218
+#define DCR_DMASR_BASE 0x220
+
+
+
+
+
+
+/* DMA Registers */
+#define DCR_DMA2P40_CR0 (DCR_DMA0_BASE + 0x0) /* DMA Channel Control 0 */
+#define DCR_DMA2P40_CTC0 (DCR_DMA0_BASE + 0x1) /* DMA Count 0 */
+#define DCR_DMA2P40_SAH0 (DCR_DMA0_BASE + 0x2) /* DMA Src Addr High 0 */
+#define DCR_DMA2P40_SAL0 (DCR_DMA0_BASE + 0x3) /* DMA Src Addr Low 0 */
+#define DCR_DMA2P40_DAH0 (DCR_DMA0_BASE + 0x4) /* DMA Dest Addr High 0 */
+#define DCR_DMA2P40_DAL0 (DCR_DMA0_BASE + 0x5) /* DMA Dest Addr Low 0 */
+#define DCR_DMA2P40_SGH0 (DCR_DMA0_BASE + 0x6) /* DMA SG Desc Addr High 0 */
+#define DCR_DMA2P40_SGL0 (DCR_DMA0_BASE + 0x7) /* DMA SG Desc Addr Low 0 */
+
+#define DCR_DMA2P40_CR1 (DCR_DMA1_BASE + 0x0) /* DMA Channel Control 1 */
+#define DCR_DMA2P40_CTC1 (DCR_DMA1_BASE + 0x1) /* DMA Count 1 */
+#define DCR_DMA2P40_SAH1 (DCR_DMA1_BASE + 0x2) /* DMA Src Addr High 1 */
+#define DCR_DMA2P40_SAL1 (DCR_DMA1_BASE + 0x3) /* DMA Src Addr Low 1 */
+#define DCR_DMA2P40_DAH1 (DCR_DMA1_BASE + 0x4) /* DMA Dest Addr High 1 */
+#define DCR_DMA2P40_DAL1 (DCR_DMA1_BASE + 0x5) /* DMA Dest Addr Low 1 */
+#define DCR_DMA2P40_SGH1 (DCR_DMA1_BASE + 0x6) /* DMA SG Desc Addr High 1 */
+#define DCR_DMA2P40_SGL1 (DCR_DMA1_BASE + 0x7) /* DMA SG Desc Addr Low 1 */
+
+#define DCR_DMA2P40_CR2 (DCR_DMA2_BASE + 0x0) /* DMA Channel Control 2 */
+#define DCR_DMA2P40_CTC2 (DCR_DMA2_BASE + 0x1) /* DMA Count 2 */
+#define DCR_DMA2P40_SAH2 (DCR_DMA2_BASE + 0x2) /* DMA Src Addr High 2 */
+#define DCR_DMA2P40_SAL2 (DCR_DMA2_BASE + 0x3) /* DMA Src Addr Low 2 */
+#define DCR_DMA2P40_DAH2 (DCR_DMA2_BASE + 0x4) /* DMA Dest Addr High 2 */
+#define DCR_DMA2P40_DAL2 (DCR_DMA2_BASE + 0x5) /* DMA Dest Addr Low 2 */
+#define DCR_DMA2P40_SGH2 (DCR_DMA2_BASE + 0x6) /* DMA SG Desc Addr High 2 */
+#define DCR_DMA2P40_SGL2 (DCR_DMA2_BASE + 0x7) /* DMA SG Desc Addr Low 2 */
+
+#define DCR_DMA2P40_CR3 (DCR_DMA3_BASE + 0x0) /* DMA Channel Control 3 */
+#define DCR_DMA2P40_CTC3 (DCR_DMA3_BASE + 0x1) /* DMA Count 3 */
+#define DCR_DMA2P40_SAH3 (DCR_DMA3_BASE + 0x2) /* DMA Src Addr High 3 */
+#define DCR_DMA2P40_SAL3 (DCR_DMA3_BASE + 0x3) /* DMA Src Addr Low 3 */
+#define DCR_DMA2P40_DAH3 (DCR_DMA3_BASE + 0x4) /* DMA Dest Addr High 3 */
+#define DCR_DMA2P40_DAL3 (DCR_DMA3_BASE + 0x5) /* DMA Dest Addr Low 3 */
+#define DCR_DMA2P40_SGH3 (DCR_DMA3_BASE + 0x6) /* DMA SG Desc Addr High 3 */
+#define DCR_DMA2P40_SGL3 (DCR_DMA3_BASE + 0x7) /* DMA SG Desc Addr Low 3 */
+
+#define DCR_DMA2P40_SR (DCR_DMASR_BASE + 0x0) /* DMA Status Register */
+#define DCR_DMA2P40_SGC (DCR_DMASR_BASE + 0x3) /* DMA Scatter/Gather Command */
+#define DCR_DMA2P40_SLP (DCR_DMASR_BASE + 0x5) /* DMA Sleep Register */
+#define DCR_DMA2P40_POL (DCR_DMASR_BASE + 0x6) /* DMA Polarity Register */
+
+
+
+/*
+ * Function return status codes
+ * These values are used to indicate whether or not the function
+ * call was successful, or a bad/invalid parameter was passed.
+ */
+#define DMA_STATUS_GOOD 0
+#define DMA_STATUS_BAD_CHANNEL 1
+#define DMA_STATUS_BAD_HANDLE 2
+#define DMA_STATUS_BAD_MODE 3
+#define DMA_STATUS_NULL_POINTER 4
+#define DMA_STATUS_OUT_OF_MEMORY 5
+#define DMA_STATUS_SGL_LIST_EMPTY 6
+#define DMA_STATUS_GENERAL_ERROR 7
+#define DMA_STATUS_CHANNEL_NOTFREE 8
+
+#define DMA_CHANNEL_BUSY 0x80000000
+
+/*
+ * These indicate status as returned from the DMA Status Register.
+ */
+#define DMA_STATUS_NO_ERROR 0
+#define DMA_STATUS_CS 1 /* Count Status */
+#define DMA_STATUS_TS 2 /* Transfer Status */
+#define DMA_STATUS_DMA_ERROR 3 /* DMA Error Occurred */
+#define DMA_STATUS_DMA_BUSY 4 /* The channel is busy */
+
+/*
+ * DMA Channel Control Registers
+ */
+#ifdef CONFIG_44x
+#define PPC4xx_DMA_64BIT
+#define DMA_CR_OFFSET 1
+#else
+#define DMA_CR_OFFSET 0
+#endif
+
+#define DMA_CE_ENABLE (1<<31) /* DMA Channel Enable */
+#define SET_DMA_CE_ENABLE(x) (((x)&0x1)<<31)
+#define GET_DMA_CE_ENABLE(x) (((x)&DMA_CE_ENABLE)>>31)
+
+#define DMA_CIE_ENABLE (1<<30) /* DMA Channel Interrupt Enable */
+#define SET_DMA_CIE_ENABLE(x) (((x)&0x1)<<30)
+#define GET_DMA_CIE_ENABLE(x) (((x)&DMA_CIE_ENABLE)>>30)
+
+#define DMA_TD (1<<29)
+#define SET_DMA_TD(x) (((x)&0x1)<<29)
+#define GET_DMA_TD(x) (((x)&DMA_TD)>>29)
+
+#define DMA_PL (1<<28) /* Peripheral Location */
+#define SET_DMA_PL(x) (((x)&0x1)<<28)
+#define GET_DMA_PL(x) (((x)&DMA_PL)>>28)
+
+#define EXTERNAL_PERIPHERAL 0
+#define INTERNAL_PERIPHERAL 1
+
+#define SET_DMA_PW(x) (((x)&0x7)<<(26-DMA_CR_OFFSET)) /* Peripheral Width */
+#define DMA_PW_MASK SET_DMA_PW(7)
+#define PW_8 0
+#define PW_16 1
+#define PW_32 2
+#define PW_64 3
+#define PW_128 4
+
+
+#define GET_DMA_PW(x) (((x)&DMA_PW_MASK)>>(26-DMA_CR_OFFSET))
+
+#define DMA_DAI (1<<(25-DMA_CR_OFFSET)) /* Destination Address Increment */
+#define SET_DMA_DAI(x) (((x)&0x1)<<(25-DMA_CR_OFFSET))
+
+#define DMA_SAI (1<<(24-DMA_CR_OFFSET)) /* Source Address Increment */
+#define SET_DMA_SAI(x) (((x)&0x1)<<(24-DMA_CR_OFFSET))
+
+#define DMA_BEN (1<<(23-DMA_CR_OFFSET)) /* Buffer Enable */
+#define SET_DMA_BEN(x) (((x)&0x1)<<(23-DMA_CR_OFFSET))
+
+#define SET_DMA_TM(x) (((x)&0x3)<<(21-DMA_CR_OFFSET)) /* Transfer Mode */
+#define DMA_TM_MASK SET_DMA_TM(3)
+#define TM_PERIPHERAL 0 /* Peripheral */
+#define TM_RESERVED 1 /* Reserved */
+#define TM_S_MM 2 /* Memory to Memory */
+#define TM_D_MM 3 /* Device Paced Memory to Memory */
+#define GET_DMA_TM(x) (((x)&DMA_TM_MASK)>>(21-DMA_CR_OFFSET))
+
+#define SET_DMA_PSC(x) (((x)&0x3)<<(19-DMA_CR_OFFSET)) /* Peripheral Setup Cycles */
+#define DMA_PSC_MASK SET_DMA_PSC(3)
+#define GET_DMA_PSC(x) (((x)&DMA_PSC_MASK)>>(19-DMA_CR_OFFSET))
+
+#define SET_DMA_PWC(x) (((x)&0x3F)<<(13-DMA_CR_OFFSET)) /* Peripheral Wait Cycles */
+#define DMA_PWC_MASK SET_DMA_PWC(0x3F)
+#define GET_DMA_PWC(x) (((x)&DMA_PWC_MASK)>>(13-DMA_CR_OFFSET))
+
+#define SET_DMA_PHC(x) (((x)&0x7)<<(10-DMA_CR_OFFSET)) /* Peripheral Hold Cycles */
+#define DMA_PHC_MASK SET_DMA_PHC(0x7)
+#define GET_DMA_PHC(x) (((x)&DMA_PHC_MASK)>>(10-DMA_CR_OFFSET))
+
+#define DMA_ETD_OUTPUT (1<<(9-DMA_CR_OFFSET)) /* EOT pin is a TC output */
+#define SET_DMA_ETD(x) (((x)&0x1)<<(9-DMA_CR_OFFSET))
+
+#define DMA_TCE_ENABLE (1<<(8-DMA_CR_OFFSET))
+#define SET_DMA_TCE(x) (((x)&0x1)<<(8-DMA_CR_OFFSET))
+
+#define DMA_DEC (1<<(2)) /* Address Decrement */
+#define SET_DMA_DEC(x) (((x)&0x1)<<2)
+#define GET_DMA_DEC(x) (((x)&DMA_DEC)>>2)
+
+
+/*
+ * Transfer Modes
+ * These modes are defined in a way that makes it possible to
+ * simply "or" in the value in the control register.
+ */
+
+#define DMA_MODE_MM (SET_DMA_TM(TM_S_MM)) /* memory to memory */
+
+ /* Device-paced memory to memory, */
+ /* device is at source address */
+#define DMA_MODE_MM_DEVATSRC (DMA_TD | SET_DMA_TM(TM_D_MM))
+
+ /* Device-paced memory to memory, */
+ /* device is at destination address */
+#define DMA_MODE_MM_DEVATDST (SET_DMA_TM(TM_D_MM))
+
+#define SGL_LIST_SIZE 16384
+#define DMA_PPC4xx_SIZE SGL_LIST_SIZE
+
+#define SET_DMA_PRIORITY(x) (((x)&0x3)<<(6-DMA_CR_OFFSET)) /* DMA Channel Priority */
+#define DMA_PRIORITY_MASK SET_DMA_PRIORITY(3)
+#define PRIORITY_LOW 0
+#define PRIORITY_MID_LOW 1
+#define PRIORITY_MID_HIGH 2
+#define PRIORITY_HIGH 3
+#define GET_DMA_PRIORITY(x) (((x)&DMA_PRIORITY_MASK)>>(6-DMA_CR_OFFSET))
+
+
+#define SET_DMA_PREFETCH(x) (((x)&0x3)<<(4-DMA_CR_OFFSET)) /* Memory Read Prefetch */
+#define DMA_PREFETCH_MASK SET_DMA_PREFETCH(3)
+#define PREFETCH_1 0 /* Prefetch 1 Double Word */
+#define PREFETCH_2 1
+#define PREFETCH_4 2
+#define GET_DMA_PREFETCH(x) (((x)&DMA_PREFETCH_MASK)>>(4-DMA_CR_OFFSET))
+
+#define DMA_PCE (1<<(3-DMA_CR_OFFSET)) /* Parity Check Enable */
+#define SET_DMA_PCE(x) (((x)&0x1)<<(3-DMA_CR_OFFSET))
+#define GET_DMA_PCE(x) (((x)&DMA_PCE)>>(3-DMA_CR_OFFSET))
+
+/*
+ * DMA Polarity Configuration Register
+ */
+#define DMAReq_ActiveLow(chan) (1<<(31-(chan*3)))
+#define DMAAck_ActiveLow(chan) (1<<(30-(chan*3)))
+#define EOT_ActiveLow(chan) (1<<(29-(chan*3))) /* End of Transfer */
+
+/*
+ * DMA Sleep Mode Register
+ */
+#define SLEEP_MODE_ENABLE (1<<21)
+
+/*
+ * DMA Status Register
+ */
+#define DMA_CS0 (1<<31) /* Terminal Count has been reached */
+#define DMA_CS1 (1<<30)
+#define DMA_CS2 (1<<29)
+#define DMA_CS3 (1<<28)
+
+#define DMA_TS0 (1<<27) /* End of Transfer has been requested */
+#define DMA_TS1 (1<<26)
+#define DMA_TS2 (1<<25)
+#define DMA_TS3 (1<<24)
+
+#define DMA_CH0_ERR (1<<23) /* DMA Chanel 0 Error */
+#define DMA_CH1_ERR (1<<22)
+#define DMA_CH2_ERR (1<<21)
+#define DMA_CH3_ERR (1<<20)
+
+#define DMA_IN_DMA_REQ0 (1<<19) /* Internal DMA Request is pending */
+#define DMA_IN_DMA_REQ1 (1<<18)
+#define DMA_IN_DMA_REQ2 (1<<17)
+#define DMA_IN_DMA_REQ3 (1<<16)
+
+#define DMA_EXT_DMA_REQ0 (1<<15) /* External DMA Request is pending */
+#define DMA_EXT_DMA_REQ1 (1<<14)
+#define DMA_EXT_DMA_REQ2 (1<<13)
+#define DMA_EXT_DMA_REQ3 (1<<12)
+
+#define DMA_CH0_BUSY (1<<11) /* DMA Channel 0 Busy */
+#define DMA_CH1_BUSY (1<<10)
+#define DMA_CH2_BUSY (1<<9)
+#define DMA_CH3_BUSY (1<<8)
+
+#define DMA_SG0 (1<<7) /* DMA Channel 0 Scatter/Gather in progress */
+#define DMA_SG1 (1<<6)
+#define DMA_SG2 (1<<5)
+#define DMA_SG3 (1<<4)
+
+/* DMA Channel Count Register */
+#define DMA_CTC_TCIE (1<<29) /* Terminal Count Interrupt Enable */
+#define DMA_CTC_ETIE (1<<28) /* EOT Interupt Enable */
+#define DMA_CTC_EIE (1<<27) /* Error Interrupt Enable */
+#define DMA_CTC_BTEN (1<<23) /* Burst Enable/Disable bit */
+#define DMA_CTC_BSIZ_MSK (3<<21) /* Mask of the Burst size bits */
+#define DMA_CTC_BSIZ_2 (0)
+#define DMA_CTC_BSIZ_4 (1<<21)
+#define DMA_CTC_BSIZ_8 (2<<21)
+#define DMA_CTC_BSIZ_16 (3<<21)
+#define DMA_CTC_TC_MASK 0xFFFFF
+
+/*
+ * DMA SG Command Register
+ */
+#define SSG_ENABLE(chan) (1<<(31-chan)) /* Start Scatter Gather */
+#define SSG_MASK_ENABLE(chan) (1<<(15-chan)) /* Enable writing to SSG0 bit */
+
+
+/*
+ * DMA Scatter/Gather Descriptor Bit fields
+ */
+#define SG_LINK (1<<31) /* Link */
+#define SG_TCI_ENABLE (1<<29) /* Enable Terminal Count Interrupt */
+#define SG_ETI_ENABLE (1<<28) /* Enable End of Transfer Interrupt */
+#define SG_ERI_ENABLE (1<<27) /* Enable Error Interrupt */
+#define SG_COUNT_MASK 0xFFFF /* Count Field */
+
+#define SET_DMA_CONTROL \
+ (SET_DMA_CIE_ENABLE(p_init->int_enable) | /* interrupt enable */ \
+ SET_DMA_BEN(p_init->buffer_enable) | /* buffer enable */\
+ SET_DMA_ETD(p_init->etd_output) | /* end of transfer pin */ \
+ SET_DMA_TCE(p_init->tce_enable) | /* terminal count enable */ \
+ SET_DMA_PL(p_init->pl) | /* peripheral location */ \
+ SET_DMA_DAI(p_init->dai) | /* dest addr increment */ \
+ SET_DMA_SAI(p_init->sai) | /* src addr increment */ \
+ SET_DMA_PRIORITY(p_init->cp) | /* channel priority */ \
+ SET_DMA_PW(p_init->pwidth) | /* peripheral/bus width */ \
+ SET_DMA_PSC(p_init->psc) | /* peripheral setup cycles */ \
+ SET_DMA_PWC(p_init->pwc) | /* peripheral wait cycles */ \
+ SET_DMA_PHC(p_init->phc) | /* peripheral hold cycles */ \
+ SET_DMA_PREFETCH(p_init->pf) /* read prefetch */)
+
+#define GET_DMA_POLARITY(chan) (DMAReq_ActiveLow(chan) | DMAAck_ActiveLow(chan) | EOT_ActiveLow(chan))
+
+
+/**
+ * struct ppc460ex_plb_dma_device - internal representation of a DMA device
+ * @reg_base: remapped register base
+ * @dev: generic device backing this engine
+ * @reg: register resource
+ * @id: HW DMA device selector
+ * @chan: per-channel state, indexed by channel id
+ * @queue: wait queue for transfer completion
+ */
+typedef struct ppc460ex_plb_dma_device {
+ void __iomem *reg_base;
+ struct device *dev;
+ struct resource reg; /* Resource for register */
+ int id;
+ struct ppc460ex_plb_dma_chan *chan[MAX_PPC460EX_DMA_CHANNELS];
+ wait_queue_head_t queue;
+} ppc460ex_plb_dma_dev_t;
+
+typedef uint32_t sgl_handle_t;
+/**
+ * struct ppc460ex_plb_dma_chan - internal representation of a DMA channel
+ * @reg_base: remapped register base
+ * @device: parent device
+ * @cleanup_watchdog: watchdog timer for transfer cleanup
+ * @reg: register resource
+ * @chan_id: hardware channel number
+ * @irq_tasklet: bottom half for interrupt processing
+ * @phandle: scatter/gather list handle
+ * @in_use: set while the channel is running a transfer
+ */
+typedef struct ppc460ex_plb_dma_chan {
+ void __iomem *reg_base;
+ struct ppc460ex_plb_dma_device *device;
+ struct timer_list cleanup_watchdog;
+ struct resource reg; /* Resource for register */
+ unsigned int chan_id;
+ struct tasklet_struct irq_tasklet;
+ sgl_handle_t *phandle;
+ unsigned short in_use; /* set when channel is being used, clr when
+ * available.
+ */
+ /*
+ * Valid polarity settings:
+ * DMAReq_ActiveLow(n)
+ * DMAAck_ActiveLow(n)
+ * EOT_ActiveLow(n)
+ *
+ * n is 0 to max dma chans
+ */
+ unsigned int polarity;
+
+ char buffer_enable; /* Boolean: buffer enable */
+ char tce_enable; /* Boolean: terminal count enable */
+ char etd_output; /* Boolean: eot pin is a tc output */
+ char pce; /* Boolean: parity check enable */
+
+ /*
+ * Peripheral location:
+ * INTERNAL_PERIPHERAL (UART0 on the 405GP)
+ * EXTERNAL_PERIPHERAL
+ */
+ char pl; /* internal/external peripheral */
+
+ /*
+ * Valid pwidth settings:
+ * PW_8
+ * PW_16
+ * PW_32
+ * PW_64
+ */
+ unsigned int pwidth;
+
+ char dai; /* Boolean: dst address increment */
+ char sai; /* Boolean: src address increment */
+
+ /*
+ * Valid psc settings: 0-3
+ */
+ unsigned int psc; /* Peripheral Setup Cycles */
+
+ /*
+ * Valid pwc settings:
+ * 0-63
+ */
+ unsigned int pwc; /* Peripheral Wait Cycles */
+
+ /*
+ * Valid phc settings:
+ * 0-7
+ */
+ unsigned int phc; /* Peripheral Hold Cycles */
+
+ /*
+ * Valid cp (channel priority) settings:
+ * PRIORITY_LOW
+ * PRIORITY_MID_LOW
+ * PRIORITY_MID_HIGH
+ * PRIORITY_HIGH
+ */
+ unsigned int cp; /* channel priority */
+
+ /*
+ * Valid pf (memory read prefetch) settings:
+ *
+ * PREFETCH_1
+ * PREFETCH_2
+ * PREFETCH_4
+ */
+ unsigned int pf; /* memory read prefetch */
+
+ /*
+ * Boolean: channel interrupt enable
+ * NOTE: for sgl transfers, only the last descriptor will be setup to
+ * interrupt.
+ */
+ char int_enable;
+
+ char shift; /* easy access to byte_count shift, based on */
+ /* the width of the channel */
+
+ uint32_t control; /* channel control word */
+
+ /* These variables are used ONLY in single dma transfers */
+ unsigned int mode; /* transfer mode */
+ phys_addr_t addr;
+ char ce; /* channel enable */
+ char int_on_final_sg;/* for scatter/gather - only interrupt on last sg */
+
+} ppc460ex_plb_dma_ch_t;
+
+/*
+ * PPC44x DMA implementations have a slightly different
+ * descriptor layout. Probably moved about due to the
+ * change to 64-bit addresses and link pointer. I don't
+ * know why they didn't just leave control_count after
+ * the dst_addr.
+ */
+#ifdef PPC4xx_DMA_64BIT
+typedef struct {
+ uint32_t control;
+ uint32_t control_count;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ phys_addr_t next;
+} ppc_sgl_t;
+#else
+typedef struct {
+ uint32_t control;
+ phys_addr_t src_addr;
+ phys_addr_t dst_addr;
+ uint32_t control_count;
+ uint32_t next;
+} ppc_sgl_t;
+#endif
+
+
+
+typedef struct {
+ unsigned int ch_id;
+ uint32_t control; /* channel ctrl word; loaded from each descriptor */
+ uint32_t sgl_control; /* LK, TCI, ETI, and ERI bits in sgl descriptor */
+ dma_addr_t dma_addr; /* dma (physical) address of this list */
+ dma_addr_t dummy; /* Dummy variable to allow quad word alignment */
+ ppc_sgl_t *phead;
+ dma_addr_t phead_dma;
+ ppc_sgl_t *ptail;
+ dma_addr_t ptail_dma;
+} sgl_list_info_t;
+
+typedef struct {
+ phys_addr_t *src_addr;
+ phys_addr_t *dst_addr;
+ phys_addr_t dma_src_addr;
+ phys_addr_t dma_dst_addr;
+} pci_alloc_desc_t;
+
+#define PPC460EX_DMA_SGXFR_COMPLETE(id) (!((1 << (11-id)) & mfdcr(DCR_DMA2P40_SR)))
+#define PPC460EX_DMA_CHAN_BUSY(id) ( (1 << (11-id)) & mfdcr(DCR_DMA2P40_SR) )
+#define DMA_STATUS(id) (mfdcr(DCR_DMA2P40_SR))
+#define CLEAR_DMA_STATUS(id) (mtdcr(DCR_DMA2P40_SR, 0xFFFFFFFF))
+#define PPC460EX_DMA_SGSTAT_FREE(id) (!((1 << (7-id)) & mfdcr(DCR_DMA2P40_SR)) )
+#define PPC460EX_DMA_TC_REACHED(id) ( (1 << (31-id)) & mfdcr(DCR_DMA2P40_SR) )
+#define PPC460EX_DMA_CHAN_XFR_COMPLETE(id) ( (!PPC460EX_DMA_CHAN_BUSY(id)) && (PPC460EX_DMA_TC_REACHED(id)) )
+#define PPC460EX_DMA_CHAN_SGXFR_COMPLETE(id) ( (!PPC460EX_DMA_CHAN_BUSY(id)) && PPC460EX_DMA_SGSTAT_FREE(id) )
+#define PPC460EX_DMA_SG_IN_PROGRESS(id) ( (1 << (7-id)) | (1 << (11-id)) )
+#define PPC460EX_DMA_SG_OP_COMPLETE(id) ( (PPC460EX_DMA_SG_IN_PROGRESS(id) & DMA_STATUS(id) ) == 0)
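+
+/*
+ * Illustrative sketch (compiled out): the status macros above support
+ * lightweight completion polling, e.g.:
+ */
+#if 0
+static inline void ppc460ex_example_poll_done(unsigned int ch)
+{
+ while (!PPC460EX_DMA_CHAN_XFR_COMPLETE(ch))
+ ; /* spin until busy clears and terminal count is reached */
+ CLEAR_DMA_STATUS(ch);
+}
+#endif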
+
+extern ppc460ex_plb_dma_dev_t *adev;
+int ppc460ex_init_dma_channel(ppc460ex_plb_dma_dev_t *adev,
+ unsigned int ch_id,
+ ppc460ex_plb_dma_ch_t *p_init);
+
+int ppc460ex_set_src_addr(int ch_id, phys_addr_t src_addr);
+
+int ppc460ex_set_dst_addr(int ch_id, phys_addr_t dst_addr);
+
+int ppc460ex_set_dma_mode(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int mode);
+
+void ppc460ex_set_dma_count(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id, unsigned int count);
+
+int ppc460ex_enable_dma_interrupt(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_enable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_get_dma_channel(void);
+
+void ppc460ex_disable_dma(ppc460ex_plb_dma_dev_t *adev, unsigned int ch_id);
+
+int ppc460ex_clear_dma_status(unsigned int ch_id);
+
+#if 0
+extern int test_dma_memcpy(void *src, void *dst, unsigned int length, unsigned int dma_ch);
+
+extern int test_sgdma_memcpy(void *src, void *dst, void *src1, void *dst1,
+ unsigned int length, unsigned int dma_ch);
+#endif
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 57985410f12..a66a3217f1d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -336,10 +336,10 @@ static const struct {
QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
- QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+ QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
- 0},
+ QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 0c9f803fc1a..b6ae89ea881 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
.map = gpio_rcar_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
};
struct gpio_rcar_info {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f36126383d2..d893e4da5dc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1616,22 +1616,6 @@ out:
return ret;
}
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
- struct i915_vma *vma;
-
- /*
- * Only the global gtt is relevant for gtt memory mappings, so restrict
- * list traversal to objects bound into the global address space. Note
- * that the active list should be empty, but better safe than sorry.
- */
- WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
- list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
- i915_gem_release_mmap(vma->obj);
- list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
- i915_gem_release_mmap(vma->obj);
-}
-
/**
* i915_gem_release_mmap - remove physical page mappings
* @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ i915_gem_release_mmap(obj);
+}
+
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 3521f998a17..34894b57306 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -31,7 +31,7 @@
struct i915_render_state {
struct drm_i915_gem_object *obj;
unsigned long ggtt_offset;
- void *batch;
+ u32 *batch;
u32 size;
u32 len;
};
@@ -80,7 +80,7 @@ free:
static void render_state_free(struct i915_render_state *so)
{
- kunmap(so->batch);
+ kunmap(kmap_to_page(so->batch));
i915_gem_object_ggtt_unpin(so->obj);
drm_gem_object_unreference(&so->obj->base);
kfree(so);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 267f069765a..c05c84f3f09 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2845,7 +2845,7 @@ static int semaphore_passed(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_engine_cs *signaller;
- u32 seqno, ctl;
+ u32 seqno;
ring->hangcheck.deadlock++;
@@ -2857,15 +2857,12 @@ static int semaphore_passed(struct intel_engine_cs *ring)
if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
return -1;
- /* cursory check for an unkickable deadlock */
- ctl = I915_READ_CTL(signaller);
- if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
- return -1;
-
if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
return 1;
- if (signaller->hangcheck.deadlock)
+ /* cursory check for an unkickable deadlock */
+ if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+ semaphore_passed(signaller) < 0)
return -1;
return 0;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 0b247110713..c0ea66192fe 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
gb_tile_moden = 0;
break;
}
+ rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 250bac3935a..15e4f28015e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4756,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c66952d4b00..3c69f58e46e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b7204500a9a..60c47f82912 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -449,6 +449,7 @@ struct radeon_bo_va {
/* protected by vm mutex */
struct list_head vm_list;
+ struct list_head vm_status;
/* constant after initialization */
struct radeon_vm *vm;
@@ -867,6 +868,9 @@ struct radeon_vm {
struct list_head va;
unsigned id;
+ /* BOs freed, but not yet updated in the PT */
+ struct list_head freed;
+
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
@@ -875,6 +879,8 @@ struct radeon_vm {
/* array of page tables, one for each page directory entry */
struct radeon_vm_pt *page_tables;
+ struct radeon_bo_va *ib_bo_va;
+
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
@@ -2832,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_update_page_directory(struct radeon_device *rdev,
struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm);
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
@@ -2847,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va);
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 71a14346147..ae763f60c8a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
struct radeon_device *rdev = p->rdev;
+ struct radeon_bo_va *bo_va;
int i, r;
r = radeon_vm_update_page_directory(rdev, vm);
if (r)
return r;
- r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+ r = radeon_vm_clear_freed(rdev, vm);
+ if (r)
+ return r;
+
+ if (vm->ib_bo_va == NULL) {
+ DRM_ERROR("Tmp BO not in VM!\n");
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
&rdev->ring_tmp_bo.bo->tbo.mem);
if (r)
return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
continue;
bo = p->relocs[i].robj;
- r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+ bo_va = radeon_vm_bo_find(vm, bo);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 03686fab842..697add2cd4e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
if (!radeon_check_pot_argument(radeon_vm_size)) {
dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
- if (radeon_vm_size < 4) {
- dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+ if (radeon_vm_size < 1) {
+ dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/*
* Max GPUVM size for Cayman, SI and CI are 40 bits.
*/
- if (radeon_vm_size > 1024*1024) {
- dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+ if (radeon_vm_size > 1024) {
+ dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
radeon_vm_size);
- radeon_vm_size = 4096;
+ radeon_vm_size = 4;
}
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
if (radeon_vm_block_size < 9) {
- dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+ dev_warn(rdev->dev, "VM page table size (%d) too small\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
if (radeon_vm_block_size > 24 ||
- radeon_vm_size < (1ull << radeon_vm_block_size)) {
- dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+ (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+ dev_warn(rdev->dev, "VM page table size (%d) too large\n",
radeon_vm_block_size);
radeon_vm_block_size = 9;
}
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* Adjust VM size here.
* Max GPUVM size for cayman+ is 40 bits.
*/
- rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+ rdev->vm_manager.max_pfn = radeon_vm_size << 18;
/* Set asic functions */
r = radeon_asic_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index cb1421369e3..e9e36108424 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -173,7 +173,7 @@ int radeon_dpm = -1;
int radeon_aspm = -1;
int radeon_runtime_pm = -1;
int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
int radeon_vm_block_size = 9;
int radeon_deep_color = 0;
@@ -243,7 +243,7 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
module_param_named(hard_reset, radeon_hard_reset, int, 0444);
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
module_param_named(vm_size, radeon_vm_size, int, 0444);
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 35d931881b4..d25ae6acfd5 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN) {
struct radeon_fpriv *fpriv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm;
int r;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- r = radeon_vm_init(rdev, &fpriv->vm);
+ vm = &fpriv->vm;
+ r = radeon_vm_init(rdev, vm);
if (r) {
kfree(fpriv);
return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
/* map the ib pool buffer read only into
* virtual address space */
- bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
- rdev->ring_tmp_bo.bo);
- r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+ vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+ rdev->ring_tmp_bo.bo);
+ r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+ RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
return r;
}
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/* new gpu have virtual address space support */
if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
struct radeon_fpriv *fpriv = file_priv->driver_priv;
- struct radeon_bo_va *bo_va;
+ struct radeon_vm *vm = &fpriv->vm;
int r;
if (rdev->accel_working) {
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) {
- bo_va = radeon_vm_bo_find(&fpriv->vm,
- rdev->ring_tmp_bo.bo);
- if (bo_va)
- radeon_vm_bo_rmv(rdev, bo_va);
+ if (vm->ib_bo_va)
+ radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
}
}
- radeon_vm_fini(rdev, &fpriv->vm);
+ radeon_vm_fini(rdev, vm);
kfree(fpriv);
file_priv->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index eecff6bbd34..725d3669014 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
+ INIT_LIST_HEAD(&bo_va->vm_status);
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
head = &tmp->vm_list;
}
+ if (bo_va->soffset) {
+ /* add a clone of the bo_va to clear the old address */
+ tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (!tmp) {
+ mutex_unlock(&vm->mutex);
+ return -ENOMEM;
+ }
+ tmp->soffset = bo_va->soffset;
+ tmp->eoffset = bo_va->eoffset;
+ tmp->vm = vm;
+ list_add(&tmp->vm_status, &vm->freed);
+ }
+
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
* Object have to be reserved and mutex must be locked!
*/
int radeon_vm_bo_update(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_bo *bo,
+ struct radeon_bo_va *bo_va,
struct ttm_mem_reg *mem)
{
+ struct radeon_vm *vm = bo_va->vm;
struct radeon_ib ib;
- struct radeon_bo_va *bo_va;
unsigned nptes, ndw;
uint64_t addr;
int r;
- bo_va = radeon_vm_bo_find(vm, bo);
- if (bo_va == NULL) {
- dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
- return -EINVAL;
- }
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
- bo, vm);
+ bo_va->bo, vm);
return -EINVAL;
}
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
trace_radeon_vm_bo_update(bo_va);
- nptes = radeon_bo_ngpu_pages(bo);
+ nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
/* padding, etc. */
ndw = 64;
@@ -911,33 +919,61 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
}
/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ list_del(&bo_va->vm_status);
+ r = radeon_vm_bo_update(rdev, bo_va, NULL);
+ kfree(bo_va);
+ if (r)
+ return r;
+ }
+ return 0;
+
+}
+
+/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
* @rdev: radeon_device pointer
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
*
* Objects have to be reserved!
*/
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
- struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_bo_va *bo_va)
{
- int r = 0;
+ struct radeon_vm *vm = bo_va->vm;
- mutex_lock(&bo_va->vm->mutex);
- if (bo_va->soffset)
- r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+ list_del(&bo_va->bo_list);
+ mutex_lock(&vm->mutex);
list_del(&bo_va->vm_list);
- mutex_unlock(&bo_va->vm->mutex);
- list_del(&bo_va->bo_list);
- kfree(bo_va);
- return r;
+ if (bo_va->soffset) {
+ bo_va->bo = NULL;
+ list_add(&bo_va->vm_status, &vm->freed);
+ } else {
+ kfree(bo_va);
+ }
+
+ mutex_unlock(&vm->mutex);
}
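
The rework above converts radeon_vm_bo_rmv() from synchronous page-table cleanup to a deferred scheme: a still-mapped bo_va is parked on vm->freed, and radeon_vm_clear_freed() later zeroes its PTEs and frees it. A minimal kernel-style sketch of that idiom, with invented names (my_obj, my_vm), might look like:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_obj {
	struct list_head node;	/* linked on my_vm.freed */
	u64 start, end;		/* range still present in the page table */
};

struct my_vm {
	struct list_head freed;	/* unmapped but not yet cleared */
};

/* Unmap only queues the object; a later pass, called with the page
 * tables reserved, clears the PTEs and releases the memory. */
static int my_vm_clear_freed(struct my_vm *vm)
{
	struct my_obj *obj, *tmp;

	list_for_each_entry_safe(obj, tmp, &vm->freed, node) {
		list_del(&obj->node);
		/* ... write zero PTEs for [obj->start, obj->end) ... */
		kfree(obj);
	}
	return 0;
}
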
/**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
int r;
vm->id = 0;
+ vm->ib_bo_va = NULL;
vm->fence = NULL;
vm->last_flush = NULL;
vm->last_id_use = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->va);
+ INIT_LIST_HEAD(&vm->freed);
pd_size = radeon_vm_directory_size(rdev);
pd_entries = radeon_vm_num_pdes(rdev);
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
kfree(bo_va);
}
}
-
+ list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+ kfree(bo_va);
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_bo_unref(&vm->page_tables[i].bo);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eba0225259a..9e854fd016d 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
+ wptr &= ~RB_OVERFLOW;
}
return (wptr & rdev->ih.ptr_mask);
}
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 20da6ff183d..32e50be9c4a 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,15 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
pi->at[i] = TRINITY_AT_DFLT;
- /* There are stability issues reported on latops with
- * bapm installed when switching between AC and battery
- * power. At the same time, some desktop boards hang
- * if it's not enabled and dpm is enabled.
+ /* There are stability issues reported with
+ * bapm enabled when switching between AC and battery
+ * power. At the same time, some MSI boards hang
+ * if it's not enabled and dpm is enabled. Just enable
+ * it for MSI boards right now.
*/
- if (rdev->flags & RADEON_IS_MOBILITY)
- pi->enable_bapm = false;
- else
+ if (rdev->pdev->subsystem_vendor == 0x1462)
pi->enable_bapm = true;
+ else
+ pi->enable_bapm = false;
pi->enable_nbps_policy = true;
pi->enable_sclk_ds = true;
pi->enable_gfx_power_gating = true;
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index efee4c59239..34b9a601ad0 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
*/
static inline s8 TEMP_TO_REG(int val)
{
- return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+ return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
}
static inline int TEMP_FROM_REG(s8 val)
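
The TEMP_TO_REG() fix swaps the order of scaling and clamping: the old code divided the value down to the register range first and then clamped it against the millidegree limits, so the clamp never fired and out-of-range inputs silently wrapped in the s8 return value. A standalone illustration (CLAMP and SCALE are simplified stand-ins for the driver's macros):

#include <stdio.h>

#define CLAMP(v, lo, hi) ((v) < (lo) ? (lo) : ((v) > (hi) ? (hi) : (v)))
#define SCALE(v, mul, div) ((v) * (mul) / (div))	/* no rounding, for illustration */

int main(void)
{
	int val = 200000;	/* 200 degrees C in millidegrees, out of range */

	/* old order: 200000 -> 200, then a no-op clamp against +/-128000;
	 * 200 does not fit in s8 and wraps to -56 */
	printf("old: %d\n", CLAMP(SCALE(val, 1, 1000), -128000, 127000));
	/* new order: clamp to 127000 millidegrees first, then scale to 127 */
	printf("new: %d\n", SCALE(CLAMP(val, -128000, 127000), 1, 1000));
	return 0;
}
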
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
err = kstrtoul(buf, 10, &val);
if (err)
return err;
+ if (val > 255)
+ return -EINVAL;
data->vrm = val;
return count;
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index 8fb46aab2d8..a04c49f2a01 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
config BLK_DEV_CS5520
tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
config BLK_DEV_CS5530
tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
config BLK_DEV_CS5535
tristate "AMD CS5535 chipset support"
- depends on X86 && !X86_64
+ depends on X86_32
select BLK_DEV_IDEDMA_PCI
help
Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
config BLK_DEV_SC1200
tristate "National SCx200 chipset support"
+ depends on X86_32 || COMPILE_TEST
select BLK_DEV_IDEDMA_PCI
help
This driver adds support for the on-board IDE controller on the
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 2a744a91370..a3d3b1733c4 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
if (irq_handler == NULL)
irq_handler = ide_intr;
- if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
- goto out_up;
+ if (!host->get_lock)
+ if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+ goto out_up;
#if !defined(__mc68000__)
printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
ide_proc_unregister_port(hwif);
- free_irq(hwif->irq, hwif);
+ if (!hwif->host->get_lock)
+ free_irq(hwif->irq, hwif);
device_unregister(hwif->portdev);
device_unregister(&hwif->gendev);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index a7e68c81f89..a077cc86421 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -68,13 +68,13 @@
/* Defaults values */
#define BMA180_DEF_PMODE 0
#define BMA180_DEF_BW 20
-#define BMA180_DEF_SCALE 250
+#define BMA180_DEF_SCALE 2452
/* Available values for sysfs */
#define BMA180_FLP_FREQ_AVAILABLE \
"10 20 40 75 150 300"
#define BMA180_SCALE_AVAILABLE \
- "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
+ "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
struct bma180_data {
struct i2c_client *client;
@@ -94,7 +94,7 @@ enum bma180_axis {
};
static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
-static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
+static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
{
@@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->mutex);
return ret;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ if (val2)
+ return -EINVAL;
mutex_lock(&data->mutex);
ret = bma180_set_bw(data, val);
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 36b1ae92e23..9f1a1400990 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -966,7 +966,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
/* Now we have the two masks, work from least sig and build up sizes */
for_each_set_bit(out_ind,
- indio_dev->active_scan_mask,
+ buffer->scan_mask,
indio_dev->masklength) {
in_ind = find_next_bit(indio_dev->active_scan_mask,
indio_dev->masklength,
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1c4c0db0555..29ca0bb4f56 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
}
static int input_get_disposition(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
+ unsigned int type, unsigned int code, int *pval)
{
int disposition = INPUT_IGNORE_EVENT;
+ int value = *pval;
switch (type) {
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
break;
}
+ *pval = value;
return disposition;
}
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
{
int disposition;
- disposition = input_get_disposition(dev, type, code, value);
+ disposition = input_get_disposition(dev, type, code, &value);
if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
dev->event(dev, type, code, value);
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index 758b4873141..de7be4f03d9 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int keyscan_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
mutex_unlock(&input->mutex);
return retval;
}
+#endif
static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
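
The #ifdef pair is the standard cure for "defined but not used" warnings here: SIMPLE_DEV_PM_OPS() only references its callbacks when CONFIG_PM_SLEEP is set, so the callbacks must be compiled out in lockstep. A hedged sketch of the pattern (demo_* names are invented):

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int demo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int demo_resume(struct device *dev)
{
	/* reprogram the hardware */
	return 0;
}
#endif

/* With CONFIG_PM_SLEEP disabled this expands to an empty ops table and
 * never mentions the callbacks, hence the matching #ifdef above. */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);
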
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index e4104f9b2e6..fed5102e180 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
module_platform_driver(sirfsoc_pwrc_driver);
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
MODULE_ALIAS("platform:sirfsoc-pwrc");
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ec772d962f0..ef9e0b8a9aa 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN2002",
+ "LEN2004", NULL},
1024, 5112, 2024, 4832
},
{
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0049",
"LEN2000",
"LEN2001", /* Edge E431 */
- "LEN2002",
+ "LEN2002", /* Edge E531 */
"LEN2003",
"LEN2004", /* L440 */
"LEN2005",
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 381b20d4c56..136b7b204f5 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -402,6 +402,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Acer Aspire 5710 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+ },
+ },
+ {
/* Gericom Bellagio */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 977d05cd9e2..e73cf2c71f3 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
* a=(pi*r^2)/C.
*/
int a = data[5];
- int x_res = input_abs_get_res(input, ABS_X);
- int y_res = input_abs_get_res(input, ABS_Y);
- width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+ int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+ int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
+ width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
height = width * y_res / x_res;
}
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
input_abs_set_res(input_dev, ABS_X, features->x_resolution);
input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
} else {
- if (features->touch_max <= 2) {
+ if (features->touch_max == 1) {
input_set_abs_params(input_dev, ABS_X, 0,
features->x_max, features->x_fuzz, 0);
input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
case MTTPC:
case MTTPC_B:
case TABLETPC2FG:
- if (features->device_type == BTN_TOOL_FINGER) {
- unsigned int flags = INPUT_MT_DIRECT;
-
- if (wacom_wac->features.type == TABLETPC2FG)
- flags = 0;
-
- input_mt_init_slots(input_dev, features->touch_max, flags);
- }
+ if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
/* fall through */
case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_RIGHT, input_dev->keybit);
if (features->touch_max) {
- /* touch interface */
- unsigned int flags = INPUT_MT_POINTER;
-
- __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
input_set_abs_params(input_dev,
ABS_MT_TOUCH_MINOR,
0, features->y_max, 0, 0);
- } else {
- __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- flags = 0;
}
- input_mt_init_slots(input_dev, features->touch_max, flags);
+ input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
} else {
/* buttons/keys only interface */
__clear_bit(ABS_X, input_dev->absbit);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 4e793a17361..2ce649520fe 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
*/
err = of_property_read_u32(node, "ti,coordinate-readouts",
&ts_dev->coordinate_readouts);
- if (err < 0)
+ if (err < 0) {
+ dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
err = of_property_read_u32(node, "ti,coordiante-readouts",
&ts_dev->coordinate_readouts);
+ }
+
if (err < 0)
return err;
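
The probe keeps working with old device trees that shipped the misspelled 'ti,coordiante-readouts' property while warning users toward the correct name. A sketch of the fallback idiom (demo_read_readouts is hypothetical):

#include <linux/device.h>
#include <linux/of.h>

static int demo_read_readouts(struct device *dev, struct device_node *np,
			      u32 *readouts)
{
	int err;

	/* prefer the correctly spelled property ... */
	err = of_property_read_u32(np, "ti,coordinate-readouts", readouts);
	if (err < 0) {
		dev_warn(dev, "falling back to deprecated 'ti,coordiante-readouts'\n");
		/* ... but accept the historical misspelling */
		err = of_property_read_u32(np, "ti,coordiante-readouts",
					   readouts);
	}
	return err;
}
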
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index c44950d3eb7..b7ae0a0dd5b 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2400,6 +2400,7 @@ allocerr:
error:
freeurbs(cs);
usb_set_intfdata(interface, NULL);
+ usb_put_dev(udev);
gigaset_freecs(cs);
return rc;
}
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a333b7f798d..62f0688d45a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -638,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
fprog.len = len;
fprog.filter = code;
- if (is->pass_filter)
+ if (is->pass_filter) {
sk_unattached_filter_destroy(is->pass_filter);
- err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+ is->pass_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&is->pass_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
return err;
@@ -657,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
fprog.len = len;
fprog.filter = code;
- if (is->active_filter)
+ if (is->active_filter) {
sk_unattached_filter_destroy(is->active_filter);
- err = sk_unattached_filter_create(&is->active_filter, &fprog);
+ is->active_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&is->active_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
return err;
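
Both ioctl paths now share one shape: destroy and NULL any existing filter first, then build a replacement only when a program was actually supplied, so a zero-length program means "remove the filter" rather than handing sk_unattached_filter_create() an empty fprog. Condensed into a sketch (demo_set_filter is hypothetical; the signatures are the 3.16-era unattached-filter API):

#include <linux/filter.h>

static int demo_set_filter(struct sk_filter **filterp,
			   struct sock_fprog_kern *fprog)
{
	int err = 0;

	if (*filterp) {
		sk_unattached_filter_destroy(*filterp);
		*filterp = NULL;	/* never leave a dangling pointer */
	}
	if (fprog->filter != NULL)
		err = sk_unattached_filter_create(filterp, fprog);
	return err;
}
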
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 4e84095833d..d724459860d 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1541,7 +1541,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
BUG_ON(block_size < 1 << SECTOR_SHIFT ||
(block_size & (block_size - 1)));
- c = kmalloc(sizeof(*c), GFP_KERNEL);
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
r = -ENOMEM;
goto bad_client;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5f054c44b48..2c63326638b 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -231,7 +231,7 @@ struct cache {
/*
* cache_size entries, dirty if set
*/
- dm_cblock_t nr_dirty;
+ atomic_t nr_dirty;
unsigned long *dirty_bitset;
/*
@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
- cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+ atomic_inc(&cache->nr_dirty);
policy_set_dirty(cache->policy, oblock);
}
}
@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
{
if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
policy_clear_dirty(cache->policy, oblock);
- cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
- if (!from_cblock(cache->nr_dirty))
+ if (atomic_dec_return(&cache->nr_dirty) == 0)
dm_table_event(cache->ti->table);
}
}
@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
atomic_set(&cache->quiescing_ack, 0);
r = -ENOMEM;
- cache->nr_dirty = 0;
+ atomic_set(&cache->nr_dirty, 0);
cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
if (!cache->dirty_bitset) {
*error = "could not allocate dirty bitset";
@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy);
- DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
+ DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
(unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata,
@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
(unsigned) atomic_read(&cache->stats.write_miss),
(unsigned) atomic_read(&cache->stats.demotion),
(unsigned) atomic_read(&cache->stats.promotion),
- (unsigned long long) from_cblock(cache->nr_dirty));
+ (unsigned long) atomic_read(&cache->nr_dirty));
if (writethrough_mode(&cache->features))
DMEMIT("1 writethrough ");
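
Turning nr_dirty into an atomic_t removes the unlocked read-modify-write on the counter; pairing it with test_and_set_bit()/test_and_clear_bit() ensures each block is counted exactly once, and atomic_dec_return() observes the transition to zero atomically. A toy version (names invented):

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/kernel.h>

static atomic_t nr_dirty = ATOMIC_INIT(0);

static void mark_dirty(unsigned long *bitset, unsigned long bit)
{
	if (!test_and_set_bit(bit, bitset))
		atomic_inc(&nr_dirty);	/* first setter counts it */
}

static void mark_clean(unsigned long *bitset, unsigned long bit)
{
	if (!test_and_clear_bit(bit, bitset))
		return;			/* was already clean */
	if (atomic_dec_return(&nr_dirty) == 0)
		pr_info("all clean\n");	/* e.g. raise a table event */
}
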
diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c
index 8637d2ed762..2e3cdcfa0a6 100644
--- a/drivers/media/dvb-frontends/si2168.c
+++ b/drivers/media/dvb-frontends/si2168.c
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
- if (!(cmd->args[0] >> 7) & 0x01) {
+ if (!((cmd->args[0] >> 7) & 0x01)) {
ret = -ETIMEDOUT;
goto err_mutex_unlock;
}
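
The added parentheses say what was meant: '!' binds tighter than '&'. In this driver the operand is already shifted down to 0 or 1, so the two forms happen to coincide, but the unparenthesized pattern is a real bug in general (and gcc flags it with -Wlogical-not-parentheses). A two-line standalone demo:

#include <stdio.h>

int main(void)
{
	unsigned char status = 0x01;	/* bit 7 clear, bit 0 set */

	printf("%d\n", !status & 0x80);		/* (!status) & 0x80 -> 0 */
	printf("%d\n", !(status & 0x80));	/* bit 7 is clear   -> 1 */
	return 0;
}
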
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
if (ret)
goto err;
- cmd.args[0] = 0x05;
- cmd.args[1] = 0x00;
- cmd.args[2] = 0xaa;
- cmd.args[3] = 0x4d;
- cmd.args[4] = 0x56;
- cmd.args[5] = 0x40;
- cmd.args[6] = 0x00;
- cmd.args[7] = 0x00;
- cmd.wlen = 8;
- cmd.rlen = 1;
- ret = si2168_cmd_execute(s, &cmd);
- if (ret)
- goto err;
-
/* cold state - try to download firmware */
dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
KBUILD_MODNAME, si2168_ops.info.name);
diff --git a/drivers/media/dvb-frontends/si2168_priv.h b/drivers/media/dvb-frontends/si2168_priv.h
index 2a343e896f4..53f7f06ae34 100644
--- a/drivers/media/dvb-frontends/si2168_priv.h
+++ b/drivers/media/dvb-frontends/si2168_priv.h
@@ -22,7 +22,7 @@
#include <linux/firmware.h>
#include <linux/i2c-mux.h>
-#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw"
+#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
/* state struct */
struct si2168 {
diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
index 522fe00f5ee..9619be5d482 100644
--- a/drivers/media/dvb-frontends/tda10071.c
+++ b/drivers/media/dvb-frontends/tda10071.c
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i;
u8 mode, rolloff, pilot, inversion, div;
+ fe_modulation_t modulation;
dev_dbg(&priv->i2c->dev,
"%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
switch (c->delivery_system) {
case SYS_DVBS:
+ modulation = QPSK;
rolloff = 0;
pilot = 2;
break;
case SYS_DVBS2:
+ modulation = c->modulation;
+
switch (c->rolloff) {
case ROLLOFF_20:
rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
- c->modulation == TDA10071_MODCOD[i].modulation &&
+ modulation == TDA10071_MODCOD[i].modulation &&
c->fec_inner == TDA10071_MODCOD[i].fec) {
mode = TDA10071_MODCOD[i].val;
dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
switch ((buf[1] >> 0) & 0x01) {
case 0:
- c->inversion = INVERSION_OFF;
+ c->inversion = INVERSION_ON;
break;
case 1:
- c->inversion = INVERSION_ON;
+ c->inversion = INVERSION_OFF;
break;
}
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
if (ret)
goto error;
- c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+ c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
return ret;
error:
diff --git a/drivers/media/dvb-frontends/tda10071_priv.h b/drivers/media/dvb-frontends/tda10071_priv.h
index 4baf14bfb65..42048619273 100644
--- a/drivers/media/dvb-frontends/tda10071_priv.h
+++ b/drivers/media/dvb-frontends/tda10071_priv.h
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
{ SYS_DVBS2, QPSK, FEC_8_9, 0x0a },
{ SYS_DVBS2, QPSK, FEC_9_10, 0x0b },
/* 8PSK */
+ { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
{ SYS_DVBS2, PSK_8, FEC_3_5, 0x0c },
{ SYS_DVBS2, PSK_8, FEC_2_3, 0x0d },
{ SYS_DVBS2, PSK_8, FEC_3_4, 0x0e },
diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
index e65c760e4e8..0006d6bf8c1 100644
--- a/drivers/media/pci/saa7134/saa7134-empress.c
+++ b/drivers/media/pci/saa7134/saa7134-empress.c
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
.read = vb2_fop_read,
.poll = vb2_fop_poll,
.mmap = vb2_fop_mmap,
- .ioctl = video_ioctl2,
+ .unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops ts_ioctl_ops = {
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index a7ed1649790..1e4ec697fb1 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -269,6 +269,7 @@ err:
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return ret;
}
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index 5bb085b19bc..b431b58f39e 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -233,6 +233,7 @@ err:
list_del(&buf->list);
vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
}
+ spin_unlock_irqrestore(&common->irqlock, flags);
return ret;
}
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 271a752cee5..fa4cc7b880a 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
jiffies_to_msecs(jiffies) -
(jiffies_to_msecs(timeout) - TIMEOUT));
- if (!(buf[0] >> 7) & 0x01) {
+ if (!((buf[0] >> 7) & 0x01)) {
ret = -ETIMEDOUT;
goto err_mutex_unlock;
} else {
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 021e4d35e4d..7b9b75f6077 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
if (ret < 0)
goto err;
- if (tmp == 0x00)
- dev_dbg(&d->udev->dev,
- "%s: [%d]tuner not set, using default\n",
- __func__, i);
- else
+ dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
+ __func__, i, tmp);
+
+ /* tuner sanity check */
+ if (state->chip_type == 0x9135) {
+ if (state->chip_version == 0x02) {
+ /* IT9135 BX (v2) */
+ switch (tmp) {
+ case AF9033_TUNER_IT9135_60:
+ case AF9033_TUNER_IT9135_61:
+ case AF9033_TUNER_IT9135_62:
+ state->af9033_config[i].tuner = tmp;
+ break;
+ }
+ } else {
+ /* IT9135 AX (v1) */
+ switch (tmp) {
+ case AF9033_TUNER_IT9135_38:
+ case AF9033_TUNER_IT9135_51:
+ case AF9033_TUNER_IT9135_52:
+ state->af9033_config[i].tuner = tmp;
+ break;
+ }
+ }
+ } else {
+ /* AF9035 */
state->af9033_config[i].tuner = tmp;
+ }
- dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
- __func__, i, state->af9033_config[i].tuner);
+ if (state->af9033_config[i].tuner != tmp) {
+ dev_info(&d->udev->dev,
+ "%s: [%d] overriding tuner from %02x to %02x\n",
+ KBUILD_MODNAME, i, tmp,
+ state->af9033_config[i].tuner);
+ }
switch (state->af9033_config[i].tuner) {
case AF9033_TUNER_TUA9001:
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index 2fd1c5e31a0..339adce7c7a 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x2620)},
{USB_DEVICE(0x093a, 0x2621)},
{USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+ {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
{USB_DEVICE(0x093a, 0x2625)},
{USB_DEVICE(0x093a, 0x2626)},
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 0500c4175d5..6bce01a674f 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
}
/*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
/* function expects dev->io_mutex to be hold by caller */
int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_AUDIO_ENCODING:
if (dev->flags & HDPVR_FLAG_AC3_CAP) {
opt->audio_codec = ctrl->val;
- return hdpvr_set_audio(dev, opt->audio_input,
+ return hdpvr_set_audio(dev, opt->audio_input + 1,
opt->audio_codec);
}
return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
V4L2_CID_MPEG_AUDIO_ENCODING,
ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
- 0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
+ 0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
V4L2_CID_MPEG_VIDEO_ENCODING,
V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index 4ae54caadd0..ce1c9f5d9de 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
aspect.denominator = 9;
} else if (ratio == 34) {
aspect.numerator = 4;
- aspect.numerator = 3;
+ aspect.denominator = 3;
} else if (ratio == 68) {
aspect.numerator = 15;
- aspect.numerator = 9;
+ aspect.denominator = 9;
} else {
aspect.numerator = hor_landscape + 99;
aspect.denominator = 100;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 824108cd9fd..12430be6448 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -287,7 +287,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
}
- priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+ priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index c83584a2671..5a1891faba8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -339,7 +339,8 @@ static int xgbe_probe(struct platform_device *pdev)
/* Calculate the number of Tx and Rx rings to be created */
pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
pdata->hw_feat.tx_ch_cnt);
- if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) {
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+ if (ret) {
dev_err(dev, "error setting real tx queue count\n");
goto err_io;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4cab09d3f80..8206a293e6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -346,6 +346,7 @@ struct sw_tx_bd {
u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD (1<<0)
+#define BNX2X_HAS_SECOND_PBD (1<<1)
};
struct sw_rx_page {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4b875da1c7e..c43e7238de2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+ /* Skip second parse bd... */
+ --nbd;
+ bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+ }
+
/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -3889,6 +3895,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set encapsulation flag in start BD */
SET_FLAG(tx_start_bd->general_data,
ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+ tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
nbd++;
} else if (xmit_type & XMIT_CSUM) {
/* Set PBD in checksum offload case w/o encapsulation */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index bd0600cf726..25eddd90f48 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -379,6 +379,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case PORT_FIBRE:
case PORT_DA:
+ case PORT_NONE:
if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
bp->port.supported[1] & SUPPORTED_FIBRE)) {
DP(BNX2X_MSG_ETHTOOL,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 16281ad2da1..4e615debe47 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1149,6 +1149,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
+ if (skb_padto(skb, ETH_ZLEN)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
ret = bcmgenet_put_tx_csum(dev, skb);
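
skb_padto() here works around hardware that does not zero-pad runt frames itself. One subtlety worth spelling out, as a sketch: on failure the skb has already been freed, so the driver must not free it again and still returns NETDEV_TX_OK because the packet is consumed either way.

#include <linux/if_ether.h>
#include <linux/netdevice.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb already freed by skb_padto() */

	/* ... map the now >= 60-byte frame and ring the doorbell ... */
	return NETDEV_TX_OK;
}
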
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 14c00048bbe..82322b1c841 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -129,14 +129,15 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
name);
}
- cq->irq_desc =
- irq_to_desc(mlx4_eq_get_irq(mdev->dev,
- cq->vector));
}
} else {
cq->vector = (cq->ring + 1 + priv->port) %
mdev->dev->caps.num_comp_vectors;
}
+
+ cq->irq_desc =
+ irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+ cq->vector));
} else {
/* For TX we use the same irq per
ring we assigned for the RX */
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 06bdc31a828..61623e9af57 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4240,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
+ RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ break;
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_42:
case RTL_GIGA_MAC_VER_43:
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 1c24a8f368b..d813bfb1a84 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
return err;
}
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+ struct vio_driver_state *vio = &vnet->vio;
+
+ return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
{
unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
struct vnet_port *port;
hlist_for_each_entry(port, hp, hash) {
+ if (!port_is_up(port))
+ continue;
if (ether_addr_equal(port->raddr, skb->data))
return port;
}
- port = NULL;
- if (!list_empty(&vp->port_list))
- port = list_entry(vp->port_list.next, struct vnet_port, list);
-
- return port;
+ list_for_each_entry(port, &vp->port_list, list) {
+ if (!port->switch_port)
+ continue;
+ if (!port_is_up(port))
+ continue;
+ return port;
+ }
+ return NULL;
}
struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
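
The selection logic now has two rules: never hand back a port whose VIO handshake has not completed, and fall back to an up switch port instead of blindly taking the list head. Abstracted into a sketch (demo_* names invented):

#include <linux/list.h>
#include <linux/types.h>

struct demo_port {
	struct list_head list;
	bool up;		/* handshake complete */
	bool switch_port;	/* may forward on our behalf */
};

static struct demo_port *demo_tx_port_find(struct list_head *ports,
					   struct demo_port *exact)
{
	struct demo_port *p;

	if (exact && exact->up)
		return exact;	/* direct match, and usable */
	list_for_each_entry(p, ports, list)
		if (p->switch_port && p->up)
			return p;	/* fallback: a live switch port */
	return NULL;		/* nothing usable: drop the packet */
}
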
@@ -1083,6 +1095,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
return vp;
}
+static void vnet_cleanup(void)
+{
+ struct vnet *vp;
+ struct net_device *dev;
+
+ mutex_lock(&vnet_list_mutex);
+ while (!list_empty(&vnet_list)) {
+ vp = list_first_entry(&vnet_list, struct vnet, list);
+ list_del(&vp->list);
+ dev = vp->dev;
+ /* vio_unregister_driver() should have cleaned up port_list */
+ BUG_ON(!list_empty(&vp->port_list));
+ unregister_netdev(dev);
+ free_netdev(dev);
+ }
+ mutex_unlock(&vnet_list_mutex);
+}
+
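
vnet_cleanup() pairs with the removal of unregister_netdev() from vnet_port_remove() just below: the netdevs are now torn down exactly once, at module exit, by draining the global list. The head-drain idiom, reduced to a sketch (demo_* names invented):

#include <linux/list.h>
#include <linux/mutex.h>

struct demo_port { struct list_head list; };
static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_list_mutex);

static void demo_cleanup(void)
{
	struct demo_port *p;

	mutex_lock(&demo_list_mutex);
	while (!list_empty(&demo_list)) {
		p = list_first_entry(&demo_list, struct demo_port, list);
		list_del(&p->list);
		/* unregister and free p here */
	}
	mutex_unlock(&demo_list_mutex);
}
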
static const char *local_mac_prop = "local-mac-address";
static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1270,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
kfree(port);
- unregister_netdev(vp->dev);
}
return 0;
}
@@ -1268,6 +1297,7 @@ static int __init vnet_init(void)
static void __exit vnet_exit(void)
{
vio_unregister_driver(&vnet_port_driver);
+ vnet_cleanup();
}
module_init(vnet_init);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 4ed38eaecea..d97d5f39a04 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -378,8 +378,10 @@ static int netvsc_init_buf(struct hv_device *device)
net_device->send_section_map =
kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
- if (net_device->send_section_map == NULL)
+ if (net_device->send_section_map == NULL) {
+ ret = -ENOMEM;
goto cleanup;
+ }
goto exit;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4eaadcfcb0f..203651ebccb 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -255,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
bus->dev.parent = bus->parent;
bus->dev.class = &mdio_bus_class;
+ bus->dev.driver = bus->parent->driver;
bus->dev.groups = NULL;
dev_set_name(&bus->dev, "%s", bus->id);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 35d753d22f7..22c57be4dfa 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
phydev->bus->phy_map[phydev->addr] = phydev;
/* Run all of the fixups for this PHY */
- err = phy_init_hw(phydev);
+ err = phy_scan_fixups(phydev);
if (err) {
pr_err("PHY %d failed to initialize\n", phydev->addr);
goto out;
@@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
struct device *d = &phydev->dev;
+ struct module *bus_module;
int err;
/* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
return -EBUSY;
}
+ /* Increment the bus module reference count */
+ bus_module = phydev->bus->dev.driver ?
+ phydev->bus->dev.driver->owner : NULL;
+ if (!try_module_get(bus_module)) {
+ dev_err(&dev->dev, "failed to get the bus module\n");
+ return -EIO;
+ }
+
phydev->attached_dev = dev;
dev->phydev = phydev;
@@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
void phy_detach(struct phy_device *phydev)
{
int i;
+
+ if (phydev->bus->dev.driver)
+ module_put(phydev->bus->dev.driver->owner);
+
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_suspend(phydev);
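
Together with the mdio_bus change above, which makes the bus driver reachable via bus->dev.driver, this pins the MDIO bus module for as long as a PHY is attached to a netdev. The get/put pairing, as a sketch (demo_* names invented; both primitives tolerate a NULL owner, which covers buses without a module):

#include <linux/module.h>

static int demo_attach(struct module *bus_owner)
{
	if (!try_module_get(bus_owner))	/* returns true for NULL */
		return -EIO;
	return 0;
}

static void demo_detach(struct module *bus_owner)
{
	module_put(bus_owner);		/* no-op for NULL */
}
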
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index e2f20f807de..d5b77ef3a21 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -757,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
};
ppp_lock(ppp);
- if (ppp->pass_filter)
+ if (ppp->pass_filter) {
sk_unattached_filter_destroy(ppp->pass_filter);
- err = sk_unattached_filter_create(&ppp->pass_filter,
- &fprog);
+ ppp->pass_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&ppp->pass_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
ppp_unlock(ppp);
}
@@ -778,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
};
ppp_lock(ppp);
- if (ppp->active_filter)
+ if (ppp->active_filter) {
sk_unattached_filter_destroy(ppp->active_filter);
- err = sk_unattached_filter_create(&ppp->active_filter,
- &fprog);
+ ppp->active_filter = NULL;
+ }
+ if (fprog.filter != NULL)
+ err = sk_unattached_filter_create(&ppp->active_filter,
+ &fprog);
+ else
+ err = 0;
kfree(code);
ppp_unlock(ppp);
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 9ea4bfe5d31..2a32d9167d3 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -341,6 +341,22 @@ next_desc:
usb_driver_release_interface(driver, info->data);
return -ENODEV;
}
+
+ /* Some devices don't initialise properly. In particular
+ * the packet filter is not reset; some devices don't
+ * perform a complete reset. So the packet filter should
+ * be set to a sane initial value.
+ */
+ usb_control_msg(dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ USB_CDC_SET_ETHERNET_PACKET_FILTER,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
+ intf->cur_altsetting->desc.bInterfaceNumber,
+ NULL,
+ 0,
+ USB_CTRL_SET_TIMEOUT
+ );
return 0;
bad_desc:
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index 5d95a13dbe2..735f7dadb9a 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -194,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
.driver_info = (unsigned long)&huawei_cdc_ncm_info,
},
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
/* Terminating entry */
{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index c4638c67f6b..22756db53dc 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
@@ -757,6 +758,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9055, 8)}, /* Netgear AirCard 341U */
{QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
+ {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7bad2d31663..3eab74c7c55 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -282,7 +282,7 @@
/* USB_DEV_STAT */
#define STAT_SPEED_MASK 0x0006
#define STAT_SPEED_HIGH 0x0000
-#define STAT_SPEED_FULL 0x0001
+#define STAT_SPEED_FULL 0x0002
/* USB_TX_AGG */
#define TX_AGG_MAX_THRESHOLD 0x03
@@ -2292,9 +2292,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
/* rx share fifo credit full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
- ocp_data &= STAT_SPEED_MASK;
- if (ocp_data == STAT_SPEED_FULL) {
+ if (tp->udev->speed == USB_SPEED_FULL ||
+ tp->udev->speed == USB_SPEED_LOW) {
/* rx share fifo credit near full threshold */
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
RXFIFO_THR2_FULL);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ade33ef8282..9f79192c9aa 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -339,7 +339,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
ndm->ndm_state = fdb->state;
ndm->ndm_ifindex = vxlan->dev->ifindex;
ndm->ndm_flags = fdb->flags;
- ndm->ndm_type = NDA_DST;
+ ndm->ndm_type = RTN_UNICAST;
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 5895f197869..fa9fdfa128c 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
{
struct x25_asy *sl = netdev_priv(dev);
unsigned char *xbuff, *rbuff;
- int len = 2 * newmtu;
+ int len;
+ if (newmtu > 65534)
+ return -EINVAL;
+
+ len = 2 * newmtu;
xbuff = kmalloc(len + 4, GFP_ATOMIC);
rbuff = kmalloc(len + 4, GFP_ATOMIC);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 66acb2cbd9d..7c28cb55610 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -887,6 +887,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
tx_info = IEEE80211_SKB_CB(skb);
tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
+ /*
+ * No aggregation session is running, but there may be frames
+ * from a previous session or a failed attempt in the queue.
+ * Send them out as normal data frames
+ */
+ if (!tid->active)
+ tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
bf->bf_state.bf_type = 0;
return bf;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 725ba49576b..8b79081d488 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -1072,8 +1072,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
/* Fill the common data for all mac context types */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
- /* Also enable probe requests to pass */
- cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+ /*
+ * pass probe requests and beacons from other APs (needed
+ * for ht protection)
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_IN_BEACON);
/* Fill the data specific for ap mode */
iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
@@ -1094,6 +1098,13 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
/* Fill the common data for all mac context types */
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
+ /*
+ * pass probe requests and beacons from other APs (needed
+ * for ht protection)
+ */
+ cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+ MAC_FILTER_IN_BEACON);
+
/* Fill the data specific for GO mode */
iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
action == FW_CTXT_ACTION_ADD);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 9bfb90680cd..98556d03c1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -303,13 +303,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
}
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
- !iwlwifi_mod_params.uapsd_disable) {
- hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
- hw->uapsd_queues = IWL_UAPSD_AC_INFO;
- hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
- }
-
hw->sta_data_size = sizeof(struct iwl_mvm_sta);
hw->vif_data_size = sizeof(struct iwl_mvm_vif);
hw->chanctx_data_size = sizeof(u16);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1844a47636b..c65b636bcab 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
{
struct gnttab_map_grant_ref *gop_map = *gopp_map;
u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+ /* This always points to the shinfo of the skb being checked, which
+ * could be either the first or the one on the frag_list
+ */
struct skb_shared_info *shinfo = skb_shinfo(skb);
+ /* If this is non-NULL, we are currently checking the frag_list skb, and
+ * this points to the shinfo of the first one
+ */
+ struct skb_shared_info *first_shinfo = NULL;
int nr_frags = shinfo->nr_frags;
+ const bool sharedslot = nr_frags &&
+ frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
int i, err;
- struct sk_buff *first_skb = NULL;
/* Check status of header. */
err = (*gopp_copy)->status;
- (*gopp_copy)++;
if (unlikely(err)) {
if (net_ratelimit())
netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
(*gopp_copy)->status,
pending_idx,
(*gopp_copy)->source.u.ref);
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+ /* The first frag might still have this slot mapped */
+ if (!sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
}
+ (*gopp_copy)++;
check_frags:
for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
pending_idx,
gop_map->handle);
/* Had a previous error? Invalidate this fragment. */
- if (unlikely(err))
+ if (unlikely(err)) {
xenvif_idx_unmap(queue, pending_idx);
+ /* If the mapping of the first frag was OK, but
+ * the header's copy failed, and they are
+ * sharing a slot, send an error
+ */
+ if (i == 0 && sharedslot)
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_ERROR);
+ else
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
continue;
}
@@ -1075,42 +1097,53 @@ check_frags:
gop_map->status,
pending_idx,
gop_map->ref);
+
xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
- /* First error: invalidate preceding fragments. */
+
+ /* First error: if the header hasn't shared a slot with the
+ * first frag, release it as well.
+ */
+ if (!sharedslot)
+ xenvif_idx_release(queue,
+ XENVIF_TX_CB(skb)->pending_idx,
+ XEN_NETIF_RSP_OKAY);
+
+ /* Invalidate preceding fragments of this skb. */
for (j = 0; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xenvif_idx_unmap(queue, pending_idx);
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
+
+ /* And if we found the error while checking the frag_list, unmap
+ * the first skb's frags
+ */
+ if (first_shinfo) {
+ for (j = 0; j < first_shinfo->nr_frags; j++) {
+ pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+ xenvif_idx_unmap(queue, pending_idx);
+ xenvif_idx_release(queue, pending_idx,
+ XEN_NETIF_RSP_OKAY);
+ }
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
- if (skb_has_frag_list(skb)) {
- first_skb = skb;
- skb = shinfo->frag_list;
- shinfo = skb_shinfo(skb);
+ if (skb_has_frag_list(skb) && !first_shinfo) {
+ first_shinfo = skb_shinfo(skb);
+ shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
nr_frags = shinfo->nr_frags;
goto check_frags;
}
- /* There was a mapping error in the frag_list skb. We have to unmap
- * the first skb's frags
- */
- if (first_skb && err) {
- int j;
- shinfo = skb_shinfo(first_skb);
- for (j = 0; j < shinfo->nr_frags; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xenvif_idx_unmap(queue, pending_idx);
- }
- }
-
*gopp_map = gop_map;
return err;
}
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
/* Check the remap error code. */
if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+ /* If there was an error, xenvif_tx_check_gop is
+ * expected to release all the frags which were mapped,
+ * so kfree_skb shouldn't do it again
+ */
skb_shinfo(skb)->nr_frags = 0;
+ if (skb_has_frag_list(skb)) {
+ struct sk_buff *nskb =
+ skb_shinfo(skb)->frag_list;
+ skb_shinfo(nskb)->nr_frags = 0;
+ }
kfree_skb(skb);
continue;
}
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
tx_unmap_op.status);
BUG();
}
-
- xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
}
static inline int rx_work_todo(struct xenvif_queue *queue)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index b777d8f46bd..9aa012e6ea0 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,6 +26,54 @@
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
+/*
+ * of_fdt_limit_memory - limit the number of regions in the /memory node
+ * @limit: maximum entries
+ *
+ * Adjust the flattened device tree to have at most 'limit' number of
+ * memory entries in the /memory node. This function may be called
+ * any time after initial_boot_params is set.
+ */
+void of_fdt_limit_memory(int limit)
+{
+ int memory;
+ int len;
+ const void *val;
+ int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+ int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+ const uint32_t *addr_prop;
+ const uint32_t *size_prop;
+ int root_offset;
+ int cell_size;
+
+ root_offset = fdt_path_offset(initial_boot_params, "/");
+ if (root_offset < 0)
+ return;
+
+ addr_prop = fdt_getprop(initial_boot_params, root_offset,
+ "#address-cells", NULL);
+ if (addr_prop)
+ nr_address_cells = fdt32_to_cpu(*addr_prop);
+
+ size_prop = fdt_getprop(initial_boot_params, root_offset,
+ "#size-cells", NULL);
+ if (size_prop)
+ nr_size_cells = fdt32_to_cpu(*size_prop);
+
+ cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
+
+ memory = fdt_path_offset(initial_boot_params, "/memory");
+ if (memory > 0) {
+ val = fdt_getprop(initial_boot_params, memory, "reg", &len);
+ if (len > limit*cell_size) {
+ len = limit*cell_size;
+ pr_debug("Limiting number of entries to %d\n", limit);
+ fdt_setprop(initial_boot_params, memory, "reg", val,
+ len);
+ }
+ }
+}
+
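
The truncation is cell-granular: each /memory "reg" entry takes (#address-cells + #size-cells) 32-bit cells. A standalone arithmetic check of the limit logic (values are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int nr_address_cells = 2, nr_size_cells = 2;	/* typical 64-bit DT */
	int limit = 4;					/* keep at most 4 regions */
	int cell_size = sizeof(uint32_t) * (nr_address_cells + nr_size_cells);
	int len = 10 * cell_size;			/* pretend "reg" holds 10 entries */

	if (len > limit * cell_size)
		len = limit * cell_size;
	printf("cell_size=%d bytes, reg truncated to %d bytes (%d entries)\n",
	       cell_size, len, len / cell_size);	/* prints 16, 64, 4 */
	return 0;
}
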
/**
* of_fdt_is_compatible - Return true if given node from the given blob has
* compat in its compatible list
@@ -937,7 +985,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
}
#endif
-bool __init early_init_dt_scan(void *params)
+bool __init early_init_dt_verify(void *params)
{
if (!params)
return false;
@@ -951,6 +999,12 @@ bool __init early_init_dt_scan(void *params)
return false;
}
+ return true;
+}
+
+
+void __init early_init_dt_scan_nodes(void)
+{
/* Retrieve various information from the /chosen node */
of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
@@ -959,7 +1013,17 @@ bool __init early_init_dt_scan(void *params)
/* Setup memory, calling early_init_dt_add_memory_arch */
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+bool __init early_init_dt_scan(void *params)
+{
+ bool status;
+
+ status = early_init_dt_verify(params);
+ if (!status)
+ return false;
+ early_init_dt_scan_nodes();
return true;
}
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 2872ece81f3..44333bd8f90 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -5,6 +5,12 @@
# Parport configuration.
#
+config ARCH_MIGHT_HAVE_PC_PARPORT
+ bool
+ help
+ Select this config option from the architecture Kconfig if
+ the architecture might have PC parallel port hardware.
+
menuconfig PARPORT
tristate "Parallel port support"
depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
If unsure, say Y.
-config ARCH_MIGHT_HAVE_PC_PARPORT
- bool
- help
- Select this config option from the architecture Kconfig if
- the architecture might have PC parallel port hardware.
-
if PARPORT
config PARPORT_PC
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1bd6363bc95..9f43916637c 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
status = readl(info->irqmux_base);
- for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+ for_each_set_bit(n, &status, info->nbanks)
__gpio_irq_handler(&info->banks[n]);
chained_irq_exit(chip, desc);
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index b81448b2c75..a5c6cb773e5 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -319,8 +319,7 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
struct pnp_dev *pnp = _pnp;
/* true means it matched */
- return !acpi->physical_node_count
- && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
+ return pnp->data == acpi;
}
static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 9b60b1f3261..44341dc5b14 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
"desc %p not ACKed\n", tx_desc);
}
+ if (ret == NULL) {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: unable to obtain tx descriptor\n", __func__);
+ goto err_out;
+ }
+
i = bdma_chan->wr_count_next % bdma_chan->bd_num;
if (i == bdma_chan->bd_num - 1) {
i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
tx_desc->txd.phys = bdma_chan->bd_phys +
i * sizeof(struct tsi721_dma_desc);
tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
spin_unlock_bh(&bdma_chan->lock);
return ret;
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 15b3459f865..220acb4cbee 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
} else
raw3270_writesf_readpart(rp);
memset(&rp->init_reset, 0, sizeof(rp->init_reset));
- memset(&rp->init_data, 0, sizeof(rp->init_data));
}
static int
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 69ef4f8cfac..4038437ff03 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
int rc;
ap_dev->drv = ap_drv;
+
+ spin_lock_bh(&ap_device_list_lock);
+ list_add(&ap_dev->list, &ap_device_list);
+ spin_unlock_bh(&ap_device_list_lock);
+
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
- if (!rc) {
+ if (rc) {
spin_lock_bh(&ap_device_list_lock);
- list_add(&ap_dev->list, &ap_device_list);
+ list_del_init(&ap_dev->list);
spin_unlock_bh(&ap_device_list_lock);
}
return rc;
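
The ap_bus reorder puts the device on the bus list before the driver probe runs and unwinds the entry if probe fails, closing the window in which callbacks could fire against a device not yet on the list. A userspace sketch of register-early/unregister-on-failure (names hypothetical, pthread mutex in place of the bh spinlock):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct dev {
	struct dev *next;
	int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev *dev_list;

static void list_add_locked(struct dev *d)
{
	pthread_mutex_lock(&list_lock);
	d->next = dev_list;
	dev_list = d;
	pthread_mutex_unlock(&list_lock);
}

static void list_del_locked(struct dev *d)
{
	pthread_mutex_lock(&list_lock);
	for (struct dev **p = &dev_list; *p; p = &(*p)->next)
		if (*p == d) { *p = d->next; break; }
	pthread_mutex_unlock(&list_lock);
}

static int probe(struct dev *d) { return d->id < 0 ? -ENODEV : 0; }

static int device_probe(struct dev *d)
{
	int rc;

	list_add_locked(d);      /* visible before probe can need it */
	rc = probe(d);
	if (rc)
		list_del_locked(d);  /* unwind on failure */
	return rc;
}

int main(void)
{
	struct dev ok = { .id = 1 }, bad = { .id = -1 };
	printf("%d %d\n", device_probe(&ok), device_probe(&bad));
	return 0;
}
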
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f7e316368c9..3f50dfcb322 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -733,6 +733,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
scsi_next_command(cmd);
return;
}
+ } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
+ /*
+	 * Certain non-BLOCK_PC requests are commands that don't
+	 * actually transfer anything (FLUSH), so we cannot use
+	 * good_bytes != blk_rq_bytes(req) as the signal for an error.
+ * This sets the error explicitly for the problem case.
+ */
+ error = __scsi_error_from_host_byte(cmd, result);
}
/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
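
The new scsi_io_completion() branch handles commands whose request transfers zero bytes (FLUSH being the example in the comment): the good_bytes != blk_rq_bytes() heuristic is vacuous for them, so a failing result must be mapped to an error explicitly. A toy sketch of that decision (the error mapping is a hypothetical stand-in):

#include <stdio.h>

/* Hypothetical sketch: derive an error for a completed command. */
static int completion_error(unsigned int req_bytes,
			    unsigned int good_bytes, int result)
{
	if (req_bytes != 0)
		return good_bytes != req_bytes ? -5 /* EIO */ : 0;

	/* Zero-byte commands (e.g. FLUSH): the byte-count heuristic is
	 * vacuous, so map a nonzero host/device result explicitly. */
	return result ? -5 /* EIO */ : 0;
}

int main(void)
{
	printf("%d\n", completion_error(0, 0, 0x07));  /* -5: flush failed */
	printf("%d\n", completion_error(0, 0, 0));     /*  0: flush ok */
	return 0;
}
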
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index 78b0fba7047..8afc6fee40c 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_OMAP4
bool "OMAP 4 Camera support"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+ depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
select VIDEOBUF2_DMA_CONTIG
---help---
Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 8b25c1aa202..ebb19b22f47 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -530,8 +530,10 @@ int rtw_resume_process23a(struct rtw_adapter *padapter)
pwrpriv->bkeepfwalive = false;
DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open23a(pnetdev, true) != 0)
+ if (pm_netdev_open23a(pnetdev, true) != 0) {
+ up(&pwrpriv->lock);
goto exit;
+ }
netif_device_attach(pnetdev);
netif_carrier_on(pnetdev);
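
The rtl8723au resume fix releases pwrpriv->lock on the early-exit path; previously a failed pm_netdev_open23a() jumped to exit with the semaphore still held. A userspace sketch of keeping a semaphore balanced across every exit (POSIX semaphore standing in for the driver's):

#include <semaphore.h>
#include <stdio.h>

static sem_t lock;

static int netdev_open(int fail) { return fail ? -1 : 0; }

static int resume_process(int fail)
{
	int ret = -1;

	sem_wait(&lock);
	/* ... restore firmware / power state ... */
	if (netdev_open(fail) != 0) {
		sem_post(&lock);   /* must release before bailing out */
		goto exit;
	}
	sem_post(&lock);
	/* ... attach netif, carrier on ... */
	ret = 0;
exit:
	return ret;
}

int main(void)
{
	sem_init(&lock, 0, 1);
	printf("%d %d\n", resume_process(1), resume_process(0));
	sem_destroy(&lock);
	return 0;
}
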
diff --git a/drivers/staging/vt6655/bssdb.c b/drivers/staging/vt6655/bssdb.c
index 59679cd4681..69b80e80b01 100644
--- a/drivers/staging/vt6655/bssdb.c
+++ b/drivers/staging/vt6655/bssdb.c
@@ -981,7 +981,7 @@ start:
pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
}
- {
+ if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
pDevice->byReAssocCount++;
/* 10 sec timeout */
if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 1d3908d044d..5a5fd937a44 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -2318,6 +2318,7 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
int handled = 0;
unsigned char byData = 0;
int ii = 0;
+ unsigned long flags;
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
@@ -2331,7 +2332,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
handled = 1;
MACvIntDisable(pDevice->PortOffset);
- spin_lock_irq(&pDevice->lock);
+
+ spin_lock_irqsave(&pDevice->lock, flags);
//Make sure current page is 0
VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
@@ -2560,7 +2562,8 @@ static irqreturn_t device_intr(int irq, void *dev_instance) {
if (byOrgPageSel == 1)
MACvSelectPage1(pDevice->PortOffset);
- spin_unlock_irq(&pDevice->lock);
+ spin_unlock_irqrestore(&pDevice->lock, flags);
+
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
return IRQ_RETVAL(handled);
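
Switching device_intr() to spin_lock_irqsave()/spin_unlock_irqrestore() records the interrupt state on entry instead of unconditionally re-enabling IRQs on unlock, which matters when the handler can be entered with interrupts already off. A minimal kernel-style sketch of the idiom (assumes a kernel build context; names hypothetical):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Sketch: spin_unlock_irq() would force interrupts back on;
 * irqrestore puts back whatever state the caller had. */
static irqreturn_t demo_intr(int irq, void *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);   /* save caller's IRQ state */
	/* ... touch device registers / shared state ... */
	spin_unlock_irqrestore(&demo_lock, flags);  /* restore, not enable */

	return IRQ_HANDLED;
}
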
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 5d4de88fe5b..eeba7544f0c 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1195,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
int gnttab_init(void)
{
int i;
+ unsigned long max_nr_grant_frames;
unsigned int max_nr_glist_frames, nr_glist_frames;
unsigned int nr_init_grefs;
int ret;
gnttab_request_version();
+ max_nr_grant_frames = gnttab_max_grant_frames();
nr_grant_frames = 1;
/* Determine the maximum number of frames required for the
* grant reference free list on the current hypervisor.
*/
BUG_ON(grefs_per_grant_frame == 0);
- max_nr_glist_frames = (gnttab_max_grant_frames() *
+ max_nr_glist_frames = (max_nr_grant_frames *
grefs_per_grant_frame / RPP);
gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1223,6 +1225,11 @@ int gnttab_init(void)
}
}
+ ret = arch_gnttab_init(max_nr_grant_frames,
+ nr_status_frames(max_nr_grant_frames));
+ if (ret < 0)
+ goto ini_nomem;
+
if (gnttab_setup() < 0) {
ret = -ENODEV;
goto ini_nomem;
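
gnttab_init() now reads gnttab_max_grant_frames() once into a local and feeds the same snapshot to both the free-list sizing and the new arch_gnttab_init() call, so the two consumers can never disagree. A tiny sketch of the snapshot pattern (query function and numbers hypothetical):

#include <stdio.h>

/* Hypothetical: a limit that may not be stable across reads. */
static unsigned long query_max_frames(void) { return 32; }

static int init_tables(void)
{
	/* Snapshot once; every consumer below sees the same value. */
	unsigned long max_frames = query_max_frames();
	unsigned long list_frames = max_frames * 512 / 4096;

	printf("reserving %lu list frames for %lu grant frames\n",
	       list_frames, max_frames);
	return 0;
}

int main(void) { return init_tables(); }
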