Diffstat (limited to 'drivers/dma/mv_xor.c')
-rw-r--r--  drivers/dma/mv_xor.c | 707
1 file changed, 334 insertions(+), 373 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index e362e2b80ef..394cbc5c93e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -26,6 +26,9 @@
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>
#include "dmaengine.h"
@@ -34,14 +37,14 @@
static void mv_xor_issue_pending(struct dma_chan *chan);
#define to_mv_xor_chan(chan) \
- container_of(chan, struct mv_xor_chan, common)
-
-#define to_mv_xor_device(dev) \
- container_of(dev, struct mv_xor_device, common)
+ container_of(chan, struct mv_xor_chan, dmachan)
#define to_mv_xor_slot(tx) \
container_of(tx, struct mv_xor_desc_slot, async_tx)
+#define mv_chan_to_devp(chan) \
+ ((chan)->dmadev.dev)
+
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
@@ -51,20 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
hw_desc->desc_command = (1 << 31);
}
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_dest_addr;
-}
-
-static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
- int src_idx)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_src_addr[src_idx];
-}
-
-
static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@@ -86,11 +75,6 @@ static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
hw_desc->phy_next_desc = 0;
}
-static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
-{
- desc->value = val;
-}
-
static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
dma_addr_t addr)
{
@@ -109,48 +93,32 @@ static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
int index, dma_addr_t addr)
{
struct mv_xor_desc *hw_desc = desc->hw_desc;
- hw_desc->phy_src_addr[index] = addr;
+ hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
if (desc->type == DMA_XOR)
hw_desc->desc_command |= (1 << index);
}
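The mv_phy_src_idx() fixup introduced above deserves a note. The macro comes from the driver's header; a sketch of its likely definition, shown here only for context: with XOR_DESCRIPTOR_SWAP enabled the engine byte-swaps each 64-bit descriptor word it fetches, which exchanges adjacent 32-bit source-address slots, so big-endian kernels must store address i in slot i ^ 1.

	/* illustration of the driver-header macro, not part of this diff */
	#ifdef __LITTLE_ENDIAN
	#define mv_phy_src_idx(src_idx) (src_idx)
	#else
	#define mv_phy_src_idx(src_idx) (src_idx ^ 1)	/* 64-bit swap pairs 32-bit slots */
	#endif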
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
- return __raw_readl(XOR_CURR_DESC(chan));
+ return readl_relaxed(XOR_CURR_DESC(chan));
}
static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
u32 next_desc_addr)
{
- __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
-}
-
-static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
-{
- __raw_writel(desc_addr, XOR_DEST_POINTER(chan));
-}
-
-static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
-{
- __raw_writel(block_size, XOR_BLOCK_SIZE(chan));
-}
-
-static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
-{
- __raw_writel(value, XOR_INIT_VALUE_LOW(chan));
- __raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
+ writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}
static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
- u32 val = __raw_readl(XOR_INTR_MASK(chan));
+ u32 val = readl_relaxed(XOR_INTR_MASK(chan));
val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
- __raw_writel(val, XOR_INTR_MASK(chan));
+ writel_relaxed(val, XOR_INTR_MASK(chan));
}
static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
- u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+ u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
return intr_cause;
}
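A note on the accessor conversion in this and the following hunks: readl_relaxed()/writel_relaxed() perform the little-endian byte swap that these device registers need on big-endian kernels, while skipping the barrier that plain readl()/writel() would add on every access. A minimal sketch of the difference, against a hypothetical register:

	#include <linux/io.h>

	static u32 example_read_cause(void __iomem *reg)
	{
		/*
		 * __raw_readl(reg) would return raw bus bytes: wrong on BE
		 * kernels; readl(reg) would add a barrier we don't need here.
		 */
		return readl_relaxed(reg) & 0xFFFF;	/* swapped, no barrier */
	}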
@@ -166,14 +134,14 @@ static int mv_is_err_intr(u32 intr_cause)
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
u32 val = ~(1 << (chan->idx * 16));
- dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
u32 val = 0xFFFF0000 >> (chan->idx * 16);
- __raw_writel(val, XOR_INTR_CAUSE(chan));
+ writel_relaxed(val, XOR_INTR_CAUSE(chan));
}
static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@ -183,8 +151,6 @@ static int mv_can_chain(struct mv_xor_desc_slot *desc)
if (chain_old_tail->type != desc->type)
return 0;
- if (desc->type == DMA_MEMSET)
- return 0;
return 1;
}
@@ -193,7 +159,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
enum dma_transaction_type type)
{
u32 op_mode;
- u32 config = __raw_readl(XOR_CONFIG(chan));
+ u32 config = readl_relaxed(XOR_CONFIG(chan));
switch (type) {
case DMA_XOR:
@@ -202,36 +168,38 @@ static void mv_set_mode(struct mv_xor_chan *chan,
case DMA_MEMCPY:
op_mode = XOR_OPERATION_MODE_MEMCPY;
break;
- case DMA_MEMSET:
- op_mode = XOR_OPERATION_MODE_MEMSET;
- break;
default:
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error: unsupported operation %d.\n",
- type);
+ dev_err(mv_chan_to_devp(chan),
+ "error: unsupported operation %d\n",
+ type);
BUG();
return;
}
config &= ~0x7;
config |= op_mode;
- __raw_writel(config, XOR_CONFIG(chan));
+
+#if defined(__BIG_ENDIAN)
+ config |= XOR_DESCRIPTOR_SWAP;
+#else
+ config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
+
+ writel_relaxed(config, XOR_CONFIG(chan));
chan->current_type = type;
}
static void mv_chan_activate(struct mv_xor_chan *chan)
{
- u32 activation;
+ dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
- dev_dbg(chan->device->common.dev, " activate chan.\n");
- activation = __raw_readl(XOR_ACTIVATION(chan));
- activation |= 0x1;
- __raw_writel(activation, XOR_ACTIVATION(chan));
+ /* writel ensures all descriptors are flushed before activation */
+ writel(BIT(0), XOR_ACTIVATION(chan));
}
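The switch to a plain writel() here is deliberate: unlike writel_relaxed(), writel() issues a write barrier before the store, so descriptor writes sitting in coherent memory are visible to the engine before the activation bit lands. A minimal sketch of the pattern, with hypothetical names:

	#include <linux/io.h>
	#include <linux/bitops.h>

	static void example_kick_engine(void __iomem *activation_reg)
	{
		/* writel() = wmb() + store: descriptors first, kick last */
		writel(BIT(0), activation_reg);
	}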
static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
- u32 state = __raw_readl(XOR_ACTIVATION(chan));
+ u32 state = readl_relaxed(XOR_ACTIVATION(chan));
state = (state >> 4) & 0x3;
@@ -251,7 +219,7 @@ static int mv_chan_xor_slot_count(size_t len, int src_cnt)
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *slot)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
__func__, __LINE__, slot);
slot->slots_per_op = 0;
@@ -266,25 +234,16 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
struct mv_xor_desc_slot *sw_desc)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
__func__, __LINE__, sw_desc);
if (sw_desc->type != mv_chan->current_type)
mv_set_mode(mv_chan, sw_desc->type);
- if (sw_desc->type == DMA_MEMSET) {
- /* for memset requests we need to program the engine, no
- * descriptors used.
- */
- struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
- mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
- mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
- mv_chan_set_value(mv_chan, sw_desc->value);
- } else {
- /* set the hardware chain */
- mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
- }
+ /* set the hardware chain */
+ mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+
mv_chan->pending += sw_desc->slot_cnt;
- mv_xor_issue_pending(&mv_chan->common);
+ mv_xor_issue_pending(&mv_chan->dmachan);
}
static dma_cookie_t
@@ -303,43 +262,9 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
desc->async_tx.callback(
desc->async_tx.callback_param);
- /* unmap dma addresses
- * (unmap_single vs unmap_page?)
- */
- if (desc->group_head && desc->unmap_len) {
- struct mv_xor_desc_slot *unmap = desc->group_head;
- struct device *dev =
- &mv_chan->device->pdev->dev;
- u32 len = unmap->unmap_len;
- enum dma_ctrl_flags flags = desc->async_tx.flags;
- u32 src_cnt;
- dma_addr_t addr;
- dma_addr_t dest;
-
- src_cnt = unmap->unmap_src_cnt;
- dest = mv_desc_get_dest_addr(unmap);
- if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- enum dma_data_direction dir;
-
- if (src_cnt > 1) /* is xor ? */
- dir = DMA_BIDIRECTIONAL;
- else
- dir = DMA_FROM_DEVICE;
- dma_unmap_page(dev, dest, len, dir);
- }
-
- if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- while (src_cnt--) {
- addr = mv_desc_get_src_addr(unmap,
- src_cnt);
- if (addr == dest)
- continue;
- dma_unmap_page(dev, addr, len,
- DMA_TO_DEVICE);
- }
- }
+ dma_descriptor_unmap(&desc->async_tx);
+ if (desc->group_head)
desc->group_head = NULL;
- }
}
/* run dependent operations */
@@ -353,7 +278,7 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
struct mv_xor_desc_slot *iter, *_iter;
- dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
completed_node) {
@@ -369,7 +294,7 @@ static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
struct mv_xor_chan *mv_chan)
{
- dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
__func__, __LINE__, desc, desc->async_tx.flags);
list_del(&desc->chain_node);
/* the client is allowed to attach dependent operations
@@ -393,8 +318,8 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
u32 current_desc = mv_chan_get_current_desc(mv_chan);
int seen_current = 0;
- dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
- dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
+ dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
mv_xor_clean_completed_slots(mv_chan);
/* free completed slots from the chain starting with
@@ -438,7 +363,7 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
}
if (cookie > 0)
- mv_chan->common.completed_cookie = cookie;
+ mv_chan->dmachan.completed_cookie = cookie;
}
static void
@@ -547,7 +472,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_t cookie;
int new_hw_chain = 1;
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p: async_tx %p\n",
__func__, sw_desc, &sw_desc->async_tx);
@@ -570,8 +495,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
- old_chain_tail->async_tx.phys);
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+ &old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -600,13 +525,12 @@ submit_done:
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
- char *hw_desc;
+ void *virt_desc;
+ dma_addr_t dma_desc;
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
- struct mv_xor_platform_data *plat_data =
- mv_chan->device->pdev->dev.platform_data;
- int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;
+ int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;
/* Allocate descriptor slots */
idx = mv_chan->slots_allocated;
@@ -617,17 +541,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
- slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ virt_desc = mv_chan->dma_desc_pool_virt;
+ slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->device->dma_desc_pool;
- slot->async_tx.phys =
- (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ dma_desc = mv_chan->dma_desc_pool;
+ slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
spin_lock_bh(&mv_chan->lock);
@@ -641,7 +564,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
struct mv_xor_desc_slot,
slot_node);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"allocated %d descriptor slots last_used: %p\n",
mv_chan->slots_allocated, mv_chan->last_used);
@@ -656,9 +579,9 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct mv_xor_desc_slot *sw_desc, *grp_start;
int slot_cnt;
- dev_dbg(mv_chan->device->common.dev,
- "%s dest: %x src %x len: %u flags: %ld\n",
- __func__, dest, src, len, flags);
+ dev_dbg(mv_chan_to_devp(mv_chan),
+ "%s dest: %pad src %pad len: %u flags: %ld\n",
+ __func__, &dest, &src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -680,51 +603,14 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p\n",
- __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+ __func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
- size_t len, unsigned long flags)
-{
- struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
- struct mv_xor_desc_slot *sw_desc, *grp_start;
- int slot_cnt;
-
- dev_dbg(mv_chan->device->common.dev,
- "%s dest: %x len: %u flags: %ld\n",
- __func__, dest, len, flags);
- if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
- return NULL;
-
- BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
-
- spin_lock_bh(&mv_chan->lock);
- slot_cnt = mv_chan_memset_slot_count(len);
- sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
- if (sw_desc) {
- sw_desc->type = DMA_MEMSET;
- sw_desc->async_tx.flags = flags;
- grp_start = sw_desc->group_head;
- mv_desc_init(grp_start, flags);
- mv_desc_set_byte_count(grp_start, len);
- mv_desc_set_dest_addr(sw_desc->group_head, dest);
- mv_desc_set_block_fill_val(grp_start, value);
- sw_desc->unmap_src_cnt = 1;
- sw_desc->unmap_len = len;
- }
- spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
- "%s sw_desc %p async_tx %p \n",
- __func__, sw_desc, &sw_desc->async_tx);
- return sw_desc ? &sw_desc->async_tx : NULL;
-}
-
-static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags)
{
@@ -737,9 +623,9 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
- dev_dbg(mv_chan->device->common.dev,
- "%s src_cnt: %d len: dest %x %u flags: %ld\n",
- __func__, src_cnt, len, dest, flags);
+ dev_dbg(mv_chan_to_devp(mv_chan),
+ "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
@@ -758,7 +644,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
}
spin_unlock_bh(&mv_chan->lock);
- dev_dbg(mv_chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(mv_chan),
"%s sw_desc %p async_tx %p \n",
__func__, sw_desc, &sw_desc->async_tx);
return sw_desc ? &sw_desc->async_tx : NULL;
@@ -791,12 +677,12 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
}
mv_chan->last_used = NULL;
- dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
+ dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
__func__, mv_chan->slots_allocated);
spin_unlock_bh(&mv_chan->lock);
if (in_use_descs)
- dev_err(mv_chan->device->common.dev,
+ dev_err(mv_chan_to_devp(mv_chan),
"freeing %d in use descriptors!\n", in_use_descs);
}
@@ -814,7 +700,7 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
enum dma_status ret;
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS) {
+ if (ret == DMA_COMPLETE) {
mv_xor_clean_completed_slots(mv_chan);
return ret;
}
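DMA_SUCCESS was renamed DMA_COMPLETE treewide; clients poll completion the same way as before. A hedged sketch of the caller-side idiom, assuming chan and cookie come from an earlier dmaengine_submit():

	#include <linux/dmaengine.h>

	static bool example_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
	{
		return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
		       DMA_COMPLETE;
	}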
@@ -827,43 +713,37 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
u32 val;
- val = __raw_readl(XOR_CONFIG(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "config 0x%08x.\n", val);
+ val = readl_relaxed(XOR_CONFIG(chan));
+ dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
- val = __raw_readl(XOR_ACTIVATION(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "activation 0x%08x.\n", val);
+ val = readl_relaxed(XOR_ACTIVATION(chan));
+ dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_CAUSE(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "intr cause 0x%08x.\n", val);
+ val = readl_relaxed(XOR_INTR_CAUSE(chan));
+ dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
- val = __raw_readl(XOR_INTR_MASK(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "intr mask 0x%08x.\n", val);
+ val = readl_relaxed(XOR_INTR_MASK(chan));
+ dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_CAUSE(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error cause 0x%08x.\n", val);
+ val = readl_relaxed(XOR_ERROR_CAUSE(chan));
+ dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
- val = __raw_readl(XOR_ERROR_ADDR(chan));
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error addr 0x%08x.\n", val);
+ val = readl_relaxed(XOR_ERROR_ADDR(chan));
+ dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
u32 intr_cause)
{
if (intr_cause & (1 << 4)) {
- dev_dbg(chan->device->common.dev,
+ dev_dbg(mv_chan_to_devp(chan),
"ignore this error\n");
return;
}
- dev_printk(KERN_ERR, chan->device->common.dev,
- "error on chan %d. intr cause 0x%08x.\n",
- chan->idx, intr_cause);
+ dev_err(mv_chan_to_devp(chan),
+ "error on chan %d. intr cause 0x%08x\n",
+ chan->idx, intr_cause);
mv_dump_xor_regs(chan);
BUG();
@@ -874,7 +754,7 @@ static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
struct mv_xor_chan *chan = data;
u32 intr_cause = mv_chan_get_intr_cause(chan);
- dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);
+ dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);
if (mv_is_err_intr(intr_cause))
mv_xor_err_interrupt_handler(chan, intr_cause);
@@ -899,9 +779,8 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
/*
* Perform a transaction to verify the HW works.
*/
-#define MV_XOR_TEST_SIZE 2000
-static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
+static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
int i;
void *src, *dest;
@@ -909,64 +788,73 @@ static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
struct dma_chan *dma_chan;
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
+ struct dmaengine_unmap_data *unmap;
int err = 0;
- struct mv_xor_chan *mv_chan;
- src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!src)
return -ENOMEM;
- dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!dest) {
kfree(src);
return -ENOMEM;
}
/* Fill in src buffer */
- for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+ for (i = 0; i < PAGE_SIZE; i++)
((u8 *) src)[i] = (u8)i;
- /* Start copy, using first DMA channel */
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
+ dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
- dest_dma = dma_map_single(dma_chan->device->dev, dest,
- MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+ if (!unmap) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
+ src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ unmap->to_cnt = 1;
+ unmap->addr[0] = src_dma;
- src_dma = dma_map_single(dma_chan->device->dev, src,
- MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+ dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ unmap->from_cnt = 1;
+ unmap->addr[1] = dest_dma;
+
+ unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
- MV_XOR_TEST_SIZE, 0);
+ PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
msleep(1);
if (mv_xor_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test copy timed out, disabling\n");
+ DMA_COMPLETE) {
+ dev_err(dma_chan->device->dev,
+ "Self-test copy timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
- mv_chan = to_mv_xor_chan(dma_chan);
- dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
- MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
- if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test copy failed compare, disabling\n");
+ dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (memcmp(src, dest, PAGE_SIZE)) {
+ dev_err(dma_chan->device->dev,
+ "Self-test copy failed compare, disabling\n");
err = -ENODEV;
goto free_resources;
}
free_resources:
+ dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
kfree(src);
@@ -975,8 +863,8 @@ out:
}
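The self-test above now routes its mappings through the generic unmap machinery instead of open-coded dma_unmap_page() calls. A condensed sketch of that pattern, with error handling trimmed and illustrative names:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	static int example_unmap_pattern(struct device *dev, struct page *src,
					 struct page *dst)
	{
		struct dmaengine_unmap_data *unmap;

		unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
		if (!unmap)
			return -ENOMEM;

		unmap->addr[0] = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
		unmap->to_cnt = 1;
		unmap->addr[1] = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		unmap->from_cnt = 1;
		unmap->len = PAGE_SIZE;

		/* ... prep, submit and wait for the transfer here ... */

		dmaengine_unmap_put(unmap);	/* drops the ref, unmaps both pages */
		return 0;
	}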
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
-static int __devinit
-mv_xor_xor_self_test(struct mv_xor_device *device)
+static int
+mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
int i, src_idx;
struct page *dest;
@@ -984,14 +872,15 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
+ struct dmaengine_unmap_data *unmap;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
- struct mv_xor_chan *mv_chan;
+ int src_count = MV_XOR_NUM_SRC_TEST;
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ for (src_idx = 0; src_idx < src_count; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) {
while (src_idx--)
@@ -1008,13 +897,13 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
}
/* Fill in src buffers */
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ for (src_idx = 0; src_idx < src_count; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx);
}
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+ for (src_idx = 0; src_idx < src_count; src_idx++)
cmp_byte ^= (u8) (1 << src_idx);
cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -1022,24 +911,35 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
memset(page_address(dest), 0, PAGE_SIZE);
- dma_chan = container_of(device->common.channels.next,
- struct dma_chan,
- device_node);
+ dma_chan = &mv_chan->dmachan;
if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
err = -ENODEV;
goto out;
}
+ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+ GFP_KERNEL);
+ if (!unmap) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
/* test xor */
- dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ for (i = 0; i < src_count; i++) {
+ unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_srcs[i] = unmap->addr[i];
+ unmap->to_cnt++;
+ }
- for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
- dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
+ unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ dest_dma = unmap->addr[src_count];
+ unmap->from_cnt = 1;
+ unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
- MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+ src_count, PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
@@ -1047,94 +947,95 @@ mv_xor_xor_self_test(struct mv_xor_device *device)
msleep(8);
if (mv_xor_status(dma_chan, cookie, NULL) !=
- DMA_SUCCESS) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test xor timed out, disabling\n");
+ DMA_COMPLETE) {
+ dev_err(dma_chan->device->dev,
+ "Self-test xor timed out, disabling\n");
err = -ENODEV;
goto free_resources;
}
- mv_chan = to_mv_xor_chan(dma_chan);
- dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
+ dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
u32 *ptr = page_address(dest);
if (ptr[i] != cmp_word) {
- dev_printk(KERN_ERR, dma_chan->device->dev,
- "Self-test xor failed compare, disabling."
- " index %d, data %x, expected %x\n", i,
- ptr[i], cmp_word);
+ dev_err(dma_chan->device->dev,
+ "Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
+ i, ptr[i], cmp_word);
err = -ENODEV;
goto free_resources;
}
}
free_resources:
+ dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
- src_idx = MV_XOR_NUM_SRC_TEST;
+ src_idx = src_count;
while (src_idx--)
__free_page(xor_srcs[src_idx]);
__free_page(dest);
return err;
}
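For reference, the expected pattern the XOR self-test compares against works out as follows: each of the four sources is filled with 1 << idx, so every result byte is 0x01 ^ 0x02 ^ 0x04 ^ 0x08 = 0x0f, and cmp_word replicates that byte across a u32, giving 0x0f0f0f0f. A worked sketch (not driver code):

	#include <linux/types.h>

	static u32 example_expected_word(int src_count)
	{
		u8 b = 0;
		int i;

		for (i = 0; i < src_count; i++)
			b ^= 1 << i;		/* 0x0f when src_count == 4 */
		return (b << 24) | (b << 16) | (b << 8) | b;
	}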
-static int __devexit mv_xor_remove(struct platform_device *dev)
+/* This driver does not implement any of the optional DMA operations. */
+static int
+mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ return -ENOSYS;
+}
+
+static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
- struct mv_xor_device *device = platform_get_drvdata(dev);
struct dma_chan *chan, *_chan;
- struct mv_xor_chan *mv_chan;
- struct mv_xor_platform_data *plat_data = dev->dev.platform_data;
+ struct device *dev = mv_chan->dmadev.dev;
- dma_async_device_unregister(&device->common);
+ dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(&dev->dev, plat_data->pool_size,
- device->dma_desc_pool_virt, device->dma_desc_pool);
+ dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
- list_for_each_entry_safe(chan, _chan, &device->common.channels,
- device_node) {
- mv_chan = to_mv_xor_chan(chan);
+ list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
+ device_node) {
list_del(&chan->device_node);
}
+ free_irq(mv_chan->irq, mv_chan);
+
return 0;
}
-static int __devinit mv_xor_probe(struct platform_device *pdev)
+static struct mv_xor_chan *
+mv_xor_channel_add(struct mv_xor_device *xordev,
+ struct platform_device *pdev,
+ int idx, dma_cap_mask_t cap_mask, int irq)
{
int ret = 0;
- int irq;
- struct mv_xor_device *adev;
struct mv_xor_chan *mv_chan;
struct dma_device *dma_dev;
- struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;
+ mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
+ if (!mv_chan)
+ return ERR_PTR(-ENOMEM);
- adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
- if (!adev)
- return -ENOMEM;
+ mv_chan->idx = idx;
+ mv_chan->irq = irq;
- dma_dev = &adev->common;
+ dma_dev = &mv_chan->dmadev;
/* allocate coherent memory for hardware descriptors
* note: writecombine gives slightly better performance, but
* requires that we explicitly flush the writes
*/
- adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
- plat_data->pool_size,
- &adev->dma_desc_pool,
- GFP_KERNEL);
- if (!adev->dma_desc_pool_virt)
- return -ENOMEM;
-
- adev->id = plat_data->hw_id;
+ mv_chan->dma_desc_pool_virt =
+ dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
+ &mv_chan->dma_desc_pool, GFP_KERNEL);
+ if (!mv_chan->dma_desc_pool_virt)
+ return ERR_PTR(-ENOMEM);
/* discover transaction capabilities from the platform data */
- dma_dev->cap_mask = plat_data->cap_mask;
- adev->pdev = pdev;
- platform_set_drvdata(pdev, adev);
-
- adev->shared = platform_get_drvdata(plat_data->shared);
+ dma_dev->cap_mask = cap_mask;
INIT_LIST_HEAD(&dma_dev->channels);
@@ -1143,45 +1044,27 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
dma_dev->device_tx_status = mv_xor_status;
dma_dev->device_issue_pending = mv_xor_issue_pending;
+ dma_dev->device_control = mv_xor_control;
dma_dev->dev = &pdev->dev;
/* set prep routines based on capability */
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
- if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
- dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
dma_dev->max_xor = 8;
dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
}
- mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
- if (!mv_chan) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
- mv_chan->device = adev;
- mv_chan->idx = plat_data->hw_id;
- mv_chan->mmr_base = adev->shared->xor_base;
-
- if (!mv_chan->mmr_base) {
- ret = -ENOMEM;
- goto err_free_dma;
- }
+ mv_chan->mmr_base = xordev->xor_base;
+ mv_chan->mmr_high_base = xordev->xor_high_base;
tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
mv_chan);
/* clear errors before enabling interrupts */
mv_xor_device_clear_err_status(mv_chan);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_free_dma;
- }
- ret = devm_request_irq(&pdev->dev, irq,
- mv_xor_interrupt_handler,
- 0, dev_name(&pdev->dev), mv_chan);
+ ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
+ 0, dev_name(&pdev->dev), mv_chan);
if (ret)
goto err_free_dma;
@@ -1193,47 +1076,46 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mv_chan->chain);
INIT_LIST_HEAD(&mv_chan->completed_slots);
INIT_LIST_HEAD(&mv_chan->all_slots);
- mv_chan->common.device = dma_dev;
- dma_cookie_init(&mv_chan->common);
+ mv_chan->dmachan.device = dma_dev;
+ dma_cookie_init(&mv_chan->dmachan);
- list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);
+ list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
- ret = mv_xor_memcpy_self_test(adev);
+ ret = mv_xor_memcpy_self_test(mv_chan);
dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
if (ret)
- goto err_free_dma;
+ goto err_free_irq;
}
if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
- ret = mv_xor_xor_self_test(adev);
+ ret = mv_xor_xor_self_test(mv_chan);
dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
if (ret)
- goto err_free_dma;
+ goto err_free_irq;
}
- dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
- "( %s%s%s%s)\n",
- dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
- dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
- dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+ dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
+ dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
+ dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
+ dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
dma_async_device_register(dma_dev);
- goto out;
+ return mv_chan;
+err_free_irq:
+ free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
- adev->dma_desc_pool_virt, adev->dma_desc_pool);
- out:
- return ret;
+ dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
+ return ERR_PTR(ret);
}
static void
-mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
+mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
const struct mbus_dram_target_info *dram)
{
- void __iomem *base = msp->xor_base;
+ void __iomem *base = xordev->xor_high_base;
u32 win_enable = 0;
int i;
@@ -1258,99 +1140,179 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
writel(win_enable, base + WINDOW_BAR_ENABLE(0));
writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(0));
+ writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}
-static struct platform_driver mv_xor_driver = {
- .probe = mv_xor_probe,
- .remove = __devexit_p(mv_xor_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = MV_XOR_NAME,
- },
-};
-
-static int mv_xor_shared_probe(struct platform_device *pdev)
+static int mv_xor_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram;
- struct mv_xor_shared_private *msp;
+ struct mv_xor_device *xordev;
+ struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct resource *res;
+ int i, ret;
- dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");
+ dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
- msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
- if (!msp)
+ xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
+ if (!xordev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
- msp->xor_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!msp->xor_base)
+ xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xordev->xor_base)
return -EBUSY;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res)
return -ENODEV;
- msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!msp->xor_high_base)
+ xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!xordev->xor_high_base)
return -EBUSY;
- platform_set_drvdata(pdev, msp);
+ platform_set_drvdata(pdev, xordev);
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
dram = mv_mbus_dram_info();
if (dram)
- mv_xor_conf_mbus_windows(msp, dram);
+ mv_xor_conf_mbus_windows(xordev, dram);
/* Not all platforms can gate the clock, so it is not
* an error if the clock does not exist.
*/
- msp->clk = clk_get(&pdev->dev, NULL);
- if (!IS_ERR(msp->clk))
- clk_prepare_enable(msp->clk);
+ xordev->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(xordev->clk))
+ clk_prepare_enable(xordev->clk);
+
+ if (pdev->dev.of_node) {
+ struct device_node *np;
+ int i = 0;
+
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ struct mv_xor_chan *chan;
+ dma_cap_mask_t cap_mask;
+ int irq;
+
+ dma_cap_zero(cap_mask);
+ if (of_property_read_bool(np, "dmacap,memcpy"))
+ dma_cap_set(DMA_MEMCPY, cap_mask);
+ if (of_property_read_bool(np, "dmacap,xor"))
+ dma_cap_set(DMA_XOR, cap_mask);
+ if (of_property_read_bool(np, "dmacap,interrupt"))
+ dma_cap_set(DMA_INTERRUPT, cap_mask);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -ENODEV;
+ goto err_channel_add;
+ }
+
+ chan = mv_xor_channel_add(xordev, pdev, i,
+ cap_mask, irq);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
+ irq_dispose_mapping(irq);
+ goto err_channel_add;
+ }
+
+ xordev->channels[i] = chan;
+ i++;
+ }
+ } else if (pdata && pdata->channels) {
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ struct mv_xor_channel_data *cd;
+ struct mv_xor_chan *chan;
+ int irq;
+
+ cd = &pdata->channels[i];
+ if (!cd) {
+ ret = -ENODEV;
+ goto err_channel_add;
+ }
+
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0) {
+ ret = irq;
+ goto err_channel_add;
+ }
+
+ chan = mv_xor_channel_add(xordev, pdev, i,
+ cd->cap_mask, irq);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
+ goto err_channel_add;
+ }
+
+ xordev->channels[i] = chan;
+ }
+ }
return 0;
+
+err_channel_add:
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
+ if (xordev->channels[i]) {
+ mv_xor_channel_remove(xordev->channels[i]);
+ if (pdev->dev.of_node)
+ irq_dispose_mapping(xordev->channels[i]->irq);
+ }
+
+ if (!IS_ERR(xordev->clk)) {
+ clk_disable_unprepare(xordev->clk);
+ clk_put(xordev->clk);
+ }
+
+ return ret;
}
-static int mv_xor_shared_remove(struct platform_device *pdev)
+static int mv_xor_remove(struct platform_device *pdev)
{
- struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
+ struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+ if (xordev->channels[i])
+ mv_xor_channel_remove(xordev->channels[i]);
+ }
- if (!IS_ERR(msp->clk)) {
- clk_disable_unprepare(msp->clk);
- clk_put(msp->clk);
+ if (!IS_ERR(xordev->clk)) {
+ clk_disable_unprepare(xordev->clk);
+ clk_put(xordev->clk);
}
return 0;
}
-static struct platform_driver mv_xor_shared_driver = {
- .probe = mv_xor_shared_probe,
- .remove = mv_xor_shared_remove,
+#ifdef CONFIG_OF
+static struct of_device_id mv_xor_dt_ids[] = {
+ { .compatible = "marvell,orion-xor", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
+#endif
+
+static struct platform_driver mv_xor_driver = {
+ .probe = mv_xor_probe,
+ .remove = mv_xor_remove,
.driver = {
- .owner = THIS_MODULE,
- .name = MV_XOR_SHARED_NAME,
+ .owner = THIS_MODULE,
+ .name = MV_XOR_NAME,
+ .of_match_table = of_match_ptr(mv_xor_dt_ids),
},
};
static int __init mv_xor_init(void)
{
- int rc;
-
- rc = platform_driver_register(&mv_xor_shared_driver);
- if (!rc) {
- rc = platform_driver_register(&mv_xor_driver);
- if (rc)
- platform_driver_unregister(&mv_xor_shared_driver);
- }
- return rc;
+ return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);
@@ -1359,7 +1321,6 @@ module_init(mv_xor_init);
static void __exit mv_xor_exit(void)
{
platform_driver_unregister(&mv_xor_driver);
- platform_driver_unregister(&mv_xor_shared_driver);
return;
}
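
Finally, a hedged sketch of how a consumer would pick up one of the channels this driver registers; the function name is illustrative and error handling is elided:

	#include <linux/dmaengine.h>

	static struct dma_chan *example_request_xor_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_XOR, mask);
		/* NULL filter: accept any registered channel with XOR capability */
		return dma_request_channel(mask, NULL, NULL);
	}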