Diffstat (limited to 'drivers/dma/mpc512x_dma.c'):

 drivers/dma/mpc512x_dma.c | 540 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 417 insertions(+), 123 deletions(-)
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4e9cbf30059..2ad43738ac8 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1,6 +1,8 @@
 /*
  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
  * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -28,8 +30,18 @@
  */
 
 /*
- * This is initial version of MPC5121 DMA driver. Only memory to memory
- * transfers are supported (tested using dmatest module).
+ * MPC512x and MPC8308 DMA driver. It supports
+ * memory to memory data transfers (tested using dmatest module) and
+ * data transfers between memory and peripheral I/O memory
+ * by means of slave scatter/gather with these limitations:
+ *  - chunked transfers (described by s/g lists with more than one item)
+ *     are refused as long as proper support for scatter/gather is missing;
+ *  - transfers on MPC8308 always start from software as this SoC appears
+ *     not to have external request lines for peripheral flow control;
+ *  - only peripheral devices with 4-byte FIFO access register are supported;
+ *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
+ *     source and destination addresses must be 4-byte aligned
+ *     and transfer size must be aligned on (4 * maxburst) boundary;
  */
 
 #include <linux/module.h>
@@ -38,18 +50,30 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/random.h>
 
+#include "dmaengine.h"
+
 /* Number of DMA Transfer descriptors allocated per channel */
 #define MPC_DMA_DESCRIPTORS	64
 
 /* Macro definitions */
-#define MPC_DMA_CHANNELS	64
 #define MPC_DMA_TCD_OFFSET	0x1000
 
+/*
+ * Maximum channel counts for individual hardware variants
+ * and the maximum channel count over all supported controllers,
+ * used for data structure size
+ */
+#define MPC8308_DMACHAN_MAX	16
+#define MPC512x_DMACHAN_MAX	64
+#define MPC_DMA_CHANNELS	64
+
 /* Arbitration mode of group and channel */
 #define MPC_DMA_DMACR_EDCG	(1 << 31)
 #define MPC_DMA_DMACR_ERGA	(1 << 3)
@@ -70,6 +94,8 @@
 #define MPC_DMA_DMAES_SBE	(1 << 1)
 #define MPC_DMA_DMAES_DBE	(1 << 0)
 
+#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
+
 #define MPC_DMA_TSIZE_1		0x00
 #define MPC_DMA_TSIZE_2		0x01
 #define MPC_DMA_TSIZE_4		0x02
@@ -104,7 +130,10 @@ struct __attribute__ ((__packed__)) mpc_dma_regs {
 	/* 0x30 */
 	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
 	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
-	u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
+	union {
+		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
+		u32 dmagpor;	/* (General purpose register on MPC8308) */
+	};
 	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
 	/* 0x40 ~ 0xff */
 	u32 reserve0[48];	/* Reserved */
@@ -171,6 +200,7 @@ struct mpc_dma_desc {
 	dma_addr_t			tcd_paddr;
 	int				error;
 	struct list_head		node;
+	int				will_access_peripheral;
 };
 
 struct mpc_dma_chan {
@@ -182,7 +212,12 @@ struct mpc_dma_chan {
 	struct list_head		completed;
 	struct mpc_dma_tcd		*tcd;
 	dma_addr_t			tcd_paddr;
-	dma_cookie_t			completed_cookie;
+
+	/* Settings for access to peripheral FIFO */
+	dma_addr_t			src_per_paddr;
+	u32				src_tcd_nunits;
+	dma_addr_t			dst_per_paddr;
+	u32				dst_tcd_nunits;
 
 	/* Lock for this structure */
 	spinlock_t			lock;
@@ -195,7 +230,9 @@ struct mpc_dma {
 	struct mpc_dma_regs __iomem	*regs;
 	struct mpc_dma_tcd __iomem	*tcd;
 	int				irq;
+	int				irq2;
 	uint				error_status;
+	int				is_mpc8308;
 
 	/* Lock for error_status field in this structure */
 	spinlock_t			error_status_lock;
@@ -232,8 +269,23 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 	struct mpc_dma_desc *mdesc;
 	int cid = mchan->chan.chan_id;
 
-	/* Move all queued descriptors to active list */
-	list_splice_tail_init(&mchan->queued, &mchan->active);
+	while (!list_empty(&mchan->queued)) {
+		mdesc = list_first_entry(&mchan->queued,
+						struct mpc_dma_desc, node);
+		/*
+		 * Grab either several mem-to-mem transfer descriptors
+		 * or one peripheral transfer descriptor,
+		 * don't mix mem-to-mem and peripheral transfer descriptors
+		 * within the same 'active' list.
+		 */
+		if (mdesc->will_access_peripheral) {
+			if (list_empty(&mchan->active))
+				list_move_tail(&mdesc->node, &mchan->active);
+			break;
+		} else {
+			list_move_tail(&mdesc->node, &mchan->active);
+		}
+	}
 
 	/* Chain descriptors into one transaction */
 	list_for_each_entry(mdesc, &mchan->active, node) {
@@ -252,12 +304,24 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 		prev = mdesc;
 	}
 
-	prev->tcd->start = 0;
 	prev->tcd->int_maj = 1;
 
 	/* Send first descriptor in chain into hardware */
 	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
-	out_8(&mdma->regs->dmassrt, cid);
+
+	if (first != prev)
+		mdma->tcd[cid].e_sg = 1;
+
+	if (mdma->is_mpc8308) {
+		/* MPC8308, no request lines, software initiated start */
+		out_8(&mdma->regs->dmassrt, cid);
+	} else if (first->will_access_peripheral) {
+		/* Peripherals involved, start by external request signal */
+		out_8(&mdma->regs->dmaserq, cid);
+	} else {
+		/* Memory to memory transfer, software initiated start */
+		out_8(&mdma->regs->dmassrt, cid);
+	}
 }
 
 /* Handle interrupt on one half of DMA controller (32 channels) */
@@ -274,6 +338,9 @@ static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
 
 		spin_lock(&mchan->lock);
 
+		out_8(&mdma->regs->dmacint, ch + off);
+		out_8(&mdma->regs->dmacerr, ch + off);
+
 		/* Check error status */
 		if (es & (1 << ch))
 			list_for_each_entry(mdesc, &mchan->active, node)
@@ -302,36 +369,68 @@ static irqreturn_t mpc_dma_irq(int irq, void *data)
 	spin_unlock(&mdma->error_status_lock);
 
 	/* Handle interrupt on each channel */
-	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+	if (mdma->dma.chancnt > 32) {
+		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
 					in_be32(&mdma->regs->dmaerrh), 32);
+	}
 	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
 					in_be32(&mdma->regs->dmaerrl), 0);
 
-	/* Ack interrupt on all channels */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
 	/* Schedule tasklet */
 	tasklet_schedule(&mdma->tasklet);
 
 	return IRQ_HANDLED;
 }
 
-/* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+/* process completed descriptors */
+static void mpc_dma_process_completed(struct mpc_dma *mdma)
 {
-	struct mpc_dma *mdma = (void *)data;
 	dma_cookie_t last_cookie = 0;
 	struct mpc_dma_chan *mchan;
 	struct mpc_dma_desc *mdesc;
 	struct dma_async_tx_descriptor *desc;
 	unsigned long flags;
 	LIST_HEAD(list);
-	uint es;
 	int i;
 
+	for (i = 0; i < mdma->dma.chancnt; i++) {
+		mchan = &mdma->channels[i];
+
+		/* Get all completed descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		if (!list_empty(&mchan->completed))
+			list_splice_tail_init(&mchan->completed, &list);
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		if (list_empty(&list))
+			continue;
+
+		/* Execute callbacks and run dependencies */
+		list_for_each_entry(mdesc, &list, node) {
+			desc = &mdesc->desc;
+
+			if (desc->callback)
+				desc->callback(desc->callback_param);
+
+			last_cookie = desc->cookie;
+			dma_run_dependencies(desc);
+		}
+
+		/* Free descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		list_splice_tail_init(&list, &mchan->free);
+		mchan->chan.completed_cookie = last_cookie;
+		spin_unlock_irqrestore(&mchan->lock, flags);
+	}
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+	struct mpc_dma *mdma = (void *)data;
+	unsigned long flags;
+	uint es;
+
 	spin_lock_irqsave(&mdma->error_status_lock, flags);
 	es = mdma->error_status;
 	mdma->error_status = 0;
@@ -370,35 +469,7 @@ static void mpc_dma_tasklet(unsigned long data)
 			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
 	}
 
-	for (i = 0; i < mdma->dma.chancnt; i++) {
-		mchan = &mdma->channels[i];
-
-		/* Get all completed descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		if (!list_empty(&mchan->completed))
-			list_splice_tail_init(&mchan->completed, &list);
-		spin_unlock_irqrestore(&mchan->lock, flags);
-
-		if (list_empty(&list))
-			continue;
-
-		/* Execute callbacks and run dependencies */
-		list_for_each_entry(mdesc, &list, node) {
-			desc = &mdesc->desc;
-
-			if (desc->callback)
-				desc->callback(desc->callback_param);
-
-			last_cookie = desc->cookie;
-			dma_run_dependencies(desc);
-		}
-
-		/* Free descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		list_splice_tail_init(&list, &mchan->free);
-		mchan->completed_cookie = last_cookie;
-		spin_unlock_irqrestore(&mchan->lock, flags);
-	}
+	mpc_dma_process_completed(mdma);
 }
 
 /* Submit descriptor to hardware */
@@ -421,13 +492,7 @@ static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
 		mpc_dma_execute(mchan);
 
 	/* Update cookie */
-	cookie = mchan->chan.cookie + 1;
-	if (cookie <= 0)
-		cookie = 1;
-
-	mchan->chan.cookie = cookie;
-	mdesc->desc.cookie = cookie;
-
+	cookie = dma_cookie_assign(txd);
 	spin_unlock_irqrestore(&mchan->lock, flags);
 
 	return cookie;
@@ -544,18 +609,7 @@ static enum dma_status
 mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
 {
-	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
-	unsigned long flags;
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-
-	spin_lock_irqsave(&mchan->lock, flags);
-	last_used = mchan->chan.cookie;
-	last_complete = mchan->completed_cookie;
-	spin_unlock_irqrestore(&mchan->lock, flags);
-
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
-	return dma_async_is_complete(cookie, last_complete, last_used);
+	return dma_cookie_status(chan, cookie, txstate);
 }
 
 /* Prepare descriptor for memory to memory copy */
@@ -563,6 +617,7 @@ static struct dma_async_tx_descriptor *
 mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
 {
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
 	struct mpc_dma_desc *mdesc = NULL;
 	struct mpc_dma_tcd *tcd;
@@ -577,10 +632,14 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	}
 	spin_unlock_irqrestore(&mchan->lock, iflags);
 
-	if (!mdesc)
+	if (!mdesc) {
+		/* try to free completed descriptors */
+		mpc_dma_process_completed(mdma);
 		return NULL;
+	}
 
 	mdesc->error = 0;
+	mdesc->will_access_peripheral = 0;
 	tcd = mdesc->tcd;
 
 	/* Prepare Transfer Control Descriptor for this transaction */
@@ -591,7 +650,8 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 		tcd->dsize = MPC_DMA_TSIZE_32;
 		tcd->soff = 32;
 		tcd->doff = 32;
-	} else if (IS_ALIGNED(src | dst | len, 16)) {
+	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
+		/* MPC8308 doesn't support 16 byte transfers */
 		tcd->ssize = MPC_DMA_TSIZE_16;
 		tcd->dsize = MPC_DMA_TSIZE_16;
 		tcd->soff = 16;
@@ -627,8 +687,194 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	return &mdesc->desc;
 }
 
-static int __devinit mpc_dma_probe(struct platform_device *op,
-					const struct of_device_id *match)
+static struct dma_async_tx_descriptor *
+mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	struct mpc_dma_desc *mdesc = NULL;
+	dma_addr_t per_paddr;
+	u32 tcd_nunits;
+	struct mpc_dma_tcd *tcd;
+	unsigned long iflags;
+	struct scatterlist *sg;
+	size_t len;
+	int iter, i;
+
+	/* Currently there is no proper support for scatter/gather */
+	if (sg_len != 1)
+		return NULL;
+
+	if (!is_slave_direction(direction))
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		spin_lock_irqsave(&mchan->lock, iflags);
+
+		mdesc = list_first_entry(&mchan->free,
+						struct mpc_dma_desc, node);
+		if (!mdesc) {
+			spin_unlock_irqrestore(&mchan->lock, iflags);
+			/* Try to free completed descriptors */
+			mpc_dma_process_completed(mdma);
+			return NULL;
+		}
+
+		list_del(&mdesc->node);
+
+		if (direction == DMA_DEV_TO_MEM) {
+			per_paddr = mchan->src_per_paddr;
+			tcd_nunits = mchan->src_tcd_nunits;
+		} else {
+			per_paddr = mchan->dst_per_paddr;
+			tcd_nunits = mchan->dst_tcd_nunits;
+		}
+
+		spin_unlock_irqrestore(&mchan->lock, iflags);
+
+		if (per_paddr == 0 || tcd_nunits == 0)
+			goto err_prep;
+
+		mdesc->error = 0;
+		mdesc->will_access_peripheral = 1;
+
+		/* Prepare Transfer Control Descriptor for this transaction */
+		tcd = mdesc->tcd;
+
+		memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+		if (!IS_ALIGNED(sg_dma_address(sg), 4))
+			goto err_prep;
+
+		if (direction == DMA_DEV_TO_MEM) {
+			tcd->saddr = per_paddr;
+			tcd->daddr = sg_dma_address(sg);
+			tcd->soff = 0;
+			tcd->doff = 4;
+		} else {
+			tcd->saddr = sg_dma_address(sg);
+			tcd->daddr = per_paddr;
+			tcd->soff = 4;
+			tcd->doff = 0;
+		}
+
+		tcd->ssize = MPC_DMA_TSIZE_4;
+		tcd->dsize = MPC_DMA_TSIZE_4;
+
+		len = sg_dma_len(sg);
+		tcd->nbytes = tcd_nunits * 4;
+		if (!IS_ALIGNED(len, tcd->nbytes))
+			goto err_prep;
+
+		iter = len / tcd->nbytes;
+		if (iter >= 1 << 15) {
+			/* len is too big */
+			goto err_prep;
+		}
+		/* citer_linkch contains the high bits of iter */
+		tcd->biter = iter & 0x1ff;
+		tcd->biter_linkch = iter >> 9;
+		tcd->citer = tcd->biter;
+		tcd->citer_linkch = tcd->biter_linkch;
+
+		tcd->e_sg = 0;
+		tcd->d_req = 1;
+
+		/* Place descriptor in prepared list */
+		spin_lock_irqsave(&mchan->lock, iflags);
+		list_add_tail(&mdesc->node, &mchan->prepared);
+		spin_unlock_irqrestore(&mchan->lock, iflags);
+	}
+
+	return &mdesc->desc;
+
+err_prep:
+	/* Put the descriptor back */
+	spin_lock_irqsave(&mchan->lock, iflags);
+	list_add_tail(&mdesc->node, &mchan->free);
+	spin_unlock_irqrestore(&mchan->lock, iflags);
+
+	return NULL;
+}
+
+static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+							unsigned long arg)
+{
+	struct mpc_dma_chan *mchan;
+	struct mpc_dma *mdma;
+	struct dma_slave_config *cfg;
+	unsigned long flags;
+
+	mchan = dma_chan_to_mpc_dma_chan(chan);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		/* Disable channel requests */
+		mdma = dma_chan_to_mpc_dma(chan);
+
+		spin_lock_irqsave(&mchan->lock, flags);
+
+		out_8(&mdma->regs->dmacerq, chan->chan_id);
+		list_splice_tail_init(&mchan->prepared, &mchan->free);
+		list_splice_tail_init(&mchan->queued, &mchan->free);
+		list_splice_tail_init(&mchan->active, &mchan->free);
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		return 0;
+
+	case DMA_SLAVE_CONFIG:
+		/*
+		 * Software constraints:
+		 *  - only transfers between a peripheral device and
+		 *     memory are supported;
+		 *  - only peripheral devices with 4-byte FIFO access register
+		 *     are supported;
+		 *  - minimal transfer chunk is 4 bytes and consequently
+		 *     source and destination addresses must be 4-byte aligned
+		 *     and transfer size must be aligned on (4 * maxburst)
+		 *     boundary;
+		 *  - during the transfer RAM address is being incremented by
+		 *     the size of minimal transfer chunk;
+		 *  - peripheral port's address is constant during the transfer.
+		 */
+
+		cfg = (void *)arg;
+
+		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+		    !IS_ALIGNED(cfg->src_addr, 4) ||
+		    !IS_ALIGNED(cfg->dst_addr, 4)) {
+			return -EINVAL;
+		}
+
+		spin_lock_irqsave(&mchan->lock, flags);
+
+		mchan->src_per_paddr = cfg->src_addr;
+		mchan->src_tcd_nunits = cfg->src_maxburst;
+		mchan->dst_per_paddr = cfg->dst_addr;
+		mchan->dst_tcd_nunits = cfg->dst_maxburst;
+
+		/* Apply defaults */
+		if (mchan->src_tcd_nunits == 0)
+			mchan->src_tcd_nunits = 1;
+		if (mchan->dst_tcd_nunits == 0)
+			mchan->dst_tcd_nunits = 1;
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		return 0;
+
+	default:
+		/* Unknown command */
+		break;
+	}
+
+	return -ENXIO;
+}
+
+static int mpc_dma_probe(struct platform_device *op)
 {
 	struct device_node *dn = op->dev.of_node;
 	struct device *dev = &op->dev;
@@ -642,19 +888,31 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
 	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
 	if (!mdma) {
 		dev_err(dev, "Memory exhausted!\n");
-		return -ENOMEM;
+		retval = -ENOMEM;
+		goto err;
 	}
 
 	mdma->irq = irq_of_parse_and_map(dn, 0);
 	if (mdma->irq == NO_IRQ) {
 		dev_err(dev, "Error mapping IRQ!\n");
-		return -EINVAL;
+		retval = -EINVAL;
+		goto err;
+	}
+
+	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
+		mdma->is_mpc8308 = 1;
+		mdma->irq2 = irq_of_parse_and_map(dn, 1);
+		if (mdma->irq2 == NO_IRQ) {
+			dev_err(dev, "Error mapping IRQ!\n");
+			retval = -EINVAL;
+			goto err_dispose1;
+		}
 	}
 
 	retval = of_address_to_resource(dn, 0, &res);
 	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
-		return retval;
+		goto err_dispose2;
 	}
 
 	regs_start = res.start;
@@ -662,46 +920,62 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
 
 	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
 		dev_err(dev, "Error requesting memory region!\n");
-		return -EBUSY;
+		retval = -EBUSY;
+		goto err_dispose2;
 	}
 
 	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
 	if (!mdma->regs) {
 		dev_err(dev, "Error mapping memory region!\n");
-		return -ENOMEM;
+		retval = -ENOMEM;
+		goto err_dispose2;
 	}
 
 	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);
 
-	retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
-									mdma);
+	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
 	if (retval) {
 		dev_err(dev, "Error requesting IRQ!\n");
-		return -EINVAL;
+		retval = -EINVAL;
+		goto err_dispose2;
+	}
+
+	if (mdma->is_mpc8308) {
+		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
+		if (retval) {
+			dev_err(dev, "Error requesting IRQ2!\n");
+			retval = -EINVAL;
+			goto err_free1;
+		}
 	}
 
 	spin_lock_init(&mdma->error_status_lock);
 
 	dma = &mdma->dma;
 	dma->dev = dev;
-	dma->chancnt = MPC_DMA_CHANNELS;
+	if (mdma->is_mpc8308)
+		dma->chancnt = MPC8308_DMACHAN_MAX;
+	else
+		dma->chancnt = MPC512x_DMACHAN_MAX;
 	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
 	dma->device_issue_pending = mpc_dma_issue_pending;
 	dma->device_tx_status = mpc_dma_tx_status;
 	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
+	dma->device_control = mpc_dma_device_control;
 
 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma->cap_mask);
 
 	for (i = 0; i < dma->chancnt; i++) {
 		mchan = &mdma->channels[i];
 
 		mchan->chan.device = dma;
-		mchan->chan.chan_id = i;
-		mchan->chan.cookie = 1;
-		mchan->completed_cookie = mchan->chan.cookie;
+		dma_cookie_init(&mchan->chan);
 
 		INIT_LIST_HEAD(&mchan->free);
 		INIT_LIST_HEAD(&mchan->prepared);
@@ -721,45 +995,74 @@ static int __devinit mpc_dma_probe(struct platform_device *op,
 	 * - Round-robin group arbitration,
 	 * - Round-robin channel arbitration.
 	 */
-	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
-				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
-
-	/* Disable hardware DMA requests */
-	out_be32(&mdma->regs->dmaerqh, 0);
-	out_be32(&mdma->regs->dmaerql, 0);
-
-	/* Disable error interrupts */
-	out_be32(&mdma->regs->dmaeeih, 0);
-	out_be32(&mdma->regs->dmaeeil, 0);
-
-	/* Clear interrupts status */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
-	/* Route interrupts to IPIC */
-	out_be32(&mdma->regs->dmaihsa, 0);
-	out_be32(&mdma->regs->dmailsa, 0);
+	if (mdma->is_mpc8308) {
+		/* MPC8308 has 16 channels and lacks some registers */
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+		/* enable snooping */
+		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeil, 0);
+
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmaintl, 0xFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+	} else {
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+
+		/* Disable hardware DMA requests */
+		out_be32(&mdma->regs->dmaerqh, 0);
+		out_be32(&mdma->regs->dmaerql, 0);
+
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeih, 0);
+		out_be32(&mdma->regs->dmaeeil, 0);
+
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+		/* Route interrupts to IPIC */
+		out_be32(&mdma->regs->dmaihsa, 0);
+		out_be32(&mdma->regs->dmailsa, 0);
+	}
 
 	/* Register DMA engine */
 	dev_set_drvdata(dev, mdma);
 	retval = dma_async_device_register(dma);
-	if (retval) {
-		devm_free_irq(dev, mdma->irq, mdma);
-		irq_dispose_mapping(mdma->irq);
-	}
+	if (retval)
+		goto err_free2;
+
+	return retval;
+err_free2:
+	if (mdma->is_mpc8308)
+		free_irq(mdma->irq2, mdma);
+err_free1:
+	free_irq(mdma->irq, mdma);
+err_dispose2:
+	if (mdma->is_mpc8308)
+		irq_dispose_mapping(mdma->irq2);
+err_dispose1:
+	irq_dispose_mapping(mdma->irq);
+err:
 	return retval;
 }
 
-static int __devexit mpc_dma_remove(struct platform_device *op)
+static int mpc_dma_remove(struct platform_device *op)
 {
 	struct device *dev = &op->dev;
 	struct mpc_dma *mdma = dev_get_drvdata(dev);
 
 	dma_async_device_unregister(&mdma->dma);
-	devm_free_irq(dev, mdma->irq, mdma);
+	if (mdma->is_mpc8308) {
+		free_irq(mdma->irq2, mdma);
+		irq_dispose_mapping(mdma->irq2);
+	}
+	free_irq(mdma->irq, mdma);
 	irq_dispose_mapping(mdma->irq);
 
 	return 0;
@@ -767,12 +1070,13 @@ static int __devexit mpc_dma_remove(struct platform_device *op)
 
 static struct of_device_id mpc_dma_match[] = {
 	{ .compatible = "fsl,mpc5121-dma", },
+	{ .compatible = "fsl,mpc8308-dma", },
 	{},
 };
 
-static struct of_platform_driver mpc_dma_driver = {
+static struct platform_driver mpc_dma_driver = {
 	.probe		= mpc_dma_probe,
-	.remove		= __devexit_p(mpc_dma_remove),
+	.remove		= mpc_dma_remove,
 	.driver = {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
@@ -780,17 +1084,7 @@ static struct of_platform_driver mpc_dma_driver = {
 	},
 };
 
-static int __init mpc_dma_init(void)
-{
-	return of_register_platform_driver(&mpc_dma_driver);
-}
-module_init(mpc_dma_init);
-
-static void __exit mpc_dma_exit(void)
-{
-	of_unregister_platform_driver(&mpc_dma_driver);
-}
-module_exit(mpc_dma_exit);
+module_platform_driver(mpc_dma_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
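
For context (not part of the patch): a minimal consumer-side sketch of how a client driver might drive one of these channels through the generic dmaengine API, within the constraints the patch documents (4-byte FIFO access register, single-entry s/g list, length aligned on 4 * maxburst bytes). The function name example_start_rx, the fifo_paddr parameter, and the maxburst value are hypothetical, chosen only for illustration; "chan" is assumed to come from dma_request_slave_channel() or similar, and "buf" is assumed to be an already DMA-mapped buffer.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_start_rx(struct dma_chan *chan, dma_addr_t buf,
			    size_t len, dma_addr_t fifo_paddr)
{
	struct dma_slave_config cfg = {
		/* Only 4-byte FIFO access registers are supported */
		.src_addr	= fifo_paddr,	/* must be 4-byte aligned */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,	/* so len must be aligned on 4 * 16 */
	};
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	dma_cookie_t cookie;
	int ret;

	/* Rejected with -EINVAL unless widths/alignment match the rules */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* The driver refuses s/g lists with more than one entry */
	sg_init_table(&sg, 1);
	sg_dma_address(&sg) = buf;	/* must be 4-byte aligned */
	sg_dma_len(&sg) = len;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	/* On MPC512x the transfer then starts on the external request line;
	 * on MPC8308 the driver starts it from software. */
	dma_async_issue_pending(chan);
	return 0;
}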
