diff options
Diffstat (limited to 'drivers/dma/ppc460ex-adma.c')
-rw-r--r-- | drivers/dma/ppc460ex-adma.c | 5409 |
1 files changed, 5409 insertions, 0 deletions
diff --git a/drivers/dma/ppc460ex-adma.c b/drivers/dma/ppc460ex-adma.c new file mode 100644 index 00000000000..2ef1e9d6052 --- /dev/null +++ b/drivers/dma/ppc460ex-adma.c @@ -0,0 +1,5409 @@ +/* + * Copyright(c) 2006 DENX Engineering. All rights reserved. + * + * Author: Yuri Tikhonov <yur@emcraft.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ + +/* + * This driver supports the asynchronous DMA copy and RAID engines available + * on the AMCC PPC460ex Processors. + * Based on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x) + * ADMA driver written by D.Williams. 
+ */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/async_tx.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/of_platform.h> +#include <linux/proc_fs.h> +#include <asm/dcr.h> +#include <asm/dcr-regs.h> +#include <asm/ppc460ex_adma.h> +#include <asm/ppc460ex_xor.h> +#include <asm/ppc4xx_ocm.h> +/* The list of channels exported by ppc460ex ADMA */ +struct list_head +ppc_adma_chan_list = LIST_HEAD_INIT(ppc_adma_chan_list); + +/* This flag is set when want to refetch the xor chain in the interrupt + * handler + */ +static u32 do_xor_refetch = 0; + +/* Pointers to last submitted to DMA0, DMA1 CDBs */ +static ppc460ex_desc_t *chan_last_sub[3]; +static ppc460ex_desc_t *chan_first_cdb[3]; + +/* Pointer to last linked and submitted xor CB */ +static ppc460ex_desc_t *xor_last_linked = NULL; +static ppc460ex_desc_t *xor_last_submit = NULL; + +/* This array is used in data-check operations for storing a pattern */ +static char ppc460ex_qword[16]; + +/* Since RXOR operations use the common register (MQ0_CF2H) for setting-up + * the block size in transactions, then we do not allow to activate more than + * only one RXOR transactions simultaneously. So use this var to store + * the information about is RXOR currently active (PPC460EX_RXOR_RUN bit is + * set) or not (PPC460EX_RXOR_RUN is clear). 
+ */ +static unsigned long ppc460ex_rxor_state; + +/* /proc interface is used here to enable the h/w RAID-6 capabilities + */ +static struct proc_dir_entry *ppc460ex_proot; +static struct proc_dir_entry *ppc460ex_pqroot; + +/* These are used in enable & check routines + */ +static u32 ppc460ex_r6_enabled; +static u32 ppc460ex_r5_enabled; +static ppc460ex_ch_t *ppc460ex_r6_tchan; +static ppc460ex_ch_t *ppc460ex_r5_tchan; +static struct completion ppc460ex_r6_test_comp; +static struct completion ppc460ex_r5_test_comp; + +static int ppc460ex_adma_dma2rxor_prep_src (ppc460ex_desc_t *desc, + ppc460ex_rxor_cursor_t *cursor, int index, + int src_cnt, u32 addr); +static void ppc460ex_adma_dma2rxor_set_src (ppc460ex_desc_t *desc, + int index, dma_addr_t addr); +static void ppc460ex_adma_dma2rxor_set_mult (ppc460ex_desc_t *desc, + int index, u8 mult); +#if 1 +static inline void pr_dma(int x, char *str) +{ + if(mfdcr(0x60)) { + printk("<%s> Line:%d\n",str,x); + } +} +#else +static inline void pr_dma(int x, char *str) +{ +} +#endif +phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size) +{ + phys_addr_t page_4gb = 0; + + return (page_4gb | addr); +} +/*********************************************************************** + * HW specific initialization + * ********************************************************************/ +static u64 ppc460ex_adma_dmamask = DMA_32BIT_MASK; + +/* DMA and XOR platform devices' resources */ + +static struct resource ppc460ex_dma_1_resources[] = { + { + .flags = IORESOURCE_MEM, + }, + { + .start = DMA1_CS_FIFO_NEED_SERVICE_IRQ, + .end = DMA1_CS_FIFO_NEED_SERVICE_IRQ, + .flags = IORESOURCE_IRQ + }, + { + .start = DMA_ERROR_IRQ, + .end = DMA_ERROR_IRQ, + .flags = IORESOURCE_IRQ + } +}; + + +/* DMA and XOR platform devices' data */ + +/* DMA0,1 engines use FIFO to maintain CDBs, so we + * should allocate the pool accordingly to size of this + * FIFO. 
Thus, the pool size depends on the FIFO depth: + * as many CDB pointers as the FIFO may contain, that many + * CDBs we should provide in the pool. + * That is + * CDB size = 32B; + * CDBs number = (DMA0_FIFO_SIZE >> 3); + * Pool size = CDBs number * CDB size = + * = (DMA0_FIFO_SIZE >> 3) << 5 = DMA0_FIFO_SIZE << 2. + * + * As far as the XOR engine is concerned, it does not + * use FIFOs but uses linked list. So there is no dependency + * between pool size to allocate and the engine configuration. + */ + +static struct ppc460ex_adma_platform_data ppc460ex_dma_1_data = { + .hw_id = PPC460EX_DMA1_ID, + .pool_size = DMA1_FIFO_SIZE << 2, +}; + +/* DMA and XOR platform devices definitions */ +#if 1 +static struct platform_device ppc460ex_dma_1_channel = { + .name = "PPC460EX-ADMA", + .id = PPC460EX_DMA1_ID, + .num_resources = ARRAY_SIZE(ppc460ex_dma_1_resources), + .resource = ppc460ex_dma_1_resources, + .dev = { + .dma_mask = &ppc460ex_adma_dmamask, + .coherent_dma_mask = DMA_64BIT_MASK, + .platform_data = (void *) &ppc460ex_dma_1_data, + }, +}; +#endif + +/* + * Init DMA0/1 and XOR engines; allocate memory for DMAx FIFOs; set platform_device + * memory resources addresses + */ +static void ppc460ex_configure_raid_devices(void) +{ + void *fifo_buf; + volatile i2o_regs_t *i2o_reg; + volatile dma_regs_t *dma_reg1; + /* + * volatile dma_regs_t *dma_reg0, *dma_reg1; + volatile xor_regs_t *xor_reg; + */ + u32 mask; + + /* + * Map registers and allocate fifo buffer + */ + if (!(i2o_reg = ioremap(I2O_MMAP_BASE, I2O_MMAP_SIZE))) { + printk(KERN_ERR "I2O registers mapping failed.\n"); + return; + } + if (!(dma_reg1 = ioremap(DMA1_MMAP_BASE, DMA_MMAP_SIZE))) { + printk(KERN_ERR "DMA1 registers mapping failed.\n"); + goto err1; + } + + /* Provide memory regions for DMA's FIFOs: I2O, DMA0 and DMA1 share + * the base address of FIFO memory space. 
+ * Actually we need twice as much physical memory as programmed in the + * <fsiz> register (because there are two FIFOs for each DMA: CP and CS) + */ + fifo_buf = kmalloc(( DMA1_FIFO_SIZE)<<1, GFP_KERNEL); + if (!fifo_buf) { + printk(KERN_ERR "DMA FIFO buffer allocating failed.\n"); + goto err2; + } + + /* + * Configure h/w + */ + /* Reset I2O/DMA */ + SDR_WRITE(SDR0_SRST0, SDR0_SRST_I2ODMA); + SDR_WRITE(SDR0_SRST0, 0); + + + /* Setup the base address of mmaped registers */ + mtdcr(DCRN_I2O0_IBAH, (u32)(I2O_MMAP_BASE >> 32)); + mtdcr(DCRN_I2O0_IBAL, (u32)(I2O_MMAP_BASE) | I2O_REG_ENABLE); + + /* Set up FIFO memory space base address */ + out_le32(&i2o_reg->ifbah, 0); + out_le32(&i2o_reg->ifbal, ((u32)__pa(fifo_buf))); + + /* set zero FIFO size for I2O, so the whole fifo_buf is used by DMAs. + * DMA0_FIFO_SIZE is defined in bytes, <fsiz> - in number of CDB pointers (8byte). + * DMA FIFO Length = CSlength + CPlength, where + * CSlength = CPlength = (fsiz + 1) * 8. + */ + out_le32(&i2o_reg->ifsiz, 0); + out_le32(&dma_reg1->fsiz, DMA_FIFO_ENABLE | ((DMA1_FIFO_SIZE>>3) - 2)); + /* Configure DMA engine */ + out_le32(&dma_reg1->cfg, DMA_CFG_DXEPR_HP | DMA_CFG_DFMPP_HP | DMA_CFG_FALGN); + + /* Clear Status */ + out_le32(&dma_reg1->dsts, ~0); + + /* + * Prepare WXOR/RXOR (finally it is being enabled via /proc interface of + * the ppc460ex ADMA driver) + */ + /* Set HB alias */ + mtdcr(DCRN_MQ0_BAUH, DMA_CUED_XOR_HB); + + /* Set: + * - LL transaction passing limit to 1; + * - Memory controller cycle limit to 1; + * - Galois Polynomial to 0x14d (default) + */ + mtdcr(DCRN_MQ0_CFBHL, 0x88a68000 | (1 << MQ0_CFBHL_TPLM) | + (1 << MQ0_CFBHL_HBCL) | + (PPC460EX_DEFAULT_POLY << MQ0_CFBHL_POLY)); + + /* Unmask 'CS FIFO Attention' interrupts and + * enable generating interrupts on errors + */ + mask = in_le32(&i2o_reg->iopim) & ~( + I2O_IOPIM_P0SNE | I2O_IOPIM_P1SNE | + I2O_IOPIM_P0EM | I2O_IOPIM_P1EM); + out_le32(&i2o_reg->iopim, mask); + + /* enable XOR engine interrupts */ + + /* + 
* Unmap I2O registers + */ + iounmap(i2o_reg); + printk("<%s> line %d\n", __FUNCTION__, __LINE__); + + /* Configure MQ as follows: + * MQ: 0x80001C80. This means + * - AddrAck First Request, + * - Read Passing Limit = 1, + * - Read Passing Enable, + * - Read Flow Through Enable, + * - MCIF Cycle Limit = 1. + */ +#if 1 + mdelay(1000); + mask = (1 << MQ_CF1_AAFR) | ((1 & MQ_CF1_RPLM_MSK) << MQ_CF1_RPLM) | + (1 << MQ_CF1_RPEN) | (1 << MQ_CF1_RFTE) | + ((1 & MQ_CF1_WRCL_MSK) << MQ_CF1_WRCL); + mtdcr(DCRN_MQ0_CF1H, mask); + mtdcr(DCRN_MQ0_CF1L, mask); +#endif + printk("<%s> line %d\n", __FUNCTION__, __LINE__); + + /* Configure PLB as follows: + * PLB: 0xDF000000. This means + * - Priority level 00 fair priority, + * - Priority level 01 fair priority, + * - Priority level 11 fair priority, + * - High Bus Utilization enabled, + * - 4 Deep read pipe, + * - 2 Deep write pipe. + */ + mask = (1 << PLB_ACR_PPM0) | (1 << PLB_ACR_PPM1) | (1 << PLB_ACR_PPM3) | + (1 << PLB_ACR_HBU) | ((3 & PLB_ACR_RDP_MSK) << PLB_ACR_RDP) | + (1 << PLB_ACR_WRP); + mtdcr(DCRN_PLB0_ACR, mask); + mtdcr(DCRN_PLB1_ACR, mask); + printk("<%s> line %d\n", __FUNCTION__, __LINE__); + + /* + * Set resource addresses + */ + + ppc460ex_dma_1_channel.resource[0].start = (resource_size_t)(dma_reg1); + ppc460ex_dma_1_channel.resource[0].end = + ppc460ex_dma_1_channel.resource[0].start+DMA_MMAP_SIZE; + printk( " ppc460ex_dma_1_channel.resource[0].start=0x%lx \n", + ppc460ex_dma_1_channel.resource[0].start); + printk("<%s> line %d dma_reg1=0x%lx \n", __FUNCTION__, __LINE__,dma_reg1); + + + printk("<%s> line %d\n", __FUNCTION__, __LINE__); + return; +err2: + iounmap(dma_reg1); +err1: + iounmap(i2o_reg); + return; +} +#if 1 +static struct platform_device *ppc460ex_devs[] __initdata = { +/* &ppc460ex_dma_0_channel, */ + &ppc460ex_dma_1_channel, + /*&ppc460ex_xor_channel, */ +}; +#endif + +/****************************************************************************** + * Command (Descriptor) Blocks low-level routines 
+ ******************************************************************************/ +/** + * ppc460ex_desc_init_interrupt - initialize the descriptor for INTERRUPT + * pseudo operation + */ +static inline void ppc460ex_desc_init_interrupt (ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan) +{ + xor_cb_t *p; + + switch (chan->device->id) { + case PPC460EX_XOR_ID: + p = desc->hw_desc; + memset (desc->hw_desc, 0, sizeof(xor_cb_t)); + /* NOP with Command Block Complete Enable */ + p->cbc = XOR_CBCR_CBCE_BIT; + break; + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + memset (desc->hw_desc, 0, sizeof(dma_cdb_t)); + /* NOP with interrupt */ + set_bit(PPC460EX_DESC_INT, &desc->flags); + break; + default: + printk(KERN_ERR "Unsupported id %d in %s\n", chan->device->id, + __FUNCTION__); + break; + } +} + +/** + * ppc460ex_desc_init_null_xor - initialize the descriptor for NULL XOR + * pseudo operation + */ +static inline void ppc460ex_desc_init_null_xor(ppc460ex_desc_t *desc) +{ + memset (desc->hw_desc, 0, sizeof(xor_cb_t)); + desc->hw_next = NULL; + desc->src_cnt = 0; + desc->dst_cnt = 1; +} + +/** + * ppc460ex_desc_init_pqxor_xor - initialize the descriptor for PQ_XOR + * operation in DMA2 controller + */ +static inline void ppc460ex_desc_init_dma2rxor(ppc460ex_desc_t *desc, + int dst_cnt, int src_cnt, unsigned long flags) +{ + xor_cb_t *hw_desc = desc->hw_desc; + + memset (desc->hw_desc, 0, sizeof(xor_cb_t)); + desc->hw_next = NULL; + desc->src_cnt = src_cnt; + desc->dst_cnt = dst_cnt; + memset (desc->reverse_flags, 0, sizeof (desc->reverse_flags)); + desc->descs_per_op = 0; + + hw_desc->cbc = XOR_CBCR_TGT_BIT; + if (flags & DMA_PREP_INTERRUPT) + /* Enable interrupt on complete */ + hw_desc->cbc |= XOR_CBCR_CBCE_BIT; +} + +/** + * ppc460ex_desc_init_pq - initialize the descriptor for PQ_XOR operation + */ +static inline void ppc460ex_desc_init_pq(ppc460ex_desc_t *desc, + int dst_cnt, int src_cnt, unsigned long flags, + unsigned long op) +{ + dma_cdb_t *hw_desc; + 
ppc460ex_desc_t *iter; + u8 dopc; + + + /* Common initialization of a PQ descriptors chain */ + + set_bits(op, &desc->flags); + desc->src_cnt = src_cnt; + desc->dst_cnt = dst_cnt; + + dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ? + DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2; + + list_for_each_entry(iter, &desc->group_list, chain_node) { + hw_desc = iter->hw_desc; + memset (iter->hw_desc, 0, sizeof(dma_cdb_t)); + + if (likely(!list_is_last(&iter->chain_node, + &desc->group_list))) { + /* set 'next' pointer */ + iter->hw_next = list_entry(iter->chain_node.next, + ppc460ex_desc_t, chain_node); + clear_bit(PPC460EX_DESC_INT, &iter->flags); + } else { + /* this is the last descriptor. + * this slot will be pasted from ADMA level + * each time it wants to configure parameters + * of the transaction (src, dst, ...) + */ + iter->hw_next = NULL; + if (flags & DMA_PREP_INTERRUPT) + set_bit(PPC460EX_DESC_INT, &iter->flags); + else + clear_bit(PPC460EX_DESC_INT, &iter->flags); + } + } + + /* Set OPS depending on WXOR/RXOR type of operation */ + if (!test_bit(PPC460EX_DESC_RXOR, &desc->flags)) { + /* This is a WXOR only chain: + * - first descriptors are for zeroing destinations + * if PPC460EX_ZERO_P/Q set; + * - descriptors remained are for GF-XOR operations. 
+ */ + iter = list_first_entry(&desc->group_list, + ppc460ex_desc_t, chain_node); + + if (test_bit(PPC460EX_ZERO_P, &desc->flags)) { + hw_desc = iter->hw_desc; + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + iter = list_first_entry(&iter->chain_node, + ppc460ex_desc_t, chain_node); + } + + if (test_bit(PPC460EX_ZERO_Q, &desc->flags)) { + hw_desc = iter->hw_desc; + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + iter = list_first_entry(&iter->chain_node, + ppc460ex_desc_t, chain_node); + } + + list_for_each_entry_from(iter, &desc->group_list, chain_node) { + hw_desc = iter->hw_desc; + hw_desc->opc = dopc; + } + } else { + /* This is either RXOR-only or mixed RXOR/WXOR */ + + /* The first 1 or 2 slots in chain are always RXOR, + * if need to calculate P & Q, then there are two + * RXOR slots; if only P or only Q, then there is one + */ + iter = list_first_entry(&desc->group_list, + ppc460ex_desc_t, chain_node); + hw_desc = iter->hw_desc; + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + + if (desc->dst_cnt == DMA_DEST_MAX_NUM) { + iter = list_first_entry(&iter->chain_node, + ppc460ex_desc_t, chain_node); + hw_desc = iter->hw_desc; + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + } + + /* The remain descs (if any) are WXORs */ + if (test_bit(PPC460EX_DESC_WXOR, &desc->flags)) { + iter = list_first_entry(&iter->chain_node, + ppc460ex_desc_t, chain_node); + list_for_each_entry_from(iter, &desc->group_list, + chain_node) { + hw_desc = iter->hw_desc; + hw_desc->opc = dopc; + } + } + } +} +void ppc460ex_desc_init_xor(ppc460ex_desc_t *desc, + int dst_cnt, int src_cnt, unsigned long flags, + unsigned long op) +{ + dma_cdb_t *hw_desc; + ppc460ex_desc_t *iter; + u8 dopc; + + + /* Common initialization of a PQ descriptors chain */ + + set_bits(op, &desc->flags); + desc->src_cnt = src_cnt; + desc->dst_cnt = dst_cnt; + + dopc = (desc->dst_cnt == DMA_DEST_MAX_NUM) ? 
+ DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2; + + list_for_each_entry(iter, &desc->group_list, chain_node) { + hw_desc = iter->hw_desc; + memset (iter->hw_desc, 0, sizeof(dma_cdb_t)); + + if (likely(!list_is_last(&iter->chain_node, + &desc->group_list))) { + /* set 'next' pointer */ + iter->hw_next = list_entry(iter->chain_node.next, + ppc460ex_desc_t, chain_node); + clear_bit(PPC460EX_DESC_INT, &iter->flags); + } else { + /* this is the last descriptor. + * this slot will be pasted from ADMA level + * each time it wants to configure parameters + * of the transaction (src, dst, ...) + */ + iter->hw_next = NULL; + if (flags & DMA_PREP_INTERRUPT) + set_bit(PPC460EX_DESC_INT, &iter->flags); + else + clear_bit(PPC460EX_DESC_INT, &iter->flags); + } + } + + /* Set OPS depending on WXOR/RXOR type of operation */ + if (!test_bit(PPC460EX_DESC_RXOR, &desc->flags)) { + /* This is a WXOR only chain: + * - first descriptors are for zeroing destinations + * if PPC460EX_ZERO_P/Q set; + * - descriptors remained are for GF-XOR operations. 
+ */ + iter = list_first_entry(&desc->group_list, + ppc460ex_desc_t, chain_node); + + if (test_bit(PPC460EX_ZERO_P, &desc->flags)) { + hw_desc = iter->hw_desc; + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + iter = list_first_entry(&iter->chain_node, + ppc460ex_desc_t, chain_node); + } + + + list_for_each_entry_from(iter, &desc->group_list, chain_node) { + hw_desc = iter->hw_desc; + hw_desc->opc = dopc; + } + } else { + /* This is either RXOR-only or mixed RXOR/WXOR */ + + /* The first 1 or 2 slots in chain are always RXOR, + * if need to calculate P & Q, then there are two + * RXOR slots; if only P or only Q, then there is one + */ + iter = list_first_entry(&desc->group_list, + ppc460ex_desc_t, chain_node); + hw_desc = iter->hw_desc; + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + + if (desc->dst_cnt == DMA_DEST_MAX_NUM) { + iter = list_first_entry(&iter->chain_node, + ppc460ex_desc_t, chain_node); + hw_desc = iter->hw_desc; + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + } + + /* The remain descs (if any) are WXORs */ + if (test_bit(PPC460EX_DESC_WXOR, &desc->flags)) { + iter = list_first_entry(&iter->chain_node, + ppc460ex_desc_t, chain_node); + list_for_each_entry_from(iter, &desc->group_list, + chain_node) { + hw_desc = iter->hw_desc; + hw_desc->opc = dopc; + } + } + } +} +/** + * ppc460ex_desc_init_dma01_xor - initialize the descriptor for P_XOR operation + */ +static inline void ppc460ex_desc_init_dma01_xor(ppc460ex_desc_t *desc, + int dst_cnt, int src_cnt, unsigned long flags, + unsigned long op) +{ + dma_cdb_t *hw_desc; + ppc460ex_desc_t *iter; + + /* Common initialization of a XOR descriptors chain */ + + set_bits(op, &desc->flags); + desc->src_cnt = src_cnt; + desc->dst_cnt = dst_cnt; + + list_for_each_entry(iter, &desc->group_list, chain_node) { + hw_desc = iter->hw_desc; + memset (iter->hw_desc, 0, sizeof(dma_cdb_t)); + + if (likely(!list_is_last(&iter->chain_node, + &desc->group_list))) { + /* set 'next' pointer */ + iter->hw_next = list_entry(iter->chain_node.next, 
+ ppc460ex_desc_t, chain_node); + clear_bit(PPC460EX_DESC_INT, &iter->flags); + } else { + /* this is the last descriptor. + * this slot will be pasted from ADMA level + * each time it wants to configure parameters + * of the transaction (src, dst, ...) + */ + iter->hw_next = NULL; + if (flags & DMA_PREP_INTERRUPT) + set_bit(PPC460EX_DESC_INT, &iter->flags); + else + clear_bit(PPC460EX_DESC_INT, &iter->flags); + } + } + + /* Set OPS depending on WXOR/RXOR type of operation */ + if (!test_bit(PPC460EX_DESC_RXOR, &desc->flags)) { + /* This is a WXOR only chain: + * - first <dst_cnt> descriptors are for zeroing destinations + * if PPC460EX_ZERO_P is set; + * - descriptors remained are for GF-XOR operations. + */ + iter = list_first_entry(&desc->group_list, + ppc460ex_desc_t, chain_node); + + if (dst_cnt && test_bit(PPC460EX_ZERO_P, + &desc->flags)) { + /* MV_SG1_SG2 to zero P or Q if this is + * just PQ_XOR operation and MV_SG1_SG2 + * if only Q has to be calculated + */ + hw_desc = iter->hw_desc; + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + iter = list_first_entry(&iter->chain_node, + ppc460ex_desc_t, chain_node); + } + list_for_each_entry(iter, &desc->group_list, chain_node) { + hw_desc = iter->hw_desc; + if (desc->dst_cnt == DMA_DEST_MAX_NUM) + hw_desc->opc = DMA_CDB_OPC_MULTICAST; + else + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + } + } else { + /* This is either RXOR-only or mixed RXOR/WXOR + * The first slot in chain is always RXOR, + * the slots remained (if there are) are WXOR + */ + list_for_each_entry(iter, &desc->group_list, chain_node) { + hw_desc = iter->hw_desc; + /* No DMA_CDB_OPC_MULTICAST option for RXOR */ + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; + } + } +} + +/** + * ppc460ex_desc_init_pqzero_sum - initialize the descriptor for PQ_VAL + * operation + */ +static inline void ppc460ex_desc_init_pqzero_sum(ppc460ex_desc_t *desc, + int dst_cnt, int src_cnt) +{ + dma_cdb_t *hw_desc; + ppc460ex_desc_t *iter; + int i = 0; + + /* initialize each descriptor in 
chain */ + list_for_each_entry(iter, &desc->group_list, chain_node) { + hw_desc = iter->hw_desc; + memset (iter->hw_desc, 0, sizeof(dma_cdb_t)); + + /* This is a PQ_VAL operation: + * - first <dst_cnt> descriptors are for GF-XOR operations; + * - <dst_cnt> descriptors remained are for checking the result. + */ + if (i++ < src_cnt) + /* MV_SG1_SG2 if only Q is being verified + * MULTICAST if both P and Q are being verified + */ + hw_desc->opc = (dst_cnt == DMA_DEST_MAX_NUM) ? + DMA_CDB_OPC_MULTICAST : DMA_CDB_OPC_MV_SG1_SG2; + else + /* DMA_CDB_OPC_DCHECK128 operation */ + hw_desc->opc = DMA_CDB_OPC_DCHECK128; + + if (likely(!list_is_last(&iter->chain_node, + &desc->group_list))) { + /* set 'next' pointer */ + iter->hw_next = list_entry(iter->chain_node.next, + ppc460ex_desc_t, chain_node); + } else { + /* this is the last descriptor. + * this slot will be pasted from ADMA level + * each time it wants to configure parameters + * of the transaction (src, dst, ...) + */ + iter->hw_next = NULL; + /* always enable interrupt generating since we get + * the status of pqzero from the handler + */ + set_bit(PPC460EX_DESC_INT, &iter->flags); + } + } + desc->src_cnt = src_cnt; + desc->dst_cnt = dst_cnt; +} + +/** + * ppc460ex_desc_init_memcpy - initialize the descriptor for MEMCPY operation + */ +static inline void ppc460ex_desc_init_memcpy(ppc460ex_desc_t *desc, + unsigned long flags) +{ + dma_cdb_t *hw_desc = desc->hw_desc; + + memset (desc->hw_desc, 0, sizeof(dma_cdb_t)); + desc->hw_next = NULL; + desc->src_cnt = 1; + desc->dst_cnt = 1; + + if (flags & DMA_PREP_INTERRUPT) + set_bit(PPC460EX_DESC_INT, &desc->flags); + else + clear_bit(PPC460EX_DESC_INT, &desc->flags); + + hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2; +} + +/** + * ppc460ex_desc_init_memset - initialize the descriptor for MEMSET operation + */ +static inline void ppc460ex_desc_init_memset(ppc460ex_desc_t *desc, int value, + unsigned long flags) +{ + dma_cdb_t *hw_desc = desc->hw_desc; + + memset (desc->hw_desc, 0, 
sizeof(dma_cdb_t)); + desc->hw_next = NULL; + desc->src_cnt = 1; + desc->dst_cnt = 1; + + if (flags & DMA_PREP_INTERRUPT) + set_bit(PPC460EX_DESC_INT, &desc->flags); + else + clear_bit(PPC460EX_DESC_INT, &desc->flags); + + hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value); + hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value); + hw_desc->opc = DMA_CDB_OPC_DFILL128; +} + +/** + * ppc460ex_desc_set_src_addr - set source address into the descriptor + */ +static inline void ppc460ex_desc_set_src_addr( ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan, int src_idx, + dma_addr_t addrh, dma_addr_t addrl) +{ + dma_cdb_t *dma_hw_desc; + phys_addr_t addr64, tmplow, tmphi; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + if (!addrh) { + addr64 = fixup_bigphys_addr(addrl, sizeof(phys_addr_t)); + tmphi = (addr64 >> 32); + tmplow = (addr64 & 0xFFFFFFFF); + } else { + tmphi = addrh; + tmplow = addrl; + } + dma_hw_desc = desc->hw_desc; + dma_hw_desc->sg1l = cpu_to_le32((u32)tmplow); + dma_hw_desc->sg1u = cpu_to_le32((u32)tmphi); + break; + } +} + +/** + * ppc460ex_desc_set_src_mult - set source address mult into the descriptor + */ +static inline void ppc460ex_desc_set_src_mult( ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan, u32 mult_index, int sg_index, + unsigned char mult_value) +{ + dma_cdb_t *dma_hw_desc; + xor_cb_t *xor_hw_desc; + u32 *psgu; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + dma_hw_desc = desc->hw_desc; + + switch(sg_index){ + /* for RXOR operations set multiplier + * into source cued address + */ + case DMA_CDB_SG_SRC: + psgu = &dma_hw_desc->sg1u; + break; + /* for WXOR operations set multiplier + * into destination cued address(es) + */ + case DMA_CDB_SG_DST1: + psgu = &dma_hw_desc->sg2u; + break; + case DMA_CDB_SG_DST2: + psgu = &dma_hw_desc->sg3u; + break; + default: + BUG(); + } + + *psgu |= cpu_to_le32(mult_value << mult_index); + if(mfdcr(0x60) == 0xfee8) { + printk("Line--%d 
mult_value = 0x%x mult_index=0x%x *psgu=0x%x\n",__LINE__, mult_value,mult_index,*psgu); + } + *psgu |= cpu_to_le32( 1 << mult_index); + if(mfdcr(0x60) == 0xfee8) { + printk("Line--%d mult_value = 0x%x mult_index=0x%x *psgu=0x%x\n",__LINE__, mult_value,mult_index,*psgu); + } + break; + case PPC460EX_XOR_ID: + xor_hw_desc = desc->hw_desc; + break; + default: + BUG(); + } +} + +/** + * ppc460ex_desc_set_dest_addr - set destination address into the descriptor + */ +static inline void ppc460ex_desc_set_dest_addr(ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan, + dma_addr_t addrh, dma_addr_t addrl, + u32 dst_idx) +{ + dma_cdb_t *dma_hw_desc; + xor_cb_t *xor_hw_desc; + phys_addr_t addr64, tmphi, tmplow; + u32 *psgu, *psgl; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + if (!addrh) { + addr64 = fixup_bigphys_addr(addrl, sizeof(phys_addr_t)); + tmphi = (addr64 >> 32); + tmplow = (addr64 & 0xFFFFFFFF); + } else { + tmphi = addrh; + tmplow = addrl; + } + dma_hw_desc = desc->hw_desc; + + psgu = dst_idx ? &dma_hw_desc->sg3u : &dma_hw_desc->sg2u; + psgl = dst_idx ? 
&dma_hw_desc->sg3l : &dma_hw_desc->sg2l; + + *psgl = cpu_to_le32((u32)tmplow); + *psgu |= cpu_to_le32((u32)tmphi); + break; + case PPC460EX_XOR_ID: + xor_hw_desc = desc->hw_desc; + xor_hw_desc->cbtal = addrl; + xor_hw_desc->cbtah = 0; + break; + } +} + +/** + * ppc460ex_desc_set_byte_count - set number of data bytes involved + * into the operation + */ +static inline void ppc460ex_desc_set_byte_count(ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan, u32 byte_count) +{ + dma_cdb_t *dma_hw_desc; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + dma_hw_desc = desc->hw_desc; + dma_hw_desc->cnt = cpu_to_le32(byte_count); + break; + } +} + +/** + * ppc460ex_desc_set_rxor_block_size - set RXOR block size + */ +static inline void ppc460ex_desc_set_rxor_block_size(u32 byte_count) +{ + /* assume that byte_count is aligned on the 512-boundary; + * thus write it directly to the register (bits 23:31 are + * reserved there). + */ + mtdcr(DCRN_MQ0_CF2H, byte_count); +} + +/** + * ppc460ex_desc_set_dcheck - set CHECK pattern + */ +static inline void ppc460ex_desc_set_dcheck(ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan, u8 *qword) +{ + dma_cdb_t *dma_hw_desc; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + dma_hw_desc = desc->hw_desc; + out_le32(&dma_hw_desc->sg3l, qword[0]); + out_le32(&dma_hw_desc->sg3u, qword[4]); + out_le32(&dma_hw_desc->sg2l, qword[8]); + out_le32(&dma_hw_desc->sg2u, qword[12]); + break; + default: + BUG(); + } +} + +/** + * ppc460ex_xor_set_link - set link address in xor CB + */ +static inline void ppc460ex_xor_set_link (ppc460ex_desc_t *prev_desc, + ppc460ex_desc_t *next_desc) +{ + xor_cb_t *xor_hw_desc = prev_desc->hw_desc; + + if (unlikely(!next_desc || !(next_desc->phys))) { + printk(KERN_ERR "%s: next_desc=0x%p; next_desc->phys=0x%x\n", + __FUNCTION__, next_desc, + next_desc ? 
next_desc->phys : 0); + BUG(); + } + + xor_hw_desc->cbs = 0; + xor_hw_desc->cblal = next_desc->phys; + xor_hw_desc->cblah = 0; + xor_hw_desc->cbc |= XOR_CBCR_LNK_BIT; +} + +/** + * ppc460ex_desc_set_link - set the address of descriptor following this + * descriptor in chain + */ +static inline void ppc460ex_desc_set_link(ppc460ex_ch_t *chan, + ppc460ex_desc_t *prev_desc, ppc460ex_desc_t *next_desc) +{ + unsigned long flags; + ppc460ex_desc_t *tail = next_desc; + + if (unlikely(!prev_desc || !next_desc || + (prev_desc->hw_next && prev_desc->hw_next != next_desc))) { + /* If previous next is overwritten something is wrong. + * though we may refetch from append to initiate list + * processing; in this case - it's ok. + */ + printk(KERN_ERR "%s: prev_desc=0x%p; next_desc=0x%p; " + "prev->hw_next=0x%p\n", __FUNCTION__, prev_desc, + next_desc, prev_desc ? prev_desc->hw_next : 0); + BUG(); + } + + local_irq_save(flags); + + /* do s/w chaining both for DMA and XOR descriptors */ + prev_desc->hw_next = next_desc; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + break; + case PPC460EX_XOR_ID: + /* bind descriptor to the chain */ + while (tail->hw_next) + tail = tail->hw_next; + xor_last_linked = tail; + + if (prev_desc == xor_last_submit) + /* do not link to the last submitted CB */ + break; + ppc460ex_xor_set_link (prev_desc, next_desc); + break; + } + + local_irq_restore(flags); +} + +/** + * ppc460ex_desc_get_src_addr - extract the source address from the descriptor + */ +static inline u32 ppc460ex_desc_get_src_addr(ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan, int src_idx) +{ + dma_cdb_t *dma_hw_desc; + xor_cb_t *xor_hw_desc; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + dma_hw_desc = desc->hw_desc; + /* May have 0, 1, 2, or 3 sources */ + switch (dma_hw_desc->opc) { + case DMA_CDB_OPC_NO_OP: + case DMA_CDB_OPC_DFILL128: + return 0; + case DMA_CDB_OPC_DCHECK128: + if (unlikely(src_idx)) { + 
printk(KERN_ERR "%s: try to get %d source for" + " DCHECK128\n", __FUNCTION__, src_idx); + BUG(); + } + return le32_to_cpu(dma_hw_desc->sg1l); + case DMA_CDB_OPC_MULTICAST: + case DMA_CDB_OPC_MV_SG1_SG2: + if (unlikely(src_idx > 2)) { + printk(KERN_ERR "%s: try to get %d source from" + " DMA descr\n", __FUNCTION__, src_idx); + BUG(); + } + if (src_idx) { + if (le32_to_cpu(dma_hw_desc->sg1u) & + DMA_CUED_XOR_WIN_MSK) { + u8 region; + + if (src_idx == 1) + return le32_to_cpu( + dma_hw_desc->sg1l) + + desc->unmap_len; + + region = (le32_to_cpu( + dma_hw_desc->sg1u)) >> + DMA_CUED_REGION_OFF; + + region &= DMA_CUED_REGION_MSK; + switch (region) { + case DMA_RXOR123: + return le32_to_cpu( + dma_hw_desc->sg1l) + + (desc->unmap_len << 1); + case DMA_RXOR124: + return le32_to_cpu( + dma_hw_desc->sg1l) + + (desc->unmap_len * 3); + case DMA_RXOR125: + return le32_to_cpu( + dma_hw_desc->sg1l) + + (desc->unmap_len << 2); + default: + printk (KERN_ERR + "%s: try to" + " get src3 for region %02x" + "PPC460EX_DESC_RXOR12?\n", + __FUNCTION__, region); + BUG(); + } + } else { + printk(KERN_ERR + "%s: try to get %d" + " source for non-cued descr\n", + __FUNCTION__, src_idx); + BUG(); + } + } + return le32_to_cpu(dma_hw_desc->sg1l); + default: + printk(KERN_ERR "%s: unknown OPC 0x%02x\n", + __FUNCTION__, dma_hw_desc->opc); + BUG(); + } + return le32_to_cpu(dma_hw_desc->sg1l); + case PPC460EX_XOR_ID: + /* May have up to 16 sources */ + xor_hw_desc = desc->hw_desc; + return xor_hw_desc->ops[src_idx].l; + } + return 0; +} + +/** + * ppc460ex_desc_get_dest_addr - extract the destination address from the + * descriptor + */ +static inline u32 ppc460ex_desc_get_dest_addr(ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan, int idx) +{ + dma_cdb_t *dma_hw_desc; + xor_cb_t *xor_hw_desc; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + dma_hw_desc = desc->hw_desc; + + if (likely(!idx)) + return le32_to_cpu(dma_hw_desc->sg2l); + return 
le32_to_cpu(dma_hw_desc->sg3l); + case PPC460EX_XOR_ID: + xor_hw_desc = desc->hw_desc; + return xor_hw_desc->cbtal; + } + return 0; +} + +/** + * ppc460ex_desc_get_byte_count - extract the byte count from the descriptor + */ +static inline u32 ppc460ex_desc_get_byte_count(ppc460ex_desc_t *desc, + ppc460ex_ch_t *chan) +{ + dma_cdb_t *dma_hw_desc; + xor_cb_t *xor_hw_desc; + + switch (chan->device->id) { + case PPC460EX_DMA0_ID: + case PPC460EX_DMA1_ID: + dma_hw_desc = desc-> |