/*
* Topcliff PCH DMA controller driver
* Copyright (c) 2010 Intel Corporation
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>
#include "dmaengine.h"
#define DRV_NAME "pch-dma"
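/*
 * DMA_CTL0 layout: each channel owns DMA_CTL0_BITS_PER_CH (4) bits of the
 * control register; the low two bits select the transfer mode (disable,
 * scatter-gather or one-shot) and the bit at DMA_CTL0_DIR_SHIFT_BITS
 * selects the transfer direction.
 */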
#define DMA_CTL0_DISABLE 0x0
#define DMA_CTL0_SG 0x1
#define DMA_CTL0_ONESHOT 0x2
#define DMA_CTL0_MODE_MASK_BITS 0x3
#define DMA_CTL0_DIR_SHIFT_BITS 2
#define DMA_CTL0_BITS_PER_CH 4
#define DMA_CTL2_START_SHIFT_BITS 8
#define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)
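/*
 * Status register layout: per-channel interrupt flags in the low bits,
 * error flags above them (DMA_STATUS0_ERR), and a 2-bit channel state
 * (idle, descriptor read, wait, access) per channel starting at
 * DMA_STATUS_SHIFT_BITS.
 */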
#define DMA_STATUS_IDLE 0x0
#define DMA_STATUS_DESC_READ 0x1
#define DMA_STATUS_WAIT 0x2
#define DMA_STATUS_ACCESS 0x3
#define DMA_STATUS_BITS_PER_CH 2
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x) (0x1 << (x))
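/*
 * Descriptor size word: the access width is encoded at
 * DMA_DESC_WIDTH_SHIFT_BITS and bounds the maximum transfer count
 * allowed for that width.
 */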
#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE 0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES 0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES 0x7FF
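/*
 * Flags carried in the low bits of a descriptor's "next" word: end of
 * chain vs. follow-on descriptor, with or without raising an interrupt.
 */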
#define DMA_DESC_END_WITHOUT_IRQ 0x0
#define DMA_DESC_END_WITH_IRQ 0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
#define DMA_DESC_FOLLOW_WITH_IRQ 0x3
#define MAX_CHAN_NR 12
#define DMA_MASK_CTL0_MODE 0x33333333
#define DMA_MASK_CTL2_MODE 0x00003333
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
"initial descriptors per channel (default: 64)");
struct pch_dma_desc_regs {
	u32 dev_addr;
	u32 mem_addr;
	u32 size;
	u32 next;
};
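/*
 * Controller register map: global control/status registers followed by
 * one descriptor register set per channel.
 */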
struct pch_dma_regs {
	u32 dma_ctl0;
	u32 dma_ctl1;
	u32 dma_ctl2;
	u32 dma_ctl3;
	u32 dma_sts0;
	u32 dma_sts1;
	u32 dma_sts2;
	u32 reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
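/* Software descriptor: the hardware regs plus dmaengine bookkeeping. */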
struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head desc_node;
	struct list_head tx_list;
};
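/* Per-channel driver state. */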
struct pch_dma_chan {
	struct dma_chan chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct tasklet;
	unsigned long err_status;
	spinlock_t lock;
	struct list_head active_list;
	struct list_head queue;
	struct list_head free_list;
	unsigned int descs_allocated;
};
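/* Per-channel register offsets, relative to the channel's membase. */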
#define PDC_DEV_ADDR 0x00
#define PDC_MEM_ADDR 0x04
#define PDC_SIZE 0x08
#define PDC_NEXT 0x0C
#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)
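/* Device-wide driver state. */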
struct pch_dma {
	struct dma_device dma;
	void __iomem *membase;
	struct pci_pool *pool;
	struct pch_dma_regs regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan channels[MAX_CHAN_NR];
};
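/* Controller-wide register offsets, relative to the device's membase. */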
#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
#define PCH_DMA_STS2 0x18
#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)
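/* Helpers mapping dmaengine core objects to driver state and devices. */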
static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}
static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}
static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}
static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
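/* Peek at the first descriptor on the active and pending queues. */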
static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}
static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}
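/*
 * Enable or disable the interrupt for one channel in CTL2.  Channels 8
 * and above sit in a second bit field higher up in the register, hence
 * the extra offset.
 */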
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;
	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;
	val = dma_readl(pd, CTL2);
	if (enable)
		val |=