Diffstat (limited to 'arch/arm/plat-omap/dma.c')
-rw-r--r--  arch/arm/plat-omap/dma.c  41
1 file changed, 30 insertions(+), 11 deletions(-)
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 47ec77af4cc..7fc8c045ad5 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -123,6 +123,7 @@ static struct dma_link_info *dma_linked_lch;
static int dma_lch_count;
static int dma_chan_count;
+static int omap_dma_reserve_channels;
static spinlock_t dma_chan_lock;
static struct omap_dma_lch *dma_chan;
@@ -737,7 +738,7 @@ int omap_request_dma(int dev_id, const char *dev_name,
* id.
*/
dma_write(dev_id | (1 << 10), CCR(free_ch));
- } else if (cpu_is_omap730() || cpu_is_omap15xx()) {
+ } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
dma_write(dev_id, CCR(free_ch));
}
@@ -759,19 +760,12 @@ void omap_free_dma(int lch)
{
unsigned long flags;
- spin_lock_irqsave(&dma_chan_lock, flags);
if (dma_chan[lch].dev_id == -1) {
pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
lch);
- spin_unlock_irqrestore(&dma_chan_lock, flags);
return;
}
- dma_chan[lch].dev_id = -1;
- dma_chan[lch].next_lch = -1;
- dma_chan[lch].callback = NULL;
- spin_unlock_irqrestore(&dma_chan_lock, flags);
-
if (cpu_class_is_omap1()) {
/* Disable all DMA interrupts for the channel. */
dma_write(0, CICR(lch));
@@ -797,6 +791,12 @@ void omap_free_dma(int lch)
dma_write(0, CCR(lch));
omap_clear_dma(lch);
}
+
+ spin_lock_irqsave(&dma_chan_lock, flags);
+ dma_chan[lch].dev_id = -1;
+ dma_chan[lch].next_lch = -1;
+ dma_chan[lch].callback = NULL;
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
@@ -1900,7 +1900,7 @@ static int omap2_dma_handle_ch(int ch)
/* STATUS register count is from 1-32 while our is 0-31 */
static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
{
- u32 val;
+ u32 val, enable_reg;
int i;
val = dma_read(IRQSTATUS_L0);
@@ -1909,6 +1909,8 @@ static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
printk(KERN_WARNING "Spurious DMA IRQ\n");
return IRQ_HANDLED;
}
+ enable_reg = dma_read(IRQENABLE_L0);
+ val &= enable_reg; /* Dispatch only relevant interrupts */
for (i = 0; i < dma_lch_count && val != 0; i++) {
if (val & 1)
omap2_dma_handle_ch(i);
@@ -2321,6 +2323,10 @@ static int __init omap_init_dma(void)
return -ENODEV;
}
+ if (cpu_class_is_omap2() && omap_dma_reserve_channels
+ && (omap_dma_reserve_channels <= dma_lch_count))
+ dma_lch_count = omap_dma_reserve_channels;
+
dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
GFP_KERNEL);
if (!dma_chan)
@@ -2339,7 +2345,7 @@ static int __init omap_init_dma(void)
printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
dma_chan_count = 9;
enable_1510_mode = 1;
- } else if (cpu_is_omap16xx() || cpu_is_omap730()) {
+ } else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
printk(KERN_INFO "OMAP DMA hardware version %d\n",
dma_read(HW_ID));
printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
@@ -2371,7 +2377,7 @@ static int __init omap_init_dma(void)
u8 revision = dma_read(REVISION) & 0xff;
printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
revision >> 4, revision & 0xf);
- dma_chan_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
+ dma_chan_count = dma_lch_count;
} else {
dma_chan_count = 0;
return 0;
@@ -2437,4 +2443,17 @@ static int __init omap_init_dma(void)
arch_initcall(omap_init_dma);
+/*
+ * Reserve the omap SDMA channels using cmdline bootarg
+ * "omap_dma_reserve_ch=". The valid range is 1 to 32
+ */
+static int __init omap_dma_cmdline_reserve_ch(char *str)
+{
+ if (get_option(&str, &omap_dma_reserve_channels) != 1)
+ omap_dma_reserve_channels = 0;
+ return 1;
+}
+
+__setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
+
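Usage note (illustrative, not part of the patch): with this change applied, the number of
logical SDMA channels registered on OMAP2+ class devices can be capped from the kernel
command line, for example

    omap_dma_reserve_ch=16

where 16 is an arbitrary example value within the documented 1 to 32 range. Per the check
added in omap_init_dma(), a value of 0, an unparsable value, or a value larger than the
hardware channel count is ignored and the default channel count is kept.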