author Anton Blanchard <anton@samba.org> 2012-10-03 18:57:10 +0000
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2012-10-13 05:50:17 +0900
commit 11464f790b08f7a8b0e5a98b994f53bd33ecb9e4 (patch)
tree 517480d28c88a219b327a3a6cd538d04d56f1e63 /arch
parent 987bb385ef97fba0dcaf531e63a9bc5ca2e2f9f1 (diff)
powerpc/iommu: Fix multiple issues with IOMMU pools code
commit d900bd7366463fd96a907b2c212242e2b68b27d8 upstream.

There are a number of issues in the recent IOMMU pools code:

- On a preempt kernel we might switch CPUs in the middle of building a
  scatter gather list. When this happens the handle hint passed in no
  longer falls within the local CPU's pool. Check for this and fall back
  to the pool hint.

- We were missing a spin_unlock/spin_lock in one spot where we switch
  pools.

- We need to provide locking around dart_tlb_invalidate_all and
  dart_tlb_invalidate_one now that the global lock is gone.

Reported-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
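To make the first two fixes easier to follow outside of kernel context, here is a minimal user-space sketch. It models the per-pool spinlocks with pthread mutexes; the pool layout, sizes and the names struct pool, pick_start and NR_POOLS are illustrative assumptions, not the kernel's actual structures.

/* Minimal user-space model of the handle-hint check and the lock hand-off
 * when falling back to pool 0.  Build with: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>

#define NR_POOLS 4

struct pool {
	unsigned long start, end, hint;
	pthread_mutex_t lock;
};

static struct pool pools[NR_POOLS];

/* Returns a starting offset; the caller ends up holding (*poolp)->lock. */
static unsigned long pick_start(struct pool **poolp, unsigned long *handle,
				int pass)
{
	struct pool *pool = *poolp;
	unsigned long start;

	pthread_mutex_lock(&pool->lock);

	/* Fix 1: only trust the caller's handle hint if it still falls inside
	 * this pool; after a CPU migration it may belong to another pool. */
	if (pass == 0 && handle && *handle &&
	    *handle >= pool->start && *handle < pool->end)
		start = *handle;
	else
		start = pool->hint;

	/* Fix 2: when switching to pool 0 on a later pass, release the old
	 * pool's lock before taking the new one. */
	if (pass > 0 && pool != &pools[0]) {
		pthread_mutex_unlock(&pool->lock);
		pool = &pools[0];
		pthread_mutex_lock(&pool->lock);
		start = pool->start;
	}

	*poolp = pool;
	return start;
}

int main(void)
{
	unsigned long handle = 300;	/* lies in pool 1 (256..511) */
	struct pool *pool = &pools[0];	/* but we now run on pool 0's CPU */
	unsigned long start;
	int i;

	for (i = 0; i < NR_POOLS; i++) {
		pools[i].start = i * 256;
		pools[i].end   = (i + 1) * 256;
		pools[i].hint  = pools[i].start;
		pthread_mutex_init(&pools[i].lock, NULL);
	}

	start = pick_start(&pool, &handle, 0);
	printf("start = %lu (stale handle rejected, pool hint used)\n", start);
	pthread_mutex_unlock(&pool->lock);
	return 0;
}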
Diffstat (limited to 'arch')
-rw-r--r-- arch/powerpc/kernel/iommu.c      5
-rw-r--r-- arch/powerpc/sysdev/dart_iommu.c 12
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index ff5a6ce027b..8226c6cb348 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -215,7 +215,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	spin_lock_irqsave(&(pool->lock), flags);
 
 again:
-	if ((pass == 0) && handle && *handle)
+	if ((pass == 0) && handle && *handle &&
+	    (*handle >= pool->start) && (*handle < pool->end))
 		start = *handle;
 	else
 		start = pool->hint;
@@ -236,7 +237,9 @@ again:
 		 * but on second pass, start at 0 in pool 0.
 		 */
 		if ((start & mask) >= limit || pass > 0) {
+			spin_unlock(&(pool->lock));
 			pool = &(tbl->pools[0]);
+			spin_lock(&(pool->lock));
 			start = pool->start;
 		} else {
 			start &= mask;
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 4f2680f431b..b68b0db17b3 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -74,11 +74,16 @@ static int dart_is_u4;
 
 #define DBG(...)
 
+static DEFINE_SPINLOCK(invalidate_lock);
+
 static inline void dart_tlb_invalidate_all(void)
 {
 	unsigned long l = 0;
 	unsigned int reg, inv_bit;
 	unsigned long limit;
+	unsigned long flags;
+
+	spin_lock_irqsave(&invalidate_lock, flags);
 
 	DBG("dart: flush\n");
 
@@ -111,12 +116,17 @@ retry:
 		panic("DART: TLB did not flush after waiting a long "
 		      "time. Buggy U3 ?");
 	}
+
+	spin_unlock_irqrestore(&invalidate_lock, flags);
 }
 
 static inline void dart_tlb_invalidate_one(unsigned long bus_rpn)
 {
 	unsigned int reg;
 	unsigned int l, limit;
+	unsigned long flags;
+
+	spin_lock_irqsave(&invalidate_lock, flags);
 
 	reg = DART_CNTL_U4_ENABLE | DART_CNTL_U4_IONE |
 		(bus_rpn & DART_CNTL_U4_IONE_MASK);
 
@@ -138,6 +148,8 @@ wait_more:
 		panic("DART: TLB did not flush after waiting a long "
 		      "time. Buggy U4 ?");
 	}
+
+	spin_unlock_irqrestore(&invalidate_lock, flags);
 }
 
 static void dart_flush(struct iommu_table *tbl)
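
The DART change follows the same reasoning as the third point in the commit message: each invalidate is a multi-step read-modify-write-and-poll sequence over shared registers, so once the global iommu_table lock no longer serializes callers it needs its own lock, taken irqsave because invalidations can run in atomic context. Below is a rough user-space model of that idea; the pthread mutex stands in for the spinlock and a plain variable stands in for the DART control register, both assumptions made purely for illustration.

/* Toy model of a locked invalidate sequence; not the real DART register layout. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define INV_BIT 0x1u

static pthread_mutex_t invalidate_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t dart_cntl;	/* stand-in for the memory-mapped control register */

static void tlb_invalidate_all(void)
{
	uint32_t reg;

	/* Without the lock, two callers could interleave the read-modify-write
	 * below with the completion poll and corrupt the sequence. */
	pthread_mutex_lock(&invalidate_lock);

	reg = dart_cntl;
	dart_cntl = reg | INV_BIT;	/* kick off the flush */
	dart_cntl &= ~INV_BIT;		/* hardware would clear this; model it */

	pthread_mutex_unlock(&invalidate_lock);
}

int main(void)
{
	tlb_invalidate_all();
	printf("dart_cntl = %#x\n", dart_cntl);
	return 0;
}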