From fc2bdefdde89b54d8fcde7bbf7d0adc0ce5cb044 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Tue, 6 Oct 2009 21:22:22 +0000
Subject: sh: Plug PMB alloc memory leak

If we fail to allocate a PMB entry in pmb_remap() we must remember to
clear and free any PMB entries that we may have previously allocated,
e.g. if we were allocating a multiple entry mapping.

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/mm/pmb.c | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b1a714a92b1..58f935896b4 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -33,6 +33,8 @@
 
 #define NR_PMB_ENTRIES	16
 
+static void __pmb_unmap(struct pmb_entry *);
+
 static struct kmem_cache *pmb_cache;
 
 static unsigned long pmb_map;
@@ -218,9 +220,10 @@ static struct {
 long pmb_remap(unsigned long vaddr, unsigned long phys,
 	       unsigned long size, unsigned long flags)
 {
-	struct pmb_entry *pmbp;
+	struct pmb_entry *pmbp, *pmbe;
 	unsigned long wanted;
 	int pmb_flags, i;
+	long err;
 
 	/* Convert typical pgprot value to the PMB equivalent */
 	if (flags & _PAGE_CACHABLE) {
@@ -236,20 +239,22 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 
 again:
 	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
-		struct pmb_entry *pmbe;
 		int ret;
 
 		if (size < pmb_sizes[i].size)
 			continue;
 
 		pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag);
-		if (IS_ERR(pmbe))
-			return PTR_ERR(pmbe);
+		if (IS_ERR(pmbe)) {
+			err = PTR_ERR(pmbe);
+			goto out;
+		}
 
 		ret = set_pmb_entry(pmbe);
 		if (ret != 0) {
 			pmb_free(pmbe);
-			return -EBUSY;
+			err = -EBUSY;
+			goto out;
 		}
 
 		phys	+= pmb_sizes[i].size;
@@ -270,6 +275,12 @@ again:
 		goto again;
 
 	return wanted - size;
+
+out:
+	if (pmbp)
+		__pmb_unmap(pmbp);
+
+	return err;
 }
 
 void pmb_unmap(unsigned long addr)
@@ -283,12 +294,19 @@ void pmb_unmap(unsigned long addr)
 	if (unlikely(!pmbe))
 		return;
 
+	__pmb_unmap(pmbe);
+}
+
+static void __pmb_unmap(struct pmb_entry *pmbe)
+{
 	WARN_ON(!test_bit(pmbe->entry, &pmb_map));
 
 	do {
 		struct pmb_entry *pmblink = pmbe;
 
-		clear_pmb_entry(pmbe);
+		if (pmbe->entry != PMB_NO_ENTRY)
+			clear_pmb_entry(pmbe);
+
 		pmbe = pmblink->link;
 
 		pmb_free(pmblink);
--
cgit v1.2.3-18-g5258
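The error path added above follows a common unwind pattern: entries for a
multi-entry mapping are chained via ->link as they are set up, so a failure
part-way through can hand the partial chain to a single teardown helper
(__pmb_unmap()). Below is a minimal standalone C sketch of that pattern,
not the kernel's code: struct entry, alloc_entry() and unmap_chain() are
hypothetical stand-ins, and the deliberate failure on the third allocation
just exercises the unwind.

#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *link;
	int id;
};

/* Stand-in for pmb_alloc(): fail deliberately on the third entry. */
static struct entry *alloc_entry(int id)
{
	struct entry *e;

	if (id == 3)
		return NULL;

	e = malloc(sizeof(*e));
	if (e) {
		e->link = NULL;
		e->id = id;
	}
	return e;
}

/* Analogue of __pmb_unmap(): walk the chain and free every entry. */
static void unmap_chain(struct entry *e)
{
	while (e) {
		struct entry *next = e->link;

		printf("freeing entry %d\n", e->id);
		free(e);
		e = next;
	}
}

int main(void)
{
	struct entry *head = NULL, *prev = NULL;
	int i;

	for (i = 1; i <= 4; i++) {
		struct entry *e = alloc_entry(i);

		if (!e) {
			/*
			 * The fix: unwind everything allocated so far
			 * instead of leaking the partial chain.
			 */
			unmap_chain(head);
			return 1;
		}

		if (prev)
			prev->link = e;
		else
			head = e;
		prev = e;
	}

	unmap_chain(head);
	return 0;
}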
From a2767cfb1d9d97c3f861743f1ad595a80b75ec99 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Tue, 6 Oct 2009 21:22:34 +0000
Subject: sh: Don't allocate smaller sized mappings on every iteration

Currently, we've got the less than ideal situation where if we need to
allocate a 256MB mapping we'll allocate four entries like so,

	entry 1: 128MB
	entry 2: 64MB
	entry 3: 16MB
	entry 4: 16MB

This is because as we execute the loop in pmb_remap() we will
progressively try mapping the remaining address space with smaller and
smaller sizes. This isn't good because the size we use on one iteration
may be the perfect size to use on the next iteration, for instance when
the initial size is divisible by one of the PMB mapping sizes.

With this patch, we now only need two entries in the PMB to map 256MB of
address space,

	entry 1: 128MB
	entry 2: 128MB

Signed-off-by: Matt Fleming
Signed-off-by: Paul Mundt
---
 arch/sh/mm/pmb.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 58f935896b4..aade3110211 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -269,6 +269,13 @@ again:
 
 		pmbp->link = pmbe;
 		pmbp = pmbe;
+
+		/*
+		 * Instead of trying smaller sizes on every iteration
+		 * (even if we succeed in allocating space), try using
+		 * pmb_sizes[i].size again.
+		 */
+		i--;
 	}
 
 	if (size >= 0x1000000)
--
cgit v1.2.3-18-g5258
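To see the effect of the i-- retry, here is a minimal standalone C sketch
of the size-selection loop. The size table {512MB, 128MB, 64MB, 16MB} and
the map() helper are illustrative assumptions rather than the kernel's
exact pmb_sizes[], and the sketch does a single pass instead of jumping
back to the again: label, so any remainder is simply left unmapped. With
the retry, a 256MB request packs into 128MB + 128MB; without it, the loop
falls through to progressively smaller sizes.

#include <stdio.h>

#define MB(x) ((x) * 1024UL * 1024UL)

static const unsigned long sizes[] = { MB(512), MB(128), MB(64), MB(16) };
#define NSIZES ((int)(sizeof(sizes) / sizeof(sizes[0])))

static void map(unsigned long size, int retry_same_size)
{
	int i;

	for (i = 0; i < NSIZES; i++) {
		if (size < sizes[i])
			continue;

		/*
		 * A real implementation would allocate and set a PMB
		 * entry here; we only record the chosen size.
		 */
		printf("  entry: %luMB\n", sizes[i] / MB(1));
		size -= sizes[i];

		if (retry_same_size)
			i--;	/* retry the same size on the next pass */
	}
}

int main(void)
{
	printf("without retry (old behaviour), 256MB request:\n");
	map(MB(256), 0);

	printf("with retry (patched behaviour), 256MB request:\n");
	map(MB(256), 1);
	return 0;
}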