From 7ee793a62fa8c544f8b844e6e87b2d8e8836b219 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Tue, 25 Feb 2014 11:01:19 -0800
Subject: cma: Remove potential deadlock situation

CMA locking is currently very coarse: the single cma_mutex both
protects the bitmap and serializes against alloc_contig_range. Several
situations can currently deadlock on the CMA mutex, mostly AB/BA
situations between alloc and free. Fix this by protecting the bitmap
with a per-CMA-region mutex and using the existing global mutex only to
serialize against alloc_contig_range.

Signed-off-by: Laura Abbott
Signed-off-by: Marek Szyprowski
---
 drivers/base/dma-contiguous.c | 32 +++++++++++++++++++++++++-------
 1 file changed, 25 insertions(+), 7 deletions(-)

(limited to 'drivers/base/dma-contiguous.c')

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 165c2c299e5..fe72bac9627 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -37,6 +37,7 @@ struct cma {
 	unsigned long	base_pfn;
 	unsigned long	count;
 	unsigned long	*bitmap;
+	struct mutex	lock;
 };
 
 struct cma *dma_contiguous_default_area;
@@ -161,6 +162,7 @@ static int __init cma_activate_area(struct cma *cma)
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
 
+	mutex_init(&cma->lock);
 	return 0;
 }
 
@@ -261,6 +263,13 @@ err:
 	return ret;
 }
 
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+	mutex_unlock(&cma->lock);
+}
+
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev:   Pointer to device for which the allocation is performed.
@@ -294,30 +303,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 
 	mask = (1 << align) - 1;
 
-	mutex_lock(&cma_mutex);
 	for (;;) {
+		mutex_lock(&cma->lock);
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
-		if (pageno >= cma->count)
+		if (pageno >= cma->count) {
+			mutex_unlock(&cma_mutex);
 			break;
+		}
+		bitmap_set(cma->bitmap, pageno, count);
+		/*
+		 * It's safe to drop the lock here. We've marked this region for
+		 * our exclusive use. If the migration fails we will take the
+		 * lock again and unmark it.
+		 */
+		mutex_unlock(&cma->lock);
 
 		pfn = cma->base_pfn + pageno;
+		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, count);
 			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
+			clear_cma_bitmap(cma, pfn, count);
 			break;
 		}
+		clear_cma_bitmap(cma, pfn, count);
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
 		start = pageno + mask + 1;
	}
 
-	mutex_unlock(&cma_mutex);
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
@@ -350,10 +370,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
-	mutex_lock(&cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
 	free_contig_range(pfn, count);
-	mutex_unlock(&cma_mutex);
+	clear_cma_bitmap(cma, pfn, count);
 
 	return true;
 }
--
cgit v1.2.3-18-g5258
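With this patch applied, the allocation path uses two locks with distinct
roles. The condensed sketch below is an illustration only, not the literal
kernel source: the name cma_alloc_sketch and the trimming of debug output
are the editor's, and the not-found path already drops cma->lock here, as
corrected by the later "CMA: correct unlock target" patch in this series.

static struct page *cma_alloc_sketch(struct cma *cma, int count,
				     unsigned long mask)
{
	unsigned long start = 0, pageno, pfn;
	struct page *page = NULL;
	int ret;

	for (;;) {
		/* cma->lock guards only the per-region allocation bitmap. */
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		/* Mark the range as ours, then drop the lock; a failed
		 * migration retakes it via clear_cma_bitmap(). */
		bitmap_set(cma->bitmap, pageno, count);
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;

		/* cma_mutex now only serializes alloc_contig_range() callers. */
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);

		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;
		/* -EBUSY: retry a bit further into the region. */
		start = pageno + mask + 1;
	}
	return page;
}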
From bb56d0dc23aa3428c0b1901414bfcf698eb693fb Mon Sep 17 00:00:00 2001
From: Gioh Kim
Date: Thu, 22 May 2014 13:31:37 +0900
Subject: drivers/base/dma-contiguous.c: erratum of dev_get_cma_area

Fix an erratum in the dma_alloc_from_contiguous() comment: the helper
is named dev_get_cma_area(), not get_dev_cma_area().

Signed-off-by: Gioh Kim
Signed-off-by: Marek Szyprowski
---
 drivers/base/dma-contiguous.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/base/dma-contiguous.c')

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index fe72bac9627..7b0217c863a 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -278,7 +278,7 @@ static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
  *
  * This function allocates memory buffer for specified device. It uses
  * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific get_dev_cma_area() helper
+ * global one. Requires architecture specific dev_get_cma_area() helper
  * function.
  */
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
--
cgit v1.2.3-18-g5258

From f70e3c4f8b6ab61f713e040822ec51f5de498146 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Thu, 29 May 2014 15:29:18 +0900
Subject: CMA: correct unlock target

'cma: Remove potential deadlock situation' introduces a per-CMA-area
mutex for bitmap management, but it contains one mistake: when we
cannot find an appropriate area in the bitmap, we release the global
cma_mutex rather than cma->lock. Fix it.

Signed-off-by: Joonsoo Kim
Signed-off-by: Marek Szyprowski
---
 drivers/base/dma-contiguous.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/base/dma-contiguous.c')

diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 7b0217c863a..c34ec336424 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -309,7 +309,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
 		if (pageno >= cma->count) {
-			mutex_unlock(&cma_mutex);
+			mutex_unlock(&cma->lock);
 			break;
 		}
 		bitmap_set(cma->bitmap, pageno, count);
--
cgit v1.2.3-18-g5258
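After these three patches the rule is simple: cma->lock protects only the
bitmap, and cma_mutex protects only alloc_contig_range(). The release path
shows the split most clearly; the sketch below is a simplified restatement
of dma_release_from_contiguous() with its argument checks and debug output
trimmed (the name cma_release_sketch is the editor's, not the kernel's).

static bool cma_release_sketch(struct cma *cma, struct page *pages, int count)
{
	unsigned long pfn = page_to_pfn(pages);

	if (!cma || pfn < cma->base_pfn ||
	    pfn >= cma->base_pfn + cma->count)
		return false;

	/* Hand the pages back to the page allocator first... */
	free_contig_range(pfn, count);

	/* ...then clear the bitmap under cma->lock alone; the global
	 * cma_mutex is no longer taken on the free path, which removes
	 * one side of the alloc/free AB/BA deadlock described above. */
	clear_cma_bitmap(cma, pfn, count);

	return true;
}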
From 5ea3b1b2f8ad9162684431ce6188102ca4c64b7a Mon Sep 17 00:00:00 2001
From: Akinobu Mita
Date: Wed, 4 Jun 2014 16:06:54 -0700
Subject: cma: add placement specifier for "cma=" kernel parameter

Currently, the "cma=" kernel parameter is used to specify the size of
CMA, but we cannot specify where it is located. We want to locate CMA
below 4GB for devices that only support 32-bit addressing on 64-bit
systems without an IOMMU.

This change makes it possible to specify the placement of CMA by
extending the "cma=" kernel parameter.

Examples:
1. locate 64MB CMA below 4GB by "cma=64M@0-4G"
2. locate 64MB CMA exactly at 512MB by "cma=64M@512M"

Note that the DMA contiguous memory allocator on x86 assumes that
page_address() works for the pages to be allocated. This change
therefore requires the end address of the contiguous memory area to be
limited to max_pfn_mapped, via the argument of dma_contiguous_reserve(),
to prevent the area from being placed in highmem.

Signed-off-by: Akinobu Mita
Cc: Marek Szyprowski
Cc: Konrad Rzeszutek Wilk
Cc: David Woodhouse
Cc: Don Dutile
Cc: Thomas Gleixner
Cc: Ingo Molnar
Cc: "H. Peter Anvin"
Cc: Andi Kleen
Cc: Yinghai Lu
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 drivers/base/dma-contiguous.c | 42 ++++++++++++++++++++++++++++++++----------
 1 file changed, 32 insertions(+), 10 deletions(-)

(limited to 'drivers/base/dma-contiguous.c')
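For reference, here is how the extended parameter forms resolve once the
early_cma() and dma_contiguous_reserve() changes in the diff below are
applied (the concrete 64M figures are only an example):

  cma=64M        ->  size 64M, base 0,    limit 0     placed anywhere below the arch-supplied limit
  cma=64M@512M   ->  size 64M, base 512M, limit 576M  base + size == limit, so placement is fixed
  cma=64M@0-4G   ->  size 64M, base 0,    limit 4G    reserved at a memblock-chosen spot below 4GB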
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index c34ec336424..83969f8c572 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -60,11 +60,22 @@ struct cma *dma_contiguous_default_area;
  */
 static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
 static phys_addr_t size_cmdline = -1;
+static phys_addr_t base_cmdline;
+static phys_addr_t limit_cmdline;
 
 static int __init early_cma(char *p)
 {
 	pr_debug("%s(%s)\n", __func__, p);
 	size_cmdline = memparse(p, &p);
+	if (*p != '@')
+		return 0;
+	base_cmdline = memparse(p + 1, &p);
+	if (*p != '-') {
+		limit_cmdline = base_cmdline + size_cmdline;
+		return 0;
+	}
+	limit_cmdline = memparse(p + 1, &p);
+
 	return 0;
 }
 early_param("cma", early_cma);
@@ -108,11 +119,18 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 void __init dma_contiguous_reserve(phys_addr_t limit)
 {
 	phys_addr_t selected_size = 0;
+	phys_addr_t selected_base = 0;
+	phys_addr_t selected_limit = limit;
+	bool fixed = false;
 
 	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
 	if (size_cmdline != -1) {
 		selected_size = size_cmdline;
+		selected_base = base_cmdline;
+		selected_limit = min_not_zero(limit_cmdline, limit);
+		if (base_cmdline + size_cmdline == limit_cmdline)
+			fixed = true;
 	} else {
 #ifdef CONFIG_CMA_SIZE_SEL_MBYTES
 		selected_size = size_bytes;
@@ -129,10 +147,12 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
 
-		dma_contiguous_reserve_area(selected_size, 0, limit,
-					    &dma_contiguous_default_area);
+		dma_contiguous_reserve_area(selected_size, selected_base,
+					    selected_limit,
+					    &dma_contiguous_default_area,
+					    fixed);
 	}
-};
+}
 
 static DEFINE_MUTEX(cma_mutex);
 
@@ -189,15 +209,20 @@ core_initcall(cma_init_reserved_areas);
  * @base: Base address of the reserved area optional, use 0 for any
  * @limit: End address of the reserved memory (optional, 0 for any).
  * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
  * has been activated and all other subsystems have already allocated/reserved
  * memory. This function allows to create custom reserved areas for specific
  * devices.
+ *
+ * If @fixed is true, reserve contiguous area at exactly @base.  If false,
+ * reserve in range from @base to @limit.
  */
 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma)
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
@@ -223,18 +248,15 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 	limit &= ~(alignment - 1);
 
 	/* Reserve memory */
-	if (base) {
+	if (base && fixed) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
 			ret = -EBUSY;
 			goto err;
 		}
 	} else {
-		/*
-		 * Use __memblock_alloc_base() since
-		 * memblock_alloc_base() panic()s.
-		 */
-		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
+							limit);
 		if (!addr) {
 			ret = -ENOMEM;
 			goto err;
--
cgit v1.2.3-18-g5258
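Summarizing the new placement semantics, the reservation branch of
dma_contiguous_reserve_area() now behaves roughly like the sketch below.
It is simplified from the hunk above: error handling is collapsed into
plain returns, and the final assignment of base is inferred from code
outside the hunk shown.

	/* Sketch of the placement decision after this patch (simplified). */
	if (base && fixed) {
		/* e.g. "cma=64M@512M": the area must sit exactly at @base. */
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0)
			return -EBUSY;
	} else {
		/* e.g. "cma=64M@0-4G" or plain "cma=64M": let memblock pick
		 * any suitable address in [base, limit). */
		phys_addr_t addr = memblock_alloc_range(size, alignment,
							base, limit);
		if (!addr)
			return -ENOMEM;
		base = addr;	/* record where the area actually landed */
	}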