Diffstat (limited to 'arch/arm64/mm/init.c')
-rw-r--r--  arch/arm64/mm/init.c | 90
1 file changed, 37 insertions, 53 deletions
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index de2de5db628..e90c5426fe1 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -30,8 +30,9 @@
 #include <linux/memblock.h>
 #include <linux/sort.h>
 #include <linux/of_fdt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 
-#include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -39,17 +40,9 @@
 
 #include "mm.h"
 
-static unsigned long phys_initrd_start __initdata = 0;
-static unsigned long phys_initrd_size __initdata = 0;
-
 phys_addr_t memstart_addr __read_mostly = 0;
 
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
-	phys_initrd_start = start;
-	phys_initrd_size = end - start;
-}
-
+#ifdef CONFIG_BLK_DEV_INITRD
 static int __init early_initrd(char *p)
 {
 	unsigned long start, size;
@@ -59,29 +52,39 @@ static int __init early_initrd(char *p)
 	if (*endp == ',') {
 		size = memparse(endp + 1, NULL);
 
-		phys_initrd_start = start;
-		phys_initrd_size = size;
+		initrd_start = (unsigned long)__va(start);
+		initrd_end = (unsigned long)__va(start + size);
 	}
 	return 0;
 }
 early_param("initrd", early_initrd);
+#endif
 
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
 
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
 	struct memblock_region *reg;
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long max_dma32 = min;
+	unsigned long max_dma = min;
 
 	memset(zone_size, 0, sizeof(zone_size));
 
-#ifdef CONFIG_ZONE_DMA32
 	/* 4GB maximum for 32-bit only capable devices */
-	max_dma32 = max(min, min(max, MAX_DMA32_PFN));
-	zone_size[ZONE_DMA32] = max_dma32 - min;
-#endif
-	zone_size[ZONE_NORMAL] = max - max_dma32;
+	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+		max_dma = PFN_DOWN(max_zone_dma_phys());
+		zone_size[ZONE_DMA] = max_dma - min;
+	}
+	zone_size[ZONE_NORMAL] = max - max_dma;
 
 	memcpy(zhole_size, zone_size, sizeof(zhole_size));
 
@@ -91,15 +94,15 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
 		if (start >= max)
 			continue;
-#ifdef CONFIG_ZONE_DMA32
-		if (start < max_dma32) {
-			unsigned long dma_end = min(end, max_dma32);
-			zhole_size[ZONE_DMA32] -= dma_end - start;
+
+		if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
+			unsigned long dma_end = min(end, max_dma);
+			zhole_size[ZONE_DMA] -= dma_end - start;
 		}
-#endif
-		if (end > max_dma32) {
+
+		if (end > max_dma) {
 			unsigned long normal_end = min(end, max);
-			unsigned long normal_start = max(start, max_dma32);
+			unsigned long normal_start = max(start, max_dma);
 			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
 		}
 	}
@@ -132,18 +135,13 @@ static void arm64_memory_present(void)
 
 void __init arm64_memblock_init(void)
 {
-	u64 *reserve_map, base, size;
+	phys_addr_t dma_phys_limit = 0;
 
 	/* Register the kernel text, kernel data and initrd with memblock */
 	memblock_reserve(__pa(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
-	if (phys_initrd_size) {
-		memblock_reserve(phys_initrd_start, phys_initrd_size);
-
-		/* Now convert initrd to virtual addresses */
-		initrd_start = __phys_to_virt(phys_initrd_start);
-		initrd_end = initrd_start + phys_initrd_size;
-	}
+	if (initrd_start)
+		memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
 	/*
@@ -153,24 +151,12 @@ void __init arm64_memblock_init(void)
 	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
 	memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
 
-	/* Reserve the dtb region */
-	memblock_reserve(virt_to_phys(initial_boot_params),
-			 be32_to_cpu(initial_boot_params->totalsize));
+	early_init_fdt_scan_reserved_mem();
 
-	/*
-	 * Process the reserve map. This will probably overlap the initrd
-	 * and dtb locations which are already reserved, but overlapping
-	 * doesn't hurt anything
-	 */
-	reserve_map = ((void*)initial_boot_params) +
-			be32_to_cpu(initial_boot_params->off_mem_rsvmap);
-	while (1) {
-		base = be64_to_cpup(reserve_map++);
-		size = be64_to_cpup(reserve_map++);
-		if (!size)
-			break;
-		memblock_reserve(base, size);
-	}
+	/* 4GB maximum for 32-bit only capable devices */
+	if (IS_ENABLED(CONFIG_ZONE_DMA))
+		dma_phys_limit = max_zone_dma_phys();
+	dma_contiguous_reserve(dma_phys_limit);
 
 	memblock_allow_resize();
 	memblock_dump_all();
@@ -271,8 +257,6 @@ static void __init free_unused_memmap(void)
  */
 void __init mem_init(void)
 {
-	arm64_swiotlb_init();
-
 	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
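
For reference, the ZONE_DMA limit computed by max_zone_dma_phys() above is just min(4GB-aligned DRAM start + 4GB, DRAM end). Below is a minimal standalone C sketch of that arithmetic; the DRAM ranges used are hypothetical examples, not values taken from this patch, and the helper here only mirrors the kernel function rather than calling any kernel API.

/*
 * Standalone sketch of the max_zone_dma_phys() arithmetic from the patch:
 * keep only bits [63:32] of the DRAM base (round down to a 4GB boundary),
 * add 4GB, and clamp to the end of DRAM.
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL_63_32 0xffffffff00000000ULL /* equivalent of GENMASK_ULL(63, 32) */

static uint64_t max_zone_dma_phys(uint64_t dram_start, uint64_t dram_end)
{
	uint64_t offset = dram_start & GENMASK_ULL_63_32;
	uint64_t limit = offset + (1ULL << 32);

	/* Never report a limit beyond the end of DRAM */
	return limit < dram_end ? limit : dram_end;
}

int main(void)
{
	/* Hypothetical layouts: 4GB of DRAM at 0, and 1GB of DRAM above 512GB */
	uint64_t layouts[][2] = {
		{ 0x0ULL,          0x100000000ULL  },
		{ 0x8000000000ULL, 0x8040000000ULL },
	};

	for (unsigned i = 0; i < sizeof(layouts) / sizeof(layouts[0]); i++)
		printf("DRAM [%#llx, %#llx) -> ZONE_DMA limit %#llx\n",
		       (unsigned long long)layouts[i][0],
		       (unsigned long long)layouts[i][1],
		       (unsigned long long)max_zone_dma_phys(layouts[i][0],
							      layouts[i][1]));
	return 0;
}

With DRAM starting above 4GB, the limit is clamped to the end of DRAM, which is why the patch's comment notes that such systems are assumed to give 32-bit devices a DMA offset.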
