Diffstat (limited to 'arch/s390/mm/vmem.c')
-rw-r--r--  arch/s390/mm/vmem.c | 178
1 files changed, 110 insertions, 68 deletions
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 34c43f23b28..fe9012a49aa 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -1,6 +1,4 @@
 /*
- * arch/s390/mm/vmem.c
- *
  * Copyright IBM Corp. 2006
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
@@ -12,6 +10,7 @@
 #include <linux/list.h>
 #include <linux/hugetlb.h>
 #include <linux/slab.h>
+#include <linux/memblock.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
@@ -61,17 +60,18 @@ static inline pmd_t *vmem_pmd_alloc(void)
         return pmd;
 }
 
-static pte_t __ref *vmem_pte_alloc(void)
+static pte_t __ref *vmem_pte_alloc(unsigned long address)
 {
         pte_t *pte;
 
         if (slab_is_available())
-                pte = (pte_t *) page_table_alloc(&init_mm);
+                pte = (pte_t *) page_table_alloc(&init_mm, address);
         else
-                pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
+                pte = alloc_bootmem_align(PTRS_PER_PTE * sizeof(pte_t),
+                                          PTRS_PER_PTE * sizeof(pte_t));
         if (!pte)
                 return NULL;
-        clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+        clear_table((unsigned long *) pte, _PAGE_INVALID,
                     PTRS_PER_PTE * sizeof(pte_t));
         return pte;
 }
@@ -81,57 +81,65 @@ static pte_t __ref *vmem_pte_alloc(void)
  */
 static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
-        unsigned long address;
+        unsigned long end = start + size;
+        unsigned long address = start;
         pgd_t *pg_dir;
         pud_t *pu_dir;
         pmd_t *pm_dir;
         pte_t *pt_dir;
-        pte_t  pte;
         int ret = -ENOMEM;
 
-        for (address = start; address < start + size; address += PAGE_SIZE) {
+        while (address < end) {
                 pg_dir = pgd_offset_k(address);
                 if (pgd_none(*pg_dir)) {
                         pu_dir = vmem_pud_alloc();
                         if (!pu_dir)
                                 goto out;
-                        pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+                        pgd_populate(&init_mm, pg_dir, pu_dir);
                 }
-
                 pu_dir = pud_offset(pg_dir, address);
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+                if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
+                    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
+                        pud_val(*pu_dir) = __pa(address) |
+                                _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
+                                (ro ? _REGION_ENTRY_PROTECT : 0);
+                        address += PUD_SIZE;
+                        continue;
+                }
+#endif
                 if (pud_none(*pu_dir)) {
                         pm_dir = vmem_pmd_alloc();
                         if (!pm_dir)
                                 goto out;
-                        pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+                        pud_populate(&init_mm, pu_dir, pm_dir);
                 }
-
-                pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
                 pm_dir = pmd_offset(pu_dir, address);
-
-#ifdef __s390x__
-                if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
-                    (address + HPAGE_SIZE <= start + size) &&
-                    (address >= HPAGE_SIZE)) {
-                        pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
-                        pmd_val(*pm_dir) = pte_val(pte);
-                        address += HPAGE_SIZE - PAGE_SIZE;
+#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
+                if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
+                    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
+                        pmd_val(*pm_dir) = __pa(address) |
+                                _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+                                _SEGMENT_ENTRY_YOUNG |
+                                (ro ? _SEGMENT_ENTRY_PROTECT : 0);
+                        address += PMD_SIZE;
                         continue;
                 }
 #endif
                 if (pmd_none(*pm_dir)) {
-                        pt_dir = vmem_pte_alloc();
+                        pt_dir = vmem_pte_alloc(address);
                         if (!pt_dir)
                                 goto out;
-                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+                        pmd_populate(&init_mm, pm_dir, pt_dir);
                 }
 
                 pt_dir = pte_offset_kernel(pm_dir, address);
-                *pt_dir = pte;
+                pte_val(*pt_dir) = __pa(address) |
+                        pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
+                address += PAGE_SIZE;
         }
         ret = 0;
 out:
-        flush_tlb_kernel_range(start, start + size);
         return ret;
 }
 
@@ -141,58 +149,67 @@ out:
  */
 static void vmem_remove_range(unsigned long start, unsigned long size)
 {
-        unsigned long address;
+        unsigned long end = start + size;
+        unsigned long address = start;
         pgd_t *pg_dir;
         pud_t *pu_dir;
         pmd_t *pm_dir;
         pte_t *pt_dir;
         pte_t  pte;
 
-        pte_val(pte) = _PAGE_TYPE_EMPTY;
-        for (address = start; address < start + size; address += PAGE_SIZE) {
+        pte_val(pte) = _PAGE_INVALID;
+        while (address < end) {
                 pg_dir = pgd_offset_k(address);
+                if (pgd_none(*pg_dir)) {
+                        address += PGDIR_SIZE;
+                        continue;
+                }
                 pu_dir = pud_offset(pg_dir, address);
-                if (pud_none(*pu_dir))
+                if (pud_none(*pu_dir)) {
+                        address += PUD_SIZE;
+                        continue;
+                }
+                if (pud_large(*pu_dir)) {
+                        pud_clear(pu_dir);
+                        address += PUD_SIZE;
                         continue;
+                }
                 pm_dir = pmd_offset(pu_dir, address);
-                if (pmd_none(*pm_dir))
+                if (pmd_none(*pm_dir)) {
+                        address += PMD_SIZE;
                         continue;
-
-                if (pmd_huge(*pm_dir)) {
-                        pmd_clear_kernel(pm_dir);
-                        address += HPAGE_SIZE - PAGE_SIZE;
+                }
+                if (pmd_large(*pm_dir)) {
+                        pmd_clear(pm_dir);
+                        address += PMD_SIZE;
                         continue;
                 }
-
                 pt_dir = pte_offset_kernel(pm_dir, address);
                 *pt_dir = pte;
+                address += PAGE_SIZE;
         }
-        flush_tlb_kernel_range(start, start + size);
+        flush_tlb_kernel_range(start, end);
 }
 
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-        unsigned long address, start_addr, end_addr;
+        unsigned long address = start;
         pgd_t *pg_dir;
         pud_t *pu_dir;
         pmd_t *pm_dir;
         pte_t *pt_dir;
-        pte_t  pte;
         int ret = -ENOMEM;
 
-        start_addr = (unsigned long) start;
-        end_addr = (unsigned long) (start + nr);
-
-        for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+        for (address = start; address < end;) {
                 pg_dir = pgd_offset_k(address);
                 if (pgd_none(*pg_dir)) {
                         pu_dir = vmem_pud_alloc();
                         if (!pu_dir)
                                 goto out;
-                        pgd_populate_kernel(&init_mm, pg_dir, pu_dir);
+                        pgd_populate(&init_mm, pg_dir, pu_dir);
                 }
 
                 pu_dir = pud_offset(pg_dir, address);
@@ -200,15 +217,38 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                         pm_dir = vmem_pmd_alloc();
                         if (!pm_dir)
                                 goto out;
-                        pud_populate_kernel(&init_mm, pu_dir, pm_dir);
+                        pud_populate(&init_mm, pu_dir, pm_dir);
                 }
 
                 pm_dir = pmd_offset(pu_dir, address);
                 if (pmd_none(*pm_dir)) {
-                        pt_dir = vmem_pte_alloc();
+#ifdef CONFIG_64BIT
+                        /* Use 1MB frames for vmemmap if available. We always
+                         * use large frames even if they are only partially
+                         * used.
+                         * Otherwise we would have also page tables since
+                         * vmemmap_populate gets called for each section
+                         * separately. */
+                        if (MACHINE_HAS_EDAT1) {
+                                void *new_page;
+
+                                new_page = vmemmap_alloc_block(PMD_SIZE, node);
+                                if (!new_page)
+                                        goto out;
+                                pmd_val(*pm_dir) = __pa(new_page) |
+                                        _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
+                                        _SEGMENT_ENTRY_CO;
+                                address = (address + PMD_SIZE) & PMD_MASK;
+                                continue;
+                        }
+#endif
+                        pt_dir = vmem_pte_alloc(address);
                         if (!pt_dir)
                                 goto out;
-                        pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+                        pmd_populate(&init_mm, pm_dir, pt_dir);
+                } else if (pmd_large(*pm_dir)) {
+                        address = (address + PMD_SIZE) & PMD_MASK;
+                        continue;
                 }
 
                 pt_dir = pte_offset_kernel(pm_dir, address);
@@ -218,17 +258,21 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                         new_page =__pa(vmem_alloc_pages(0));
                         if (!new_page)
                                 goto out;
-                        pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
-                        *pt_dir = pte;
+                        pte_val(*pt_dir) =
+                                __pa(new_page) | pgprot_val(PAGE_KERNEL);
                 }
+                address += PAGE_SIZE;
         }
-        memset(start, 0, nr * sizeof(struct page));
+        memset((void *)start, 0, end - start);
         ret = 0;
 out:
-        flush_tlb_kernel_range(start_addr, end_addr);
         return ret;
 }
 
+void vmemmap_free(unsigned long start, unsigned long end)
+{
+}
+
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
@@ -329,14 +373,14 @@ out:
 void __init vmem_map_init(void)
 {
         unsigned long ro_start, ro_end;
-        unsigned long start, end;
-        int i;
-
-        ro_start = ((unsigned long)&_stext) & PAGE_MASK;
-        ro_end = PFN_ALIGN((unsigned long)&_eshared);
-        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-                start = memory_chunk[i].addr;
-                end = memory_chunk[i].addr + memory_chunk[i].size;
+        struct memblock_region *reg;
+        phys_addr_t start, end;
+
+        ro_start = PFN_ALIGN((unsigned long)&_stext);
+        ro_end = (unsigned long)&_eshared & PAGE_MASK;
+        for_each_memblock(memory, reg) {
+                start = reg->base;
+                end = reg->base + reg->size - 1;
                 if (start >= ro_end || end <= ro_start)
                         vmem_add_mem(start, end - start, 0);
                 else if (start >= ro_start && end <= ro_end)
@@ -356,23 +400,21 @@ void __init vmem_map_init(void)
 }
 
 /*
- * Convert memory chunk array to a memory segment list so there is a single
- * list that contains both r/w memory and shared memory segments.
+ * Convert memblock.memory to a memory segment list so there is a single
+ * list that contains all memory segments.
  */
 static int __init vmem_convert_memory_chunk(void)
 {
+        struct memblock_region *reg;
         struct memory_segment *seg;
-        int i;
 
         mutex_lock(&vmem_mutex);
-        for (i = 0; i < MEMORY_CHUNKS; i++) {
-                if (!memory_chunk[i].size)
-                        continue;
+        for_each_memblock(memory, reg) {
                 seg = kzalloc(sizeof(*seg), GFP_KERNEL);
                 if (!seg)
                         panic("Out of memory...\n");
-                seg->start = memory_chunk[i].addr;
-                seg->size = memory_chunk[i].size;
+                seg->start = reg->base;
+                seg->size = reg->size;
                 insert_memory_segment(seg);
         }
         mutex_unlock(&vmem_mutex);
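
Note on the vmem_add_mem() hunks: the decision to back the next chunk of the range with a large frame boils down to one test, repeated for 2 GB regions (EDAT2) and 1 MB segments (EDAT1): the current address must be non-zero, aligned to the frame size, and a whole frame must still fit before the end of the range. The following is a minimal userspace sketch of that walk, not kernel code; the constants and the fits_large() helper are illustrative assumptions (it also assumes a 64-bit unsigned long).

/*
 * Sketch of the large-frame eligibility test from vmem_add_mem():
 * aligned, non-zero, and a whole frame fits before 'end'.
 */
#include <stdio.h>

#define PAGE_SIZE       (1UL << 12)             /* 4 KB page */
#define PMD_SIZE        (1UL << 20)             /* 1 MB segment (EDAT1) */
#define PMD_MASK        (~(PMD_SIZE - 1))
#define PUD_SIZE        (1UL << 31)             /* 2 GB region (EDAT2) */
#define PUD_MASK        (~(PUD_SIZE - 1))

/* same shape as the test in the patch: non-zero, aligned, and it fits */
static int fits_large(unsigned long address, unsigned long end,
                      unsigned long size, unsigned long mask)
{
        return address && !(address & ~mask) && address + size <= end;
}

int main(void)
{
        unsigned long address = 1UL << 20;      /* start at 1 MB */
        unsigned long end = 1UL << 32;          /* map up to 4 GB */
        unsigned long puds = 0, pmds = 0, ptes = 0;

        /* always take the largest mapping that fits at the current address */
        while (address < end) {
                if (fits_large(address, end, PUD_SIZE, PUD_MASK)) {
                        puds++;
                        address += PUD_SIZE;
                } else if (fits_large(address, end, PMD_SIZE, PMD_MASK)) {
                        pmds++;
                        address += PMD_SIZE;
                } else {
                        ptes++;
                        address += PAGE_SIZE;
                }
        }
        printf("2GB regions: %lu, 1MB segments: %lu, 4KB pages: %lu\n",
               puds, pmds, ptes);
        return 0;
}

For the 1 MB..4 GB range above this prints 2047 segments up to the 2 GB boundary and a single 2 GB region after it. The leading "address &&" mirrors the patch, which never maps address zero with a large frame.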

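The last two hunks replace the fixed memory_chunk[MEMORY_CHUNKS] array (scanned by index, skipping empty slots) with a walk over memblock's region list, so vmem_map_init() and the segment-list conversion iterate the same data. A self-contained sketch of that pattern follows; struct memblock_region is cut down to the two fields the patch reads, and for_each_region() is a stand-in for the kernel's for_each_memblock(memory, reg) iterator, not the real API.

/*
 * Userspace sketch of the memblock conversion: iterate a region list
 * instead of a fixed-size chunk array with empty slots.
 */
#include <stdio.h>

struct memblock_region {
        unsigned long long base;
        unsigned long long size;
};

/* example layout: two disjoint memory regions */
static struct memblock_region memory[] = {
        { .base = 0x000000000ULL, .size = 0x20000000ULL },      /* 512 MB */
        { .base = 0x100000000ULL, .size = 0x40000000ULL },      /* 1 GB */
};

/* stand-in for the kernel's for_each_memblock(memory, reg) */
#define for_each_region(reg) \
        for ((reg) = memory; \
             (reg) < memory + sizeof(memory) / sizeof(memory[0]); (reg)++)

int main(void)
{
        struct memblock_region *reg;

        /* mirrors vmem_convert_memory_chunk(): one segment per region */
        for_each_region(reg)
                printf("segment: start=%#llx size=%#llx\n",
                       reg->base, reg->size);
        return 0;
}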