Diffstat (limited to 'arch/ia64/mm/init.c')
-rw-r--r--   arch/ia64/mm/init.c   411
1 file changed, 236 insertions(+), 175 deletions(-)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b38b6d213c1..25c350264a4 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -4,13 +4,13 @@
  * Copyright (C) 1998-2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/efi.h>
 #include <linux/elf.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
 #include <linux/module.h>
@@ -20,10 +20,9 @@
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
+#include <linux/kexec.h>
 
-#include <asm/a.out.h>
 #include <asm/dma.h>
-#include <asm/ia32.h>
 #include <asm/io.h>
 #include <asm/machvec.h>
 #include <asm/numa.h>
@@ -31,24 +30,19 @@
 #include <asm/pgalloc.h>
 #include <asm/sal.h>
 #include <asm/sections.h>
-#include <asm/system.h>
 #include <asm/tlb.h>
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/mca.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
-DEFINE_PER_CPU(long, __pgtable_quicklist_size);
+#include <asm/paravirt.h>
 
 extern void ia64_tlb_init (void);
 
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
@@ -56,81 +50,49 @@ EXPORT_SYMBOL(vmem_map);
 struct page *zero_page_memmap_ptr;	/* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);
 
-#define MIN_PGT_PAGES			25UL
-#define MAX_PGT_FREES_PER_PASS		16L
-#define PGT_FRACTION_OF_NODE_MEM	16
-
-static inline long
-max_pgt_pages(void)
-{
-	u64 node_free_pages, max_pgt_pages;
-
-#ifndef CONFIG_NUMA
-	node_free_pages = nr_free_pages();
-#else
-	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
-#endif
-	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
-	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
-	return max_pgt_pages;
-}
-
-static inline long
-min_pages_to_free(void)
-{
-	long pages_to_free;
-
-	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
-	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
-	return pages_to_free;
-}
-
 void
-check_pgt_cache(void)
-{
-	long pages_to_free;
-
-	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
-		return;
-
-	preempt_disable();
-	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
-		while (pages_to_free--) {
-			free_page((unsigned long)pgtable_quicklist_alloc());
-		}
-		preempt_enable();
-		preempt_disable();
-	}
-	preempt_enable();
-}
-
-void
-lazy_mmu_prot_update (pte_t pte)
+__ia64_sync_icache_dcache (pte_t pte)
 {
 	unsigned long addr;
 	struct page *page;
 
-	if (!pte_exec(pte))
-		return;				/* not an executable page... */
-
 	page = pte_page(pte);
 	addr = (unsigned long) page_address(page);
 
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + PAGE_SIZE);
+	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+	unsigned long pg_addr, end;
+
+	pg_addr = PAGE_ALIGN((unsigned long) addr);
+	end = (unsigned long) addr + size;
+	while (pg_addr + PAGE_SIZE <= end) {
+		struct page *page = virt_to_page(pg_addr);
+		set_bit(PG_arch_1, &page->flags);
+		pg_addr += PAGE_SIZE;
+	}
+}
+
 inline void
 ia64_set_rbs_bot (void)
 {
-	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
 
 	if (stack_size > MAX_USER_STACK_SIZE)
 		stack_size = MAX_USER_STACK_SIZE;
-	current->thread.rbs_bot = STACK_TOP - stack_size;
+	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
 }
 
 /*
@@ -151,14 +113,14 @@ ia64_init_addr_space (void)
 	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
-	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
-		memset(vma, 0, sizeof(*vma));
+		INIT_LIST_HEAD(&vma->anon_vma_chain);
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
-		vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 		down_write(&current->mm->mmap_sem);
 		if (insert_vm_struct(current->mm, vma)) {
 			up_write(&current->mm->mmap_sem);
@@ -170,13 +132,14 @@ ia64_init_addr_space (void)
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
-		vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
-			memset(vma, 0, sizeof(*vma));
+			INIT_LIST_HEAD(&vma->anon_vma_chain);
 			vma->vm_mm = current->mm;
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
-			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
+			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
+					VM_DONTEXPAND | VM_DONTDUMP;
 			down_write(&current->mm->mmap_sem);
 			if (insert_vm_struct(current->mm, vma)) {
 				up_write(&current->mm->mmap_sem);
@@ -191,25 +154,13 @@ ia64_init_addr_space (void)
 void
 free_initmem (void)
 {
-	unsigned long addr, eaddr;
-
-	addr = (unsigned long) ia64_imva(__init_begin);
-	eaddr = (unsigned long) ia64_imva(__init_end);
-	while (addr < eaddr) {
-		ClearPageReserved(virt_to_page(addr));
-		set_page_count(virt_to_page(addr), 1);
-		free_page(addr);
-		++totalram_pages;
-		addr += PAGE_SIZE;
-	}
-	printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
-	       (__init_end - __init_begin) >> 10);
+	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
+			   -1, "unused kernel");
 }
 
-void
+void __init
 free_initrd_mem (unsigned long start, unsigned long end)
 {
-	struct page *page;
 	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
@@ -250,18 +201,14 @@ free_initrd_mem (unsigned long start, unsigned long end)
 	for (; start < end; start += PAGE_SIZE) {
 		if (!virt_addr_valid(start))
 			continue;
-		page = virt_to_page(start);
-		ClearPageReserved(page);
-		set_page_count(page, 1);
-		free_page(start);
-		++totalram_pages;
+		free_reserved_page(virt_to_page(start));
 	}
 }
 
 /*
  * This installs a clean page in the kernel's page table.
 */
-struct page *
+static struct page * __init
 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 {
 	pgd_t *pgd;
@@ -294,9 +241,10 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 	return page;
 }
 
-static void
+static void __init
 setup_gate (void)
 {
+	void *gate_section;
 	struct page *page;
 
 	/*
@@ -304,10 +252,11 @@ setup_gate (void)
 	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
-	page = virt_to_page(ia64_imva(__start_gate_section));
+	gate_section = paravirt_get_gate_section();
+	page = virt_to_page(ia64_imva(gate_section));
 	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
 #ifdef HAVE_BUGGY_SEGREL
-	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
+	page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
 	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
 #else
 	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
@@ -329,11 +278,10 @@ setup_gate (void)
 	ia64_patch_gate();
 }
 
-void __devinit
-ia64_mmu_init (void *my_cpu_data)
+void ia64_mmu_init(void *my_cpu_data)
 {
-	unsigned long psr, pta, impl_va_bits;
-	extern void __devinit tlb_init (void);
+	unsigned long pta, impl_va_bits;
+	extern void tlb_init(void);
 
 #ifdef CONFIG_DISABLE_VHPT
 #	define VHPT_ENABLE_BIT	0
@@ -341,15 +289,6 @@ ia64_mmu_init (void *my_cpu_data)
 #	define VHPT_ENABLE_BIT	1
 #endif
 
-	/* Pin mapping for percpu area into TLB */
-	psr = ia64_clear_ic();
-	ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
-		 pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
-		 PERCPU_PAGE_SHIFT);
-
-	ia64_set_psr(psr);
-	ia64_srlz_i();
-
 	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
@@ -410,9 +349,61 @@ ia64_mmu_init (void *my_cpu_data)
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+	unsigned long end_address, hole_next_pfn;
+	unsigned long stop_address;
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+	end_address = PAGE_ALIGN(end_address);
+	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];
+
+	do {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset_k(end_address);
+		if (pgd_none(*pgd)) {
+			end_address += PGDIR_SIZE;
+			continue;
+		}
 
-int
-create_mem_map_page_table (u64 start, u64 end, void *arg)
+		pud = pud_offset(pgd, end_address);
+		if (pud_none(*pud)) {
+			end_address += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, end_address);
+		if (pmd_none(*pmd)) {
+			end_address += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+		if (pte_none(*pte)) {
+			end_address += PAGE_SIZE;
+			pte++;
+			if ((end_address < stop_address) &&
+			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+				goto retry_pte;
+			continue;
+		}
+		/* Found next valid vmem_map page */
+		break;
+	} while (end_address < stop_address);
+
+	end_address = min(end_address, stop_address);
+	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+	hole_next_pfn = end_address / sizeof(struct page);
+	return hole_next_pfn - pgdat->node_start_pfn;
+}
+
+int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
 {
 	unsigned long address, start_page, end_page;
 	struct page *map_start, *map_end;
@@ -457,8 +448,8 @@ struct memmap_init_callback_data {
 	unsigned long zone;
 };
 
-static int
-virtual_memmap_init (u64 start, u64 end, void *arg)
+static int __meminit
+virtual_memmap_init(u64 start, u64 end, void *arg)
 {
 	struct memmap_init_callback_data *args;
 	struct page *map_start, *map_end;
@@ -483,16 +474,17 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
 
 	if (map_start < map_end)
 		memmap_init_zone((unsigned long)(map_end - map_start),
-				 args->nid, args->zone, page_to_pfn(map_start));
+				 args->nid, args->zone, page_to_pfn(map_start),
+				 MEMMAP_EARLY);
 	return 0;
 }
 
-void
+void __meminit
 memmap_init (unsigned long size, int nid, unsigned long zone,
 	     unsigned long start_pfn)
 {
 	if (!vmem_map)
-		memmap_init_zone(size, nid, zone, start_pfn);
+		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
 	else {
 		struct page *start;
 		struct memmap_init_callback_data args;
@@ -519,8 +511,7 @@ ia64_pfn_valid (unsigned long pfn)
 }
 EXPORT_SYMBOL(ia64_pfn_valid);
 
-int
-find_largest_hole (u64 start, u64 end, void *arg)
+int __init find_largest_hole(u64 start, u64 end, void *arg)
 {
 	u64 *max_gap = arg;
 
@@ -533,18 +524,38 @@ find_largest_hole (u64 start, u64 end, void *arg)
 	last_end = end;
 	return 0;
 }
+
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
-static int
-count_reserved_pages (u64 start, u64 end, void *arg)
+int __init register_active_ranges(u64 start, u64 len, int nid)
 {
-	unsigned long num_reserved = 0;
-	unsigned long *count = arg;
+	u64 end = start + len;
 
-	for (; start < end; start += PAGE_SIZE)
-		if (PageReserved(virt_to_page(start)))
-			++num_reserved;
-	*count += num_reserved;
+#ifdef CONFIG_KEXEC
+	if (start > crashk_res.start && start < crashk_res.end)
+		start = crashk_res.end;
+	if (end > crashk_res.start && end < crashk_res.end)
+		end = crashk_res.start;
+#endif
+
+	if (start < end)
+		memblock_add_node(__pa(start), end - start, nid);
+	return 0;
+}
+
+int
+find_max_min_low_pfn (u64 start, u64 end, void *arg)
+{
+	unsigned long pfn_start, pfn_end;
+#ifdef CONFIG_FLATMEM
+	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
+	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
+#else
+	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
+	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
+#endif
+	min_low_pfn = min(min_low_pfn, pfn_start);
+	max_low_pfn = max(max_low_pfn, pfn_end);
 	return 0;
 }
 
@@ -556,7 +567,7 @@ count_reserved_pages (u64 start, u64 end, void *arg)
  * purposes.
 */
-static int nolwsys;
+static int nolwsys __initdata;
 
 static int __init
 nolwsys_setup (char *s)
@@ -567,13 +578,10 @@ nolwsys_setup (char *s)
 
 __setup("nolwsys", nolwsys_setup);
 
-void
+void __init
 mem_init (void)
 {
-	long reserved_pages, codesize, datasize, initsize;
-	pg_data_t *pgdat;
 	int i;
-	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
 
 	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
 	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
@@ -589,33 +597,13 @@ mem_init (void)
 #endif
 
 #ifdef CONFIG_FLATMEM
-	if (!mem_map)
-		BUG();
-	max_mapnr = max_low_pfn;
+	BUG_ON(!mem_map);
 #endif
+	set_max_mapnr(max_low_pfn);
 	high_memory = __va(max_low_pfn * PAGE_SIZE);
-
-	kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
-	kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
-	kclist_add(&kcore_kernel, _stext, _end - _stext);
-
-	for_each_pgdat(pgdat)
-		if (pgdat->bdata->node_bootmem_map)
-			totalram_pages += free_all_bootmem_node(pgdat);
-
-	reserved_pages = 0;
-	efi_memmap_walk(count_reserved_pages, &reserved_pages);
-
-	codesize = (unsigned long) _etext - (unsigned long) _stext;
-	datasize = (unsigned long) _edata - (unsigned long) _etext;
-	initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
-
-	printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
-	       "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
-	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
-	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
-
+	free_all_bootmem();
+	mem_init_print_info(NULL);
 
 	/*
	 * For fsyscall entrpoints with no light-weight handler, use the ordinary
@@ -623,30 +611,17 @@ mem_init (void)
 	 * code can tell them apart.
	 */
 	for (i = 0; i < NR_syscalls; ++i) {
-		extern unsigned long fsyscall_table[NR_syscalls];
 		extern unsigned long sys_call_table[NR_syscalls];
+		unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
 
 		if (!fsyscall_table[i] || nolwsys)
 			fsyscall_table[i] = sys_call_table[i] | 1;
 	}
 	setup_gate();
-
-#ifdef CONFIG_IA32_SUPPORT
-	ia32_mem_init();
-#endif
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
-	ClearPageReserved(page);
-	set_page_count(page, 1);
-	__free_page(page);
-	totalram_pages++;
-	num_physpages++;
-}
-
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
 	pg_data_t *pgdat;
 	struct zone *zone;
@@ -654,20 +629,106 @@ int add_memory(u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	pgdat = NODE_DATA(0);
+	pgdat = NODE_DATA(nid);
 
 	zone = pgdat->node_zones + ZONE_NORMAL;
-	ret = __add_pages(zone, start_pfn, nr_pages);
+	ret = __add_pages(nid, zone, start_pfn, nr_pages);
 
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
-		       __FUNCTION__, ret);
+		       __func__, ret);
 
 	return ret;
 }
 
-int remove_memory(u64 start, u64 size)
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
 {
-	return -EINVAL;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	struct zone *zone;
+	int ret;
+
+	zone = page_zone(pfn_to_page(start_pfn));
+	ret = __remove_pages(zone, start_pfn, nr_pages);
+	if (ret)
+		pr_warn("%s: Problem encountered in __remove_pages() as"
+			" ret=%d\n", __func__, ret);
+
+	return ret;
}
 #endif
+#endif
+
+/*
+ * Even when CONFIG_IA32_SUPPORT is not enabled it is
+ * useful to have the Linux/x86 domain registered to
+ * avoid an attempted module load when emulators call
+ * personality(PER_LINUX32). This saves several milliseconds
+ * on each such call.
+ */
+static struct exec_domain ia32_exec_domain;
+
+static int __init
+per_linux32_init(void)
+{
+	ia32_exec_domain.name = "Linux/x86";
+	ia32_exec_domain.handler = NULL;
+	ia32_exec_domain.pers_low = PER_LINUX32;
+	ia32_exec_domain.pers_high = PER_LINUX32;
+	ia32_exec_domain.signal_map = default_exec_domain.signal_map;
+	ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+	register_exec_domain(&ia32_exec_domain);
+
+	return 0;
+}
+
+__initcall(per_linux32_init);
+
+/**
+ * show_mem - give short summary of memory stats
+ *
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
+ */
+void show_mem(unsigned int filter)
+{
+	int total_reserved = 0;
+	unsigned long total_present = 0;
+	pg_data_t *pgdat;
+
+	printk(KERN_INFO "Mem-info:\n");
+	show_free_areas(filter);
+	printk(KERN_INFO "Node memory in pages:\n");
+	for_each_online_pgdat(pgdat) {
+		unsigned long present;
+		unsigned long flags;
+		int reserved = 0;
+		int nid = pgdat->node_id;
+		int zoneid;
+
+		if (skip_free_areas_node(filter, nid))
+			continue;
+		pgdat_resize_lock(pgdat, &flags);
+
+		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+			struct zone *zone = &pgdat->node_zones[zoneid];
+			if (!populated_zone(zone))
+				continue;
+
+			reserved += zone->present_pages - zone->managed_pages;
+		}
+		present = pgdat->node_present_pages;
+
+		pgdat_resize_unlock(pgdat, &flags);
+		total_present += present;
+		total_reserved += reserved;
+		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, ",
+		       nid, present, reserved);
+	}
+	printk(KERN_INFO "%ld pages of RAM\n", total_present);
+	printk(KERN_INFO "%d reserved pages\n", total_reserved);
+	printk(KERN_INFO "Total of %ld pages in page table cache\n",
+	       quicklist_total_size());
+	printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
+}
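
A small userspace model may help illustrate the lazy i-cache flush protocol this diff touches: __ia64_sync_icache_dcache() flushes a page's i-cache the first time the page is mapped executable and records that fact in PG_arch_1, while the new dma_mark_clean() pre-sets the bit for pages fully written by (i-cache coherent) DMA so the flush can be skipped later. The sketch below is illustrative only, not kernel code: fake_page, sync_icache_dcache() and mark_clean_after_dma() are invented stand-ins for struct page, the PG_arch_1 bit operations and flush_icache_range(), and the real kernel uses atomic bitops.

#include <stdio.h>
#include <stdbool.h>

struct fake_page {
	bool icache_clean;		/* stands in for the PG_arch_1 page flag */
};

/* Cf. __ia64_sync_icache_dcache(): called when a page is first mapped
 * executable; the flush is skipped if the page is already marked clean. */
static void sync_icache_dcache(struct fake_page *pg)
{
	if (pg->icache_clean)
		return;			/* i-cache already coherent with d-cache */
	printf("flush_icache_range() for page %p\n", (void *)pg);
	pg->icache_clean = true;	/* mark page as clean */
}

/* Cf. dma_mark_clean(): DMA is i-cache coherent on ia64, so a page whose
 * contents were fully written by DMA can skip the flush later on. */
static void mark_clean_after_dma(struct fake_page *pg)
{
	pg->icache_clean = true;
}

int main(void)
{
	struct fake_page dma_page = { false };
	struct fake_page cpu_page = { false };

	mark_clean_after_dma(&dma_page);	/* filled by DMA */
	sync_icache_dcache(&dma_page);		/* no flush needed */
	sync_icache_dcache(&cpu_page);		/* flush happens once */
	sync_icache_dcache(&cpu_page);		/* second mapping: skipped */
	return 0;
}

The invariant being modeled: the expensive flush happens at most once per page, and not at all for pages whose contents arrived via coherent DMA.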

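Likewise, the crashkernel clamping added in register_active_ranges() is easy to model in isolation: each memory range reported by EFI is trimmed against the reserved crash-kernel window before being handed to memblock_add_node(). Below is a standalone sketch with invented types and addresses; range_resource, clamp_to_crashkernel() and the crashk_res initializer are hypothetical, and only the two if tests mirror the kernel logic.

#include <stdio.h>
#include <stdint.h>

struct range_resource {			/* stands in for the kernel's struct resource */
	uint64_t start, end;
};

/* Hypothetical crash-kernel reservation; in the kernel, crashk_res is set up
 * by the kexec code. */
static const struct range_resource crashk_res = { 0x02000000, 0x04000000 };

/* Mirror of the two tests in register_active_ranges(): trim [*start, *end)
 * so it does not overlap the reserved window.  A range that comes back with
 * *start >= *end was swallowed entirely and should be ignored. */
static void clamp_to_crashkernel(uint64_t *start, uint64_t *end)
{
	if (*start > crashk_res.start && *start < crashk_res.end)
		*start = crashk_res.end;	/* starts inside window: push up */
	if (*end > crashk_res.start && *end < crashk_res.end)
		*end = crashk_res.start;	/* ends inside window: pull down */
}

int main(void)
{
	uint64_t s = 0x01000000, e = 0x03000000;	/* straddles the window start */

	clamp_to_crashkernel(&s, &e);
	if (s < e)
		printf("memblock_add_node([%#llx, %#llx))\n",
		       (unsigned long long)s, (unsigned long long)e);
	return 0;
}

Each test handles one direction of overlap: a range whose start falls inside the reserved window is pushed up past it, and a range whose end falls inside is pulled back below it. As in the kernel code shown above, no attempt is made to split a range that would entirely contain the window.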