Diffstat (limited to 'arch/ia64/mm/init.c')
-rw-r--r-- arch/ia64/mm/init.c | 240
1 file changed, 118 insertions(+), 122 deletions(-)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index a4ca657c72c..25c350264a4 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -10,6 +10,7 @@
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
@@ -21,9 +22,7 @@
#include <linux/bitops.h>
#include <linux/kexec.h>
-#include <asm/a.out.h>
#include <asm/dma.h>
-#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
@@ -31,21 +30,19 @@
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
-#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+#include <asm/paravirt.h>
extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif
@@ -58,7 +55,6 @@ __ia64_sync_icache_dcache (pte_t pte)
{
unsigned long addr;
struct page *page;
- unsigned long order;
page = pte_page(pte);
addr = (unsigned long) page_address(page);
@@ -66,12 +62,7 @@ __ia64_sync_icache_dcache (pte_t pte)
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
- if (PageCompound(page)) {
- order = compound_order(page);
- flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
- }
- else
- flush_icache_range(addr, addr + PAGE_SIZE);
+ flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
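
The removed PageCompound() branch is redundant: compound_order() returns 0 for an ordinary page, so the single shift expression covers both cases. A minimal sketch of the equivalence, using the kernel APIs named above:

    /* compound_order() is 0 for a plain page, N for an order-N compound page */
    unsigned long len = PAGE_SIZE << compound_order(page);
    flush_icache_range(addr, addr + len);   /* order 0 flushes exactly PAGE_SIZE */
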
@@ -97,7 +88,7 @@ dma_mark_clean(void *addr, size_t size)
inline void
ia64_set_rbs_bot (void)
{
- unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+ unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
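
rlimit_max() is the generic accessor that replaced open-coded digging through signal->rlim[]. A sketch of roughly what it expands to, assuming the include/linux/sched.h helpers of this era:

    static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
                                                unsigned int limit)
    {
            /* read the hard limit once, without tearing */
            return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
    }

    static inline unsigned long rlimit_max(unsigned int limit)
    {
            return task_rlimit_max(current, limit);   /* current task's hard limit */
    }
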
@@ -124,6 +115,7 @@ ia64_init_addr_space (void)
*/
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -142,10 +134,12 @@ ia64_init_addr_space (void)
if (!(current->personality & MMAP_PAGE_ZERO)) {
vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
- vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
+ vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
+ VM_DONTEXPAND | VM_DONTDUMP;
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
@@ -160,25 +154,13 @@ ia64_init_addr_space (void)
void
free_initmem (void)
{
- unsigned long addr, eaddr;
-
- addr = (unsigned long) ia64_imva(__init_begin);
- eaddr = (unsigned long) ia64_imva(__init_end);
- while (addr < eaddr) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- ++totalram_pages;
- addr += PAGE_SIZE;
- }
- printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
- (__init_end - __init_begin) >> 10);
+ free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
+ -1, "unused kernel");
}
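
free_reserved_area() centralizes the ClearPageReserved/init_page_count/free_page loop that every architecture used to open-code, as in the lines removed above. A sketch of what the generic helper does, assuming it matches the mm/page_alloc.c version of this era:

    unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
    {
            void *pos;
            unsigned long pages = 0;

            start = (void *)PAGE_ALIGN((unsigned long)start);
            end = (void *)((unsigned long)end & PAGE_MASK);
            for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
                    if ((unsigned int)poison <= 0xFF)   /* poison == -1 skips this */
                            memset(pos, poison, PAGE_SIZE);
                    free_reserved_page(virt_to_page(pos));
            }
            if (pages && s)
                    pr_info("Freeing %s memory: %ldK\n",
                            s, pages << (PAGE_SHIFT - 10));
            return pages;
    }
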
void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
- struct page *page;
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
@@ -219,11 +201,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
for (; start < end; start += PAGE_SIZE) {
if (!virt_addr_valid(start))
continue;
- page = virt_to_page(start);
- ClearPageReserved(page);
- init_page_count(page);
- free_page(start);
- ++totalram_pages;
+ free_reserved_page(virt_to_page(start));
}
}
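
free_reserved_page() is the per-page building block behind both conversions in this patch. A collapsed sketch, assuming the include/linux/mm.h definition of this era:

    static inline void free_reserved_page(struct page *page)
    {
            ClearPageReserved(page);             /* no longer a reserved page      */
            init_page_count(page);               /* refcount back to 1             */
            __free_page(page);                   /* hand it to the buddy allocator */
            adjust_managed_page_count(page, 1);  /* totalram/managed accounting    */
    }
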
@@ -266,6 +244,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
static void __init
setup_gate (void)
{
+ void *gate_section;
struct page *page;
/*
@@ -273,10 +252,11 @@ setup_gate (void)
* headers etc. and once as an execute-only page to enable
* privilege-promotion via "epc":
*/
- page = virt_to_page(ia64_imva(__start_gate_section));
+ gate_section = paravirt_get_gate_section();
+ page = virt_to_page(ia64_imva(gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
- page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
+ page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
@@ -298,11 +278,10 @@ setup_gate (void)
ia64_patch_gate();
}
-void __devinit
-ia64_mmu_init (void *my_cpu_data)
+void ia64_mmu_init(void *my_cpu_data)
{
unsigned long pta, impl_va_bits;
- extern void __devinit tlb_init (void);
+ extern void tlb_init(void);
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
@@ -378,9 +357,7 @@ int vmemmap_find_next_valid_pfn(int node, int i)
end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
end_address = PAGE_ALIGN(end_address);
-
- stop_address = (unsigned long) &vmem_map[
- pgdat->node_start_pfn + pgdat->node_spanned_pages];
+ stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];
do {
pgd_t *pgd;
@@ -426,8 +403,7 @@ retry_pte:
return hole_next_pfn - pgdat->node_start_pfn;
}
-int __init
-create_mem_map_page_table (u64 start, u64 end, void *arg)
+int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
unsigned long address, start_page, end_page;
struct page *map_start, *map_end;
@@ -473,7 +449,7 @@ struct memmap_init_callback_data {
};
static int __meminit
-virtual_memmap_init (u64 start, u64 end, void *arg)
+virtual_memmap_init(u64 start, u64 end, void *arg)
{
struct memmap_init_callback_data *args;
struct page *map_start, *map_end;
@@ -535,8 +511,7 @@ ia64_pfn_valid (unsigned long pfn)
}
EXPORT_SYMBOL(ia64_pfn_valid);
-int __init
-find_largest_hole (u64 start, u64 end, void *arg)
+int __init find_largest_hole(u64 start, u64 end, void *arg)
{
u64 *max_gap = arg;
@@ -552,13 +527,10 @@ find_largest_hole (u64 start, u64 end, void *arg)
#endif /* CONFIG_VIRTUAL_MEM_MAP */
-int __init
-register_active_ranges(u64 start, u64 end, void *arg)
+int __init register_active_ranges(u64 start, u64 len, int nid)
{
- int nid = paddr_to_nid(__pa(start));
+ u64 end = start + len;
- if (nid < 0)
- nid = 0;
#ifdef CONFIG_KEXEC
if (start > crashk_res.start && start < crashk_res.end)
start = crashk_res.end;
@@ -567,26 +539,12 @@ register_active_ranges(u64 start, u64 end, void *arg)
#endif
if (start < end)
- add_active_range(nid, __pa(start) >> PAGE_SHIFT,
- __pa(end) >> PAGE_SHIFT);
- return 0;
-}
-
-static int __init
-count_reserved_pages (u64 start, u64 end, void *arg)
-{
- unsigned long num_reserved = 0;
- unsigned long *count = arg;
-
- for (; start < end; start += PAGE_SIZE)
- if (PageReserved(virt_to_page(start)))
- ++num_reserved;
- *count += num_reserved;
+ memblock_add_node(__pa(start), end - start, nid);
return 0;
}
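
memblock_add_node() supersedes add_active_range(): rather than filling the old early_node_map[] with pfn ranges, the physical range is recorded in memblock.memory tagged with its node id, and the core later builds the node and zone maps from that. The memblock signature, for reference (part of the existing memblock API, not introduced by this patch):

    int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
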
int
-find_max_min_low_pfn (unsigned long start, unsigned long end, void *arg)
+find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
@@ -623,10 +581,7 @@ __setup("nolwsys", nolwsys_setup);
void __init
mem_init (void)
{
- long reserved_pages, codesize, datasize, initsize;
- pg_data_t *pgdat;
int i;
- static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
@@ -642,33 +597,13 @@ mem_init (void)
#endif
#ifdef CONFIG_FLATMEM
- if (!mem_map)
- BUG();
- max_mapnr = max_low_pfn;
+ BUG_ON(!mem_map);
#endif
+ set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
-
- kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
- kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
- kclist_add(&kcore_kernel, _stext, _end - _stext);
-
- for_each_online_pgdat(pgdat)
- if (pgdat->bdata->node_bootmem_map)
- totalram_pages += free_all_bootmem_node(pgdat);
-
- reserved_pages = 0;
- efi_memmap_walk(count_reserved_pages, &reserved_pages);
-
- codesize = (unsigned long) _etext - (unsigned long) _stext;
- datasize = (unsigned long) _edata - (unsigned long) _etext;
- initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
-
- printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
- "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
- num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
- reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
-
+ free_all_bootmem();
+ mem_init_print_info(NULL);
/*
* For fsyscall entry points with no light-weight handler, use the ordinary
@@ -676,29 +611,16 @@ mem_init (void)
* (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
* code can tell them apart.
*/
for (i = 0; i < NR_syscalls; ++i) {
- extern unsigned long fsyscall_table[NR_syscalls];
extern unsigned long sys_call_table[NR_syscalls];
+ unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1;
}
setup_gate();
-
-#ifdef CONFIG_IA32_SUPPORT
- ia32_mem_init();
-#endif
}
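
In the fsyscall loop near the end of mem_init(), bit 0 of a table entry is free to act as a marker because ia64 entry points are 16-byte bundle aligned. A hypothetical C rendering of the check the gate entry code performs (the real consumer is assembly in gate.S):

    unsigned long entry = fsyscall_table[i];

    if (entry & 1)
            entry &= ~1UL;   /* heavy-weight syscall: strip marker, take slow path */
    /* else: branch straight to the light-weight handler */
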
#ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- num_physpages++;
-}
-
int arch_add_memory(int nid, u64 start, u64 size)
{
pg_data_t *pgdat;
@@ -710,7 +632,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_NORMAL;
- ret = __add_pages(zone, start_pfn, nr_pages);
+ ret = __add_pages(nid, zone, start_pfn, nr_pages);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
@@ -718,21 +640,95 @@ int arch_add_memory(int nid, u64 start, u64 size)
return ret;
}
+
#ifdef CONFIG_MEMORY_HOTREMOVE
-int remove_memory(u64 start, u64 size)
+int arch_remove_memory(u64 start, u64 size)
{
- unsigned long start_pfn, end_pfn;
- unsigned long timeout = 120 * HZ;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct zone *zone;
int ret;
- start_pfn = start >> PAGE_SHIFT;
- end_pfn = start_pfn + (size >> PAGE_SHIFT);
- ret = offline_pages(start_pfn, end_pfn, timeout);
+
+ zone = page_zone(pfn_to_page(start_pfn));
+ ret = __remove_pages(zone, start_pfn, nr_pages);
if (ret)
- goto out;
- /* we can free mem_map at this point */
-out:
+ pr_warn("%s: Problem encountered in __remove_pages() as"
+ " ret=%d\n", __func__, ret);
+
return ret;
}
-EXPORT_SYMBOL_GPL(remove_memory);
-#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif
+#endif
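
On the hot-remove side, __remove_pages() undoes what __add_pages() set up, tearing down the section memmap and zone accounting for the range; actual offlining is handled separately by the core before this is reached. The signature assumed by the code above:

    int __remove_pages(struct zone *zone, unsigned long start_pfn,
                       unsigned long nr_pages);
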
+
+/*
+ * Even when CONFIG_IA32_SUPPORT is not enabled it is
+ * useful to have the Linux/x86 domain registered to
+ * avoid an attempted module load when emulators call
+ * personality(PER_LINUX32). This saves several milliseconds
+ * on each such call.
+ */
+static struct exec_domain ia32_exec_domain;
+
+static int __init
+per_linux32_init(void)
+{
+ ia32_exec_domain.name = "Linux/x86";
+ ia32_exec_domain.handler = NULL;
+ ia32_exec_domain.pers_low = PER_LINUX32;
+ ia32_exec_domain.pers_high = PER_LINUX32;
+ ia32_exec_domain.signal_map = default_exec_domain.signal_map;
+ ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+ register_exec_domain(&ia32_exec_domain);
+
+ return 0;
+}
+
+__initcall(per_linux32_init);
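
With the Linux/x86 exec domain registered unconditionally, a personality(PER_LINUX32) call resolves immediately instead of triggering a personality-N module load attempt. A minimal userspace illustration (not part of the patch):

    #include <stdio.h>
    #include <sys/personality.h>

    int main(void)
    {
            /* e.g. an x86 emulator announcing an ia32 process image */
            if (personality(PER_LINUX32) == -1)
                    perror("personality");
            return 0;
    }
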
+
+/**
+ * show_mem - give short summary of memory stats
+ *
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
+ */
+void show_mem(unsigned int filter)
+{
+ int total_reserved = 0;
+ unsigned long total_present = 0;
+ pg_data_t *pgdat;
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
+ printk(KERN_INFO "Node memory in pages:\n");
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+ unsigned long flags;
+ int reserved = 0;
+ int nid = pgdat->node_id;
+ int zoneid;
+
+ if (skip_free_areas_node(filter, nid))
+ continue;
+ pgdat_resize_lock(pgdat, &flags);
+
+ for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+ struct zone *zone = &pgdat->node_zones[zoneid];
+ if (!populated_zone(zone))
+ continue;
+
+ reserved += zone->present_pages - zone->managed_pages;
+ }
+ present = pgdat->node_present_pages;
+
+ pgdat_resize_unlock(pgdat, &flags);
+ total_present += present;
+ total_reserved += reserved;
+ printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, ",
+ nid, present, reserved);
+ }
+ printk(KERN_INFO "%ld pages of RAM\n", total_present);
+ printk(KERN_INFO "%d reserved pages\n", total_reserved);
+ printk(KERN_INFO "Total of %ld pages in page table cache\n",
+ quicklist_total_size());
+ printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
+}