Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/contig.c      | 245
-rw-r--r--  arch/ia64/mm/discontig.c   | 307
-rw-r--r--  arch/ia64/mm/extable.c     |  28
-rw-r--r--  arch/ia64/mm/fault.c       | 148
-rw-r--r--  arch/ia64/mm/hugetlbpage.c |  61
-rw-r--r--  arch/ia64/mm/init.c        | 360
-rw-r--r--  arch/ia64/mm/ioremap.c     |  93
-rw-r--r--  arch/ia64/mm/numa.c        |  46
-rw-r--r--  arch/ia64/mm/tlb.c         | 418
9 files changed, 1129 insertions(+), 577 deletions(-)
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index daf977ff292..52715a71aed 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -16,7 +16,9 @@
*/
#include <linux/bootmem.h>
#include <linux/efi.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
+#include <linux/nmi.h>
#include <linux/swap.h>
#include <asm/meminit.h>
@@ -29,72 +31,10 @@
static unsigned long max_gap;
#endif
-/**
- * show_mem - display a memory statistics summary
- *
- * Just walks the pages in the system and describes where they're allocated.
- */
-void
-show_mem (void)
-{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
-
- printk(KERN_INFO "Mem-info:\n");
- show_free_areas();
-
- printk(KERN_INFO "Free swap: %6ldkB\n",
- nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- for (i = 0; i < max_mapnr; i++) {
- if (!pfn_valid(i)) {
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- if (max_gap < LARGE_GAP)
- continue;
- i = vmemmap_find_next_valid_pfn(0, i) - 1;
-#endif
- continue;
- }
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (page_count(mem_map + i))
- shared += page_count(mem_map + i) - 1;
- }
- printk(KERN_INFO "%d pages of RAM\n", total);
- printk(KERN_INFO "%d reserved pages\n", reserved);
- printk(KERN_INFO "%d pages shared\n", shared);
- printk(KERN_INFO "%d pages swap cached\n", cached);
- printk(KERN_INFO "%ld pages in page table cache\n",
- pgtable_quicklist_total_size());
-}
-
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
/**
- * find_max_pfn - adjust the maximum page number callback
- * @start: start of range
- * @end: end of range
- * @arg: address of pointer to global max_pfn variable
- *
- * Passed as a callback function to efi_memmap_walk() to determine the highest
- * available page frame number in the system.
- */
-int
-find_max_pfn (unsigned long start, unsigned long end, void *arg)
-{
- unsigned long *max_pfnp = arg, pfn;
-
- pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
- if (pfn > *max_pfnp)
- *max_pfnp = pfn;
- return 0;
-}
-
-/**
* find_bootmap_location - callback to find a memory area for the bootmap
* @start: start of region
* @end: end of region
@@ -104,10 +44,10 @@ find_max_pfn (unsigned long start, unsigned long end, void *arg)
* bootmap_start. This address must be page-aligned.
*/
static int __init
-find_bootmap_location (unsigned long start, unsigned long end, void *arg)
+find_bootmap_location (u64 start, u64 end, void *arg)
{
- unsigned long needed = *(unsigned long *)arg;
- unsigned long range_start, range_end, free_start;
+ u64 needed = *(unsigned long *)arg;
+ u64 range_start, range_end, free_start;
int i;
#if IGNORE_PFN0
@@ -141,6 +81,112 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
return 0;
}
+#ifdef CONFIG_SMP
+static void *cpu_data;
+/**
+ * per_cpu_init - setup per-cpu variables
+ *
+ * Allocate and setup per-cpu data areas.
+ */
+void *per_cpu_init(void)
+{
+ static bool first_time = true;
+ void *cpu0_data = __cpu0_per_cpu;
+ unsigned int cpu;
+
+ if (!first_time)
+ goto skip;
+ first_time = false;
+
+ /*
+ * get_free_pages() cannot be used before cpu_init() is done.
+ * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+ * to avoid that AP calls get_zeroed_page().
+ */
+ for_each_possible_cpu(cpu) {
+ void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
+
+ memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+ __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+ per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+
+ /*
+ * percpu area for cpu0 is moved from the __init area
+ * which is setup by head.S and used till this point.
+ * Update ar.k3. This move ensures that the percpu
+ * area for cpu0 is on the correct node and its
+ * virtual address isn't insanely far from other
+ * percpu areas which is important for congruent
+ * percpu allocator.
+ */
+ if (cpu == 0)
+ ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+ (unsigned long)__per_cpu_start);
+
+ cpu_data += PERCPU_PAGE_SIZE;
+ }
+skip:
+ return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+}
+
+static inline void
+alloc_per_cpu_data(void)
+{
+ cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
+ PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+}
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas. All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+ struct pcpu_alloc_info *ai;
+ struct pcpu_group_info *gi;
+ unsigned int cpu;
+ ssize_t static_size, reserved_size, dyn_size;
+ int rc;
+
+ ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+ if (!ai)
+ panic("failed to allocate pcpu_alloc_info");
+ gi = &ai->groups[0];
+
+ /* units are assigned consecutively to possible cpus */
+ for_each_possible_cpu(cpu)
+ gi->cpu_map[gi->nr_units++] = cpu;
+
+ /* set parameters */
+ static_size = __per_cpu_end - __per_cpu_start;
+ reserved_size = PERCPU_MODULE_RESERVE;
+ dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+ if (dyn_size < 0)
+ panic("percpu area overflow static=%zd reserved=%zd\n",
+ static_size, reserved_size);
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = PERCPU_PAGE_SIZE;
+ ai->atom_size = PAGE_SIZE;
+ ai->alloc_size = PERCPU_PAGE_SIZE;
+
+ rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+ if (rc)
+ panic("failed to setup percpu area (err=%d)", rc);
+
+ pcpu_free_alloc_info(ai);
+}
+#else
+#define alloc_per_cpu_data() do { } while (0)
+#endif /* CONFIG_SMP */
+
/**
* find_memory - setup memory map
*
@@ -155,9 +201,10 @@ find_memory (void)
reserve_memory();
/* first find highest page frame number */
- max_pfn = 0;
- efi_memmap_walk(find_max_pfn, &max_pfn);
-
+ min_low_pfn = ~0UL;
+ max_low_pfn = 0;
+ efi_memmap_walk(find_max_min_low_pfn, NULL);
+ max_pfn = max_low_pfn;
/* how many bytes to cover all the pages */
bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
@@ -167,55 +214,16 @@ find_memory (void)
if (bootmap_start == ~0UL)
panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
- bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+ bootmap_size = init_bootmem_node(NODE_DATA(0),
+ (bootmap_start >> PAGE_SHIFT), 0, max_pfn);
/* Free all available memory, then mark bootmem-map as being in use. */
efi_memmap_walk(filter_rsvd_memory, free_bootmem);
- reserve_bootmem(bootmap_start, bootmap_size);
+ reserve_bootmem(bootmap_start, bootmap_size, BOOTMEM_DEFAULT);
find_initrd();
-}
-#ifdef CONFIG_SMP
-/**
- * per_cpu_init - setup per-cpu variables
- *
- * Allocate and setup per-cpu data areas.
- */
-void * __cpuinit
-per_cpu_init (void)
-{
- void *cpu_data;
- int cpu;
- static int first_time=1;
-
- /*
- * get_free_pages() cannot be used before cpu_init() done. BSP
- * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
- * get_zeroed_page().
- */
- if (first_time) {
- first_time=0;
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
- PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- }
- }
- return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-}
-#endif /* CONFIG_SMP */
-
-static int
-count_pages (u64 start, u64 end, void *arg)
-{
- unsigned long *count = arg;
-
- *count += (end - start) >> PAGE_SHIFT;
- return 0;
+ alloc_per_cpu_data();
}
/*
@@ -226,18 +234,17 @@ void __init
paging_init (void)
{
unsigned long max_dma;
- unsigned long nid = 0;
unsigned long max_zone_pfns[MAX_NR_ZONES];
- num_physpages = 0;
- efi_memmap_walk(count_pages, &num_physpages);
-
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_VIRTUAL_MEM_MAP
- efi_memmap_walk(register_active_ranges, &nid);
+ efi_memmap_walk(filter_memory, register_active_ranges);
efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
if (max_gap < LARGE_GAP) {
vmem_map = (struct page *) 0;
@@ -249,8 +256,8 @@ paging_init (void)
map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
- vmalloc_end -= map_size;
- vmem_map = (struct page *) vmalloc_end;
+ VMALLOC_END -= map_size;
+ vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
/*
@@ -264,7 +271,7 @@ paging_init (void)
printk("Virtual mem_map starts at 0x%p\n", mem_map);
}
#else /* !CONFIG_VIRTUAL_MEM_MAP */
- add_active_range(0, 0, max_low_pfn);
+ memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
free_area_init_nodes(max_zone_pfns);
#endif /* !CONFIG_VIRTUAL_MEM_MAP */
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
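The setup_per_cpu_areas() added above for the contiguous-memory case carves each PERCPU_PAGE_SIZE unit into a static, a reserved (module) and a dynamic slice before handing the layout to pcpu_setup_first_chunk(). The standalone sketch below only illustrates that size partitioning and its overflow check; the 64 KiB unit and the sizes are made-up stand-ins for the kernel's linker symbols, not the real values.

#include <stdio.h>
#include <stdlib.h>

#define PERCPU_PAGE_SIZE	(64 * 1024)	/* stand-in for the real percpu unit size */
#define PERCPU_MODULE_RESERVE	(8 * 1024)	/* stand-in for the module percpu reserve */

int main(void)
{
	long static_size = 20 * 1024;	/* stands in for __per_cpu_end - __per_cpu_start */
	long reserved_size = PERCPU_MODULE_RESERVE;
	long dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;

	/* same overflow check the patch performs before pcpu_setup_first_chunk() */
	if (dyn_size < 0) {
		fprintf(stderr, "percpu area overflow static=%ld reserved=%ld\n",
			static_size, reserved_size);
		exit(1);
	}
	printf("unit=%d static=%ld reserved=%ld dynamic=%ld\n",
	       PERCPU_PAGE_SIZE, static_size, reserved_size, dyn_size);
	return 0;
}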
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d497b6b0f5b..87862680536 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -16,11 +16,13 @@
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
+#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
@@ -35,9 +37,9 @@ struct early_node_data {
struct ia64_node_data *node_data;
unsigned long pernode_addr;
unsigned long pernode_size;
- struct bootmem_data bootmem_data;
- unsigned long num_physpages;
+#ifdef CONFIG_ZONE_DMA
unsigned long num_dma_physpages;
+#endif
unsigned long min_pfn;
unsigned long max_pfn;
};
@@ -45,7 +47,7 @@ struct early_node_data {
static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;
-static pg_data_t *pgdat_list[MAX_NUMNODES];
+pg_data_t *pgdat_list[MAX_NUMNODES];
/*
* To prevent cache aliasing effects, align per-node structures so that they
@@ -72,23 +74,20 @@ static pg_data_t *pgdat_list[MAX_NUMNODES];
static int __init build_node_maps(unsigned long start, unsigned long len,
int node)
{
- unsigned long cstart, epfn, end = start + len;
- struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+ unsigned long spfn, epfn, end = start + len;
+ struct bootmem_data *bdp = &bootmem_node_data[node];
epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
- cstart = GRANULEROUNDDOWN(start);
+ spfn = GRANULEROUNDDOWN(start) >> PAGE_SHIFT;
if (!bdp->node_low_pfn) {
- bdp->node_boot_start = cstart;
+ bdp->node_min_pfn = spfn;
bdp->node_low_pfn = epfn;
} else {
- bdp->node_boot_start = min(cstart, bdp->node_boot_start);
+ bdp->node_min_pfn = min(spfn, bdp->node_min_pfn);
bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
}
- min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
- max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
-
return 0;
}
@@ -104,7 +103,7 @@ static int __meminit early_nr_cpus_node(int node)
{
int cpu, n = 0;
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for_each_possible_early_cpu(cpu)
if (node == node_cpuid[cpu].nid)
n++;
@@ -124,6 +123,7 @@ static unsigned long __meminit compute_pernodesize(int node)
pernodesize += node * L1_CACHE_BYTES;
pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
+ pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
pernodesize = PAGE_ALIGN(pernodesize);
return pernodesize;
}
@@ -142,19 +142,121 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
#ifdef CONFIG_SMP
int cpu;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
- if (node == node_cpuid[cpu].nid) {
- memcpy(__va(cpu_data), __phys_per_cpu_start,
- __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
- __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- }
+ for_each_possible_early_cpu(cpu) {
+ void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+ if (node != node_cpuid[cpu].nid)
+ continue;
+
+ memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+ __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+ __per_cpu_start;
+
+ /*
+ * percpu area for cpu0 is moved from the __init area
+ * which is setup by head.S and used till this point.
+ * Update ar.k3. This move ensures that the percpu
+ * area for cpu0 is on the correct node and its
+ * virtual address isn't insanely far from other
+ * percpu areas which is important for congruent
+ * percpu allocator.
+ */
+ if (cpu == 0)
+ ia64_set_kr(IA64_KR_PER_CPU_DATA,
+ (unsigned long)cpu_data -
+ (unsigned long)__per_cpu_start);
+
+ cpu_data += PERCPU_PAGE_SIZE;
}
#endif
return cpu_data;
}
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas. All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+ struct pcpu_alloc_info *ai;
+ struct pcpu_group_info *uninitialized_var(gi);
+ unsigned int *cpu_map;
+ void *base;
+ unsigned long base_offset;
+ unsigned int cpu;
+ ssize_t static_size, reserved_size, dyn_size;
+ int node, prev_node, unit, nr_units, rc;
+
+ ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+ if (!ai)
+ panic("failed to allocate pcpu_alloc_info");
+ cpu_map = ai->groups[0].cpu_map;
+
+ /* determine base */
+ base = (void *)ULONG_MAX;
+ for_each_possible_cpu(cpu)
+ base = min(base,
+ (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+ base_offset = (void *)__per_cpu_start - base;
+
+ /* build cpu_map, units are grouped by node */
+ unit = 0;
+ for_each_node(node)
+ for_each_possible_cpu(cpu)
+ if (node == node_cpuid[cpu].nid)
+ cpu_map[unit++] = cpu;
+ nr_units = unit;
+
+ /* set basic parameters */
+ static_size = __per_cpu_end - __per_cpu_start;
+ reserved_size = PERCPU_MODULE_RESERVE;
+ dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+ if (dyn_size < 0)
+ panic("percpu area overflow static=%zd reserved=%zd\n",
+ static_size, reserved_size);
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = PERCPU_PAGE_SIZE;
+ ai->atom_size = PAGE_SIZE;
+ ai->alloc_size = PERCPU_PAGE_SIZE;
+
+ /*
+ * CPUs are put into groups according to node. Walk cpu_map
+ * and create new groups at node boundaries.
+ */
+ prev_node = -1;
+ ai->nr_groups = 0;
+ for (unit = 0; unit < nr_units; unit++) {
+ cpu = cpu_map[unit];
+ node = node_cpuid[cpu].nid;
+
+ if (node == prev_node) {
+ gi->nr_units++;
+ continue;
+ }
+ prev_node = node;
+
+ gi = &ai->groups[ai->nr_groups++];
+ gi->nr_units = 1;
+ gi->base_offset = __per_cpu_offset[cpu] + base_offset;
+ gi->cpu_map = &cpu_map[unit];
+ }
+
+ rc = pcpu_setup_first_chunk(ai, base);
+ if (rc)
+ panic("failed to setup percpu area (err=%d)", rc);
+
+ pcpu_free_alloc_info(ai);
+}
+#endif
+
/**
* fill_pernode - initialize pernode data.
* @node: the node id.
@@ -166,7 +268,7 @@ static void __init fill_pernode(int node, unsigned long pernode,
{
void *cpu_data;
int cpus = early_nr_cpus_node(node);
- struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+ struct bootmem_data *bdp = &bootmem_node_data[node];
mem_data[node].pernode_addr = pernode;
mem_data[node].pernode_size = pernodesize;
@@ -221,20 +323,21 @@ static void __init fill_pernode(int node, unsigned long pernode,
static int __init find_pernode_space(unsigned long start, unsigned long len,
int node)
{
- unsigned long epfn;
+ unsigned long spfn, epfn;
unsigned long pernodesize = 0, pernode, pages, mapsize;
- struct bootmem_data *bdp = &mem_data[node].bootmem_data;
+ struct bootmem_data *bdp = &bootmem_node_data[node];
+ spfn = start >> PAGE_SHIFT;
epfn = (start + len) >> PAGE_SHIFT;
- pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
+ pages = bdp->node_low_pfn - bdp->node_min_pfn;
mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
/*
* Make sure this memory falls within this node's usable memory
* since we may have thrown some away in build_maps().
*/
- if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
+ if (spfn < bdp->node_min_pfn || epfn > bdp->node_low_pfn)
return 0;
/* Don't setup this node's local space twice... */
@@ -296,15 +399,15 @@ static void __init reserve_pernode_space(void)
bdp = pdp->bdata;
/* First the bootmem_map itself */
- pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
+ pages = bdp->node_low_pfn - bdp->node_min_pfn;
size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
base = __pa(bdp->node_bootmem_map);
- reserve_bootmem_node(pdp, base, size);
+ reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
/* Now the per-node space */
size = mem_data[node].pernode_size;
base = __pa(mem_data[node].pernode_addr);
- reserve_bootmem_node(pdp, base, size);
+ reserve_bootmem_node(pdp, base, size, BOOTMEM_DEFAULT);
}
}
@@ -318,7 +421,7 @@ static void __meminit scatter_node_data(void)
* node_online_map is not set for hot-added nodes at this time,
* because we are halfway through initialization of the new node's
* structures. If for_each_online_node() is used, a new node's
- * pg_data_ptrs will be not initialized. Insted of using it,
+ * pg_data_ptrs will not be initialized. Instead of using it,
* pgdat_list[] is checked.
*/
for_each_node(node) {
@@ -345,9 +448,10 @@ static void __init initialize_pernode_data(void)
#ifdef CONFIG_SMP
/* Set the node_data pointer for each per-cpu struct */
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ for_each_possible_early_cpu(cpu) {
node = node_cpuid[cpu].nid;
- per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+ per_cpu(ia64_cpu_info, cpu).node_data =
+ mem_data[node].node_data;
}
#else
{
@@ -355,7 +459,7 @@ static void __init initialize_pernode_data(void)
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
- ((char *)&per_cpu__cpu_info - __per_cpu_start));
+ ((char *)&ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
@@ -412,37 +516,6 @@ static void __init memory_less_nodes(void)
return;
}
-#ifdef CONFIG_SPARSEMEM
-/**
- * register_sparse_mem - notify SPARSEMEM that this memory range exists.
- * @start: physical start of range
- * @end: physical end of range
- * @arg: unused
- *
- * Simply calls SPARSEMEM to register memory section(s).
- */
-static int __init register_sparse_mem(unsigned long start, unsigned long end,
- void *arg)
-{
- int nid;
-
- start = __pa(start) >> PAGE_SHIFT;
- end = __pa(end) >> PAGE_SHIFT;
- nid = early_pfn_to_nid(start);
- memory_present(nid, start, end);
-
- return 0;
-}
-
-static void __init arch_sparse_init(void)
-{
- efi_memmap_walk(register_sparse_mem, NULL);
- sparse_init();
-}
-#else
-#define arch_sparse_init() do {} while (0)
-#endif
-
/**
* find_memory - walk the EFI memory map and setup the bootmem allocator
*
@@ -467,12 +540,16 @@ void __init find_memory(void)
/* These actually end up getting called by call_pernode_memory() */
efi_memmap_walk(filter_rsvd_memory, build_node_maps);
efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
+ efi_memmap_walk(find_max_min_low_pfn, NULL);
for_each_online_node(node)
- if (mem_data[node].bootmem_data.node_low_pfn) {
+ if (bootmem_node_data[node].node_low_pfn) {
node_clear(node, memory_less_mask);
mem_data[node].min_pfn = ~0UL;
}
+
+ efi_memmap_walk(filter_memory, register_active_ranges);
+
/*
* Initialize the boot memory maps in reverse order since that's
* what the bootmem allocator expects
@@ -486,14 +563,14 @@ void __init find_memory(void)
else if (node_isset(node, memory_less_mask))
continue;
- bdp = &mem_data[node].bootmem_data;
+ bdp = &bootmem_node_data[node];
pernode = mem_data[node].pernode_addr;
pernodesize = mem_data[node].pernode_size;
map = pernode + pernodesize;
init_bootmem_node(pgdat_list[node],
map>>PAGE_SHIFT,
- bdp->node_boot_start>>PAGE_SHIFT,
+ bdp->node_min_pfn,
bdp->node_low_pfn);
}
@@ -515,18 +592,14 @@ void __init find_memory(void)
* find_pernode_space() does most of this already, we just need to set
* local_per_cpu_offset
*/
-void __cpuinit *per_cpu_init(void)
+void *per_cpu_init(void)
{
int cpu;
static int first_time = 1;
-
- if (smp_processor_id() != 0)
- return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
-
if (first_time) {
first_time = 0;
- for (cpu = 0; cpu < NR_CPUS; cpu++)
+ for_each_possible_early_cpu(cpu)
per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
}
@@ -535,65 +608,6 @@ void __cpuinit *per_cpu_init(void)
#endif /* CONFIG_SMP */
/**
- * show_mem - give short summary of memory stats
- *
- * Shows a simple page count of reserved and used pages in the system.
- * For discontig machines, it does this on a per-pgdat basis.
- */
-void show_mem(void)
-{
- int i, total_reserved = 0;
- int total_shared = 0, total_cached = 0;
- unsigned long total_present = 0;
- pg_data_t *pgdat;
-
- printk(KERN_INFO "Mem-info:\n");
- show_free_areas();
- printk(KERN_INFO "Free swap: %6ldkB\n",
- nr_swap_pages<<(PAGE_SHIFT-10));
- printk(KERN_INFO "Node memory in pages:\n");
- for_each_online_pgdat(pgdat) {
- unsigned long present;
- unsigned long flags;
- int shared = 0, cached = 0, reserved = 0;
-
- pgdat_resize_lock(pgdat, &flags);
- present = pgdat->node_present_pages;
- for(i = 0; i < pgdat->node_spanned_pages; i++) {
- struct page *page;
- if (pfn_valid(pgdat->node_start_pfn + i))
- page = pfn_to_page(pgdat->node_start_pfn + i);
- else {
- i = vmemmap_find_next_valid_pfn(pgdat->node_id,
- i) - 1;
- continue;
- }
- if (PageReserved(page))
- reserved++;
- else if (PageSwapCache(page))
- cached++;
- else if (page_count(page))
- shared += page_count(page)-1;
- }
- pgdat_resize_unlock(pgdat, &flags);
- total_present += present;
- total_reserved += reserved;
- total_cached += cached;
- total_shared += shared;
- printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, "
- "shrd: %10d, swpd: %10d\n", pgdat->node_id,
- present, reserved, shared, cached);
- }
- printk(KERN_INFO "%ld pages of RAM\n", total_present);
- printk(KERN_INFO "%d reserved pages\n", total_reserved);
- printk(KERN_INFO "%d pages shared\n", total_shared);
- printk(KERN_INFO "%d pages swap cached\n", total_cached);
- printk(KERN_INFO "Total of %ld pages in page table cache\n",
- pgtable_quicklist_total_size());
- printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
-}
-
-/**
* call_pernode_memory - use SRAT to call callback functions with node info
* @start: physical start of range
* @len: length of range
@@ -654,13 +668,12 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
{
unsigned long end = start + len;
- add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
- mem_data[node].num_physpages += len >> PAGE_SHIFT;
+#ifdef CONFIG_ZONE_DMA
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
+#endif
start = GRANULEROUNDDOWN(start);
- start = ORDERROUNDDOWN(start);
end = GRANULEROUNDUP(end);
mem_data[node].max_pfn = max(mem_data[node].max_pfn,
end >> PAGE_SHIFT);
@@ -686,20 +699,20 @@ void __init paging_init(void)
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- arch_sparse_init();
-
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
+ sparse_memory_present_with_active_regions(MAX_NUMNODES);
+ sparse_init();
+
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
- vmem_map = (struct page *) vmalloc_end;
+ vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif
for_each_online_node(node) {
- num_physpages += mem_data[node].num_physpages;
pfn_offset = mem_data[node].min_pfn;
#ifdef CONFIG_VIRTUAL_MEM_MAP
@@ -709,13 +722,17 @@ void __init paging_init(void)
max_pfn = mem_data[node].max_pfn;
}
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init_nodes(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
+#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
unsigned long size = compute_pernodesize(nid);
@@ -733,3 +750,15 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
pgdat_list[update_node] = update_pgdat;
scatter_node_data();
}
+#endif
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
+{
+ return vmemmap_populate_basepages(start, end, node);
+}
+
+void vmemmap_free(unsigned long start, unsigned long end)
+{
+}
+#endif
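The discontig setup_per_cpu_areas() above assigns percpu units to CPUs consecutively but grouped by node, opening a new pcpu group whenever the node changes along cpu_map. A minimal sketch of that grouping walk, using a hypothetical cpu_node[] table in place of node_cpuid[]:

#include <stdio.h>

/* illustrative stand-in for the kernel's node_cpuid[] */
static const int cpu_node[] = { 0, 0, 1, 1, 1, 2 };
#define NR_CPUS (int)(sizeof(cpu_node) / sizeof(cpu_node[0]))

int main(void)
{
	int cpu_map[NR_CPUS];
	int unit = 0, node, cpu, prev_node = -1, nr_groups = 0;

	/* units are assigned consecutively, grouped by node, as in the patch */
	for (node = 0; node <= 2; node++)
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_node[cpu] == node)
				cpu_map[unit++] = cpu;

	/* walk cpu_map and open a new group whenever the node changes */
	for (unit = 0; unit < NR_CPUS; unit++) {
		node = cpu_node[cpu_map[unit]];
		if (node != prev_node) {
			prev_node = node;
			printf("group %d starts at unit %d (node %d)\n",
			       nr_groups++, unit, node);
		}
	}
	return 0;
}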
diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c
index 71c50dd8f87..c99a41e29fe 100644
--- a/arch/ia64/mm/extable.c
+++ b/arch/ia64/mm/extable.c
@@ -8,7 +8,7 @@
#include <linux/sort.h>
#include <asm/uaccess.h>
-#include <asm/module.h>
+#include <linux/module.h>
static int cmp_ex(const void *a, const void *b)
{
@@ -53,6 +53,32 @@ void sort_extable (struct exception_table_entry *start,
cmp_ex, swap_ex);
}
+static inline unsigned long ex_to_addr(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->addr + x->addr;
+}
+
+#ifdef CONFIG_MODULES
+/*
+ * Any entry referring to the module init will be at the beginning or
+ * the end.
+ */
+void trim_init_extable(struct module *m)
+{
+ /*trim the beginning*/
+ while (m->num_exentries &&
+ within_module_init(ex_to_addr(&m->extable[0]), m)) {
+ m->extable++;
+ m->num_exentries--;
+ }
+ /*trim the end*/
+ while (m->num_exentries &&
+ within_module_init(ex_to_addr(&m->extable[m->num_exentries-1]),
+ m))
+ m->num_exentries--;
+}
+#endif /* CONFIG_MODULES */
+
const struct exception_table_entry *
search_extable (const struct exception_table_entry *first,
const struct exception_table_entry *last,
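The new ex_to_addr() above decodes a self-relative entry: the stored value is the distance from the field's own address to the target, which is what lets trim_init_extable() compare entries against the module init range. A small userspace sketch of the same encode/decode round trip (the struct here is illustrative, not the ia64 exception_table_entry layout):

#include <stdio.h>
#include <stdint.h>

/* illustrative self-relative entry */
struct rel_entry {
	int32_t addr;	/* offset of the target from &addr itself */
};

static unsigned long ex_to_addr(const struct rel_entry *x)
{
	/* target = address of the field + the stored relative offset */
	return (unsigned long)&x->addr + x->addr;
}

static char target[16];
static struct rel_entry e;

int main(void)
{
	/* encode: store the distance from the field to the target */
	e.addr = (int32_t)((unsigned long)target - (unsigned long)&e.addr);
	printf("decoded %p, expected %p\n",
	       (void *)ex_to_addr(&e), (void *)target);
	return 0;
}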
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 14ef7cceb20..7225dad8709 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -7,49 +7,36 @@
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/prefetch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
-#include <asm/kdebug.h>
-extern void die (char *, struct pt_regs *, long);
+extern int die(char *, struct pt_regs *, long);
#ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
-{
- return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-
-int unregister_page_fault_notifier(struct notifier_block *nb)
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
- return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
+ int ret = 0;
+
+ if (!user_mode(regs)) {
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+ if (kprobe_running() && kprobe_fault_handler(regs, trap))
+ ret = 1;
+ preempt_enable();
+ }
-static inline int notify_page_fault(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
-{
- struct die_args args = {
- .regs = regs,
- .str = str,
- .err = err,
- .trapnr = trap,
- .signr = sig
- };
- return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+ return ret;
}
#else
-static inline int notify_page_fault(enum die_val val, const char *str,
- struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
- return NOTIFY_DONE;
+ return 0;
}
#endif
@@ -85,6 +72,10 @@ mapped_kernel_page_is_present (unsigned long address)
return pte_present(pte);
}
+# define VM_READ_BIT 0
+# define VM_WRITE_BIT 1
+# define VM_EXEC_BIT 2
+
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
@@ -93,6 +84,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
struct mm_struct *mm = current->mm;
struct siginfo si;
unsigned long mask;
+ int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
/* mmap_sem is performance critical.... */
prefetchw(&mm->mmap_sem);
@@ -118,18 +114,28 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* This is to handle the kprobes on user space access instructions
*/
- if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
- SIGSEGV) == NOTIFY_STOP)
+ if (notify_page_fault(regs, TRAP_BRKPT))
return;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+ if (mask & VM_WRITE)
+ flags |= FAULT_FLAG_WRITE;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma_prev(mm, address, &prev_vma);
- if (!vma)
+ if (!vma && !prev_vma )
goto bad_area;
- /* find_vma_prev() returns vma such that address < vma->vm_end or NULL */
- if (address < vma->vm_start)
+ /*
+ * find_vma_prev() returns vma such that address < vma->vm_end or NULL
+ *
+ * May find no vma, but could be that the last vm area is the
+ * register backing store that needs to expand upwards, in
+ * this case vma will be null, but prev_vma will be non-null
+ */
+ if (( !vma && prev_vma ) || (address < vma->vm_start) )
goto check_expansion;
good_area:
@@ -137,53 +143,67 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/* OK, we've got a good vm_area for this memory area. Check the access permissions: */
-# define VM_READ_BIT 0
-# define VM_WRITE_BIT 1
-# define VM_EXEC_BIT 2
-
# if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
|| (1 << VM_EXEC_BIT) != VM_EXEC)
# error File is out of sync with <linux/mm.h>. Please update.
# endif
- mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
- | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
- | (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));
+ if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
+ goto bad_area;
if ((vma->vm_flags & mask) != mask)
goto bad_area;
- survive:
/*
* If for any reason at all we couldn't handle the fault, make
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- switch (handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0)) {
- case VM_FAULT_MINOR:
- ++current->min_flt;
- break;
- case VM_FAULT_MAJOR:
- ++current->maj_flt;
- break;
- case VM_FAULT_SIGBUS:
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We ran out of memory, or some other thing happened
* to us that made us unable to handle the page fault
* gracefully.
*/
- signal = SIGBUS;
- goto bad_area;
- case VM_FAULT_OOM:
- goto out_of_memory;
- default:
+ if (fault & VM_FAULT_OOM) {
+ goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGBUS) {
+ signal = SIGBUS;
+ goto bad_area;
+ }
BUG();
}
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
up_read(&mm->mmap_sem);
return;
check_expansion:
if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
+ if (!vma)
+ goto bad_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
@@ -271,20 +291,16 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
else
printk(KERN_ALERT "Unable to handle kernel paging request at "
"virtual address %016lx\n", address);
- die("Oops", regs, isr);
+ if (die("Oops", regs, isr))
+ regs = NULL;
bust_spinlocks(0);
- do_exit(SIGKILL);
+ if (regs)
+ do_exit(SIGKILL);
return;
out_of_memory:
up_read(&mm->mmap_sem);
- if (current->pid == 1) {
- yield();
- down_read(&mm->mmap_sem);
- goto survive;
- }
- printk(KERN_CRIT "VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
}
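The reworked fault path above retries handle_mm_fault() at most once: on VM_FAULT_RETRY it clears FAULT_FLAG_ALLOW_RETRY, sets FAULT_FLAG_TRIED and jumps back to retake mmap_sem. A toy model of that state machine, with made-up flag values and a stubbed fault function standing in for handle_mm_fault():

#include <stdio.h>

/* illustrative flag/result bits, not the kernel's actual values */
#define FAULT_FLAG_ALLOW_RETRY 0x01
#define FAULT_FLAG_TRIED       0x02
#define VM_FAULT_RETRY         0x10

/* stand-in for handle_mm_fault(): report RETRY once, then succeed */
static int fake_handle_fault(unsigned int flags)
{
	static int first = 1;

	if (first && (flags & FAULT_FLAG_ALLOW_RETRY)) {
		first = 0;
		return VM_FAULT_RETRY;
	}
	return 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	int fault;

retry:
	fault = fake_handle_fault(flags);
	if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
		/* only one retry: clear ALLOW_RETRY, record that we tried */
		flags &= ~FAULT_FLAG_ALLOW_RETRY;
		flags |= FAULT_FLAG_TRIED;
		printf("retrying with flags=0x%x\n", flags);
		goto retry;
	}
	printf("fault handled, final flags=0x%x\n", flags);
	return 0;
}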
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index eee5c1cfbe3..76069c18ee4 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -13,18 +13,19 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
-#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/sysctl.h>
+#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
-unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT;
+unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
+EXPORT_SYMBOL(hpage_shift);
pte_t *
-huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
+huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
@@ -37,7 +38,7 @@ huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
if (pud) {
pmd = pmd_alloc(mm, pud, taddr);
if (pmd)
- pte = pte_alloc_map(mm, pmd, taddr);
+ pte = pte_alloc_map(mm, NULL, pmd, taddr);
}
return pte;
}
@@ -64,13 +65,19 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
return pte;
}
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
/*
* Don't actually need to do any preparation, but need to make sure
* the address is in the right region.
*/
-int prepare_hugepage_range(unsigned long addr, unsigned long len)
+int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
{
if (len & ~HPAGE_MASK)
return -EINVAL;
@@ -101,13 +108,19 @@ int pmd_huge(pmd_t pmd)
{
return 0;
}
+
+int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
return NULL;
}
-void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
@@ -135,25 +148,31 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- struct vm_area_struct *vmm;
+ struct vm_unmapped_area_info info;
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
if (len & ~HPAGE_MASK)
return -EINVAL;
+
+ /* Handle MAP_FIXED */
+ if (flags & MAP_FIXED) {
+ if (prepare_hugepage_range(file, addr, len))
+ return -EINVAL;
+ return addr;
+ }
+
/* This code assumes that RGN_HPAGE != 0. */
if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
addr = HPAGE_REGION_BASE;
- else
- addr = ALIGN(addr, HPAGE_SIZE);
- for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
- return -ENOMEM;
- if (!vmm || (addr + len) <= vmm->vm_start)
- return addr;
- addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
- }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = addr;
+ info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+ info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
}
static int __init hugetlb_setup_sz(char *str)
@@ -168,7 +187,7 @@ static int __init hugetlb_setup_sz(char *str)
tr_pages = 0x15557000UL;
size = memparse(str, &str);
- if (*str || (size & (size-1)) || !(tr_pages & size) ||
+ if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
size <= PAGE_SIZE ||
size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
printk(KERN_WARNING "Invalid huge page size specified\n");
@@ -181,6 +200,6 @@ static int __init hugetlb_setup_sz(char *str)
* override here with new page shift.
*/
ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
- return 1;
+ return 0;
}
-__setup("hugepagesz=", hugetlb_setup_sz);
+early_param("hugepagesz", hugetlb_setup_sz);
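hugetlb_get_unmapped_area() above now delegates the VMA search to vm_unmapped_area(); the key input is align_mask, which forces the returned address onto a huge-page boundary. A tiny sketch of what that alignment amounts to, with an assumed 256 MB huge page rather than the runtime hpage_shift value:

#include <stdio.h>

/* illustrative huge-page geometry (the real value comes from hpage_shift) */
#define HPAGE_SHIFT 28UL
#define HPAGE_SIZE  (1UL << HPAGE_SHIFT)

int main(void)
{
	unsigned long align_mask = HPAGE_SIZE - 1;	/* what info.align_mask expresses */
	unsigned long addr = 0x200001234000UL;		/* arbitrary unaligned candidate */

	/* round the candidate up to the next huge-page boundary, as the
	 * vm_unmapped_area() search does for a non-zero align_mask */
	unsigned long aligned = (addr + align_mask) & ~align_mask;

	printf("candidate 0x%lx -> huge-page aligned 0x%lx\n", addr, aligned);
	return 0;
}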
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ff87a5cba39..25c350264a4 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -10,6 +10,7 @@
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
@@ -19,10 +20,9 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
+#include <linux/kexec.h>
-#include <asm/a.out.h>
#include <asm/dma.h>
-#include <asm/ia32.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
@@ -30,24 +30,19 @@
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
-#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
-DEFINE_PER_CPU(long, __pgtable_quicklist_size);
+#include <asm/paravirt.h>
extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif
@@ -55,63 +50,11 @@ EXPORT_SYMBOL(vmem_map);
struct page *zero_page_memmap_ptr; /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
-#define MIN_PGT_PAGES 25UL
-#define MAX_PGT_FREES_PER_PASS 16L
-#define PGT_FRACTION_OF_NODE_MEM 16
-
-static inline long
-max_pgt_pages(void)
-{
- u64 node_free_pages, max_pgt_pages;
-
-#ifndef CONFIG_NUMA
- node_free_pages = nr_free_pages();
-#else
- node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
-#endif
- max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
- max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
- return max_pgt_pages;
-}
-
-static inline long
-min_pages_to_free(void)
-{
- long pages_to_free;
-
- pages_to_free = pgtable_quicklist_size - max_pgt_pages();
- pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
- return pages_to_free;
-}
-
void
-check_pgt_cache(void)
-{
- long pages_to_free;
-
- if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
- return;
-
- preempt_disable();
- while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
- while (pages_to_free--) {
- free_page((unsigned long)pgtable_quicklist_alloc());
- }
- preempt_enable();
- preempt_disable();
- }
- preempt_enable();
-}
-
-void
-lazy_mmu_prot_update (pte_t pte)
+__ia64_sync_icache_dcache (pte_t pte)
{
unsigned long addr;
struct page *page;
- unsigned long order;
-
- if (!pte_exec(pte))
- return; /* not an executable page... */
page = pte_page(pte);
addr = (unsigned long) page_address(page);
@@ -119,23 +62,37 @@ lazy_mmu_prot_update (pte_t pte)
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
- if (PageCompound(page)) {
- order = (unsigned long) (page[1].lru.prev);
- flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
- }
- else
- flush_icache_range(addr, addr + PAGE_SIZE);
+ flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
+/*
+ * Since DMA is i-cache coherent, any (complete) pages that were written via
+ * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
+ * flush them when they get mapped into an executable vm-area.
+ */
+void
+dma_mark_clean(void *addr, size_t size)
+{
+ unsigned long pg_addr, end;
+
+ pg_addr = PAGE_ALIGN((unsigned long) addr);
+ end = (unsigned long) addr + size;
+ while (pg_addr + PAGE_SIZE <= end) {
+ struct page *page = virt_to_page(pg_addr);
+ set_bit(PG_arch_1, &page->flags);
+ pg_addr += PAGE_SIZE;
+ }
+}
+
inline void
ia64_set_rbs_bot (void)
{
- unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+ unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
- current->thread.rbs_bot = STACK_TOP - stack_size;
+ current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}
/*
@@ -156,14 +113,14 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
- memset(vma, 0, sizeof(*vma));
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
- vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
@@ -175,13 +132,14 @@ ia64_init_addr_space (void)
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
if (vma) {
- memset(vma, 0, sizeof(*vma));
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
- vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
+ vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
+ VM_DONTEXPAND | VM_DONTDUMP;
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
@@ -196,25 +154,13 @@ ia64_init_addr_space (void)
void
free_initmem (void)
{
- unsigned long addr, eaddr;
-
- addr = (unsigned long) ia64_imva(__init_begin);
- eaddr = (unsigned long) ia64_imva(__init_end);
- while (addr < eaddr) {
- ClearPageReserved(virt_to_page(addr));
- init_page_count(virt_to_page(addr));
- free_page(addr);
- ++totalram_pages;
- addr += PAGE_SIZE;
- }
- printk(KERN_INFO "Freeing unused kernel memory: %ldkB freed\n",
- (__init_end - __init_begin) >> 10);
+ free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
+ -1, "unused kernel");
}
void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
- struct page *page;
/*
* EFI uses 4KB pages while the kernel can use 4KB or bigger.
* Thus EFI and the kernel may have different page sizes. It is
@@ -255,11 +201,7 @@ free_initrd_mem (unsigned long start, unsigned long end)
for (; start < end; start += PAGE_SIZE) {
if (!virt_addr_valid(start))
continue;
- page = virt_to_page(start);
- ClearPageReserved(page);
- init_page_count(page);
- free_page(start);
- ++totalram_pages;
+ free_reserved_page(virt_to_page(start));
}
}
@@ -302,6 +244,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
static void __init
setup_gate (void)
{
+ void *gate_section;
struct page *page;
/*
@@ -309,10 +252,11 @@ setup_gate (void)
* headers etc. and once execute-only page to enable
* privilege-promotion via "epc":
*/
- page = virt_to_page(ia64_imva(__start_gate_section));
+ gate_section = paravirt_get_gate_section();
+ page = virt_to_page(ia64_imva(gate_section));
put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
- page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
+ page = virt_to_page(ia64_imva(gate_section + PAGE_SIZE));
put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
@@ -334,11 +278,10 @@ setup_gate (void)
ia64_patch_gate();
}
-void __devinit
-ia64_mmu_init (void *my_cpu_data)
+void ia64_mmu_init(void *my_cpu_data)
{
- unsigned long psr, pta, impl_va_bits;
- extern void __devinit tlb_init (void);
+ unsigned long pta, impl_va_bits;
+ extern void tlb_init(void);
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
@@ -346,15 +289,6 @@ ia64_mmu_init (void *my_cpu_data)
# define VHPT_ENABLE_BIT 1
#endif
- /* Pin mapping for percpu area into TLB */
- psr = ia64_clear_ic();
- ia64_itr(0x2, IA64_TR_PERCPU_DATA, PERCPU_ADDR,
- pte_val(pfn_pte(__pa(my_cpu_data) >> PAGE_SHIFT, PAGE_KERNEL)),
- PERCPU_PAGE_SHIFT);
-
- ia64_set_psr(psr);
- ia64_srlz_i();
-
/*
* Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
* address space. The IA-64 architecture guarantees that at least 50 bits of
@@ -423,9 +357,7 @@ int vmemmap_find_next_valid_pfn(int node, int i)
end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
end_address = PAGE_ALIGN(end_address);
-
- stop_address = (unsigned long) &vmem_map[
- pgdat->node_start_pfn + pgdat->node_spanned_pages];
+ stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];
do {
pgd_t *pgd;
@@ -471,8 +403,7 @@ retry_pte:
return hole_next_pfn - pgdat->node_start_pfn;
}
-int __init
-create_mem_map_page_table (u64 start, u64 end, void *arg)
+int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
unsigned long address, start_page, end_page;
struct page *map_start, *map_end;
@@ -517,8 +448,8 @@ struct memmap_init_callback_data {
unsigned long zone;
};
-static int
-virtual_memmap_init (u64 start, u64 end, void *arg)
+static int __meminit
+virtual_memmap_init(u64 start, u64 end, void *arg)
{
struct memmap_init_callback_data *args;
struct page *map_start, *map_end;
@@ -543,16 +474,17 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
- args->nid, args->zone, page_to_pfn(map_start));
+ args->nid, args->zone, page_to_pfn(map_start),
+ MEMMAP_EARLY);
return 0;
}
-void
+void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map)
- memmap_init_zone(size, nid, zone, start_pfn);
+ memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY);
else {
struct page *start;
struct memmap_init_callback_data args;
@@ -579,8 +511,7 @@ ia64_pfn_valid (unsigned long pfn)
}
EXPORT_SYMBOL(ia64_pfn_valid);
-int __init
-find_largest_hole (u64 start, u64 end, void *arg)
+int __init find_largest_hole(u64 start, u64 end, void *arg)
{
u64 *max_gap = arg;
@@ -594,29 +525,37 @@ find_largest_hole (u64 start, u64 end, void *arg)
return 0;
}
-int __init
-register_active_ranges(u64 start, u64 end, void *nid)
+#endif /* CONFIG_VIRTUAL_MEM_MAP */
+
+int __init register_active_ranges(u64 start, u64 len, int nid)
{
- BUG_ON(nid == NULL);
- BUG_ON(*(unsigned long *)nid >= MAX_NUMNODES);
+ u64 end = start + len;
- add_active_range(*(unsigned long *)nid,
- __pa(start) >> PAGE_SHIFT,
- __pa(end) >> PAGE_SHIFT);
+#ifdef CONFIG_KEXEC
+ if (start > crashk_res.start && start < crashk_res.end)
+ start = crashk_res.end;
+ if (end > crashk_res.start && end < crashk_res.end)
+ end = crashk_res.start;
+#endif
+
+ if (start < end)
+ memblock_add_node(__pa(start), end - start, nid);
return 0;
}
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
-static int __init
-count_reserved_pages (u64 start, u64 end, void *arg)
+int
+find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
- unsigned long num_reserved = 0;
- unsigned long *count = arg;
-
- for (; start < end; start += PAGE_SIZE)
- if (PageReserved(virt_to_page(start)))
- ++num_reserved;
- *count += num_reserved;
+ unsigned long pfn_start, pfn_end;
+#ifdef CONFIG_FLATMEM
+ pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
+ pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
+#else
+ pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
+ pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
+#endif
+ min_low_pfn = min(min_low_pfn, pfn_start);
+ max_low_pfn = max(max_low_pfn, pfn_end);
return 0;
}
@@ -642,10 +581,7 @@ __setup("nolwsys", nolwsys_setup);
void __init
mem_init (void)
{
- long reserved_pages, codesize, datasize, initsize;
- pg_data_t *pgdat;
int i;
- static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;
BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
@@ -661,33 +597,13 @@ mem_init (void)
#endif
#ifdef CONFIG_FLATMEM
- if (!mem_map)
- BUG();
- max_mapnr = max_low_pfn;
+ BUG_ON(!mem_map);
#endif
+ set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
-
- kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE);
- kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START);
- kclist_add(&kcore_kernel, _stext, _end - _stext);
-
- for_each_online_pgdat(pgdat)
- if (pgdat->bdata->node_bootmem_map)
- totalram_pages += free_all_bootmem_node(pgdat);
-
- reserved_pages = 0;
- efi_memmap_walk(count_reserved_pages, &reserved_pages);
-
- codesize = (unsigned long) _etext - (unsigned long) _stext;
- datasize = (unsigned long) _edata - (unsigned long) _etext;
- initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
-
- printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, "
- "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
- num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
- reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);
-
+ free_all_bootmem();
+ mem_init_print_info(NULL);
/*
* For fsyscall entrypoints with no light-weight handler, use the ordinary
@@ -695,29 +611,16 @@ mem_init (void)
* code can tell them apart.
*/
for (i = 0; i < NR_syscalls; ++i) {
- extern unsigned long fsyscall_table[NR_syscalls];
extern unsigned long sys_call_table[NR_syscalls];
+ unsigned long *fsyscall_table = paravirt_get_fsyscall_table();
if (!fsyscall_table[i] || nolwsys)
fsyscall_table[i] = sys_call_table[i] | 1;
}
setup_gate();
-
-#ifdef CONFIG_IA32_SUPPORT
- ia32_mem_init();
-#endif
}
#ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- totalram_pages++;
- num_physpages++;
-}
-
int arch_add_memory(int nid, u64 start, u64 size)
{
pg_data_t *pgdat;
@@ -729,18 +632,103 @@ int arch_add_memory(int nid, u64 start, u64 size)
pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_NORMAL;
- ret = __add_pages(zone, start_pfn, nr_pages);
+ ret = __add_pages(nid, zone, start_pfn, nr_pages);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
- __FUNCTION__, ret);
+ __func__, ret);
return ret;
}
-int remove_memory(u64 start, u64 size)
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
{
- return -EINVAL;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct zone *zone;
+ int ret;
+
+ zone = page_zone(pfn_to_page(start_pfn));
+ ret = __remove_pages(zone, start_pfn, nr_pages);
+ if (ret)
+ pr_warn("%s: Problem encountered in __remove_pages() as"
+ " ret=%d\n", __func__, ret);
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(remove_memory);
#endif
+#endif
+
+/*
+ * Even when CONFIG_IA32_SUPPORT is not enabled it is
+ * useful to have the Linux/x86 domain registered to
+ * avoid an attempted module load when emulators call
+ * personality(PER_LINUX32). This saves several milliseconds
+ * on each such call.
+ */
+static struct exec_domain ia32_exec_domain;
+
+static int __init
+per_linux32_init(void)
+{
+ ia32_exec_domain.name = "Linux/x86";
+ ia32_exec_domain.handler = NULL;
+ ia32_exec_domain.pers_low = PER_LINUX32;
+ ia32_exec_domain.pers_high = PER_LINUX32;
+ ia32_exec_domain.signal_map = default_exec_domain.signal_map;
+ ia32_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
+ register_exec_domain(&ia32_exec_domain);
+
+ return 0;
+}
+
+__initcall(per_linux32_init);
+
+/**
+ * show_mem - give short summary of memory stats
+ *
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
+ */
+void show_mem(unsigned int filter)
+{
+ int total_reserved = 0;
+ unsigned long total_present = 0;
+ pg_data_t *pgdat;
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
+ printk(KERN_INFO "Node memory in pages:\n");
+ for_each_online_pgdat(pgdat) {
+ unsigned long present;
+ unsigned long flags;
+ int reserved = 0;
+ int nid = pgdat->node_id;
+ int zoneid;
+
+ if (skip_free_areas_node(filter, nid))
+ continue;
+ pgdat_resize_lock(pgdat, &flags);
+
+ for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+ struct zone *zone = &pgdat->node_zones[zoneid];
+ if (!populated_zone(zone))
+ continue;
+
+ reserved += zone->present_pages - zone->managed_pages;
+ }
+ present = pgdat->node_present_pages;
+
+ pgdat_resize_unlock(pgdat, &flags);
+ total_present += present;
+ total_reserved += reserved;
+ printk(KERN_INFO "Node %4d: RAM: %11ld, rsvd: %8d, ",
+ nid, present, reserved);
+ }
+ printk(KERN_INFO "%ld pages of RAM\n", total_present);
+ printk(KERN_INFO "%d reserved pages\n", total_reserved);
+ printk(KERN_INFO "Total of %ld pages in page table cache\n",
+ quicklist_total_size());
+ printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
+}
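register_active_ranges() above clamps each EFI range against the crash-kernel reservation before feeding it to memblock_add_node(): an endpoint that falls inside crashk_res is pushed to the nearest edge of the reservation. A standalone sketch of just that clamping, with made-up addresses standing in for crashk_res and the EFI range:

#include <stdio.h>

/* illustrative reserved window, standing in for crashk_res */
static unsigned long res_start = 0x4000, res_end = 0x8000;

int main(void)
{
	unsigned long start = 0x5000, end = 0x9000;

	/* same clamping the patched register_active_ranges() applies:
	 * pull the endpoints out of the reserved window before registering */
	if (start > res_start && start < res_end)
		start = res_end;
	if (end > res_start && end < res_end)
		end = res_start;

	if (start < end)
		printf("register [0x%lx, 0x%lx)\n", start, end);
	else
		printf("range fully reserved, nothing to register\n");
	return 0;
}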
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 4280c074d64..43964cde621 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -1,5 +1,5 @@
/*
- * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
* Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -10,51 +10,116 @@
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/efi.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/meminit.h>
static inline void __iomem *
-__ioremap (unsigned long offset, unsigned long size)
+__ioremap_uc(unsigned long phys_addr)
{
- return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+ return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
}
void __iomem *
-ioremap (unsigned long offset, unsigned long size)
+early_ioremap (unsigned long phys_addr, unsigned long size)
{
u64 attr;
+ attr = kern_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB)
+ return (void __iomem *) phys_to_virt(phys_addr);
+ return __ioremap_uc(phys_addr);
+}
+
+void __iomem *
+ioremap (unsigned long phys_addr, unsigned long size)
+{
+ void __iomem *addr;
+ struct vm_struct *area;
+ unsigned long offset;
+ pgprot_t prot;
+ u64 attr;
unsigned long gran_base, gran_size;
+ unsigned long page_base;
/*
* For things in kern_memmap, we must use the same attribute
* as the rest of the kernel. For more details, see
* Documentation/ia64/aliasing.txt.
*/
- attr = kern_mem_attribute(offset, size);
+ attr = kern_mem_attribute(phys_addr, size);
if (attr & EFI_MEMORY_WB)
- return (void __iomem *) phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(phys_addr);
else if (attr & EFI_MEMORY_UC)
- return __ioremap(offset, size);
+ return __ioremap_uc(phys_addr);
/*
* Some chipsets don't support UC access to memory. If
* WB is supported for the whole granule, we prefer that.
*/
- gran_base = GRANULEROUNDDOWN(offset);
- gran_size = GRANULEROUNDUP(offset + size) - gran_base;
+ gran_base = GRANULEROUNDDOWN(phys_addr);
+ gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
- return (void __iomem *) phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(phys_addr);
+
+ /*
+ * WB is not supported for the whole granule, so we can't use
+ * the region 7 identity mapping. If we can safely cover the
+ * area with kernel page table mappings, we can use those
+ * instead.
+ */
+ page_base = phys_addr & PAGE_MASK;
+ size = PAGE_ALIGN(phys_addr + size) - page_base;
+ if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
+ prot = PAGE_KERNEL;
+
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
- return __ioremap(offset, size);
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ area->phys_addr = phys_addr;
+ addr = (void __iomem *) area->addr;
+ if (ioremap_page_range((unsigned long) addr,
+ (unsigned long) addr + size, phys_addr, prot)) {
+ vunmap((void __force *) addr);
+ return NULL;
+ }
+
+ return (void __iomem *) (offset + (char __iomem *)addr);
+ }
+
+ return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);
void __iomem *
-ioremap_nocache (unsigned long offset, unsigned long size)
+ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
- if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
+ if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
return NULL;
- return __ioremap(offset, size);
+ return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap_nocache);
+
+void
+early_iounmap (volatile void __iomem *addr, unsigned long size)
+{
+}
+
+void
+iounmap (volatile void __iomem *addr)
+{
+ if (REGION_NUMBER(addr) == RGN_GATE)
+ vunmap((void *) ((unsigned long) addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
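
With this change ioremap() can hand back three different kinds of address (a cacheable region-7 identity mapping, an uncached region-6 address, or a page-table mapping in the vmalloc area), and only the last of these needs iounmap() to do any real work. A minimal driver-style usage sketch follows; it is not part of this patch, and the physical base, window size and register offset are illustrative assumptions.

/*
 * Hypothetical sketch only (assumes <linux/io.h>): map a device's
 * MMIO window, read one register, and tear the mapping down again.
 */
static int demo_read_status(unsigned long phys_base)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap(phys_base, 0x1000);	/* size is illustrative */
	if (!regs)
		return -ENOMEM;

	status = readl(regs);			/* first 32-bit register */
	printk(KERN_INFO "demo: status %#x\n", status);

	iounmap(regs);	/* no-op for identity mappings, vunmap() otherwise */
	return 0;
}
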
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 64e4c21f311..ea21d4cad54 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -16,6 +16,7 @@
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
+#include <linux/module.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
@@ -26,7 +27,9 @@
*/
int num_node_memblks;
struct node_memblk_s node_memblk[NR_NODE_MEMBLKS];
-struct node_cpuid_s node_cpuid[NR_CPUS];
+struct node_cpuid_s node_cpuid[NR_CPUS] =
+ { [0 ... NR_CPUS-1] = { .phys_id = 0, .nid = NUMA_NO_NODE } };
+
/*
* This is a matrix with "distances" between nodes, they should be
* proportional to the memory access latency ratios.
@@ -55,18 +58,53 @@ paddr_to_nid(unsigned long paddr)
* SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
* the section resides.
*/
-int early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
{
int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
+ /*
+ * NOTE: The following SMP-unsafe globals are only used early in boot
+ * when the kernel is running single-threaded.
+ */
+ static int __meminitdata last_ssec, last_esec;
+ static int __meminitdata last_nid;
+
+ if (section >= last_ssec && section < last_esec)
+ return last_nid;
for (i = 0; i < num_node_memblks; i++) {
ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
esec = (node_memblk[i].start_paddr + node_memblk[i].size +
((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
- if (section >= ssec && section < esec)
+ if (section >= ssec && section < esec) {
+ last_ssec = ssec;
+ last_esec = esec;
+ last_nid = node_memblk[i].nid;
return node_memblk[i].nid;
+ }
}
- return 0;
+ return -1;
}
+
+void numa_clear_node(int cpu)
+{
+ unmap_cpu_from_node(cpu, NUMA_NO_NODE);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * SRAT information is stored in node_memblk[], so it can also be used
+ * to pick the node for memory added at hot-plug time if necessary.
+ */
+
+int memory_add_physaddr_to_nid(u64 addr)
+{
+ int nid = paddr_to_nid(addr);
+ if (nid < 0)
+ return 0;
+ return nid;
+}
+
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
#endif
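
The switch from returning 0 to returning -1 only makes sense together with the generic sparsemem code: it asks the architecture for a node and falls back to node 0 itself when the answer is "unknown". The mm/page_alloc.c wrapper of that era looked roughly like the sketch below (paraphrased from memory, not code from this patch).

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	int nid = __early_pfn_to_nid(pfn);

	if (nid >= 0)
		return nid;
	/* No node_memblk[] entry covers this pfn; assume node 0. */
	return 0;
}

Returning -1 from the arch hook therefore keeps the old behaviour for fully described memory maps while letting generic code distinguish "node 0" from "no memblk describes this pfn at all".
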
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ffad7624436..ed612976868 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -10,6 +10,10 @@
* IPI based ptc implementation and A-step IPI implementation.
* Rohit Seth <rohit.seth@intel.com>
* Ken Chen <kenneth.w.chen@intel.com>
+ * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
+ * Copyright (C) 2007 Intel Corp
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -18,6 +22,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
+#include <linux/slab.h>
#include <asm/delay.h>
#include <asm/mmu_context.h>
@@ -25,19 +30,26 @@
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
+#include <asm/processor.h>
+#include <asm/sal.h>
+#include <asm/tlb.h>
static struct {
- unsigned long mask; /* mask of supported purge page-sizes */
+ u64 mask; /* mask of supported purge page-sizes */
unsigned long max_bits; /* log2 of largest supported purge page-size */
} purge;
struct ia64_ctx ia64_ctx = {
- .lock = SPIN_LOCK_UNLOCKED,
- .next = 1,
- .max_ctx = ~0U
+ .lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
+ .next = 1,
+ .max_ctx = ~0U
};
DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
+DEFINE_PER_CPU(u8, ia64_tr_num);  /* Number of TR slots in current processor */
+DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by kernel */
+
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
* Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -83,30 +95,183 @@ wrap_mmu_context (struct mm_struct *mm)
local_flush_tlb_all();
}
+/*
+ * Implement "spinaphores" ... like counting semaphores, but they
+ * spin instead of sleeping. If there are ever any other users for
+ * this primitive it can be moved up to a spinaphore.h header.
+ */
+struct spinaphore {
+ unsigned long ticket;
+ unsigned long serve;
+};
+
+static inline void spinaphore_init(struct spinaphore *ss, int val)
+{
+ ss->ticket = 0;
+ ss->serve = val;
+}
+
+static inline void down_spin(struct spinaphore *ss)
+{
+ unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
+
+ if (time_before(t, ss->serve))
+ return;
+
+ ia64_invala();
+
+ for (;;) {
+ asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+ if (time_before(t, serve))
+ return;
+ cpu_relax();
+ }
+}
+
+static inline void up_spin(struct spinaphore *ss)
+{
+ ia64_fetchadd(1, &ss->serve, rel);
+}
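
The ticket/serve pair behaves like a counting semaphore with N concurrent holders: down_spin() takes a ticket and waits until serve has been advanced past it, and up_spin() advances serve by one. A user-space illustration of the same idea with C11 atomics is sketched below; all names are hypothetical, and it omits the wrap-around-safe comparison that time_before() gives the kernel version.

#include <stdatomic.h>
#include <sched.h>

struct demo_spinaphore {
	atomic_ulong ticket;	/* next ticket to hand out */
	atomic_ulong serve;	/* tickets strictly below this may run */
};

static void demo_init(struct demo_spinaphore *s, unsigned long nr_holders)
{
	atomic_init(&s->ticket, 0);
	atomic_init(&s->serve, nr_holders);	/* allow nr_holders concurrent holders */
}

static void demo_down(struct demo_spinaphore *s)
{
	unsigned long t = atomic_fetch_add(&s->ticket, 1);

	/* Spin until enough demo_up() calls have advanced "serve" past us. */
	while (t >= atomic_load(&s->serve))
		sched_yield();
}

static void demo_up(struct demo_spinaphore *s)
{
	atomic_fetch_add(&s->serve, 1);
}

With an initial serve of 2, for example, tickets 0 and 1 proceed immediately while ticket 2 spins until one of the first two holders calls demo_up() and serve reaches 3.
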
+
+static struct spinaphore ptcg_sem;
+static u16 nptcg = 1;
+static int need_ptcg_sem = 1;
+static int toolatetochangeptcgsem = 0;
+
+/*
+ * Kernel parameter "nptcg=" overrides the maximum number of concurrent global
+ * TLB purges reported by either PAL or the SAL PALO table.
+ *
+ * The nptcg value is not sanity checked; it is the user's responsibility to
+ * supply a value that is valid for the platform. Otherwise the kernel may
+ * hang in some cases.
+ */
+static int __init
+set_nptcg(char *str)
+{
+ int value = 0;
+
+ get_option(&str, &value);
+ setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);
+
+ return 1;
+}
+
+__setup("nptcg=", set_nptcg);
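
get_option() parses a plain integer, so capping concurrent global purges from the boot command line looks like the following (the value is purely illustrative and, per the comment above, unchecked):

	... nptcg=2

which forces nptcg to 2 regardless of what PAL or the PALO table report.
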
+
+/*
+ * Maximum number of simultaneous ptc.g purges in the system can
+ * be defined by PAL_VM_SUMMARY (in which case we should take
+ * the smallest value for any cpu in the system) or by the PAL
+ * override table (in which case we should ignore the value from
+ * PAL_VM_SUMMARY).
+ *
+ * The kernel parameter "nptcg=" overrides the maximum number of simultaneous
+ * ptc.g purges defined in either PAL_VM_SUMMARY or the PAL override table;
+ * in that case the values from both sources are ignored.
+ *
+ * Complicating the logic here is the fact that num_possible_cpus()
+ * isn't fully set up until we start bringing cpus online.
+ */
void
-ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
- unsigned long end, unsigned long nbits)
+setup_ptcg_sem(int max_purges, int nptcg_from)
{
- static DEFINE_SPINLOCK(ptcg_lock);
+ static int kp_override;
+ static int palo_override;
+ static int firstcpu = 1;
- if (mm != current->active_mm || !current->mm) {
- flush_tlb_all();
+ if (toolatetochangeptcgsem) {
+ if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
+ BUG_ON(1 < nptcg);
+ else
+ BUG_ON(max_purges < nptcg);
return;
}
- /* HW requires global serialization of ptc.ga. */
- spin_lock(&ptcg_lock);
- {
- do {
- /*
- * Flush ALAT entries also.
- */
- ia64_ptcga(start, (nbits<<2));
- ia64_srlz_i();
- start += (1UL << nbits);
- } while (start < end);
+ if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
+ kp_override = 1;
+ nptcg = max_purges;
+ goto resetsema;
+ }
+ if (kp_override) {
+ need_ptcg_sem = num_possible_cpus() > nptcg;
+ return;
+ }
+
+ if (nptcg_from == NPTCG_FROM_PALO) {
+ palo_override = 1;
+
+ /* In PALO max_purges == 0 really means it! */
+ if (max_purges == 0)
+ panic("Whoa! Platform does not support global TLB purges.\n");
+ nptcg = max_purges;
+ if (nptcg == PALO_MAX_TLB_PURGES) {
+ need_ptcg_sem = 0;
+ return;
+ }
+ goto resetsema;
+ }
+ if (palo_override) {
+ if (nptcg != PALO_MAX_TLB_PURGES)
+ need_ptcg_sem = (num_possible_cpus() > nptcg);
+ return;
+ }
+
+ /* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
+ if (max_purges == 0) max_purges = 1;
+
+ if (firstcpu) {
+ nptcg = max_purges;
+ firstcpu = 0;
}
- spin_unlock(&ptcg_lock);
+ if (max_purges < nptcg)
+ nptcg = max_purges;
+ if (nptcg == PAL_MAX_PURGES) {
+ need_ptcg_sem = 0;
+ return;
+ } else
+ need_ptcg_sem = (num_possible_cpus() > nptcg);
+
+resetsema:
+ spinaphore_init(&ptcg_sem, max_purges);
+}
+
+void
+ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned long nbits)
+{
+ struct mm_struct *active_mm = current->active_mm;
+
+ toolatetochangeptcgsem = 1;
+
+ if (mm != active_mm) {
+ /* Restore region IDs for mm */
+ if (mm && active_mm) {
+ activate_context(mm);
+ } else {
+ flush_tlb_all();
+ return;
+ }
+ }
+
+ if (need_ptcg_sem)
+ down_spin(&ptcg_sem);
+
+ do {
+ /*
+ * Flush ALAT entries also.
+ */
+ ia64_ptcga(start, (nbits << 2));
+ ia64_srlz_i();
+ start += (1UL << nbits);
+ } while (start < end);
+
+ if (need_ptcg_sem)
+ up_spin(&ptcg_sem);
+
+ if (mm != active_mm) {
+ activate_context(active_mm);
+ }
}
void
@@ -157,7 +322,7 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
preempt_disable();
#ifdef CONFIG_SMP
- if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+ if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
platform_global_tlb_purge(mm, start, end, nbits);
preempt_enable();
return;
@@ -172,15 +337,17 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
}
EXPORT_SYMBOL(flush_tlb_range);
-void __devinit
-ia64_tlb_init (void)
+void ia64_tlb_init(void)
{
- ia64_ptce_info_t ptce_info;
- unsigned long tr_pgbits;
+ ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
+ u64 tr_pgbits;
long status;
+ pal_vm_info_1_u_t vm_info_1;
+ pal_vm_info_2_u_t vm_info_2;
+ int cpu = smp_processor_id();
if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
- printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld;"
+ printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
"defaulting to architected purge page-sizes.\n", status);
purge.mask = 0x115557000UL;
}
@@ -194,4 +361,201 @@ ia64_tlb_init (void)
local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
local_flush_tlb_all(); /* nuke left overs from bootstrapping... */
+ status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);
+
+ if (status) {
+ printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
+ per_cpu(ia64_tr_num, cpu) = 8;
+ return;
+ }
+ per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
+ if (per_cpu(ia64_tr_num, cpu) >
+ (vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
+ per_cpu(ia64_tr_num, cpu) =
+ vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
+ if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
+ static int justonce = 1;
+ per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
+ if (justonce) {
+ justonce = 0;
+ printk(KERN_DEBUG "TR register number exceeds "
+ "IA64_TR_ALLOC_MAX!\n");
+ }
+ }
+}
+
+/*
+ * is_tr_overlap
+ *
+ * Check overlap with inserted TRs.
+ */
+static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
+{
+ u64 tr_log_size;
+ u64 tr_end;
+ u64 va_rr = ia64_get_rr(va);
+ u64 va_rid = RR_TO_RID(va_rr);
+ u64 va_end = va + (1UL << log_size) - 1;
+
+ if (va_rid != RR_TO_RID(p->rr))
+ return 0;
+ tr_log_size = (p->itir & 0xff) >> 2;
+ tr_end = p->ifa + (1UL << tr_log_size) - 1;
+
+ if (va > tr_end || p->ifa > va_end)
+ return 0;
+ return 1;
+
+}
+
+/*
+ * ia64_insert_tr in virtual mode. Allocate a TR slot
+ *
+ * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
+ *
+ * va : virtual address.
+ * pte : pte entries inserted.
+ * log_size: range to be covered.
+ *
+ * Return value: < 0 : error number (-ENOMEM, -EINVAL or -EBUSY).
+ *
+ * >= 0 : slot number allocated for the TR.
+ * Must be called with preemption disabled.
+ */
+int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
+{
+ int i, r;
+ unsigned long psr;
+ struct ia64_tr_entry *p;
+ int cpu = smp_processor_id();
+
+ if (!ia64_idtrs[cpu]) {
+ ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+ sizeof (struct ia64_tr_entry), GFP_KERNEL);
+ if (!ia64_idtrs[cpu])
+ return -ENOMEM;
+ }
+ r = -EINVAL;
+ /* Check overlap with existing TR entries */
+ if (target_mask & 0x1) {
+ p = ia64_idtrs[cpu];
+ for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
+ i++, p++) {
+ if (p->pte & 0x1)
+ if (is_tr_overlap(p, va, log_size)) {
+ printk(KERN_DEBUG "Overlapped Entry "
+ "Inserted for TR Register!!\n");
+ goto out;
+ }
+ }
+ }
+ if (target_mask & 0x2) {
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
+ for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
+ i++, p++) {
+ if (p->pte & 0x1)
+ if (is_tr_overlap(p, va, log_size)) {
+ printk(KERN_DEBUG "Overlapped Entry "
+ "Inserted for TR Register!!\n");
+ goto out;
+ }
+ }
+ }
+
+ for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
+ switch (target_mask & 0x3) {
+ case 1:
+ if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
+ goto found;
+ continue;
+ case 2:
+ if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
+ goto found;
+ continue;
+ case 3:
+ if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+ !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
+ goto found;
+ continue;
+ default:
+ r = -EINVAL;
+ goto out;
+ }
+ }
+found:
+ if (i >= per_cpu(ia64_tr_num, cpu))
+ return -EBUSY;
+
+ /* Record TR info for MCA handler use. */
+ if (i > per_cpu(ia64_tr_used, cpu))
+ per_cpu(ia64_tr_used, cpu) = i;
+
+ psr = ia64_clear_ic();
+ if (target_mask & 0x1) {
+ ia64_itr(0x1, i, va, pte, log_size);
+ ia64_srlz_i();
+ p = ia64_idtrs[cpu] + i;
+ p->ifa = va;
+ p->pte = pte;
+ p->itir = log_size << 2;
+ p->rr = ia64_get_rr(va);
+ }
+ if (target_mask & 0x2) {
+ ia64_itr(0x2, i, va, pte, log_size);
+ ia64_srlz_i();
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
+ p->ifa = va;
+ p->pte = pte;
+ p->itir = log_size << 2;
+ p->rr = ia64_get_rr(va);
+ }
+ ia64_set_psr(psr);
+ r = i;
+out:
+ return r;
+}
+EXPORT_SYMBOL_GPL(ia64_itr_entry);
+
+/*
+ * ia64_purge_tr
+ *
+ * target_mask: 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr.
+ * slot: slot number to be freed.
+ *
+ * Must be called with preemption disabled.
+ */
+void ia64_ptr_entry(u64 target_mask, int slot)
+{
+ int cpu = smp_processor_id();
+ int i;
+ struct ia64_tr_entry *p;
+
+ if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
+ return;
+
+ if (target_mask & 0x1) {
+ p = ia64_idtrs[cpu] + slot;
+ if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
+ p->pte = 0;
+ ia64_ptr(0x1, p->ifa, p->itir>>2);
+ ia64_srlz_i();
+ }
+ }
+
+ if (target_mask & 0x2) {
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
+ if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
+ p->pte = 0;
+ ia64_ptr(0x2, p->ifa, p->itir>>2);
+ ia64_srlz_i();
+ }
+ }
+
+ for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
+ if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+ ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
+ break;
+ }
+ per_cpu(ia64_tr_used, cpu) = i;
}
+EXPORT_SYMBOL_GPL(ia64_ptr_entry);
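
Both new entry points index the per-CPU ia64_idtrs[] array with smp_processor_id(), so an insert and its matching purge must run on the same CPU with preemption disabled. A hypothetical caller might look roughly like the sketch below; va, pte and the 64MB log size are illustrative assumptions, not taken from this patch.

/* Hypothetical sketch: pin one translation in a data TR, use it,
 * then free the slot again on the same CPU. */
int slot;

preempt_disable();
slot = ia64_itr_entry(0x2 /* dtr only */, va, pte, 26 /* 64MB */);
if (slot >= 0) {
	/* ... touch the pinned mapping ... */
	ia64_ptr_entry(0x2, slot);
} else {
	/* -ENOMEM, -EINVAL (overlap) or -EBUSY (no free slot) */
	printk(KERN_ERR "TR insert failed: %d\n", slot);
}
preempt_enable();
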