Diffstat (limited to 'arch/alpha/mm')
-rw-r--r--   arch/alpha/mm/Makefile    2
-rw-r--r--   arch/alpha/mm/fault.c    40
-rw-r--r--   arch/alpha/mm/init.c     65
-rw-r--r--   arch/alpha/mm/numa.c     44
4 files changed, 42 insertions, 109 deletions
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
index 09399c5386c..c993d3f93cf 100644
--- a/arch/alpha/mm/Makefile
+++ b/arch/alpha/mm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux alpha-specific parts of the memory manager.
#
-EXTRA_CFLAGS := -Werror
+ccflags-y := -Werror
obj-y := init.o fault.o extable.o
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index fadd5f882ff..98838a05ba6 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
@@ -90,6 +89,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
const struct exception_table_entry *fixup;
int fault, si_code = SEGV_MAPERR;
siginfo_t info;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
(or is suppressed by the PALcode). Support that for older CPUs
@@ -114,7 +114,9 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
if (address >= TASK_SIZE)
goto vmalloc_fault;
#endif
-
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
@@ -140,13 +142,17 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
} else {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
}
/* If for any reason at all we couldn't handle the fault,
make sure we exit gracefully rather than endlessly redo
the fault. */
- fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
- up_read(&mm->mmap_sem);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -154,10 +160,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
+ up_read(&mm->mmap_sem);
+
return;
/* Something tried to access memory that isn't in our memory map.
@@ -187,12 +209,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
out_of_memory:
+ up_read(&mm->mmap_sem);
if (!user_mode(regs))
goto no_context;
pagefault_out_of_memory();
return;
do_sigbus:
+ up_read(&mm->mmap_sem);
/* Send a sigbus, regardless of whether we were in kernel
or user mode. */
info.si_signo = SIGBUS;
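
For orientation, the fault-handling path of do_page_fault() after the hunks above reads roughly as below. This is a condensed sketch reconstructed from the diff, not the full function: the prefetch special case, the vma access checks and the bodies of the error labels are elided.

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	/* ... bad_area checks; write faults additionally set FAULT_FLAG_WRITE ... */

	fault = handle_mm_fault(mm, vma, address, flags);

	/* A fatal signal ended a killable wait; mmap_sem was already dropped. */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;	/* label now does up_read() first */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;		/* label now does up_read() first */
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;
		if (fault & VM_FAULT_RETRY) {
			/* Retry once; __lock_page_or_retry() released mmap_sem. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;
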
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 86425ab53bf..a1bea91df56 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -22,7 +22,6 @@
#include <linux/vmalloc.h>
#include <linux/gfp.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -31,8 +30,8 @@
#include <asm/mmu_context.h>
#include <asm/console.h>
#include <asm/tlb.h>
-
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+#include <asm/setup.h>
+#include <asm/sections.h>
extern void die_if_kernel(char *,struct pt_regs *,long);
@@ -277,75 +276,25 @@ srm_paging_stop (void)
}
#endif
-#ifndef CONFIG_DISCONTIGMEM
-static void __init
-printk_memory_info(void)
-{
- unsigned long codesize, reservedpages, datasize, initsize, tmp;
- extern int page_is_ram(unsigned long) __init;
- extern char _text, _etext, _data, _edata;
- extern char __init_begin, __init_end;
-
- /* printk all informations */
- reservedpages = 0;
- for (tmp = 0; tmp < max_low_pfn; tmp++)
- /*
- * Only count reserved RAM pages
- */
- if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
- reservedpages++;
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_data;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
- printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10);
-}
-
void __init
mem_init(void)
{
- max_mapnr = num_physpages = max_low_pfn;
- totalram_pages += free_all_bootmem();
+ set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
-
- printk_memory_info();
-}
-#endif /* CONFIG_DISCONTIGMEM */
-
-void
-free_reserved_mem(void *start, void *end)
-{
- void *__start = start;
- for (; __start < end; __start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(__start));
- init_page_count(virt_to_page(__start));
- free_page((long)__start);
- totalram_pages++;
- }
+ free_all_bootmem();
+ mem_init_print_info(NULL);
}
void
free_initmem(void)
{
- extern char __init_begin, __init_end;
-
- free_reserved_mem(&__init_begin, &__init_end);
- printk ("Freeing unused kernel memory: %ldk freed\n",
- (&__init_end - &__init_begin) >> 10);
+ free_initmem_default(-1);
}
#ifdef CONFIG_BLK_DEV_INITRD
void
free_initrd_mem(unsigned long start, unsigned long end)
{
- free_reserved_mem((void *)start, (void *)end);
- printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
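
The init.c side replaces the arch-private free_reserved_mem() loop and the hand-rolled memory banner with the generic helpers: set_max_mapnr(), mem_init_print_info(), free_initmem_default() and free_reserved_area(). As a rough mental model (a sketch based on the removed code above, not code from this patch; the name is hypothetical), freeing a reserved range boils down to:

	/* Sketch only; approximates the generic free_reserved_area() helper.
	 * The real helper also page-aligns the range and updates the
	 * managed/total page accounting. */
	static unsigned long sketch_free_reserved_area(void *start, void *end,
						       int poison, const char *s)
	{
		void *pos;
		unsigned long pages = 0;

		for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
			struct page *page = virt_to_page(pos);

			if (poison >= 0)		/* -1 means "do not poison" */
				memset(pos, poison, PAGE_SIZE);
			ClearPageReserved(page);	/* same steps the removed */
			init_page_count(page);		/* free_reserved_mem() did */
			__free_page(page);
		}

		if (pages && s)
			pr_info("Freeing %s memory: %ldK\n",
				s, pages << (PAGE_SHIFT - 10));
		return pages;
	}

free_initmem_default(-1) applies the same operation to the __init_begin..__init_end range, and free_reserved_area(..., -1, "initrd") to the initrd pages, so the arch code and its printouts collapse to one-liners.
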
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
index 7b2c56d8f93..d543d71c28b 100644
--- a/arch/alpha/mm/numa.c
+++ b/arch/alpha/mm/numa.c
@@ -17,6 +17,7 @@
#include <asm/hwrpb.h>
#include <asm/pgalloc.h>
+#include <asm/sections.h>
pg_data_t node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
@@ -128,8 +129,6 @@ setup_memory_node(int nid, void *kernel_end)
if (node_max_pfn > max_low_pfn)
max_pfn = max_low_pfn = node_max_pfn;
- num_physpages += node_max_pfn - node_min_pfn;
-
#if 0 /* we'll try this one again in a little while */
/* Cute trick to make sure our local node data is on local memory */
node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
@@ -313,49 +312,10 @@ void __init paging_init(void)
zones_size[ZONE_DMA] = dma_local_pfn;
zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
}
+ node_set_state(nid, N_NORMAL_MEMORY);
free_area_init_node(nid, zones_size, start_pfn, NULL);
}
/* Initialize the kernel's ZERO_PGE. */
memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
-
-void __init mem_init(void)
-{
- unsigned long codesize, reservedpages, datasize, initsize, pfn;
- extern int page_is_ram(unsigned long) __init;
- extern char _text, _etext, _data, _edata;
- extern char __init_begin, __init_end;
- unsigned long nid, i;
- high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
- reservedpages = 0;
- for_each_online_node(nid) {
- /*
- * This will free up the bootmem, ie, slot 0 memory
- */
- totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
-
- pfn = NODE_DATA(nid)->node_start_pfn;
- for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
- if (page_is_ram(pfn) &&
- PageReserved(nid_page_nr(nid, i)))
- reservedpages++;
- }
-
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_data;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
-
- printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, "
- "%luk data, %luk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- num_physpages << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10);
-#if 0
- mem_stress();
-#endif
-}
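
On the DISCONTIGMEM side, the per-node mem_init() and the num_physpages bookkeeping are gone: the now-unconditional mem_init() in init.c covers both configurations, and free_all_bootmem() already walks every registered bootmem node. What remains in paging_init() is to tag each node as holding regular memory before handing its zone sizes to free_area_init_node(); roughly as below (the loop iterator and the zone-size computation are assumed from the surrounding alpha code, not visible in the hunk):

	for_each_online_node(nid) {
		/* ... compute start_pfn, end_pfn, dma_local_pfn and zones_size[] ... */
		node_set_state(nid, N_NORMAL_MEMORY);	/* mark the node in the generic node-state masks */
		free_area_init_node(nid, zones_size, start_pfn, NULL);
	}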