Diffstat (limited to 'arch/hexagon/mm')
-rw-r--r--  arch/hexagon/mm/init.c     | 39
-rw-r--r--  arch/hexagon/mm/vm_fault.c | 10
2 files changed, 29 insertions(+), 20 deletions(-)
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index 69ffcfd2879..88977e42af0 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -1,7 +1,7 @@
/*
* Memory subsystem initialization for Hexagon
*
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,9 +31,10 @@
* Define a startpg just past the end of the kernel image and a lastpg
* that corresponds to the end of real or simulated platform memory.
*/
-#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET))
+#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))
-unsigned long bootmem_lastpg; /* Should be set by platform code */
+unsigned long bootmem_lastpg; /* Should be set by platform code */
+unsigned long __phys_offset; /* physical kernel offset >> 12 */
/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;
@@ -44,7 +45,6 @@ unsigned long zero_page_mask;
/* indicate pfn's of high memory */
unsigned long highstart_pfn, highend_pfn;
-/* struct mmu_gather defined in asm-generic.h; */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* Default cache attribute for newly created page tables */
@@ -70,10 +70,8 @@ unsigned long long kmap_generation;
void __init mem_init(void)
{
/* No idea where this is actually declared. Seems to evade LXR. */
- totalram_pages += free_all_bootmem();
- num_physpages = bootmem_lastpg; /* seriously, what? */
-
- printk(KERN_INFO "totalram_pages = %ld\n", totalram_pages);
+ free_all_bootmem();
+ mem_init_print_info(NULL);
/*
* To-Do: someone somewhere should wipe out the bootmem map
@@ -193,6 +191,9 @@ void __init setup_arch_memory(void)
* This needs to change for highmem setups.
*/
+ /* Prior to this, bootmem_lastpg is actually mem size */
+ bootmem_lastpg += ARCH_PFN_OFFSET;
+
/* Memory size needs to be a multiple of 16M */
bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
~((BIG_KERNEL_PAGE_SIZE) - 1));
@@ -201,12 +202,15 @@ void __init setup_arch_memory(void)
* Reserve the top DMA_RESERVE bytes of RAM for DMA (uncached)
* memory allocation
*/
- bootmap_size = init_bootmem(bootmem_startpg, bootmem_lastpg -
- PFN_DOWN(DMA_RESERVED_BYTES));
+
+ max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
+ min_low_pfn = ARCH_PFN_OFFSET;
+ bootmap_size = init_bootmem_node(NODE_DATA(0), bootmem_startpg, min_low_pfn, max_low_pfn);
printk(KERN_INFO "bootmem_startpg: 0x%08lx\n", bootmem_startpg);
printk(KERN_INFO "bootmem_lastpg: 0x%08lx\n", bootmem_lastpg);
printk(KERN_INFO "bootmap_size: %d\n", bootmap_size);
+ printk(KERN_INFO "min_low_pfn: 0x%08lx\n", min_low_pfn);
printk(KERN_INFO "max_low_pfn: 0x%08lx\n", max_low_pfn);
/*
@@ -221,14 +225,17 @@ void __init setup_arch_memory(void)
/* this actually only goes to the end of the first gig */
segtable_end = segtable + (1<<(30-22));
- /* Move forward to the start of empty pages */
- segtable += bootmem_lastpg >> (22-PAGE_SHIFT);
+ /*
+ * Move forward to the start of empty pages; take into account
+ * phys_offset shift.
+ */
+ segtable += (bootmem_lastpg-ARCH_PFN_OFFSET)>>(22-PAGE_SHIFT);
{
- int i;
+ int i;
- for (i = 1 ; i <= DMA_RESERVE ; i++)
- segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
+ for (i = 1 ; i <= DMA_RESERVE ; i++)
+ segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
| __HEXAGON_C_UNC << 6
| __HVM_PDE_S_4MB);
@@ -256,7 +263,7 @@ void __init setup_arch_memory(void)
* Free all the memory that wasn't taken up by the bootmap, the DMA
* reserve, or kernel itself.
*/
- free_bootmem(PFN_PHYS(bootmem_startpg)+bootmap_size,
+ free_bootmem(PFN_PHYS(bootmem_startpg) + bootmap_size,
PFN_PHYS(bootmem_lastpg - bootmem_startpg) - bootmap_size -
DMA_RESERVED_BYTES);
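For reference, the following is a standalone sketch (not part of the patch) of the page-frame arithmetic the init.c hunks rely on. The macros mirror the kernel's PFN_UP/PFN_DOWN helpers, every address and size is a made-up example value, and it assumes ARCH_PFN_OFFSET corresponds to PFN_DOWN(PHYS_OFFSET), as the new __phys_offset comment suggests.

/*
 * Illustrative sketch only: shows how bootmem_startpg and bootmem_lastpg
 * become absolute physical PFNs once PHYS_OFFSET/ARCH_PFN_OFFSET are
 * folded in.  Values below are invented for the example.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define BIG_KERNEL_PAGE_SIZE    (1UL << 24)     /* 16 MB rounding, as in the patch */

int main(void)
{
        /* Made-up example: kernel loaded 16 MB into 256 MB of physical RAM. */
        unsigned long phys_offset   = 0x01000000;       /* stands in for PHYS_OFFSET */
        unsigned long page_offset   = 0xc0000000;       /* stands in for PAGE_OFFSET */
        unsigned long kernel_end    = 0xc0400000;       /* stands in for _end (virtual) */
        unsigned long mem_size_pfns = PFN_DOWN(0x10000000);

        /* First free PFN past the kernel image, now as an absolute physical PFN. */
        unsigned long startpg = PFN_UP(kernel_end - page_offset + phys_offset);

        /* bootmem_lastpg starts out as a size; add the base PFN, round to 16 MB. */
        unsigned long lastpg = mem_size_pfns + PFN_DOWN(phys_offset);
        lastpg = PFN_DOWN((lastpg << PAGE_SHIFT) & ~(BIG_KERNEL_PAGE_SIZE - 1));

        printf("bootmem_startpg: 0x%08lx\n", startpg);
        printf("bootmem_lastpg:  0x%08lx\n", lastpg);
        return 0;
}

With the example values this prints bootmem_startpg = 0x1400 and bootmem_lastpg = 0x11000, i.e. both PFNs are now expressed relative to physical address zero rather than to the kernel load address, which is what lets init_bootmem_node() take min_low_pfn = ARCH_PFN_OFFSET.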
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index 308ef0ce648..8704c932003 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -53,8 +53,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
int si_code = SEGV_MAPERR;
int fault;
const struct exception_table_entry *fixup;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (cause > 0 ? FAULT_FLAG_WRITE : 0);
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
/*
* If we're in an interrupt or have no user context,
@@ -65,6 +64,8 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
local_irq_enable();
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
retry:
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
@@ -96,6 +97,7 @@ good_area:
case FLT_STORE:
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
+ flags |= FAULT_FLAG_WRITE;
break;
}
@@ -147,7 +149,7 @@ good_area:
}
info.si_errno = 0;
info.si_addr = (void __user *)address;
- force_sig_info(info.si_code, &info, current);
+ force_sig_info(info.si_signo, &info, current);
return;
bad_area:
@@ -158,7 +160,7 @@ bad_area:
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void *)address;
- force_sig_info(SIGSEGV, &info, current);
+ force_sig_info(info.si_signo, &info, current);
return;
}
/* Kernel-mode fault falls through */
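For reference, here is a condensed restatement (not the kernel source) of the fault-flag handling that vm_fault.c converges on after these hunks: FAULT_FLAG_WRITE is no longer derived up front from the sign of cause, FAULT_FLAG_USER is set for user-mode faults, and the write flag is added only once the VMA check has identified a store. The flag values and FLT_* constants below are placeholders chosen for illustration.

/*
 * Illustrative sketch only: the flag-selection logic of the patched
 * do_page_fault(), with invented constant values.
 */
#include <stdio.h>
#include <stdbool.h>

#define FAULT_FLAG_ALLOW_RETRY  0x01
#define FAULT_FLAG_KILLABLE     0x02
#define FAULT_FLAG_WRITE        0x04
#define FAULT_FLAG_USER         0x08

enum fault_cause { FLT_IFETCH, FLT_LOAD, FLT_STORE };

static unsigned int fault_flags(enum fault_cause cause, bool from_user)
{
        /* Base flags are always set; WRITE is no longer guessed from cause. */
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        /* New in this patch: record that the fault came from user mode. */
        if (from_user)
                flags |= FAULT_FLAG_USER;

        /* WRITE is added only for a store fault, after the VM_WRITE check. */
        if (cause == FLT_STORE)
                flags |= FAULT_FLAG_WRITE;

        return flags;
}

int main(void)
{
        printf("store fault from user mode -> flags 0x%x\n",
               fault_flags(FLT_STORE, true));
        return 0;
}

The force_sig_info() hunks make a related cleanup: the signal number passed is now info.si_signo in both paths, instead of reusing the si_code value or a bare SIGSEGV.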