path: root/arch/parisc/mm
Diffstat (limited to 'arch/parisc/mm')
-rw-r--r--  arch/parisc/mm/fault.c   | 220
-rw-r--r--  arch/parisc/mm/init.c    | 550
-rw-r--r--  arch/parisc/mm/ioremap.c | 166
-rw-r--r--  arch/parisc/mm/kmap.c    | 166
4 files changed, 465 insertions(+), 637 deletions(-)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 0ad945d4c0a..3ca9c1131cf 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -1,5 +1,4 @@
-/* $Id: fault.c,v 1.5 2000/01/26 16:20:29 jsm Exp $
- *
+/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
@@ -20,14 +19,6 @@
#include <asm/uaccess.h>
#include <asm/traps.h>
-#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */
- /* dumped to the console via printk) */
-
-
-/* Defines for parisc_acctyp() */
-#define READ 0
-#define WRITE 1
-
/* Various important other fields */
#define bit22set(x) (x & 0x00000200)
#define bits23_25set(x) (x & 0x000001c0)
@@ -39,6 +30,8 @@
DEFINE_PER_CPU(struct exception_data, exception_data);
+int show_unhandled_signals = 1;
+
/*
* parisc_acctyp(unsigned int inst) --
* Given a PA-RISC memory access instruction, determine if the
@@ -143,18 +136,93 @@ parisc_acctyp(unsigned long code, unsigned int inst)
}
#endif
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fix;
+
+ /* If we only stored 32bit addresses in the exception table we can drop
+ * out if we faulted on a 64bit address. */
+ if ((sizeof(regs->iaoq[0]) > sizeof(fix->insn))
+ && (regs->iaoq[0] >> 32))
+ return 0;
+
+ fix = search_exception_tables(regs->iaoq[0]);
+ if (fix) {
+ struct exception_data *d;
+ d = this_cpu_ptr(&exception_data);
+ d->fault_ip = regs->iaoq[0];
+ d->fault_space = regs->isr;
+ d->fault_addr = regs->ior;
+
+ regs->iaoq[0] = ((fix->fixup) & ~3);
+ /*
+ * NOTE: In some cases the faulting instruction
+ * may be in the delay slot of a branch. We
+ * don't want to take the branch, so we don't
+ * increment iaoq[1], instead we set it to be
+ * iaoq[0]+4, and clear the B bit in the PSW
+ */
+ regs->iaoq[1] = regs->iaoq[0] + 4;
+ regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
+
+ return 1;
+ }
+
+ return 0;
+}
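Conceptually, the exception table consulted by fixup_exception() above is a sorted array of (possibly-faulting instruction address, fixup address) pairs, and search_exception_tables() boils down to a binary search over it. A minimal sketch of the idea (field names follow the parisc entry used above; the real lookup lives in the generic extable code):

        struct exception_table_entry {
                unsigned long insn;     /* address of the insn that may fault */
                unsigned long fixup;    /* address to resume at if it does */
        };

        /* Binary search: entries are sorted by ->insn at link time. */
        static const struct exception_table_entry *
        search_one_extable(const struct exception_table_entry *first,
                           const struct exception_table_entry *last,
                           unsigned long addr)
        {
                while (first <= last) {
                        const struct exception_table_entry *mid =
                                first + ((last - first) >> 1);

                        if (mid->insn == addr)
                                return mid;
                        if (mid->insn < addr)
                                first = mid + 1;
                        else
                                last = mid - 1;
                }
                return NULL;    /* no fixup: a genuine kernel fault */
        }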
+
+/*
+ * Print out info about fatal segfaults, if the show_unhandled_signals
+ * sysctl is set:
+ */
+static inline void
+show_signal_msg(struct pt_regs *regs, unsigned long code,
+ unsigned long address, struct task_struct *tsk,
+ struct vm_area_struct *vma)
+{
+ if (!unhandled_signal(tsk, SIGSEGV))
+ return;
+
+ if (!printk_ratelimit())
+ return;
+
+ pr_warn("\n");
+ pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx",
+ tsk->comm, code, address);
+ print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
+ if (vma)
+ pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n",
+ vma->vm_start, vma->vm_end);
+
+ show_regs(regs);
+}
+
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address)
{
struct vm_area_struct *vma, *prev_vma;
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- const struct exception_table_entry *fix;
+ struct task_struct *tsk;
+ struct mm_struct *mm;
unsigned long acc_type;
+ int fault;
+ unsigned int flags;
- if (in_interrupt() || !mm)
+ if (in_atomic())
goto no_context;
+ tsk = current;
+ mm = tsk->mm;
+ if (!mm)
+ goto no_context;
+
+ flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ acc_type = parisc_acctyp(code, regs->iir);
+ if (acc_type & VM_WRITE)
+ flags |= FAULT_FLAG_WRITE;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma_prev(mm, address, &prev_vma);
if (!vma || address < vma->vm_start)
@@ -166,8 +234,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
good_area:
- acc_type = parisc_acctyp(code,regs->iir);
-
if ((vma->vm_flags & acc_type) != acc_type)
goto bad_area;
@@ -177,22 +243,39 @@ good_area:
* fault.
*/
- switch (handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0)) {
- case VM_FAULT_MINOR:
- ++current->min_flt;
- break;
- case VM_FAULT_MAJOR:
- ++current->maj_flt;
- break;
- case VM_FAULT_SIGBUS:
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
/*
- * We hit a hared mapping outside of the file, or some
+ * We hit a shared mapping outside of the file, or some
* other thing happened to us that made us unable to
* handle the page fault gracefully.
*/
- goto bad_area;
- default:
- goto out_of_memory;
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto bad_area;
+ BUG();
+ }
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /*
+ * No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
}
up_read(&mm->mmap_sem);
return;
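The hunk above implements the fault-retry protocol that was new at the time. Condensed to its skeleton (names as in the code above, error paths elided), the handshake with handle_mm_fault() is:

        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

        retry:
                down_read(&mm->mmap_sem);
                /* ... look up and validate the vma ... */
                fault = handle_mm_fault(mm, vma, address, flags);

                if (fault & VM_FAULT_RETRY) {
                        /* handle_mm_fault() already dropped mmap_sem for us
                         * (in __lock_page_or_retry); retry exactly once,
                         * this time letting the fault sleep on the page
                         * lock. */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        goto retry;
                }
                up_read(&mm->mmap_sem);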
@@ -211,61 +294,56 @@ bad_area:
if (user_mode(regs)) {
struct siginfo si;
-#ifdef PRINT_USER_FAULTS
- printk(KERN_DEBUG "\n");
- printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
- tsk->pid, tsk->comm, code, address);
- if (vma) {
- printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
- vma->vm_start, vma->vm_end);
+ show_signal_msg(regs, code, address, tsk, vma);
+
+ switch (code) {
+ case 15: /* Data TLB miss fault/Data page fault */
+ /* send SIGSEGV when outside of vma */
+ if (!vma ||
+ address < vma->vm_start || address > vma->vm_end) {
+ si.si_signo = SIGSEGV;
+ si.si_code = SEGV_MAPERR;
+ break;
+ }
+
+ /* send SIGSEGV for wrong permissions */
+ if ((vma->vm_flags & acc_type) != acc_type) {
+ si.si_signo = SIGSEGV;
+ si.si_code = SEGV_ACCERR;
+ break;
+ }
+
+ /* probably address is outside of mapped file */
+ /* fall through */
+ case 17: /* NA data TLB miss / page fault */
+ case 18: /* Unaligned access - PCXS only */
+ si.si_signo = SIGBUS;
+ si.si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
+ break;
+ case 16: /* Non-access instruction TLB miss fault */
+ case 26: /* PCXL: Data memory access rights trap */
+ default:
+ si.si_signo = SIGSEGV;
+ si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
+ break;
}
- show_regs(regs);
-#endif
- /* FIXME: actually we need to get the signo and code correct */
- si.si_signo = SIGSEGV;
si.si_errno = 0;
- si.si_code = SEGV_MAPERR;
si.si_addr = (void __user *) address;
- force_sig_info(SIGSEGV, &si, current);
+ force_sig_info(si.si_signo, &si, current);
return;
}
no_context:
- if (!user_mode(regs)) {
- fix = search_exception_tables(regs->iaoq[0]);
-
- if (fix) {
- struct exception_data *d;
-
- d = &__get_cpu_var(exception_data);
- d->fault_ip = regs->iaoq[0];
- d->fault_space = regs->isr;
- d->fault_addr = regs->ior;
-
- regs->iaoq[0] = ((fix->fixup) & ~3);
-
- /*
- * NOTE: In some cases the faulting instruction
- * may be in the delay slot of a branch. We
- * don't want to take the branch, so we don't
- * increment iaoq[1], instead we set it to be
- * iaoq[0]+4, and clear the B bit in the PSW
- */
-
- regs->iaoq[1] = regs->iaoq[0] + 4;
- regs->gr[0] &= ~PSW_B; /* IPSW in gr[0] */
-
- return;
- }
+ if (!user_mode(regs) && fixup_exception(regs)) {
+ return;
}
parisc_terminate("Bad Address (null pointer deref?)", regs, code, address);
out_of_memory:
up_read(&mm->mmap_sem);
- printk(KERN_CRIT "VM: killing process %s\n", current->comm);
- if (user_mode(regs))
- do_exit(SIGKILL);
- goto no_context;
+ if (!user_mode(regs))
+ goto no_context;
+ pagefault_out_of_memory();
}
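The si_signo/si_code values chosen in the switch above are what a user process observes through SA_SIGINFO. A small, self-contained userspace program exercising the SEGV_MAPERR path (any unmapped address will do):

        #include <signal.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        static void segv_handler(int sig, siginfo_t *si, void *ctx)
        {
                /* si_code tells "no mapping" (SEGV_MAPERR) apart from
                 * "bad permissions" (SEGV_ACCERR) */
                fprintf(stderr, "SIGSEGV at %p, si_code=%s\n", si->si_addr,
                        si->si_code == SEGV_MAPERR ? "SEGV_MAPERR"
                                                   : "SEGV_ACCERR");
                _exit(1);
        }

        int main(void)
        {
                struct sigaction sa;

                memset(&sa, 0, sizeof(sa));
                sa.sa_sigaction = segv_handler;
                sa.sa_flags = SA_SIGINFO;
                sigaction(SIGSEGV, &sa, NULL);

                *(volatile int *)8 = 42;  /* unmapped -> SEGV_MAPERR */
                return 0;
        }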
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 2886ad70db4..0bef864264c 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -6,14 +6,15 @@
* changed by Philipp Rumpf
* Copyright 1999 Philipp Rumpf (prumpf@tux.org)
* Copyright 2004 Randolph Chung (tausq@debian.org)
+ * Copyright 2006-2007 Helge Deller (deller@gmx.de)
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
+#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h> /* for hppa_dma_ops and pcxl_dma_ops */
@@ -24,21 +25,30 @@
#include <linux/pagemap.h> /* for release_pages and page_cache_release */
#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
+#include <asm/sections.h>
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-extern char _text; /* start of kernel code, defined by linker */
extern int data_start;
-extern char _end; /* end of BSS, defined by linker */
-extern char __init_begin, __init_end;
+extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
+
+#if PT_NLEVELS == 3
+/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
+ * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
+ * guarantee that global objects will be laid out in memory in the same order
+ * as the order of declaration, so put these in different sections and use
+ * the linker script to order them. */
+pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
+#endif
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
#ifdef CONFIG_DISCONTIGMEM
-struct node_map_data node_data[MAX_NUMNODES];
-bootmem_data_t bmem_data[MAX_NUMNODES];
-unsigned char pfnnid_map[PFNNID_MAP_MAX];
+struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
+signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
#endif
static struct resource data_resource = {
@@ -58,33 +68,32 @@ static struct resource pdcdata_resource = {
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};
-static struct resource sysram_resources[MAX_PHYSMEM_RANGES];
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
/* The following array is initialized from the firmware specific
* information retrieved in kernel/inventory.c.
*/
-physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES];
-int npmem_ranges;
+physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
+int npmem_ranges __read_mostly;
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
#define MAX_MEM (~0UL)
-#else /* !__LP64__ */
+#else /* !CONFIG_64BIT */
#define MAX_MEM (3584U*1024U*1024U)
-#endif /* !__LP64__ */
+#endif /* !CONFIG_64BIT */
-static unsigned long mem_limit = MAX_MEM;
+static unsigned long mem_limit __read_mostly = MAX_MEM;
static void __init mem_limit_func(void)
{
char *cp, *end;
unsigned long limit;
- extern char saved_command_line[];
/* We need this before __setup() functions are called */
limit = MAX_MEM;
- for (cp = saved_command_line; *cp; ) {
+ for (cp = boot_command_line; *cp; ) {
if (memcmp(cp, "mem=", 4) == 0) {
cp += 4;
limit = memparse(cp, &end);
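memparse() accepts a size with an optional K/M/G suffix and returns it in bytes, advancing *end past what it consumed, so "mem=512M" yields 536870912 here. A userspace approximation of its behavior (a sketch, not the kernel's exact code):

        #include <stdlib.h>

        static unsigned long my_memparse(const char *p, char **end)
        {
                unsigned long v = strtoul(p, end, 0);

                switch (**end) {
                case 'G': case 'g':
                        v <<= 10;       /* fall through */
                case 'M': case 'm':
                        v <<= 10;       /* fall through */
                case 'K': case 'k':
                        v <<= 10;
                        (*end)++;       /* consume the suffix */
                default:
                        break;
                }
                return v;
        }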
@@ -206,7 +215,6 @@ static void __init setup_bootmem(void)
mem_limit_func(); /* check for "mem=" argument */
mem_max = 0;
- num_physpages = 0;
for (i = 0; i < npmem_ranges; i++) {
unsigned long rsize;
@@ -221,10 +229,8 @@ static void __init setup_bootmem(void)
npmem_ranges = i + 1;
mem_max = mem_limit;
}
- num_physpages += pmem_ranges[i].pages;
break;
}
- num_physpages += pmem_ranges[i].pages;
mem_max += rsize;
}
@@ -264,12 +270,14 @@ static void __init setup_bootmem(void)
#ifdef CONFIG_DISCONTIGMEM
for (i = 0; i < MAX_PHYSMEM_RANGES; i++) {
memset(NODE_DATA(i), 0, sizeof(pg_data_t));
- NODE_DATA(i)->bdata = &bmem_data[i];
+ NODE_DATA(i)->bdata = &bootmem_node_data[i];
}
memset(pfnnid_map, 0xff, sizeof(pfnnid_map));
- for (i = 0; i < npmem_ranges; i++)
+ for (i = 0; i < npmem_ranges; i++) {
+ node_set_state(i, N_NORMAL_MEMORY);
node_set_online(i);
+ }
#endif
/*
@@ -300,21 +308,29 @@ static void __init setup_bootmem(void)
max_pfn = start_pfn + npages;
}
- if ((bootmap_pfn - bootmap_start_pfn) != bootmap_pages) {
- printk(KERN_WARNING "WARNING! bootmap sizing is messed up!\n");
- BUG();
- }
+ /* The IOMMU is always used to access "high mem" on boxes that
+ * can hold more memory than a PCI device is able to DMA to
+ * directly.
+ * ISA DMA support will need to revisit this.
+ */
+ max_low_pfn = max_pfn;
+
+ /* bootmap sizing messed up? */
+ BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages);
/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */
#define PDC_CONSOLE_IO_IODC_SIZE 32768
reserve_bootmem_node(NODE_DATA(0), 0UL,
- (unsigned long)(PAGE0->mem_free + PDC_CONSOLE_IO_IODC_SIZE));
- reserve_bootmem_node(NODE_DATA(0),__pa((unsigned long)&_text),
- (unsigned long)(&_end - &_text));
+ (unsigned long)(PAGE0->mem_free +
+ PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT);
+ reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START),
+ (unsigned long)(_end - KERNEL_BINARY_TEXT_START),
+ BOOTMEM_DEFAULT);
reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),
- ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT));
+ ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),
+ BOOTMEM_DEFAULT);
#ifndef CONFIG_DISCONTIGMEM
@@ -323,7 +339,8 @@ static void __init setup_bootmem(void)
for (i = 0; i < npmem_holes; i++) {
reserve_bootmem_node(NODE_DATA(0),
(pmem_holes[i].start_pfn << PAGE_SHIFT),
- (pmem_holes[i].pages << PAGE_SHIFT));
+ (pmem_holes[i].pages << PAGE_SHIFT),
+ BOOTMEM_DEFAULT);
}
#endif
@@ -341,14 +358,15 @@ static void __init setup_bootmem(void)
initrd_below_start_ok = 1;
printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);
- reserve_bootmem_node(NODE_DATA(0),__pa(initrd_start), initrd_reserve);
+ reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start),
+ initrd_reserve, BOOTMEM_DEFAULT);
}
}
#endif
data_resource.start = virt_to_phys(&data_start);
- data_resource.end = virt_to_phys(&_end)-1;
- code_resource.start = virt_to_phys(&_text);
+ data_resource.end = virt_to_phys(_end) - 1;
+ code_resource.start = virt_to_phys(_text);
code_resource.end = virt_to_phys(&data_start)-1;
/* We don't know which region the kernel will be in, so try
@@ -362,191 +380,20 @@ static void __init setup_bootmem(void)
request_resource(&sysram_resources[0], &pdcdata_resource);
}
-void free_initmem(void)
+static int __init parisc_text_address(unsigned long vaddr)
{
- /* FIXME: */
-#if 0
- printk(KERN_INFO "NOT FREEING INITMEM (%dk)\n",
- (&__init_end - &__init_begin) >> 10);
- return;
-#else
- unsigned long addr;
-
- printk(KERN_INFO "Freeing unused kernel memory: ");
+ static unsigned long head_ptr __initdata;
-#if 1
- /* Attempt to catch anyone trying to execute code here
- * by filling the page with BRK insns.
- *
- * If we disable interrupts for all CPUs, then IPI stops working.
- * Kinda breaks the global cache flushing.
- */
- local_irq_disable();
-
- memset(&__init_begin, 0x00,
- (unsigned long)&__init_end - (unsigned long)&__init_begin);
-
- flush_data_cache();
- asm volatile("sync" : : );
- flush_icache_range((unsigned long)&__init_begin, (unsigned long)&__init_end);
- asm volatile("sync" : : );
+ if (!head_ptr)
+ head_ptr = PAGE_MASK & (unsigned long)
+ dereference_function_descriptor(&parisc_kernel_start);
- local_irq_enable();
-#endif
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
- free_page(addr);
- num_physpages++;
- totalram_pages++;
- }
-
- /* set up a new led state on systems shipped LED State panel */
- pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
-
- printk("%luk freed\n", (unsigned long)(&__init_end - &__init_begin) >> 10);
-#endif
+ return core_kernel_text(vaddr) || vaddr == head_ptr;
}
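The dereference_function_descriptor() step is needed because on 64-bit parisc (as on ia64 and ppc64) a C function pointer points at a descriptor record, not at the first instruction. A sketch of the concept (field layout illustrative, not the exact Elf64_Fdesc definition):

        struct fdesc_sketch {
                unsigned long addr;     /* actual code entry point */
                unsigned long gp;       /* global pointer for the callee */
        };

        static unsigned long code_address(void *funcptr)
        {
                /* funcptr really points at the descriptor; the code
                 * address is one of its fields */
                return ((struct fdesc_sketch *)funcptr)->addr;
        }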
-/*
- * Just an arbitrary offset to serve as a "hole" between mapping areas
- * (between top of physical memory and a potential pcxl dma mapping
- * area, and below the vmalloc mapping area).
- *
- * The current 32K value just means that there will be a 32K "hole"
- * between mapping areas. That means that any out-of-bounds memory
- * accesses will hopefully be caught. The vmalloc() routines leaves
- * a hole of 4kB between each vmalloced area for the same reason.
- */
-
- /* Leave room for gateway page expansion */
-#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
-#error KERNEL_MAP_START is in gateway reserved region
-#endif
-#define MAP_START (KERNEL_MAP_START)
-
-#define VM_MAP_OFFSET (32*1024)
-#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
- & ~(VM_MAP_OFFSET-1)))
-
-void *vmalloc_start;
-EXPORT_SYMBOL(vmalloc_start);
-
-#ifdef CONFIG_PA11
-unsigned long pcxl_dma_start;
-#endif
-
-void __init mem_init(void)
-{
- high_memory = __va((max_pfn << PAGE_SHIFT));
-
-#ifndef CONFIG_DISCONTIGMEM
- max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1;
- totalram_pages += free_all_bootmem();
-#else
- {
- int i;
-
- for (i = 0; i < npmem_ranges; i++)
- totalram_pages += free_all_bootmem_node(NODE_DATA(i));
- }
-#endif
-
- printk(KERN_INFO "Memory: %luk available\n", num_physpages << (PAGE_SHIFT-10));
-
-#ifdef CONFIG_PA11
- if (hppa_dma_ops == &pcxl_dma_ops) {
- pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
- vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE);
- } else {
- pcxl_dma_start = 0;
- vmalloc_start = SET_MAP_OFFSET(MAP_START);
- }
-#else
- vmalloc_start = SET_MAP_OFFSET(MAP_START);
-#endif
-
-}
-
-int do_check_pgt_cache(int low, int high)
-{
- return 0;
-}
-
-unsigned long *empty_zero_page;
-
-void show_mem(void)
-{
- int i,free = 0,total = 0,reserved = 0;
- int shared = 0, cached = 0;
-
- printk(KERN_INFO "Mem-info:\n");
- show_free_areas();
- printk(KERN_INFO "Free swap: %6ldkB\n",
- nr_swap_pages<<(PAGE_SHIFT-10));
-#ifndef CONFIG_DISCONTIGMEM
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (!page_count(&mem_map[i]))
- free++;
- else
- shared += page_count(&mem_map[i]) - 1;
- }
-#else
- for (i = 0; i < npmem_ranges; i++) {
- int j;
-
- for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
- struct page *p;
-
- p = nid_page_nr(i, j) - node_start_pfn(i);
-
- total++;
- if (PageReserved(p))
- reserved++;
- else if (PageSwapCache(p))
- cached++;
- else if (!page_count(p))
- free++;
- else
- shared += page_count(p) - 1;
- }
- }
-#endif
- printk(KERN_INFO "%d pages of RAM\n", total);
- printk(KERN_INFO "%d reserved pages\n", reserved);
- printk(KERN_INFO "%d pages shared\n", shared);
- printk(KERN_INFO "%d pages swap cached\n", cached);
-
-
-#ifdef CONFIG_DISCONTIGMEM
- {
- struct zonelist *zl;
- int i, j, k;
-
- for (i = 0; i < npmem_ranges; i++) {
- for (j = 0; j < MAX_NR_ZONES; j++) {
- zl = NODE_DATA(i)->node_zonelists + j;
-
- printk("Zone list for zone %d on node %d: ", j, i);
- for (k = 0; zl->zones[k] != NULL; k++)
- printk("[%d/%s] ", zl->zones[k]->zone_pgdat->node_id, zl->zones[k]->name);
- printk("\n");
- }
- }
- }
-#endif
-}
-
-
-static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot)
+static void __init map_pages(unsigned long start_vaddr,
+ unsigned long start_paddr, unsigned long size,
+ pgprot_t pgprot, int force)
{
pgd_t *pg_dir;
pmd_t *pmd;
@@ -557,6 +404,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
unsigned long tmp1;
unsigned long tmp2;
unsigned long address;
+ unsigned long vaddr;
unsigned long ro_start;
unsigned long ro_end;
unsigned long fv_addr;
@@ -564,7 +412,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
extern const unsigned long fault_vector_20;
extern void * const linux_gateway_page;
- ro_start = __pa((unsigned long)&_text);
+ ro_start = __pa((unsigned long)_text);
ro_end = __pa((unsigned long)&data_start);
fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
@@ -581,6 +429,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
address = start_paddr;
+ vaddr = start_vaddr;
while (address < end_paddr) {
#if PTRS_PER_PMD == 1
pmd = (pmd_t *)__pa(pg_dir);
@@ -592,7 +441,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
*/
if (!pmd) {
- pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER);
+ pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER);
pmd = (pmd_t *) __pa(pmd);
}
@@ -603,7 +452,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
/* now change pmd to kernel virtual addresses */
pmd = (pmd_t *)__va(pmd) + start_pmd;
- for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) {
+ for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
/*
* pg_table is physical at this point
@@ -612,7 +461,7 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
pg_table = (pte_t *)pmd_address(*pmd);
if (!pg_table) {
pg_table = (pte_t *)
- alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE);
+ alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE);
pg_table = (pte_t *) __pa(pg_table);
}
@@ -621,26 +470,39 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
/* now change pg_table to kernel virtual addresses */
pg_table = (pte_t *) __va(pg_table) + start_pte;
- for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) {
+ for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
pte_t pte;
/*
* Map the fault vector writable so we can
* write the HPMC checksum.
*/
+ if (force)
+ pte = __mk_pte(address, pgprot);
+ else if (parisc_text_address(vaddr) &&
+ address != fv_addr)
+ pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+ else
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
if (address >= ro_start && address < ro_end
&& address != fv_addr
&& address != gw_addr)
- pte = __mk_pte(address, PAGE_KERNEL_RO);
+ pte = __mk_pte(address, PAGE_KERNEL_RO);
else
- pte = __mk_pte(address, pgprot);
+#endif
+ pte = __mk_pte(address, pgprot);
- if (address >= end_paddr)
- pte_val(pte) = 0;
+ if (address >= end_paddr) {
+ if (force)
+ break;
+ else
+ pte_val(pte) = 0;
+ }
set_pte(pg_table, pte);
address += PAGE_SIZE;
+ vaddr += PAGE_SIZE;
}
start_pte = 0;
@@ -651,6 +513,185 @@ static void __init map_pages(unsigned long start_vaddr, unsigned long start_padd
}
}
+void free_initmem(void)
+{
+ unsigned long init_begin = (unsigned long)__init_begin;
+ unsigned long init_end = (unsigned long)__init_end;
+
+ /* The init text pages are marked R-X. We have to
+ * flush the icache and mark them RW-
+ *
+ * This is tricky, because map_pages is in the init section.
+ * Do a dummy remap of the data section first (the data
+ * section is already PAGE_KERNEL) to pull in the TLB entries
+ * for map_kernel */
+ map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+ PAGE_KERNEL_RWX, 1);
+ /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
+ * map_pages */
+ map_pages(init_begin, __pa(init_begin), init_end - init_begin,
+ PAGE_KERNEL, 1);
+
+ /* force the kernel to see the new TLB entries */
+ __flush_tlb_range(0, init_begin, init_end);
+ /* Attempt to catch anyone trying to execute code here
+ * by filling the page with BRK insns.
+ */
+ memset((void *)init_begin, 0x00, init_end - init_begin);
+ /* finally dump all the instructions which were cached, since the
+ * pages are no-longer executable */
+ flush_icache_range(init_begin, init_end);
+
+ free_initmem_default(-1);
+
+ /* set up a new LED state on systems shipped with an LED State panel */
+ pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
+}
+
+
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void)
+{
+ /* rodata memory was already mapped with KERNEL_RO access rights by
+ pagetable_init() and map_pages(). No need to do additional stuff here */
+ printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
+ (unsigned long)(__end_rodata - __start_rodata) >> 10);
+}
+#endif
+
+
+/*
+ * Just an arbitrary offset to serve as a "hole" between mapping areas
+ * (between top of physical memory and a potential pcxl dma mapping
+ * area, and below the vmalloc mapping area).
+ *
+ * The current 32K value just means that there will be a 32K "hole"
+ * between mapping areas. That means that any out-of-bounds memory
+ * accesses will hopefully be caught. The vmalloc() routines leave
+ * a hole of 4kB between each vmalloced area for the same reason.
+ */
+
+ /* Leave room for gateway page expansion */
+#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
+#error KERNEL_MAP_START is in gateway reserved region
+#endif
+#define MAP_START (KERNEL_MAP_START)
+
+#define VM_MAP_OFFSET (32*1024)
+#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
+ & ~(VM_MAP_OFFSET-1)))
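Note that SET_MAP_OFFSET() rounds up by a full VM_MAP_OFFSET even when its argument is already aligned, which is exactly what guarantees the 32K hole. A standalone demonstration of the arithmetic:

        #include <stdio.h>

        #define VM_MAP_OFFSET (32*1024)
        #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
                                     & ~(VM_MAP_OFFSET-1)))

        int main(void)
        {
                /* unaligned input: rounded up to the next 32K boundary */
                printf("%p\n", SET_MAP_OFFSET(0x10004000UL));  /* 0x10008000 */
                /* aligned input: still advances by 32K, leaving a hole */
                printf("%p\n", SET_MAP_OFFSET(0x10008000UL));  /* 0x10010000 */
                return 0;
        }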
+
+void *parisc_vmalloc_start __read_mostly;
+EXPORT_SYMBOL(parisc_vmalloc_start);
+
+#ifdef CONFIG_PA11
+unsigned long pcxl_dma_start __read_mostly;
+#endif
+
+void __init mem_init(void)
+{
+ /* Do sanity checks on page table constants */
+ BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
+ BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
+ BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
+ BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
+ > BITS_PER_LONG);
+
+ high_memory = __va((max_pfn << PAGE_SHIFT));
+ set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1);
+ free_all_bootmem();
+
+#ifdef CONFIG_PA11
+ if (hppa_dma_ops == &pcxl_dma_ops) {
+ pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
+ parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
+ + PCXL_DMA_MAP_SIZE);
+ } else {
+ pcxl_dma_start = 0;
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+ }
+#else
+ parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
+#endif
+
+ mem_init_print_info(NULL);
+#ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
+ printk("virtual kernel memory layout:\n"
+ " vmalloc : 0x%p - 0x%p (%4ld MB)\n"
+ " memory : 0x%p - 0x%p (%4ld MB)\n"
+ " .init : 0x%p - 0x%p (%4ld kB)\n"
+ " .data : 0x%p - 0x%p (%4ld kB)\n"
+ " .text : 0x%p - 0x%p (%4ld kB)\n",
+
+ (void*)VMALLOC_START, (void*)VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+
+ __va(0), high_memory,
+ ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
+
+ __init_begin, __init_end,
+ ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,
+
+ _etext, _edata,
+ ((unsigned long)_edata - (unsigned long)_etext) >> 10,
+
+ _text, _etext,
+ ((unsigned long)_etext - (unsigned long)_text) >> 10);
+#endif
+}
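The BUILD_BUG_ON() checks at the top of mem_init() cost nothing at runtime: a true condition produces a negative array size and the compile fails. The classic trick, in sketch form:

        /* sketch of the idiom behind BUILD_BUG_ON() */
        #define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

        /* compiles to nothing when the sizes agree, breaks the build
         * otherwise, e.g.:
         * BUILD_BUG_ON_SKETCH(PTE_ENTRY_SIZE != sizeof(pte_t)); */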
+
+unsigned long *empty_zero_page __read_mostly;
+EXPORT_SYMBOL(empty_zero_page);
+
+void show_mem(unsigned int filter)
+{
+ int total = 0, reserved = 0;
+ pg_data_t *pgdat;
+
+ printk(KERN_INFO "Mem-info:\n");
+ show_free_areas(filter);
+
+ for_each_online_pgdat(pgdat) {
+ unsigned long flags;
+ int zoneid;
+
+ pgdat_resize_lock(pgdat, &flags);
+ for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+ struct zone *zone = &pgdat->node_zones[zoneid];
+ if (!populated_zone(zone))
+ continue;
+
+ total += zone->present_pages;
+ reserved += zone->present_pages - zone->managed_pages;
+ }
+ pgdat_resize_unlock(pgdat, &flags);
+ }
+
+ printk(KERN_INFO "%d pages of RAM\n", total);
+ printk(KERN_INFO "%d reserved pages\n", reserved);
+
+#ifdef CONFIG_DISCONTIGMEM
+ {
+ struct zonelist *zl;
+ int i, j;
+
+ for (i = 0; i < npmem_ranges; i++) {
+ zl = node_zonelist(i, 0);
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ struct zoneref *z;
+ struct zone *zone;
+
+ printk("Zone list for zone %d on node %d: ", j, i);
+ for_each_zone_zonelist(zone, z, zl, j)
+ printk("[%d/%s] ", zone_to_nid(zone),
+ zone->name);
+ printk("\n");
+ }
+ }
+ }
+#endif
+}
+
/*
* pagetable_init() sets up the page tables
*
@@ -675,19 +716,18 @@ static void __init pagetable_init(void)
size = pmem_ranges[range].pages << PAGE_SHIFT;
map_pages((unsigned long)__va(start_paddr), start_paddr,
- size, PAGE_KERNEL);
+ size, PAGE_KERNEL, 0);
}
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_end && initrd_end > mem_limit) {
- printk("initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
+ printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
map_pages(initrd_start, __pa(initrd_start),
- initrd_end - initrd_start, PAGE_KERNEL);
+ initrd_end - initrd_start, PAGE_KERNEL, 0);
}
#endif
empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
- memset(empty_zero_page, 0, PAGE_SIZE);
}
static void __init gateway_init(void)
@@ -707,7 +747,7 @@ static void __init gateway_init(void)
*/
map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
- PAGE_SIZE, PAGE_GATEWAY);
+ PAGE_SIZE, PAGE_GATEWAY, 1);
}
#ifdef CONFIG_HPUX
@@ -782,8 +822,6 @@ map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm)
EXPORT_SYMBOL(map_hpux_gateway_page);
#endif
-extern void flush_tlb_all_local(void);
-
void __init paging_init(void)
{
int i;
@@ -792,14 +830,12 @@ void __init paging_init(void)
pagetable_init();
gateway_init();
flush_cache_all_local(); /* start with known state */
- flush_tlb_all_local();
+ flush_tlb_all_local(NULL);
for (i = 0; i < npmem_ranges; i++) {
- unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0 };
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, };
- /* We have an IOMMU, so all memory can go into a single
- ZONE_DMA zone. */
- zones_size[ZONE_DMA] = pmem_ranges[i].pages;
+ zones_size[ZONE_NORMAL] = pmem_ranges[i].pages;
#ifdef CONFIG_DISCONTIGMEM
/* Need to initialize the pfnnid_map before we can initialize
@@ -814,7 +850,7 @@ void __init paging_init(void)
}
#endif
- free_area_init_node(i, NODE_DATA(i), zones_size,
+ free_area_init_node(i, zones_size,
pmem_ranges[i].start_pfn, NULL);
}
}
@@ -822,7 +858,7 @@ void __init paging_init(void)
#ifdef CONFIG_PA20
/*
- * Currently, all PA20 chips have 18 bit protection id's, which is the
+ * Currently, all PA20 chips have 18 bit protection IDs, which is the
* limiting factor (space ids are 32 bits).
*/
@@ -831,10 +867,10 @@ void __init paging_init(void)
#else
/*
- * Currently we have a one-to-one relationship between space id's and
- * protection id's. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
- * support 15 bit protection id's, so that is the limiting factor.
- * PCXT' has 18 bit protection id's, but only 16 bit spaceids, so it's
+ * Currently we have a one-to-one relationship between space IDs and
+ * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
+ * support 15 bit protection IDs, so that is the limiting factor.
+ * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
* probably not worth the effort for a special case here.
*/
@@ -865,8 +901,7 @@ unsigned long alloc_sid(void)
flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
spin_lock(&sid_lock);
}
- if (free_space_ids == 0)
- BUG();
+ BUG_ON(free_space_ids == 0);
}
free_space_ids--;
@@ -890,8 +925,7 @@ void free_sid(unsigned long spaceid)
spin_lock(&sid_lock);
- if (*dirty_space_offset & (1L << index))
- BUG(); /* attempt to free space id twice */
+ BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
*dirty_space_offset |= (1L << index);
dirty_space_ids++;
@@ -966,24 +1000,23 @@ static void recycle_sids(void)
static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
-static unsigned int recycle_inuse = 0;
+static unsigned int recycle_inuse;
void flush_tlb_all(void)
{
int do_recycle;
+ __inc_irq_stat(irq_tlb_count);
do_recycle = 0;
spin_lock(&sid_lock);
if (dirty_space_ids > RECYCLE_THRESHOLD) {
- if (recycle_inuse) {
- BUG(); /* FIXME: Use a semaphore/wait queue here */
- }
+ BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */
get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
recycle_inuse++;
do_recycle++;
}
spin_unlock(&sid_lock);
- on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
+ on_each_cpu(flush_tlb_all_local, NULL, 1);
if (do_recycle) {
spin_lock(&sid_lock);
recycle_sids(recycle_ndirty,recycle_dirty_array);
@@ -994,8 +1027,9 @@ void flush_tlb_all(void)
#else
void flush_tlb_all(void)
{
+ __inc_irq_stat(irq_tlb_count);
spin_lock(&sid_lock);
- flush_tlb_all_local();
+ flush_tlb_all_local(NULL);
recycle_sids();
spin_unlock(&sid_lock);
}
@@ -1004,16 +1038,6 @@ void flush_tlb_all(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
-#if 0
- if (start < end)
- printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
- free_page(start);
- num_physpages++;
- totalram_pages++;
- }
-#endif
+ free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif
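free_reserved_area() is essentially the generic version of the open-coded loop this hunk deletes: per page, clear PG_reserved, reset the refcount, hand the page to the buddy allocator, then log the total. Roughly (a simplified sketch of the mm/page_alloc.c helper of this era):

        static unsigned long free_reserved_area_sketch(void *start, void *end,
                                                       int poison, char *s)
        {
                unsigned long pages = 0;
                void *pos;

                for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
                        struct page *page = virt_to_page(pos);

                        ClearPageReserved(page);
                        init_page_count(page);  /* refcount back to 1 */
                        free_page((unsigned long)pos);
                }
                if (pages && s)
                        pr_info("Freeing %s memory: %ldK\n",
                                s, pages << (PAGE_SHIFT - 10));
                return pages;
        }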
diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
index f2df502cdae..838d0259cd2 100644
--- a/arch/parisc/mm/ioremap.c
+++ b/arch/parisc/mm/ioremap.c
@@ -1,132 +1,24 @@
/*
* arch/parisc/mm/ioremap.c
*
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
* (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
*/
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include <asm/pgalloc.h>
-static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
-{
- unsigned long end;
-
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- if (address >= end)
- BUG();
- do {
- if (!pte_none(*pte)) {
- printk(KERN_ERR "remap_area_pte: page already exists\n");
- BUG();
- }
- set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
- _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
- address += PAGE_SIZE;
- phys_addr += PAGE_SIZE;
- pte++;
- } while (address && (address < end));
-}
-
-static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
- unsigned long phys_addr, unsigned long flags)
-{
- unsigned long end;
-
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- phys_addr -= address;
- if (address >= end)
- BUG();
- do {
- pte_t * pte = pte_alloc_kernel(NULL, pmd, address);
- if (!pte)
- return -ENOMEM;
- remap_area_pte(pte, address, end - address, address + phys_addr, flags);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address && (address < end));
- return 0;
-}
-
-#if (USE_HPPA_IOREMAP)
-static int remap_area_pages(unsigned long address, unsigned long phys_addr,
- unsigned long size, unsigned long flags)
-{
- int error;
- pgd_t * dir;
- unsigned long end = address + size;
-
- phys_addr -= address;
- dir = pgd_offset(&init_mm, address);
- flush_cache_all();
- if (address >= end)
- BUG();
- spin_lock(&init_mm.page_table_lock);
- do {
- pmd_t *pmd;
- pmd = pmd_alloc(dir, address);
- error = -ENOMEM;
- if (!pmd)
- break;
- if (remap_area_pmd(pmd, address, end - address,
- phys_addr + address, flags))
- break;
- error = 0;
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- spin_unlock(&init_mm.page_table_lock);
- flush_tlb_all();
- return error;
-}
-#endif /* USE_HPPA_IOREMAP */
-
-#ifdef CONFIG_DEBUG_IOREMAP
-static unsigned long last = 0;
-
-void gsc_bad_addr(unsigned long addr)
-{
- if (time_after(jiffies, last + HZ*10)) {
- printk("gsc_foo() called with bad address 0x%lx\n", addr);
- dump_stack();
- last = jiffies;
- }
-}
-EXPORT_SYMBOL(gsc_bad_addr);
-
-void __raw_bad_addr(const volatile void __iomem *addr)
-{
- if (time_after(jiffies, last + HZ*10)) {
- printk("__raw_foo() called with bad address 0x%p\n", addr);
- dump_stack();
- last = jiffies;
- }
-}
-EXPORT_SYMBOL(__raw_bad_addr);
-#endif
-
/*
* Generic mapping function (not visible outside):
*/
/*
* Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
+ * address space.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
@@ -134,26 +26,21 @@ EXPORT_SYMBOL(__raw_bad_addr);
*/
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
-#if !(USE_HPPA_IOREMAP)
+ void __iomem *addr;
+ struct vm_struct *area;
+ unsigned long offset, last_addr;
+ pgprot_t pgprot;
+#ifdef CONFIG_EISA
unsigned long end = phys_addr + size - 1;
/* Support EISA addresses */
- if ((phys_addr >= 0x00080000 && end < 0x000fffff)
- || (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
- phys_addr |= 0xfc000000;
+ if ((phys_addr >= 0x00080000 && end < 0x000fffff) ||
+ (phys_addr >= 0x00500000 && end < 0x03bfffff)) {
+ phys_addr |= F_EXTEND(0xfc000000);
+ flags |= _PAGE_NO_CACHE;
}
-
-#ifdef CONFIG_DEBUG_IOREMAP
- return (void __iomem *)(phys_addr - (0x1UL << NYBBLE_SHIFT));
-#else
- return (void __iomem *)phys_addr;
#endif
-#else
- void * addr;
- struct vm_struct * area;
- unsigned long offset, last_addr;
-
/* Don't allow wraparound or zero size */
last_addr = phys_addr + size - 1;
if (!size || last_addr < phys_addr)
@@ -169,17 +56,22 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
t_addr = __va(phys_addr);
t_end = t_addr + (size - 1);
- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
+ for (page = virt_to_page(t_addr);
+ page <= virt_to_page(t_end); page++) {
if(!PageReserved(page))
return NULL;
+ }
}
+ pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY |
+ _PAGE_ACCESSED | flags);
+
/*
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
phys_addr &= PAGE_MASK;
- size = PAGE_ALIGN(last_addr) - phys_addr;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
/*
* Ok, go for it..
@@ -187,21 +79,21 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
area = get_vm_area(size, VM_IOREMAP);
if (!area)
return NULL;
- addr = area->addr;
- if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
+
+ addr = (void __iomem *) area->addr;
+ if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
+ phys_addr, pgprot)) {
vfree(addr);
return NULL;
}
- return (void __iomem *) (offset + (char *)addr);
-#endif
+
+ return (void __iomem *) (offset + (char __iomem *)addr);
}
+EXPORT_SYMBOL(__ioremap);
-void iounmap(void __iomem *addr)
+void iounmap(const volatile void __iomem *addr)
{
-#if !(USE_HPPA_IOREMAP)
- return;
-#else
if (addr > high_memory)
return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
-#endif
}
+EXPORT_SYMBOL(iounmap);
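For reference, a typical driver-side use of the interfaces exported above; the physical base address and register offsets are made-up values for illustration:

        #include <linux/kernel.h>
        #include <linux/io.h>

        static int probe_sketch(void)
        {
                void __iomem *regs;

                regs = ioremap_nocache(0xf4000000UL, 0x1000); /* hypothetical base */
                if (!regs)
                        return -ENOMEM;

                pr_info("device id: %08x\n", __raw_readl(regs + 0x00));
                __raw_writel(0x1, regs + 0x04);  /* hypothetical enable bit */

                iounmap(regs);
                return 0;
        }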
diff --git a/arch/parisc/mm/kmap.c b/arch/parisc/mm/kmap.c
deleted file mode 100644
index 1b1acd5e2f6..00000000000
--- a/arch/parisc/mm/kmap.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * kmap/page table map and unmap support routines
- *
- * Copyright 1999,2000 Hewlett-Packard Company
- * Copyright 2000 John Marvin <jsm at hp.com>
- * Copyright 2000 Grant Grundler <grundler at parisc-linux.org>
- * Copyright 2000 Philipp Rumpf <prumpf@tux.org>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-/*
-** Stolen mostly from arch/parisc/kernel/pci-dma.c
-*/
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/pci.h>
-
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-
-#include <asm/io.h>
-#include <asm/page.h> /* get_order */
-
-#undef flush_cache_all
-#define flush_cache_all flush_all_caches
-
-typedef void (*pte_iterator_t) (pte_t * pte, unsigned long arg);
-
-#if 0
-/* XXX This routine could be used with iterate_page() to replace
- * unmap_uncached_page() and save a little code space but I didn't
- * do that since I'm not certain whether this is the right path. -PB
- */
-static void unmap_cached_pte(pte_t * pte, unsigned long addr, unsigned long arg)
-{
- pte_t page = *pte;
- pte_clear(&init_mm, addr, pte);
- if (!pte_none(page)) {
- if (pte_present(page)) {
- unsigned long map_nr = pte_pagenr(page);
- if (map_nr < max_mapnr)
- __free_page(mem_map + map_nr);
- } else {
- printk(KERN_CRIT
- "Whee.. Swapped out page in kernel page table\n");
- }
- }
-}
-#endif
-
-/* These two routines should probably check a few things... */
-static void set_uncached(pte_t * pte, unsigned long arg)
-{
- pte_val(*pte) |= _PAGE_NO_CACHE;
-}
-
-static void set_cached(pte_t * pte, unsigned long arg)
-{
- pte_val(*pte) &= ~_PAGE_NO_CACHE;
-}
-
-static inline void iterate_pte(pmd_t * pmd, unsigned long address,
- unsigned long size, pte_iterator_t op,
- unsigned long arg)
-{
- pte_t *pte;
- unsigned long end;
-
- if (pmd_none(*pmd))
- return;
- if (pmd_bad(*pmd)) {
- pmd_ERROR(*pmd);
- pmd_clear(pmd);
- return;
- }
- pte = pte_offset(pmd, address);
- address &= ~PMD_MASK;
- end = address + size;
- if (end > PMD_SIZE)
- end = PMD_SIZE;
- do {
- op(pte, arg);
- address += PAGE_SIZE;
- pte++;
- } while (address < end);
-}
-
-static inline void iterate_pmd(pgd_t * dir, unsigned long address,
- unsigned long size, pte_iterator_t op,
- unsigned long arg)
-{
- pmd_t *pmd;
- unsigned long end;
-
- if (pgd_none(*dir))
- return;
- if (pgd_bad(*dir)) {
- pgd_ERROR(*dir);
- pgd_clear(dir);
- return;
- }
- pmd = pmd_offset(dir, address);
- address &= ~PGDIR_MASK;
- end = address + size;
- if (end > PGDIR_SIZE)
- end = PGDIR_SIZE;
- do {
- iterate_pte(pmd, address, end - address, op, arg);
- address = (address + PMD_SIZE) & PMD_MASK;
- pmd++;
- } while (address < end);
-}
-
-static void iterate_pages(unsigned long address, unsigned long size,
- pte_iterator_t op, unsigned long arg)
-{
- pgd_t *dir;
- unsigned long end = address + size;
-
- dir = pgd_offset_k(address);
- flush_cache_all();
- do {
- iterate_pmd(dir, address, end - address, op, arg);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- dir++;
- } while (address && (address < end));
- flush_tlb_all();
-}
-
-void
-kernel_set_cachemode(unsigned long vaddr, unsigned long size, int what)
-{
- switch (what) {
- case IOMAP_FULL_CACHING:
- iterate_pages(vaddr, size, set_cached, 0);
- flush_tlb_range(NULL, vaddr, size);
- break;
- case IOMAP_NOCACHE_SER:
- iterate_pages(vaddr, size, set_uncached, 0);
- flush_tlb_range(NULL, vaddr, size);
- break;
- default:
- printk(KERN_CRIT
- "kernel_set_cachemode mode %d not understood\n",
- what);
- break;
- }
-}