Diffstat (limited to 'arch/parisc/mm')
-rw-r--r--  arch/parisc/mm/fault.c | 135
-rw-r--r--  arch/parisc/mm/init.c  | 440
2 files changed, 307 insertions(+), 268 deletions(-)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 18162ce4261..3ca9c1131cf 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -19,10 +19,6 @@  #include <asm/uaccess.h>  #include <asm/traps.h> -#define PRINT_USER_FAULTS /* (turn this on if you want user faults to be */ -			 /*  dumped to the console via printk)          */ - -  /* Various important other fields */  #define bit22set(x)		(x & 0x00000200)  #define bits23_25set(x)		(x & 0x000001c0) @@ -34,6 +30,8 @@  DEFINE_PER_CPU(struct exception_data, exception_data); +int show_unhandled_signals = 1; +  /*   * parisc_acctyp(unsigned int inst) --   *    Given a PA-RISC memory access instruction, determine if the @@ -142,10 +140,16 @@ int fixup_exception(struct pt_regs *regs)  {  	const struct exception_table_entry *fix; +	/* If we only stored 32bit addresses in the exception table we can drop +	 * out if we faulted on a 64bit address. */ +	if ((sizeof(regs->iaoq[0]) > sizeof(fix->insn)) +		&& (regs->iaoq[0] >> 32)) +			return 0; +  	fix = search_exception_tables(regs->iaoq[0]);  	if (fix) {  		struct exception_data *d; -		d = &__get_cpu_var(exception_data); +		d = this_cpu_ptr(&exception_data);  		d->fault_ip = regs->iaoq[0];  		d->fault_space = regs->isr;  		d->fault_addr = regs->ior; @@ -167,18 +171,58 @@ int fixup_exception(struct pt_regs *regs)  	return 0;  } +/* + * Print out info about fatal segfaults, if the show_unhandled_signals + * sysctl is set: + */ +static inline void +show_signal_msg(struct pt_regs *regs, unsigned long code, +		unsigned long address, struct task_struct *tsk, +		struct vm_area_struct *vma) +{ +	if (!unhandled_signal(tsk, SIGSEGV)) +		return; + +	if (!printk_ratelimit()) +		return; + +	pr_warn("\n"); +	pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx", +	    tsk->comm, code, address); +	print_vma_addr(KERN_CONT " in ", regs->iaoq[0]); +	if (vma) +		pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n", +				vma->vm_start, vma->vm_end); + +	show_regs(regs); +} +  void do_page_fault(struct pt_regs *regs, unsigned long code,  			      unsigned long address)  {  	struct vm_area_struct *vma, *prev_vma; -	struct task_struct *tsk = current; -	struct mm_struct *mm = tsk->mm; +	struct task_struct *tsk; +	struct mm_struct *mm;  	unsigned long acc_type;  	int fault; +	unsigned int flags; + +	if (in_atomic()) +		goto no_context; -	if (in_atomic() || !mm) +	tsk = current; +	mm = tsk->mm; +	if (!mm)  		goto no_context; +	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; +	if (user_mode(regs)) +		flags |= FAULT_FLAG_USER; + +	acc_type = parisc_acctyp(code, regs->iir); +	if (acc_type & VM_WRITE) +		flags |= FAULT_FLAG_WRITE; +retry:  	down_read(&mm->mmap_sem);  	vma = find_vma_prev(mm, address, &prev_vma);  	if (!vma || address < vma->vm_start) @@ -190,8 +234,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,  good_area: -	acc_type = parisc_acctyp(code,regs->iir); -  	if ((vma->vm_flags & acc_type) != acc_type)  		goto bad_area; @@ -201,7 +243,11 @@ good_area:  	 * fault.  	 */ -	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? 
FAULT_FLAG_WRITE : 0); +	fault = handle_mm_fault(mm, vma, address, flags); + +	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) +		return; +  	if (unlikely(fault & VM_FAULT_ERROR)) {  		/*  		 * We hit a shared mapping outside of the file, or some @@ -214,10 +260,23 @@ good_area:  			goto bad_area;  		BUG();  	} -	if (fault & VM_FAULT_MAJOR) -		current->maj_flt++; -	else -		current->min_flt++; +	if (flags & FAULT_FLAG_ALLOW_RETRY) { +		if (fault & VM_FAULT_MAJOR) +			current->maj_flt++; +		else +			current->min_flt++; +		if (fault & VM_FAULT_RETRY) { +			flags &= ~FAULT_FLAG_ALLOW_RETRY; + +			/* +			 * No need to up_read(&mm->mmap_sem) as we would +			 * have already released it in __lock_page_or_retry +			 * in mm/filemap.c. +			 */ + +			goto retry; +		} +	}  	up_read(&mm->mmap_sem);  	return; @@ -235,22 +294,42 @@ bad_area:  	if (user_mode(regs)) {  		struct siginfo si; -#ifdef PRINT_USER_FAULTS -		printk(KERN_DEBUG "\n"); -		printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", -		    task_pid_nr(tsk), tsk->comm, code, address); -		if (vma) { -			printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", -					vma->vm_start, vma->vm_end); +		show_signal_msg(regs, code, address, tsk, vma); + +		switch (code) { +		case 15:	/* Data TLB miss fault/Data page fault */ +			/* send SIGSEGV when outside of vma */ +			if (!vma || +			    address < vma->vm_start || address > vma->vm_end) { +				si.si_signo = SIGSEGV; +				si.si_code = SEGV_MAPERR; +				break; +			} + +			/* send SIGSEGV for wrong permissions */ +			if ((vma->vm_flags & acc_type) != acc_type) { +				si.si_signo = SIGSEGV; +				si.si_code = SEGV_ACCERR; +				break; +			} + +			/* probably address is outside of mapped file */ +			/* fall through */ +		case 17:	/* NA data TLB miss / page fault */ +		case 18:	/* Unaligned access - PCXS only */ +			si.si_signo = SIGBUS; +			si.si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR; +			break; +		case 16:	/* Non-access instruction TLB miss fault */ +		case 26:	/* PCXL: Data memory access rights trap */ +		default: +			si.si_signo = SIGSEGV; +			si.si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR; +			break;  		} -		show_regs(regs); -#endif -		/* FIXME: actually we need to get the signo and code correct */ -		si.si_signo = SIGSEGV;  		si.si_errno = 0; -		si.si_code = SEGV_MAPERR;  		si.si_addr = (void __user *) address; -		force_sig_info(SIGSEGV, &si, current); +		force_sig_info(si.si_signo, &si, current);  		return;  	} diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index f4f4d700833..0bef864264c 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -31,13 +31,24 @@  #include <asm/mmzone.h>  #include <asm/sections.h> -DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); -  extern int  data_start; +extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */ + +#if PT_NLEVELS == 3 +/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout + * with the first pmd adjacent to the pgd and below it. gcc doesn't actually + * guarantee that global objects will be laid out in memory in the same order + * as the order of declaration, so put these in different sections and use + * the linker script to order them. 
*/ +pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE))); +#endif + +pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE))); +pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));  #ifdef CONFIG_DISCONTIGMEM  struct node_map_data node_data[MAX_NUMNODES] __read_mostly; -unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; +signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;  #endif  static struct resource data_resource = { @@ -204,7 +215,6 @@ static void __init setup_bootmem(void)  	mem_limit_func();       /* check for "mem=" argument */  	mem_max = 0; -	num_physpages = 0;  	for (i = 0; i < npmem_ranges; i++) {  		unsigned long rsize; @@ -219,10 +229,8 @@ static void __init setup_bootmem(void)  				npmem_ranges = i + 1;  				mem_max = mem_limit;  			} -	        num_physpages += pmem_ranges[i].pages;  			break;  		} -	    num_physpages += pmem_ranges[i].pages;  		mem_max += rsize;  	} @@ -266,8 +274,10 @@ static void __init setup_bootmem(void)  	}  	memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); -	for (i = 0; i < npmem_ranges; i++) +	for (i = 0; i < npmem_ranges; i++) { +		node_set_state(i, N_NORMAL_MEMORY);  		node_set_online(i); +	}  #endif  	/* @@ -315,8 +325,9 @@ static void __init setup_bootmem(void)  	reserve_bootmem_node(NODE_DATA(0), 0UL,  			(unsigned long)(PAGE0->mem_free +  				PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT); -	reserve_bootmem_node(NODE_DATA(0), __pa((unsigned long)_text), -			(unsigned long)(_end - _text), BOOTMEM_DEFAULT); +	reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START), +			(unsigned long)(_end - KERNEL_BINARY_TEXT_START), +			BOOTMEM_DEFAULT);  	reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT),  			((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT),  			BOOTMEM_DEFAULT); @@ -369,37 +380,172 @@ static void __init setup_bootmem(void)  	request_resource(&sysram_resources[0], &pdcdata_resource);  } +static int __init parisc_text_address(unsigned long vaddr) +{ +	static unsigned long head_ptr __initdata; + +	if (!head_ptr) +		head_ptr = PAGE_MASK & (unsigned long) +			dereference_function_descriptor(&parisc_kernel_start); + +	return core_kernel_text(vaddr) || vaddr == head_ptr; +} + +static void __init map_pages(unsigned long start_vaddr, +			     unsigned long start_paddr, unsigned long size, +			     pgprot_t pgprot, int force) +{ +	pgd_t *pg_dir; +	pmd_t *pmd; +	pte_t *pg_table; +	unsigned long end_paddr; +	unsigned long start_pmd; +	unsigned long start_pte; +	unsigned long tmp1; +	unsigned long tmp2; +	unsigned long address; +	unsigned long vaddr; +	unsigned long ro_start; +	unsigned long ro_end; +	unsigned long fv_addr; +	unsigned long gw_addr; +	extern const unsigned long fault_vector_20; +	extern void * const linux_gateway_page; + +	ro_start = __pa((unsigned long)_text); +	ro_end   = __pa((unsigned long)&data_start); +	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; +	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK; + +	end_paddr = start_paddr + size; + +	pg_dir = pgd_offset_k(start_vaddr); + +#if PTRS_PER_PMD == 1 +	start_pmd = 0; +#else +	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); +#endif +	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); + +	address = start_paddr; +	vaddr = start_vaddr; +	while (address < end_paddr) { +#if PTRS_PER_PMD == 1 +		pmd = (pmd_t *)__pa(pg_dir); +#else +		pmd = (pmd_t 
*)pgd_address(*pg_dir); + +		/* +		 * pmd is physical at this point +		 */ + +		if (!pmd) { +			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER); +			pmd = (pmd_t *) __pa(pmd); +		} + +		pgd_populate(NULL, pg_dir, __va(pmd)); +#endif +		pg_dir++; + +		/* now change pmd to kernel virtual addresses */ + +		pmd = (pmd_t *)__va(pmd) + start_pmd; +		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) { + +			/* +			 * pg_table is physical at this point +			 */ + +			pg_table = (pte_t *)pmd_address(*pmd); +			if (!pg_table) { +				pg_table = (pte_t *) +					alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE); +				pg_table = (pte_t *) __pa(pg_table); +			} + +			pmd_populate_kernel(NULL, pmd, __va(pg_table)); + +			/* now change pg_table to kernel virtual addresses */ + +			pg_table = (pte_t *) __va(pg_table) + start_pte; +			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) { +				pte_t pte; + +				/* +				 * Map the fault vector writable so we can +				 * write the HPMC checksum. +				 */ +				if (force) +					pte =  __mk_pte(address, pgprot); +				else if (parisc_text_address(vaddr) && +					 address != fv_addr) +					pte = __mk_pte(address, PAGE_KERNEL_EXEC); +				else +#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) +				if (address >= ro_start && address < ro_end +							&& address != fv_addr +							&& address != gw_addr) +					pte = __mk_pte(address, PAGE_KERNEL_RO); +				else +#endif +					pte = __mk_pte(address, pgprot); + +				if (address >= end_paddr) { +					if (force) +						break; +					else +						pte_val(pte) = 0; +				} + +				set_pte(pg_table, pte); + +				address += PAGE_SIZE; +				vaddr += PAGE_SIZE; +			} +			start_pte = 0; + +			if (address >= end_paddr) +			    break; +		} +		start_pmd = 0; +	} +} +  void free_initmem(void)  { -	unsigned long addr;  	unsigned long init_begin = (unsigned long)__init_begin;  	unsigned long init_end = (unsigned long)__init_end; -#ifdef CONFIG_DEBUG_KERNEL +	/* The init text pages are marked R-X.  We have to +	 * flush the icache and mark them RW- +	 * +	 * This is tricky, because map_pages is in the init section. +	 * Do a dummy remap of the data section first (the data +	 * section is already PAGE_KERNEL) to pull in the TLB entries +	 * for map_kernel */ +	map_pages(init_begin, __pa(init_begin), init_end - init_begin, +		  PAGE_KERNEL_RWX, 1); +	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute +	 * map_pages */ +	map_pages(init_begin, __pa(init_begin), init_end - init_begin, +		  PAGE_KERNEL, 1); + +	/* force the kernel to see the new TLB entries */ +	__flush_tlb_range(0, init_begin, init_end);  	/* Attempt to catch anyone trying to execute code here  	 * by filling the page with BRK insns.  	 
*/  	memset((void *)init_begin, 0x00, init_end - init_begin); +	/* finally dump all the instructions which were cached, since the +	 * pages are no-longer executable */  	flush_icache_range(init_begin, init_end); -#endif -	/* align __init_begin and __init_end to page size, -	   ignoring linker script where we might have tried to save RAM */ -	init_begin = PAGE_ALIGN(init_begin); -	init_end = PAGE_ALIGN(init_end); -	for (addr = init_begin; addr < init_end; addr += PAGE_SIZE) { -		ClearPageReserved(virt_to_page(addr)); -		init_page_count(virt_to_page(addr)); -		free_page(addr); -		num_physpages++; -		totalram_pages++; -	} +	free_initmem_default(-1);  	/* set up a new led state on systems shipped LED State panel */  	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE); -	 -	printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n", -		(init_end - init_begin) >> 10);  } @@ -444,8 +590,6 @@ unsigned long pcxl_dma_start __read_mostly;  void __init mem_init(void)  { -	int codesize, reservedpages, datasize, initsize; -  	/* Do sanity checks on page table constants */  	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));  	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t)); @@ -454,45 +598,8 @@ void __init mem_init(void)  			> BITS_PER_LONG);  	high_memory = __va((max_pfn << PAGE_SHIFT)); - -#ifndef CONFIG_DISCONTIGMEM -	max_mapnr = page_to_pfn(virt_to_page(high_memory - 1)) + 1; -	totalram_pages += free_all_bootmem(); -#else -	{ -		int i; - -		for (i = 0; i < npmem_ranges; i++) -			totalram_pages += free_all_bootmem_node(NODE_DATA(i)); -	} -#endif - -	codesize = (unsigned long)_etext - (unsigned long)_text; -	datasize = (unsigned long)_edata - (unsigned long)_etext; -	initsize = (unsigned long)__init_end - (unsigned long)__init_begin; - -	reservedpages = 0; -{ -	unsigned long pfn; -#ifdef CONFIG_DISCONTIGMEM -	int i; - -	for (i = 0; i < npmem_ranges; i++) { -		for (pfn = node_start_pfn(i); pfn < node_end_pfn(i); pfn++) { -			if (PageReserved(pfn_to_page(pfn))) -				reservedpages++; -		} -	} -#else /* !CONFIG_DISCONTIGMEM */ -	for (pfn = 0; pfn < max_pfn; pfn++) { -		/* -		 * Only count reserved RAM pages -		 */ -		if (PageReserved(pfn_to_page(pfn))) -			reservedpages++; -	} -#endif -} +	set_max_mapnr(page_to_pfn(virt_to_page(high_memory - 1)) + 1); +	free_all_bootmem();  #ifdef CONFIG_PA11  	if (hppa_dma_ops == &pcxl_dma_ops) { @@ -507,15 +614,7 @@ void __init mem_init(void)  	parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);  #endif -	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", -		nr_free_pages() << (PAGE_SHIFT-10), -		num_physpages << (PAGE_SHIFT-10), -		codesize >> 10, -		reservedpages << (PAGE_SHIFT-10), -		datasize >> 10, -		initsize >> 10 -	); - +	mem_init_print_info(NULL);  #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */  	printk("virtual kernel memory layout:\n"  	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n" @@ -544,55 +643,32 @@ void __init mem_init(void)  unsigned long *empty_zero_page __read_mostly;  EXPORT_SYMBOL(empty_zero_page); -void show_mem(void) +void show_mem(unsigned int filter)  { -	int i,free = 0,total = 0,reserved = 0; -	int shared = 0, cached = 0; +	int total = 0,reserved = 0; +	pg_data_t *pgdat;  	printk(KERN_INFO "Mem-info:\n"); -	show_free_areas(); -#ifndef CONFIG_DISCONTIGMEM -	i = max_mapnr; -	while (i-- > 0) { -		total++; -		if (PageReserved(mem_map+i)) -			reserved++; -		else if (PageSwapCache(mem_map+i)) -			cached++; -		else if (!page_count(&mem_map[i])) -			free++; -		else -			shared += 
page_count(&mem_map[i]) - 1; -	} -#else -	for (i = 0; i < npmem_ranges; i++) { -		int j; +	show_free_areas(filter); + +	for_each_online_pgdat(pgdat) { +		unsigned long flags; +		int zoneid; + +		pgdat_resize_lock(pgdat, &flags); +		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { +			struct zone *zone = &pgdat->node_zones[zoneid]; +			if (!populated_zone(zone)) +				continue; -		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) { -			struct page *p; -			unsigned long flags; - -			pgdat_resize_lock(NODE_DATA(i), &flags); -			p = nid_page_nr(i, j) - node_start_pfn(i); - -			total++; -			if (PageReserved(p)) -				reserved++; -			else if (PageSwapCache(p)) -				cached++; -			else if (!page_count(p)) -				free++; -			else -				shared += page_count(p) - 1; -			pgdat_resize_unlock(NODE_DATA(i), &flags); -        	} +			total += zone->present_pages; +			reserved = zone->present_pages - zone->managed_pages; +		} +		pgdat_resize_unlock(pgdat, &flags);  	} -#endif +  	printk(KERN_INFO "%d pages of RAM\n", total);  	printk(KERN_INFO "%d reserved pages\n", reserved); -	printk(KERN_INFO "%d pages shared\n", shared); -	printk(KERN_INFO "%d pages swap cached\n", cached); -  #ifdef CONFIG_DISCONTIGMEM  	{ @@ -616,114 +692,6 @@ void show_mem(void)  #endif  } - -static void __init map_pages(unsigned long start_vaddr, unsigned long start_paddr, unsigned long size, pgprot_t pgprot) -{ -	pgd_t *pg_dir; -	pmd_t *pmd; -	pte_t *pg_table; -	unsigned long end_paddr; -	unsigned long start_pmd; -	unsigned long start_pte; -	unsigned long tmp1; -	unsigned long tmp2; -	unsigned long address; -	unsigned long ro_start; -	unsigned long ro_end; -	unsigned long fv_addr; -	unsigned long gw_addr; -	extern const unsigned long fault_vector_20; -	extern void * const linux_gateway_page; - -	ro_start = __pa((unsigned long)_text); -	ro_end   = __pa((unsigned long)&data_start); -	fv_addr  = __pa((unsigned long)&fault_vector_20) & PAGE_MASK; -	gw_addr  = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK; - -	end_paddr = start_paddr + size; - -	pg_dir = pgd_offset_k(start_vaddr); - -#if PTRS_PER_PMD == 1 -	start_pmd = 0; -#else -	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1)); -#endif -	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); - -	address = start_paddr; -	while (address < end_paddr) { -#if PTRS_PER_PMD == 1 -		pmd = (pmd_t *)__pa(pg_dir); -#else -		pmd = (pmd_t *)pgd_address(*pg_dir); - -		/* -		 * pmd is physical at this point -		 */ - -		if (!pmd) { -			pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE << PMD_ORDER); -			pmd = (pmd_t *) __pa(pmd); -		} - -		pgd_populate(NULL, pg_dir, __va(pmd)); -#endif -		pg_dir++; - -		/* now change pmd to kernel virtual addresses */ - -		pmd = (pmd_t *)__va(pmd) + start_pmd; -		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++,pmd++) { - -			/* -			 * pg_table is physical at this point -			 */ - -			pg_table = (pte_t *)pmd_address(*pmd); -			if (!pg_table) { -				pg_table = (pte_t *) -					alloc_bootmem_low_pages_node(NODE_DATA(0),PAGE_SIZE); -				pg_table = (pte_t *) __pa(pg_table); -			} - -			pmd_populate_kernel(NULL, pmd, __va(pg_table)); - -			/* now change pg_table to kernel virtual addresses */ - -			pg_table = (pte_t *) __va(pg_table) + start_pte; -			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++,pg_table++) { -				pte_t pte; - -				/* -				 * Map the fault vector writable so we can -				 * write the HPMC checksum. 
-				 */ -#if defined(CONFIG_PARISC_PAGE_SIZE_4KB) -				if (address >= ro_start && address < ro_end -							&& address != fv_addr -							&& address != gw_addr) -				    pte = __mk_pte(address, PAGE_KERNEL_RO); -				else -#endif -				    pte = __mk_pte(address, pgprot); - -				if (address >= end_paddr) -					pte_val(pte) = 0; - -				set_pte(pg_table, pte); - -				address += PAGE_SIZE; -			} -			start_pte = 0; - -			if (address >= end_paddr) -			    break; -		} -		start_pmd = 0; -	} -} -  /*   * pagetable_init() sets up the page tables   * @@ -748,19 +716,18 @@ static void __init pagetable_init(void)  		size = pmem_ranges[range].pages << PAGE_SHIFT;  		map_pages((unsigned long)__va(start_paddr), start_paddr, -			size, PAGE_KERNEL); +			  size, PAGE_KERNEL, 0);  	}  #ifdef CONFIG_BLK_DEV_INITRD  	if (initrd_end && initrd_end > mem_limit) {  		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);  		map_pages(initrd_start, __pa(initrd_start), -			initrd_end - initrd_start, PAGE_KERNEL); +			  initrd_end - initrd_start, PAGE_KERNEL, 0);  	}  #endif  	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE); -	memset(empty_zero_page, 0, PAGE_SIZE);  }  static void __init gateway_init(void) @@ -780,7 +747,7 @@ static void __init gateway_init(void)  	 */  	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page), -		PAGE_SIZE, PAGE_GATEWAY); +		  PAGE_SIZE, PAGE_GATEWAY, 1);  }  #ifdef CONFIG_HPUX @@ -1039,6 +1006,7 @@ void flush_tlb_all(void)  {  	int do_recycle; +	__inc_irq_stat(irq_tlb_count);  	do_recycle = 0;  	spin_lock(&sid_lock);  	if (dirty_space_ids > RECYCLE_THRESHOLD) { @@ -1059,6 +1027,7 @@ void flush_tlb_all(void)  #else  void flush_tlb_all(void)  { +	__inc_irq_stat(irq_tlb_count);  	spin_lock(&sid_lock);  	flush_tlb_all_local(NULL);  	recycle_sids(); @@ -1069,15 +1038,6 @@ void flush_tlb_all(void)  #ifdef CONFIG_BLK_DEV_INITRD  void free_initrd_mem(unsigned long start, unsigned long end)  { -	if (start >= end) -		return; -	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10); -	for (; start < end; start += PAGE_SIZE) { -		ClearPageReserved(virt_to_page(start)); -		init_page_count(virt_to_page(start)); -		free_page(start); -		num_physpages++; -		totalram_pages++; -	} +	free_reserved_area((void *)start, (void *)end, -1, "initrd");  }  #endif  | 
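The fault.c half of the diff is dominated by the switch to the generic fault-retry protocol (FAULT_FLAG_ALLOW_RETRY / FAULT_FLAG_KILLABLE). Because the raw diff above is hard to follow, here is a condensed sketch of the flow do_page_fault() implements after this patch. It is a reading aid, not the literal function: the vma validation (find_vma_prev() plus the expand-stack check), the VM_FAULT_ERROR handling, the bad_area signal delivery and the fixup_exception()/parisc_terminate() paths are elided, and find_vma() stands in for the real lookup.

/* Condensed sketch of the post-patch do_page_fault() retry flow. */
void do_page_fault(struct pt_regs *regs, unsigned long code,
		   unsigned long address)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long acc_type;
	unsigned int flags;
	int fault;

	if (in_atomic())
		goto no_context;

	mm = current->mm;
	if (!mm)
		goto no_context;

	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	/* the access type is now computed once, before the retry loop */
	acc_type = parisc_acctyp(code, regs->iir);
	if (acc_type & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

retry:
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);	/* validation/expand-stack elided */

	fault = handle_mm_fault(mm, vma, address, flags);

	/* handle_mm_fault() has already dropped mmap_sem in
	 * __lock_page_or_retry() when it returns VM_FAULT_RETRY */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR)
			current->maj_flt++;
		else
			current->min_flt++;

		if (fault & VM_FAULT_RETRY) {
			/* retry exactly once; the second attempt may sleep
			 * in lock_page() instead of returning RETRY again */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}

	up_read(&mm->mmap_sem);
	return;

no_context:
	/* kernel-mode fault handling elided */
	;
}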
