Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--   arch/powerpc/mm/mem.c | 549
1 file changed, 280 insertions(+), 269 deletions(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 15aac0d78df..2c8e90f5789 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -5,7 +5,6 @@
  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  *    Copyright (C) 1996 Paul Mackerras
- *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
  *
  *  Derived from "arch/i386/mm/init.c"
@@ -18,12 +17,12 @@
  *
  */
 
-#include <linux/config.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/gfp.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
@@ -32,6 +31,10 @@
 #include <linux/highmem.h>
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
+#include <linux/suspend.h>
+#include <linux/memblock.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -43,10 +46,12 @@
 #include <asm/machdep.h>
 #include <asm/btext.h>
 #include <asm/tlb.h>
-#include <asm/prom.h>
-#include <asm/lmb.h>
 #include <asm/sections.h>
+#include <asm/sparsemem.h>
 #include <asm/vdso.h>
+#include <asm/fixmap.h>
+#include <asm/swiotlb.h>
+#include <asm/rtas.h>
 
 #include "mmu_decl.h"
 
@@ -57,38 +62,35 @@
 int init_bootmem_done;
 int mem_init_done;
-unsigned long memory_limit;
+unsigned long long memory_limit;
 
-extern void hash_preload(struct mm_struct *mm, unsigned long ea,
-                         unsigned long access, unsigned long trap);
+#ifdef CONFIG_HIGHMEM
+pte_t *kmap_pte;
+EXPORT_SYMBOL(kmap_pte);
+pgprot_t kmap_prot;
+EXPORT_SYMBOL(kmap_prot);
 
-/*
- * This is called by /dev/mem to know if a given address has to
- * be mapped non-cacheable or not
- */
-int page_is_ram(unsigned long pfn)
+static inline pte_t *virt_to_kpte(unsigned long vaddr)
 {
-        unsigned long paddr = (pfn << PAGE_SHIFT);
+        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
+                        vaddr), vaddr), vaddr);
+}
+#endif
 
+int page_is_ram(unsigned long pfn)
+{
 #ifndef CONFIG_PPC64    /* XXX for now */
-        return paddr < __pa(high_memory);
+        return pfn < max_pfn;
 #else
-        int i;
-        for (i=0; i < lmb.memory.cnt; i++) {
-                unsigned long base;
-
-                base = lmb.memory.region[i].base;
+        unsigned long paddr = (pfn << PAGE_SHIFT);
+        struct memblock_region *reg;
 
-                if ((paddr >= base) &&
-                    (paddr < (base + lmb.memory.region[i].size))) {
+        for_each_memblock(memory, reg)
+                if (paddr >= reg->base && paddr < (reg->base + reg->size))
                         return 1;
-                }
-        }
-
         return 0;
 #endif
 }
-EXPORT_SYMBOL(page_is_ram);
 
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                               unsigned long size, pgprot_t vma_prot)
@@ -97,131 +99,85 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                 return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
 
         if (!page_is_ram(pfn))
-                vma_prot = __pgprot(pgprot_val(vma_prot)
-                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
+                vma_prot = pgprot_noncached(vma_prot);
+
         return vma_prot;
 }
 EXPORT_SYMBOL(phys_mem_access_prot);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 
-void online_page(struct page *page)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
 {
-        ClearPageReserved(page);
-        set_page_count(page, 0);
-        free_cold_page(page);
-        totalram_pages++;
-        num_physpages++;
+        return hot_add_scn_to_nid(start);
 }
+#endif
 
-int __devinit add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
         struct pglist_data *pgdata;
         struct zone *zone;
-        int nid;
         unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
 
-        nid = hot_add_scn_to_nid(start);
         pgdata = NODE_DATA(nid);
 
-        start = __va(start);
-        create_section_mapping(start, start + size);
+        start = (unsigned long)__va(start);
+        if (create_section_mapping(start, start + size))
+                return -EINVAL;
 
         /* this should work for most non-highmem platforms */
         zone = pgdata->node_zones;
 
-        return __add_pages(zone, start_pfn, nr_pages);
-
-        return 0;
+        return __add_pages(nid, zone, start_pfn, nr_pages);
 }
 
-/*
- * First pass at this code will check to determine if the remove
- * request is within the RMO. Do not allow removal within the RMO.
- */
-int __devinit remove_memory(u64 start, u64 size)
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
 {
+        unsigned long start_pfn = start >> PAGE_SHIFT;
+        unsigned long nr_pages = size >> PAGE_SHIFT;
         struct zone *zone;
-        unsigned long start_pfn, end_pfn, nr_pages;
+        int ret;
 
-        start_pfn = start >> PAGE_SHIFT;
-        nr_pages = size >> PAGE_SHIFT;
-        end_pfn = start_pfn + nr_pages;
-
-        printk("%s(): Attempting to remove memoy in range "
-                "%lx to %lx\n", __func__, start, start+size);
-
-        /*
-         * check for range within RMO
-         */
         zone = page_zone(pfn_to_page(start_pfn));
+        ret = __remove_pages(zone, start_pfn, nr_pages);
+        if (!ret && (ppc_md.remove_memory))
+                ret = ppc_md.remove_memory(start, size);
 
-        printk("%s(): memory will be removed from "
-                "the %s zone\n", __func__, zone->name);
-
-        /*
-         * not handling removing memory ranges that
-         * overlap multiple zones yet
-         */
-        if (end_pfn > (zone->zone_start_pfn + zone->spanned_pages))
-                goto overlap;
-
-        /* make sure it is NOT in RMO */
-        if ((start < lmb.rmo_size) || ((start+size) < lmb.rmo_size)) {
-                printk("%s(): range to be removed must NOT be in RMO!\n",
-                        __func__);
-                goto in_rmo;
-        }
-
-        return __remove_pages(zone, start_pfn, nr_pages);
-
-overlap:
-        printk("%s(): memory range to be removed overlaps "
-                "multiple zones!!!\n", __func__);
-in_rmo:
-        return -1;
+        return ret;
 }
+#endif
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-void show_mem(void)
+/*
+ * walk_memory_resource() needs to make sure there is no holes in a given
+ * memory range. PPC64 does not maintain the memory layout in /proc/iomem.
+ * Instead it maintains it in memblock.memory structures. Walk through the
+ * memory regions, find holes and callback for contiguous regions.
+ */
+int
+walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+                void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-        unsigned long total = 0, reserved = 0;
-        unsigned long shared = 0, cached = 0;
-        unsigned long highmem = 0;
-        struct page *page;
-        pg_data_t *pgdat;
-        unsigned long i;
-
-        printk("Mem-info:\n");
-        show_free_areas();
-        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-        for_each_pgdat(pgdat) {
-                unsigned long flags;
-                pgdat_resize_lock(pgdat, &flags);
-                for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                        if (!pfn_valid(pgdat->node_start_pfn + i))
-                                continue;
-                        page = pgdat_page_nr(pgdat, i);
-                        total++;
-                        if (PageHighMem(page))
-                                highmem++;
-                        if (PageReserved(page))
-                                reserved++;
-                        else if (PageSwapCache(page))
-                                cached++;
-                        else if (page_count(page))
-                                shared += page_count(page) - 1;
-                }
-                pgdat_resize_unlock(pgdat, &flags);
+        struct memblock_region *reg;
+        unsigned long end_pfn = start_pfn + nr_pages;
+        unsigned long tstart, tend;
+        int ret = -1;
+
+        for_each_memblock(memory, reg) {
+                tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
+                tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
+                if (tstart >= tend)
+                        continue;
+                ret = (*func)(tstart, tend - tstart, arg);
+                if (ret)
+                        break;
         }
-        printk("%ld pages of RAM\n", total);
-#ifdef CONFIG_HIGHMEM
-        printk("%ld pages of HIGHMEM\n", highmem);
-#endif
-        printk("%ld reserved pages\n", reserved);
-        printk("%ld pages shared\n", shared);
-        printk("%ld pages swap cached\n", cached);
+        return ret;
 }
+EXPORT_SYMBOL_GPL(walk_system_ram_range);
 
 /*
  * Initialize the bootmem system and give it all the memory we
  * have available.
@@ -231,14 +187,16 @@ void show_mem(void)
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
-        unsigned long i;
         unsigned long start, bootmap_pages;
         unsigned long total_pages;
+        struct memblock_region *reg;
         int boot_mapsize;
 
-        max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
+        max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+        total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
         total_pages = total_lowmem >> PAGE_SHIFT;
+        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
 #endif
 
         /*
@@ -248,160 +206,184 @@ void __init do_init_bootmem(void)
          */
         bootmap_pages = bootmem_bootmap_pages(total_pages);
 
-        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-        BUG_ON(!start);
+        start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
+
+        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
+        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
-        boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
+        /* Place all memblock_regions in the same node and merge contiguous
+         * memblock_regions
+         */
+        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
 
         /* Add all physical memory to the bootmem map, mark each area
          * present.
          */
-        for (i = 0; i < lmb.memory.cnt; i++) {
-                unsigned long base = lmb.memory.region[i].base;
-                unsigned long size = lmb_size_bytes(&lmb.memory, i);
 #ifdef CONFIG_HIGHMEM
-                if (base >= total_lowmem)
-                        continue;
-                if (base + size > total_lowmem)
-                        size = total_lowmem - base;
-#endif
-                free_bootmem(base, size);
-        }
+        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
         /* reserve the sections we're already using */
-        for (i = 0; i < lmb.reserved.cnt; i++)
-                reserve_bootmem(lmb.reserved.region[i].base,
-                                lmb_size_bytes(&lmb.reserved, i));
+        for_each_memblock(reserved, reg) {
+                unsigned long top = reg->base + reg->size - 1;
+                if (top < lowmem_end_addr)
+                        reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+                else if (reg->base < lowmem_end_addr) {
+                        unsigned long trunc_size = lowmem_end_addr - reg->base;
+                        reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
+                }
+        }
+#else
+        free_bootmem_with_active_regions(0, max_pfn);
 
+        /* reserve the sections we're already using */
+        for_each_memblock(reserved, reg)
+                reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+#endif
 
         /* XXX need to clip this if using highmem? */
-        for (i = 0; i < lmb.memory.cnt; i++)
-                memory_present(0, lmb_start_pfn(&lmb.memory, i),
-                               lmb_end_pfn(&lmb.memory, i));
+        sparse_memory_present_with_active_regions(0);
+
         init_bootmem_done = 1;
 }
 
+/* mark pages that don't exist as nosave */
+static int __init mark_nonram_nosave(void)
+{
+        struct memblock_region *reg, *prev = NULL;
+
+        for_each_memblock(memory, reg) {
+                if (prev &&
+                    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
+                        register_nosave_region(memblock_region_memory_end_pfn(prev),
+                                               memblock_region_memory_base_pfn(reg));
+                prev = reg;
+        }
+        return 0;
+}
+
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
  */
 void __init paging_init(void)
 {
-        unsigned long zones_size[MAX_NR_ZONES];
-        unsigned long zholes_size[MAX_NR_ZONES];
-        unsigned long total_ram = lmb_phys_mem_size();
-        unsigned long top_of_ram = lmb_end_of_DRAM();
+        unsigned long long total_ram = memblock_phys_mem_size();
+        phys_addr_t top_of_ram = memblock_end_of_DRAM();
+        unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+#ifdef CONFIG_PPC32
+        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
+        unsigned long end = __fix_to_virt(FIX_HOLE);
+
+        for (; v < end; v += PAGE_SIZE)
+                map_page(v, 0, 0); /* XXX gross */
+#endif
 
 #ifdef CONFIG_HIGHMEM
         map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
-        pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
-                        (PKMAP_BASE), PKMAP_BASE), PKMAP_BASE);
-        map_page(KMAP_FIX_BEGIN, 0, 0); /* XXX gross */
-        kmap_pte = pte_offset_kernel(pmd_offset(pgd_offset_k
-                        (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
+        pkmap_page_table = virt_to_kpte(PKMAP_BASE);
+
+        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
         kmap_prot = PAGE_KERNEL;
 #endif /* CONFIG_HIGHMEM */
 
-        printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
-               top_of_ram, total_ram);
-        printk(KERN_INFO "Memory hole size: %ldMB\n",
-               (top_of_ram - total_ram) >> 20);
-        /*
-         * All pages are DMA-able so we put them all in the DMA zone.
-         */
-        memset(zones_size, 0, sizeof(zones_size));
-        memset(zholes_size, 0, sizeof(zholes_size));
-
-        zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
-        zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
-
+        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
+               (unsigned long long)top_of_ram, total_ram);
+        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
+               (long int)((top_of_ram - total_ram) >> 20));
+        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_HIGHMEM
-        zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
-        zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
-        zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT;
+        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
+        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
 #else
-        zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
-        zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
-#endif /* CONFIG_HIGHMEM */
+        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
+#endif
+        free_area_init_nodes(max_zone_pfns);
 
-        free_area_init_node(0, NODE_DATA(0), zones_size,
-                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
+        mark_nonram_nosave();
 }
 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 
-void __init mem_init(void)
+static void __init register_page_bootmem_info(void)
 {
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-        int nid;
-#endif
-        pg_data_t *pgdat;
-        unsigned long i;
-        struct page *page;
-        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
+        int i;
 
-        num_physpages = lmb.memory.size >> PAGE_SHIFT;
-        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+        for_each_online_node(i)
+                register_page_bootmem_info_node(NODE_DATA(i));
+}
 
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-        for_each_online_node(nid) {
-                if (NODE_DATA(nid)->node_spanned_pages != 0) {
-                        printk("freeing bootmem node %x\n", nid);
-                        totalram_pages +=
-                                free_all_bootmem_node(NODE_DATA(nid));
-                }
-        }
-#else
-        max_mapnr = max_pfn;
-        totalram_pages += free_all_bootmem();
+void __init mem_init(void)
+{
+        /*
+         * book3s is limited to 16 page sizes due to encoding this in
+         * a 4-bit field for slices.
+         */
+        BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
+
+#ifdef CONFIG_SWIOTLB
+        swiotlb_init(0);
 #endif
-        for_each_pgdat(pgdat) {
-                for (i = 0; i < pgdat->node_spanned_pages; i++) {
-                        if (!pfn_valid(pgdat->node_start_pfn + i))
-                                continue;
-                        page = pgdat_page_nr(pgdat, i);
-                        if (PageReserved(page))
-                                reservedpages++;
-                }
-        }
 
-        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
-        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
-        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
-        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
+        register_page_bootmem_info();
+        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+        set_max_mapnr(max_pfn);
+        free_all_bootmem();
 
 #ifdef CONFIG_HIGHMEM
         {
                 unsigned long pfn, highmem_mapnr;
 
-                highmem_mapnr = total_lowmem >> PAGE_SHIFT;
+                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                 for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
+                        phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
                         struct page *page = pfn_to_page(pfn);
-
-                        ClearPageReserved(page);
-                        set_page_count(page, 1);
-                        __free_page(page);
-                        totalhigh_pages++;
+                        if (!memblock_is_reserved(paddr))
+                                free_highmem_page(page);
                 }
-                totalram_pages += totalhigh_pages;
-                printk(KERN_INFO "High memory: %luk\n",
-                       totalhigh_pages << (PAGE_SHIFT-10));
         }
 #endif /* CONFIG_HIGHMEM */
 
-        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
-               "%luk reserved, %luk data, %luk bss, %luk init)\n",
-               (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
-               num_physpages << (PAGE_SHIFT-10),
-               codesize >> 10,
-               reservedpages << (PAGE_SHIFT-10),
-               datasize >> 10,
-               bsssize >> 10,
-               initsize >> 10);
+#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
+        /*
+         * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
+         * functions.... do it here for the non-smp case.
+         */
+        per_cpu(next_tlbcam_idx, smp_processor_id()) =
+                (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
+#endif
+
+        mem_init_print_info(NULL);
+#ifdef CONFIG_PPC32
+        pr_info("Kernel virtual memory layout:\n");
+        pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
+#ifdef CONFIG_HIGHMEM
+        pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
+                PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
+#endif /* CONFIG_HIGHMEM */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+        pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
+                IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
+#endif /* CONFIG_NOT_COHERENT_CACHE */
+        pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
+                ioremap_bot, IOREMAP_TOP);
+        pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
+                VMALLOC_START, VMALLOC_END);
+#endif /* CONFIG_PPC32 */
 
         mem_init_done = 1;
+}
 
-        /* Initialize the vDSO */
-        vdso_init();
+void free_initmem(void)
+{
+        ppc_md.progress = ppc_printk_progress;
+        free_initmem_default(POISON_FREE_INITMEM);
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+        free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
+
 /*
  * This is called when a page has been modified by the kernel.
  * It just marks the page as not i-cache clean.  We do the i-cache
@@ -419,33 +401,37 @@ EXPORT_SYMBOL(flush_dcache_page);
 
 void flush_dcache_icache_page(struct page *page)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+        if (PageCompound(page)) {
+                flush_dcache_icache_hugepage(page);
+                return;
+        }
+#endif
 #ifdef CONFIG_BOOKE
-        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
-        __flush_dcache_icache(start);
-        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+        {
+                void *start = kmap_atomic(page);
+                __flush_dcache_icache(start);
+                kunmap_atomic(start);
+        }
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
         /* On 8xx there is no need to kmap since highmem is not supported */
         __flush_dcache_icache(page_address(page));
 #else
         __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
-
 }
+EXPORT_SYMBOL(flush_dcache_icache_page);
+
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
         clear_page(page);
-        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-                return;
+
         /*
-         * We shouldnt have to do this, but some versions of glibc
+         * We shouldn't have to do this, but some versions of glibc
          * require it (ld.so assumes zero filled pages are icache clean)
          * - Anton
          */
-
-        /* avoid an atomic op if possible */
-        if (test_bit(PG_arch_1, &pg->flags))
-                clear_bit(PG_arch_1, &pg->flags);
+        flush_dcache_page(pg);
 }
 EXPORT_SYMBOL(clear_user_page);
 
@@ -469,12 +455,7 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                 return;
 #endif
 
-        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-                return;
-
-        /* avoid an atomic op if possible */
-        if (test_bit(PG_arch_1, &pg->flags))
-                clear_bit(PG_arch_1, &pg->flags);
+        flush_dcache_page(pg);
 }
 
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
@@ -497,41 +478,17 @@ EXPORT_SYMBOL(flush_icache_user_range);
  * This must always be called with the pte lock held.
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
-                      pte_t pte)
+                      pte_t *ptep)
 {
 #ifdef CONFIG_PPC_STD_MMU
+        /*
+         * We don't need to worry about _PAGE_PRESENT here because we are
+         * called with either mm->page_table_lock held or ptl lock held
+         */
         unsigned long access = 0, trap;
-#endif
-        unsigned long pfn = pte_pfn(pte);
-
-        /* handle i-cache coherency */
-        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
-            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
-            pfn_valid(pfn)) {
-                struct page *page = pfn_to_page(pfn);
-                if (!PageReserved(page)
-                    && !test_bit(PG_arch_1, &page->flags)) {
-                        if (vma->vm_mm == current->active_mm) {
-#ifdef CONFIG_8xx
-                        /* On 8xx, cache control instructions (particularly
-                         * "dcbst" from flush_dcache_icache) fault as write
-                         * operation if there is an unpopulated TLB entry
-                         * for the address in question. To workaround that,
-                         * we invalidate the TLB here, thus avoiding dcbst
-                         * misbehaviour.
-                         */
-                                _tlbie(address);
-#endif
-                                __flush_dcache_icache((void *) address);
-                        } else
-                                flush_dcache_icache_page(page);
-                        set_bit(PG_arch_1, &page->flags);
-                }
-        }
-
-#ifdef CONFIG_PPC_STD_MMU
 
         /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
-        if (!pte_young(pte) || address >= TASK_SIZE)
+        if (!pte_young(*ptep) || address >= TASK_SIZE)
                 return;
 
         /* We try to figure out if we are coming from an instruction
@@ -550,4 +507,58 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                 return;
         hash_preload(vma->vm_mm, address, access, trap);
 #endif /* CONFIG_PPC_STD_MMU */
+#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
+        && defined(CONFIG_HUGETLB_PAGE)
+        if (is_vm_hugetlb_page(vma))
+                book3e_hugetlb_preload(vma, address, *ptep);
+#endif
+}
+
+/*
+ * System memory should not be in /proc/iomem but various tools expect it
+ * (eg kdump).
+ */
+static int __init add_system_ram_resources(void)
+{
+        struct memblock_region *reg;
+
+        for_each_memblock(memory, reg) {
+                struct resource *res;
+                unsigned long base = reg->base;
+                unsigned long size = reg->size;
+
+                res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+                WARN_ON(!res);
+
+                if (res) {
+                        res->name = "System RAM";
+                        res->start = base;
+                        res->end = base + size - 1;
+                        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+                        WARN_ON(request_resource(&iomem_resource, res) < 0);
+                }
+        }
+
+        return 0;
+}
+subsys_initcall(add_system_ram_resources);
+
+#ifdef CONFIG_STRICT_DEVMEM
+/*
+ * devmem_is_allowed(): check to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
+ */
+int devmem_is_allowed(unsigned long pfn)
+{
+        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+                return 0;
+        if (!page_is_ram(pfn))
+                return 1;
+        if (page_is_rtas_user_buf(pfn))
+                return 1;
+        return 0;
+}
+#endif /* CONFIG_STRICT_DEVMEM */
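A note on the new walk_system_ram_range() above: as its comment says, PPC64 keeps the RAM layout in memblock.memory rather than /proc/iomem, so the function walks the memblock regions, clips each one to the requested pfn window, and invokes the callback once per hole-free span. A minimal caller might look like the sketch below (count_ram_cb and count_ram_pages are hypothetical names for illustration, not part of this patch):

        /* Illustrative sketch only -- not from the patch. Counts the RAM
         * pages inside [start_pfn, start_pfn + nr_pages) by summing the
         * contiguous spans that walk_system_ram_range() reports.
         */
        static int count_ram_cb(unsigned long base_pfn, unsigned long len,
                                void *arg)
        {
                unsigned long *total = arg;

                *total += len;          /* each reported span is hole-free RAM */
                return 0;               /* non-zero would abort the walk */
        }

        static unsigned long count_ram_pages(unsigned long start_pfn,
                                             unsigned long nr_pages)
        {
                unsigned long total = 0;

                /* returns the callback's last value, or -1 if no span matched */
                walk_system_ram_range(start_pfn, nr_pages, &total, count_ram_cb);
                return total;
        }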

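Much of the churn in this diff is the mechanical lmb-to-memblock conversion: indexed loops over lmb.memory.region[i] become for_each_memblock() iteration, as seen in page_is_ram(), do_init_bootmem() and add_system_ram_resources() above. The shape of the conversion, reduced to a hypothetical byte-counting helper (both idioms are lifted from lines in the diff; the function names are illustrative only):

        /* Old style, as in the removed lines: index into the fixed lmb array. */
        static unsigned long ram_bytes_lmb(void)
        {
                unsigned long total = 0;
                int i;

                for (i = 0; i < lmb.memory.cnt; i++)
                        total += lmb_size_bytes(&lmb.memory, i);
                return total;
        }

        /* New style, as in the added lines: opaque memblock region iterator. */
        static unsigned long ram_bytes_memblock(void)
        {
                struct memblock_region *reg;
                unsigned long total = 0;

                for_each_memblock(memory, reg)
                        total += reg->size;
                return total;
        }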