author:    Sam Ravnborg <sam@ravnborg.org>        2008-11-16 20:08:45 -0800
committer: David S. Miller <davem@davemloft.net>  2008-12-04 09:16:59 -0800
commit:    27137e5285a3388e8f86d7bc5fe0ed8b92bd4624
tree:      70cd698fb5561743913b5f7615f61df6e8883537 /arch/sparc64/mm/init.c
parent:    c37ddd936d96b46cf2bb17e7b1a18b2bd24ec1fb
sparc,sparc64: unify mm/
- move all sparc64/mm/ files to arch/sparc/mm/
- commonly named files are named _64.c
- add files to sparc/mm/Makefile preserving link order
- delete now unused sparc64/mm/Makefile
- sparc64 now finds mm/ in sparc
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
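The mechanics described in the bullets above come down to kbuild's $(BITS) convention: the unified arch/sparc/mm/Makefile selects _32/_64 objects per configuration, so sparc64 builds pick up the renamed files automatically. A sketch of the kind of entries this produces (illustrative only; the exact object list lives in the commit itself and is not reproduced here):

	# arch/sparc/mm/Makefile (sketch, not the verbatim result)
	obj-y                 += fault_$(BITS).o init_$(BITS).o
	obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
	obj-$(CONFIG_SPARC32) += loadmmu.o srmmu.o iommu.o

On sparc64, $(BITS) expands to 64, so init_64.o (the renamed init.c whose deletion is shown below) links in the position the old sparc64/mm/Makefile gave it, preserving link order.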
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--  arch/sparc64/mm/init.c | 2360
1 file changed, 0 insertions, 2360 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
deleted file mode 100644
index 4bd63968400..00000000000
--- a/arch/sparc64/mm/init.c
+++ /dev/null
@@ -1,2360 +0,0 @@
-/*
- *  arch/sparc64/mm/init.c
- *
- *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
- *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <linux/slab.h>
-#include <linux/initrd.h>
-#include <linux/swap.h>
-#include <linux/pagemap.h>
-#include <linux/poison.h>
-#include <linux/fs.h>
-#include <linux/seq_file.h>
-#include <linux/kprobes.h>
-#include <linux/cache.h>
-#include <linux/sort.h>
-#include <linux/percpu.h>
-#include <linux/lmb.h>
-#include <linux/mmzone.h>
-
-#include <asm/head.h>
-#include <asm/system.h>
-#include <asm/page.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/oplib.h>
-#include <asm/iommu.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-#include <asm/dma.h>
-#include <asm/starfire.h>
-#include <asm/tlb.h>
-#include <asm/spitfire.h>
-#include <asm/sections.h>
-#include <asm/tsb.h>
-#include <asm/hypervisor.h>
-#include <asm/prom.h>
-#include <asm/mdesc.h>
-#include <asm/cpudata.h>
-#include <asm/irq.h>
-
-#include "init.h"
-
-unsigned long kern_linear_pte_xor[2] __read_mostly;
-
-/* A bitmap, one bit for every 256MB of physical memory.  If the bit
- * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
- * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
- */
-unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
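The comment above fully specifies the lookup this bitmap drives: each bit covers one 256MB chunk, and the bit selects which of the two kern_linear_pte_xor values forms the linear-mapping TTE. A minimal C sketch of that decision (illustrative only; pick_linear_pte_xor is our name, and the real test is performed in the assembly TLB-miss path, not in this file):

	static unsigned long pick_linear_pte_xor(unsigned long offset)
	{
		/* offset is the byte offset into the linear mapping;
		 * one bitmap bit covers 2^28 bytes == 256MB
		 */
		unsigned long bit = offset >> 28UL;

		if (test_bit(bit, kpte_linear_bitmap))
			return kern_linear_pte_xor[1];	/* 256MB page */
		return kern_linear_pte_xor[0];		/* 4MB page */
	}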
-
-#ifndef CONFIG_DEBUG_PAGEALLOC
-/* A special kernel TSB for 4MB and 256MB linear mappings.
- * Space is allocated for this right after the trap table
- * in arch/sparc64/kernel/head.S
- */
-extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
-#endif
-
-#define MAX_BANKS	32
-
-static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
-static int pavail_ents __initdata;
-
-static int cmp_p64(const void *a, const void *b)
-{
-	const struct linux_prom64_registers *x = a, *y = b;
-
-	if (x->phys_addr > y->phys_addr)
-		return 1;
-	if (x->phys_addr < y->phys_addr)
-		return -1;
-	return 0;
-}
-
-static void __init read_obp_memory(const char *property,
-				   struct linux_prom64_registers *regs,
-				   int *num_ents)
-{
-	int node = prom_finddevice("/memory");
-	int prop_size = prom_getproplen(node, property);
-	int ents, ret, i;
-
-	ents = prop_size / sizeof(struct linux_prom64_registers);
-	if (ents > MAX_BANKS) {
-		prom_printf("The machine has more %s property entries than "
-			    "this kernel can support (%d).\n",
-			    property, MAX_BANKS);
-		prom_halt();
-	}
-
-	ret = prom_getproperty(node, property, (char *) regs, prop_size);
-	if (ret == -1) {
-		prom_printf("Couldn't get %s property from /memory.\n",
-			    property);
-		prom_halt();
-	}
-
-	/* Sanitize what we got from the firmware, by page aligning
-	 * everything.
-	 */
-	for (i = 0; i < ents; i++) {
-		unsigned long base, size;
-
-		base = regs[i].phys_addr;
-		size = regs[i].reg_size;
-
-		size &= PAGE_MASK;
-		if (base & ~PAGE_MASK) {
-			unsigned long new_base = PAGE_ALIGN(base);
-
-			size -= new_base - base;
-			if ((long) size < 0L)
-				size = 0UL;
-			base = new_base;
-		}
-		if (size == 0UL) {
-			/* If it is empty, simply get rid of it.
-			 * This simplifies the logic of the other
-			 * functions that process these arrays.
-			 */
-			memmove(&regs[i], &regs[i + 1],
-				(ents - i - 1) * sizeof(regs[0]));
-			i--;
-			ents--;
-			continue;
-		}
-		regs[i].phys_addr = base;
-		regs[i].reg_size = size;
-	}
-
-	*num_ents = ents;
-
-	sort(regs, ents, sizeof(struct linux_prom64_registers),
-	     cmp_p64, NULL);
-}
-
-unsigned long *sparc64_valid_addr_bitmap __read_mostly;
-
-/* Kernel physical address base and size in bytes.  */
-unsigned long kern_base __read_mostly;
-unsigned long kern_size __read_mostly;
-
-/* Initial ramdisk setup */
-extern unsigned long sparc_ramdisk_image64;
-extern unsigned int sparc_ramdisk_image;
-extern unsigned int sparc_ramdisk_size;
-
-struct page *mem_map_zero __read_mostly;
-EXPORT_SYMBOL(mem_map_zero);
-
-unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
-
-unsigned long sparc64_kern_pri_context __read_mostly;
-unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
-unsigned long sparc64_kern_sec_context __read_mostly;
-
-int num_kernel_image_mappings;
-
-#ifdef CONFIG_DEBUG_DCFLUSH
-atomic_t dcpage_flushes = ATOMIC_INIT(0);
-#ifdef CONFIG_SMP
-atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
-#endif
-#endif
-
-inline void flush_dcache_page_impl(struct page *page)
-{
-	BUG_ON(tlb_type == hypervisor);
-#ifdef CONFIG_DEBUG_DCFLUSH
-	atomic_inc(&dcpage_flushes);
-#endif
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-	__flush_dcache_page(page_address(page),
-			    ((tlb_type == spitfire) &&
-			     page_mapping(page) != NULL));
-#else
-	if (page_mapping(page) != NULL &&
-	    tlb_type == spitfire)
-		__flush_icache_page(__pa(page_address(page)));
-#endif
-}
-
-#define PG_dcache_dirty		PG_arch_1
-#define PG_dcache_cpu_shift	32UL
-#define PG_dcache_cpu_mask	\
-	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
-
-#define dcache_dirty_cpu(page) \
-	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
-
-static inline void set_dcache_dirty(struct page *page, int this_cpu)
-{
-	unsigned long mask = this_cpu;
-	unsigned long non_cpu_bits;
-
-	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
-	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
-
-	__asm__ __volatile__("1:\n\t"
-			     "ldx	[%2], %%g7\n\t"
-			     "and	%%g7, %1, %%g1\n\t"
-			     "or	%%g1, %0, %%g1\n\t"
-			     "casx	[%2], %%g7, %%g1\n\t"
-			     "cmp	%%g7, %%g1\n\t"
-			     "bne,pn	%%xcc, 1b\n\t"
-			     " nop"
-			     : /* no outputs */
-			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
-			     : "g1", "g7");
-}
-
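It is worth spelling out the layout the two cas loops here maintain: bits 32 and up of page->flags cache the CPU that dirtied the page, and PG_dcache_dirty marks it dirty at all. A non-atomic C sketch of what set_dcache_dirty's loop computes (ours, for illustration; the asm exists precisely because this must be a single atomic update):

	static unsigned long dcache_dirty_flags(unsigned long flags, int this_cpu)
	{
		/* clear the old owner, then record this_cpu and the dirty bit */
		flags &= ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
		flags |= ((unsigned long) this_cpu << PG_dcache_cpu_shift) |
			 (1UL << PG_dcache_dirty);
		return flags;
	}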
-static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
-{
-	unsigned long mask = (1UL << PG_dcache_dirty);
-
-	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
-			     "1:\n\t"
-			     "ldx	[%2], %%g7\n\t"
-			     "srlx	%%g7, %4, %%g1\n\t"
-			     "and	%%g1, %3, %%g1\n\t"
-			     "cmp	%%g1, %0\n\t"
-			     "bne,pn	%%icc, 2f\n\t"
-			     " andn	%%g7, %1, %%g1\n\t"
-			     "casx	[%2], %%g7, %%g1\n\t"
-			     "cmp	%%g7, %%g1\n\t"
-			     "bne,pn	%%xcc, 1b\n\t"
-			     " nop\n"
-			     "2:"
-			     : /* no outputs */
-			     : "r" (cpu), "r" (mask), "r" (&page->flags),
-			       "i" (PG_dcache_cpu_mask),
-			       "i" (PG_dcache_cpu_shift)
-			     : "g1", "g7");
-}
-
-static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
-{
-	unsigned long tsb_addr = (unsigned long) ent;
-
-	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
-		tsb_addr = __pa(tsb_addr);
-
-	__tsb_insert(tsb_addr, tag, pte);
-}
-
-unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
-unsigned long _PAGE_SZBITS __read_mostly;
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
-	struct mm_struct *mm;
-	struct tsb *tsb;
-	unsigned long tag, flags;
-	unsigned long tsb_index, tsb_hash_shift;
-
-	if (tlb_type != hypervisor) {
-		unsigned long pfn = pte_pfn(pte);
-		unsigned long pg_flags;
-		struct page *page;
-
-		if (pfn_valid(pfn) &&
-		    (page = pfn_to_page(pfn), page_mapping(page)) &&
-		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
-			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
-				   PG_dcache_cpu_mask);
-			int this_cpu = get_cpu();
-
-			/* This is just to optimize away some function calls
-			 * in the SMP case.
-			 */
-			if (cpu == this_cpu)
-				flush_dcache_page_impl(page);
-			else
-				smp_flush_dcache_page_impl(page, cpu);
-
-			clear_dcache_dirty_cpu(page, cpu);
-
-			put_cpu();
-		}
-	}
-
-	mm = vma->vm_mm;
-
-	tsb_index = MM_TSB_BASE;
-	tsb_hash_shift = PAGE_SHIFT;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-#ifdef CONFIG_HUGETLB_PAGE
-	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
-		if ((tlb_type == hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
-		    (tlb_type != hypervisor &&
-		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
-			tsb_index = MM_TSB_HUGE;
-			tsb_hash_shift = HPAGE_SHIFT;
-		}
-	}
-#endif
-
-	tsb = mm->context.tsb_block[tsb_index].tsb;
-	tsb += ((address >> tsb_hash_shift) &
-		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
-	tag = (address >> 22UL);
-	tsb_insert(tsb, tag, pte_val(pte));
-
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
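The slot computation at the end of update_mmu_cache() is a direct-mapped hash: shift the fault address down by the page-size shift, then mask with the power-of-two table size. The same computation as a standalone helper (tsb_slot is our illustrative name, not an API in this file):

	static struct tsb *tsb_slot(struct tsb *tsb, unsigned long nentries,
				    unsigned long address, unsigned long hash_shift)
	{
		/* nentries is a power of two, so the AND is a cheap modulo */
		return &tsb[(address >> hash_shift) & (nentries - 1UL)];
	}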
-
-void flush_dcache_page(struct page *page)
-{
-	struct address_space *mapping;
-	int this_cpu;
-
-	if (tlb_type == hypervisor)
-		return;
-
-	/* Do not bother with the expensive D-cache flush if it
-	 * is merely the zero page.  The 'bigcore' testcase in GDB
-	 * causes this case to run millions of times.
-	 */
-	if (page == ZERO_PAGE(0))
-		return;
-
-	this_cpu = get_cpu();
-
-	mapping = page_mapping(page);
-	if (mapping && !mapping_mapped(mapping)) {
-		int dirty = test_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-			int dirty_cpu = dcache_dirty_cpu(page);
-
-			if (dirty_cpu == this_cpu)
-				goto out;
-			smp_flush_dcache_page_impl(page, dirty_cpu);
-		}
-		set_dcache_dirty(page, this_cpu);
-	} else {
-		/* We could delay the flush for the !page_mapping
-		 * case too.  But that case is for exec env/arg
-		 * pages and those are %99 certainly going to get
-		 * faulted into the tlb (and thus flushed) anyways.
-		 */
-		flush_dcache_page_impl(page);
-	}
-
-out:
-	put_cpu();
-}
-
-void __kprobes flush_icache_range(unsigned long start, unsigned long end)
-{
-	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
-	if (tlb_type == spitfire) {
-		unsigned long kaddr;
-
-		/* This code only runs on Spitfire cpus so this is
-		 * why we can assume _PAGE_PADDR_4U.
-		 */
-		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
-			unsigned long paddr, mask = _PAGE_PADDR_4U;
-
-			if (kaddr >= PAGE_OFFSET)
-				paddr = kaddr & mask;
-			else {
-				pgd_t *pgdp = pgd_offset_k(kaddr);
-				pud_t *pudp = pud_offset(pgdp, kaddr);
-				pmd_t *pmdp = pmd_offset(pudp, kaddr);
-				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
-
-				paddr = pte_val(*ptep) & mask;
-			}
-			__flush_icache_page(paddr);
-		}
-	}
-}
-
-void mmu_info(struct seq_file *m)
-{
-	if (tlb_type == cheetah)
-		seq_printf(m, "MMU Type\t: Cheetah\n");
-	else if (tlb_type == cheetah_plus)
-		seq_printf(m, "MMU Type\t: Cheetah+\n");
-	else if (tlb_type == spitfire)
-		seq_printf(m, "MMU Type\t: Spitfire\n");
-	else if (tlb_type == hypervisor)
-		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
-	else
-		seq_printf(m, "MMU Type\t: ???\n");
-
-#ifdef CONFIG_DEBUG_DCFLUSH
-	seq_printf(m, "DCPageFlushes\t: %d\n",
-		   atomic_read(&dcpage_flushes));
-#ifdef CONFIG_SMP
-	seq_printf(m, "DCPageFlushesXC\t: %d\n",
-		   atomic_read(&dcpage_flushes_xcall));
-#endif /* CONFIG_SMP */
-#endif /* CONFIG_DEBUG_DCFLUSH */
-}
-
-struct linux_prom_translation prom_trans[512] __read_mostly;
-unsigned int prom_trans_ents __read_mostly;
-
-unsigned long kern_locked_tte_data;
-
-/* The obp translations are saved based on 8k pagesize, since obp can
- * use a mixture of pagesizes.  Misses to the LOW_OBP_ADDRESS ->
- * HI_OBP_ADDRESS range are handled in ktlb.S.
- */
-static inline int in_obp_range(unsigned long vaddr)
-{
-	return (vaddr >= LOW_OBP_ADDRESS &&
-		vaddr < HI_OBP_ADDRESS);
-}
-
-static int cmp_ptrans(const void *a, const void *b)
-{
-	const struct linux_prom_translation *x = a, *y = b;
-
-	if (x->virt > y->virt)
-		return 1;
-	if (x->virt < y->virt)
-		return -1;
-	return 0;
-}
-
-/* Read OBP translations property into 'prom_trans[]'.  */
-static void __init read_obp_translations(void)
-{
-	int n, node, ents, first, last, i;
-
-	node = prom_finddevice("/virtual-memory");
-	n = prom_getproplen(node, "translations");
-	if (unlikely(n == 0 || n == -1)) {
-		prom_printf("prom_mappings: Couldn't get size.\n");
-		prom_halt();
-	}
-	if (unlikely(n > sizeof(prom_trans))) {
-		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
-		prom_halt();
-	}
-
-	if ((n = prom_getproperty(node, "translations",
-				  (char *)&prom_trans[0],
-				  sizeof(prom_trans))) == -1) {
-		prom_printf("prom_mappings: Couldn't get property.\n");
-		prom_halt();
-	}
-
-	n = n / sizeof(struct linux_prom_translation);
-
-	ents = n;
-
-	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
-	     cmp_ptrans, NULL);
-
-	/* Now kick out all the non-OBP entries.  */
-	for (i = 0; i < ents; i++) {
-		if (in_obp_range(prom_trans[i].virt))
-			break;
-	}
-	first = i;
-	for (; i < ents; i++) {
-		if (!in_obp_range(prom_trans[i].virt))
-			break;
-	}
-	last = i;
-
-	for (i = 0; i < (last - first); i++) {
-		struct linux_prom_translation *src = &prom_trans[i + first];
-		struct linux_prom_translation *dest = &prom_trans[i];
-
-		*dest = *src;
-	}
-	for (; i < ents; i++) {
-		struct linux_prom_translation *dest = &prom_trans[i];
-		dest->virt = dest->size = dest->data = 0x0UL;
-	}
-
-	prom_trans_ents = last - first;
-
-	if (tlb_type == spitfire) {
-		/* Clear diag TTE bits. */
-		for (i = 0; i < prom_trans_ents; i++)
-			prom_trans[i].data &= ~0x0003fe0000000000UL;
-	}
-}
-
-static void __init hypervisor_tlb_lock(unsigned long vaddr,
-				       unsigned long pte,
-				       unsigned long mmu)
-{
-	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
-
-	if (ret != 0) {
-		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
-			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
-		prom_halt();
-	}
-}
-
-static unsigned long kern_large_tte(unsigned long paddr);
-
-static void __init remap_kernel(void)
-{
-	unsigned long phys_page, tte_vaddr, tte_data;
-	int i, tlb_ent = sparc64_highest_locked_tlbent();
-
-	tte_vaddr = (unsigned long) KERNBASE;
-	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
-	tte_data = kern_large_tte(phys_page);
-
-	kern_locked_tte_data = tte_data;
-
-	/* Now lock us into the TLBs via Hypervisor or OBP. */
-	if (tlb_type == hypervisor) {
-		for (i = 0; i < num_kernel_image_mappings; i++) {
-			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
-			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
-			tte_vaddr += 0x400000;
-			tte_data += 0x400000;
-		}
-	} else {
-		for (i = 0; i < num_kernel_image_mappings; i++) {
-			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
-			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
-			tte_vaddr += 0x400000;
-			tte_data += 0x400000;
-		}
-		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
-	}
-	if (tlb_type == cheetah_plus) {
-		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
-					    CTX_CHEETAH_PLUS_NUC);
-		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
-		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
-	}
-}
-
-
-static void __init inherit_prom_mappings(void)
-{
-	/* Now fixup OBP's idea about where we really are mapped. */
-	printk("Remapping the kernel... ");
-	remap_kernel();
-	printk("done.\n");
-}
-
-void prom_world(int enter)
-{
-	if (!enter)
-		set_fs((mm_segment_t) { get_thread_current_ds() });
-
-	__asm__ __volatile__("flushw");
-}
-
-void __flush_dcache_range(unsigned long start, unsigned long end)
-{
-	unsigned long va;
-
-	if (tlb_type == spitfire) {
-		int n = 0;
-
-		for (va = start; va < end; va += 32) {
-			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
-			if (++n >= 512)
-				break;
-		}
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		start = __pa(start);
-		end = __pa(end);
-		for (va = start; va < end; va += 32)
-			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-					     "membar #Sync"
-					     : /* no outputs */
-					     : "r" (va),
-					       "i" (ASI_DCACHE_INVALIDATE));
-	}
-}
-
-/* get_new_mmu_context() uses "cache + 1".  */
-DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
-#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
-#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
-DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
-
-/* Caller does TLB context flushing on local CPU if necessary.
- * The caller also ensures that CTX_VALID(mm->context) is false.
- *
- * We must be careful about boundary cases so that we never
- * let the user have CTX 0 (nucleus) or we ever use a CTX
- * version of zero (and thus NO_CONTEXT would not be caught
- * by version mis-match tests in mmu_context.h).
- *
- * Always invoked with interrupts disabled.
- */
-void get_new_mmu_context(struct mm_struct *mm)
-{
-	unsigned long ctx, new_ctx;
-	unsigned long orig_pgsz_bits;
-	unsigned long flags;
-	int new_version;
-
-	spin_lock_irqsave(&ctx_alloc_lock, flags);
-	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
-	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
-	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
-	if (new_ctx >= (1 << CTX_NR_BITS)) {
-		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
-		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
-		}
-	}
-	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
-	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
-	tlb_context_cache = new_ctx;
-	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
-	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
-}
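The value cached in tlb_context_cache and stored in sparc64_ctx_val packs a generation ("version") into the high bits and the hardware context number into the low CTX_NR_BITS bits; bumping the version on rollover is what invalidates every previously handed-out context at once. A sketch of the decomposition using only the masks the function itself uses (decode_ctx is our illustrative name):

	static void decode_ctx(unsigned long ctx_val)
	{
		unsigned long nr  = ctx_val & CTX_NR_MASK;	/* hardware context number */
		unsigned long ver = ctx_val & CTX_VERSION_MASK;	/* allocation generation */

		printk(KERN_DEBUG "ctx %lx = nr %lx, version %lx\n",
		       ctx_val, nr, ver);
	}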
-
-static int numa_enabled = 1;
-static int numa_debug;
-
-static int __init early_numa(char *p)
-{
-	if (!p)
-		return 0;
-
-	if (strstr(p, "off"))
-		numa_enabled = 0;
-
-	if (strstr(p, "debug"))
-		numa_debug = 1;
-
-	return 0;
-}
-early_param("numa", early_numa);
-
-#define numadbg(f, a...) \
-do {	if (numa_debug) \
-		printk(KERN_INFO f, ## a); \
-} while (0)
-
-static void __init find_ramdisk(unsigned long phys_base)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
-		unsigned long ramdisk_image;
-
-		/* Older versions of the bootloader only supported a
-		 * 32-bit physical address for the ramdisk image
-		 * location, stored at sparc_ramdisk_image.  Newer
-		 * SILO versions set sparc_ramdisk_image to zero and
-		 * provide a full 64-bit physical address at
-		 * sparc_ramdisk_image64.
-		 */
-		ramdisk_image = sparc_ramdisk_image;
-		if (!ramdisk_image)
-			ramdisk_image = sparc_ramdisk_image64;
-
-		/* Another bootloader quirk.  The bootloader normalizes
-		 * the physical address to KERNBASE, so we have to
-		 * factor that back out and add in the lowest valid
-		 * physical page address to get the true physical address.
-		 */
-		ramdisk_image -= KERNBASE;
-		ramdisk_image += phys_base;
-
-		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
-			ramdisk_image, sparc_ramdisk_size);
-
-		initrd_start = ramdisk_image;
-		initrd_end = ramdisk_image + sparc_ramdisk_size;
-
-		lmb_reserve(initrd_start, sparc_ramdisk_size);
-
-		initrd_start += PAGE_OFFSET;
-		initrd_end += PAGE_OFFSET;
-	}
-#endif
-}
-
-struct node_mem_mask {
-	unsigned long mask;
-	unsigned long val;
-	unsigned long bootmem_paddr;
-};
-static struct node_mem_mask node_masks[MAX_NUMNODES];
-static int num_node_masks;
-
-int numa_cpu_lookup_table[NR_CPUS];
-cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
-
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-
-struct mdesc_mblock {
-	u64	base;
-	u64	size;
-	u64	offset; /* RA-to-PA */
-};
-static struct mdesc_mblock *mblocks;
-static int num_mblocks;
-
-static unsigned long ra_to_pa(unsigned long addr)
-{
-	int i;
-
-	for (i = 0; i < num_mblocks; i++) {
-		struct mdesc_mblock *m = &mblocks[i];
-
-		if (addr >= m->base &&
-		    addr < (m->base + m->size)) {
-			addr += m->offset;
-			break;
-		}
-	}
-	return addr;
-}
-
-static int find_node(unsigned long addr)
-{
-	int i;
-
-	addr = ra_to_pa(addr);
-	for (i = 0; i < num_node_masks; i++) {
-		struct node_mem_mask *p = &node_masks[i];
-
-		if ((addr & p->mask) == p->val)
-			return i;
-	}
-	return -1;
-}
-
-static unsigned long nid_range(unsigned long start, unsigned long end,
-			       int *nid)
-{
-	*nid = find_node(start);
-	start += PAGE_SIZE;
-	while (start < end) {
-		int n = find_node(start);
-
-		if (n != *nid)
-			break;
-		start += PAGE_SIZE;
-	}
-
-	if (start > end)
-		start = end;
-
-	return start;
-}
-#else
-static unsigned long nid_range(unsigned long start, unsigned long end,
-			       int *nid)
-{
-	*nid = 0;
-	return end;
-}
-#endif
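find_node() assigns an address to the first node whose (mask, val) pair matches: a node owns addr when (addr & mask) == val. With the JBUS encoding set up later in this file (mask = ~((1UL << 36) - 1), val = cpu << 36, and a 1-to-1 CPU-to-node mapping), that test reduces to reading bits 36 and up. A one-line illustration (jbus_node_of is our name, not code from the commit, and it assumes densely numbered CPUs starting at 0):

	static int jbus_node_of(unsigned long paddr)
	{
		/* node id == CPU id == physical address bits 36+ on JBUS */
		return (int) (paddr >> 36);
	}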
-
-/* This must be invoked after performing all of the necessary
- * add_active_range() calls for 'nid'.  We need to be able to get
- * correct data from get_pfn_range_for_nid().
- */
-static void __init allocate_node_data(int nid)
-{
-	unsigned long paddr, num_pages, start_pfn, end_pfn;
-	struct pglist_data *p;
-
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-	paddr = lmb_alloc_nid(sizeof(struct pglist_data),
-			      SMP_CACHE_BYTES, nid, nid_range);
-	if (!paddr) {
-		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
-		prom_halt();
-	}
-	NODE_DATA(nid) = __va(paddr);
-	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
-
-	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
-#endif
-
-	p = NODE_DATA(nid);
-
-	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
-	p->node_start_pfn = start_pfn;
-	p->node_spanned_pages = end_pfn - start_pfn;
-
-	if (p->node_spanned_pages) {
-		num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
-
-		paddr = lmb_alloc_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid,
-				      nid_range);
-		if (!paddr) {
-			prom_printf("Cannot allocate bootmap for nid[%d]\n",
-				    nid);
-			prom_halt();
-		}
-		node_masks[nid].bootmem_paddr = paddr;
-	}
-}
-
-static void init_node_masks_nonnuma(void)
-{
-	int i;
-
-	numadbg("Initializing tables for non-numa.\n");
-
-	node_masks[0].mask = node_masks[0].val = 0;
-	num_node_masks = 1;
-
-	for (i = 0; i < NR_CPUS; i++)
-		numa_cpu_lookup_table[i] = 0;
-
-	numa_cpumask_lookup_table[0] = CPU_MASK_ALL;
-}
-
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data *node_data[MAX_NUMNODES];
-
-EXPORT_SYMBOL(numa_cpu_lookup_table);
-EXPORT_SYMBOL(numa_cpumask_lookup_table);
-EXPORT_SYMBOL(node_data);
-
-struct mdesc_mlgroup {
-	u64	node;
-	u64	latency;
-	u64	match;
-	u64	mask;
-};
-static struct mdesc_mlgroup *mlgroups;
-static int num_mlgroups;
-
-static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
-				   u32 cfg_handle)
-{
-	u64 arc;
-
-	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
-		u64 target = mdesc_arc_target(md, arc);
-		const u64 *val;
-
-		val = mdesc_get_property(md, target,
-					 "cfg-handle", NULL);
-		if (val && *val == cfg_handle)
-			return 0;
-	}
-	return -ENODEV;
-}
-
-static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
-				    u32 cfg_handle)
-{
-	u64 arc, candidate, best_latency = ~(u64)0;
-
-	candidate = MDESC_NODE_NULL;
-	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
-		u64 target = mdesc_arc_target(md, arc);
-		const char *name = mdesc_node_name(md, target);
-		const u64 *val;
-
-		if (strcmp(name, "pio-latency-group"))
-			continue;
-
-		val = mdesc_get_property(md, target, "latency", NULL);
-		if (!val)
-			continue;
-
-		if (*val < best_latency) {
-			candidate = target;
-			best_latency = *val;
-		}
-	}
-
-	if (candidate == MDESC_NODE_NULL)
-		return -ENODEV;
-
-	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
-}
-
-int of_node_to_nid(struct device_node *dp)
-{
-	const struct linux_prom64_registers *regs;
-	struct mdesc_handle *md;
-	u32 cfg_handle;
-	int count, nid;
-	u64 grp;
-
-	/* This is the right thing to do on currently supported
-	 * SUN4U NUMA platforms as well, as the PCI controller does
-	 * not sit behind any particular memory controller.
-	 */
-	if (!mlgroups)
-		return -1;
-
-	regs = of_get_property(dp, "reg", NULL);
-	if (!regs)
-		return -1;
-
-	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
-
-	md = mdesc_grab();
-
-	count = 0;
-	nid = -1;
-	mdesc_for_each_node_by_name(md, grp, "group") {
-		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
-			nid = count;
-			break;
-		}
-		count++;
-	}
-
-	mdesc_release(md);
-
-	return nid;
-}
-
-static void __init add_node_ranges(void)
-{
-	int i;
-
-	for (i = 0; i < lmb.memory.cnt; i++) {
-		unsigned long size = lmb_size_bytes(&lmb.memory, i);
-		unsigned long start, end;
-
-		start = lmb.memory.region[i].base;
-		end = start + size;
-		while (start < end) {
-			unsigned long this_end;
-			int nid;
-
-			this_end = nid_range(start, end, &nid);
-
-			numadbg("Adding active range nid[%d] "
-				"start[%lx] end[%lx]\n",
-				nid, start, this_end);
-
-			add_active_range(nid,
-					 start >> PAGE_SHIFT,
-					 this_end >> PAGE_SHIFT);
-
-			start = this_end;
-		}
-	}
-}
-
-static int __init grab_mlgroups(struct mdesc_handle *md)
-{
-	unsigned long paddr;
-	int count = 0;
-	u64 node;
-
-	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
-		count++;
-	if (!count)
-		return -ENOENT;
-
-	paddr = lmb_alloc(count * sizeof(struct mdesc_mlgroup),
-			  SMP_CACHE_BYTES);
-	if (!paddr)
-		return -ENOMEM;
-
-	mlgroups = __va(paddr);
-	num_mlgroups = count;
-
-	count = 0;
-	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
-		struct mdesc_mlgroup *m = &mlgroups[count++];
-		const u64 *val;
-
-		m->node = node;
-
-		val = mdesc_get_property(md, node, "latency", NULL);
-		m->latency = *val;
-		val = mdesc_get_property(md, node, "address-match", NULL);
-		m->match = *val;
-		val = mdesc_get_property(md, node, "address-mask", NULL);
-		m->mask = *val;
-
-		numadbg("MLGROUP[%d]: node[%lx] latency[%lx] "
-			"match[%lx] mask[%lx]\n",
-			count - 1, m->node, m->latency, m->match, m->mask);
-	}
-
-	return 0;
-}
-
-static int __init grab_mblocks(struct mdesc_handle *md)
-{
-	unsigned long paddr;
-	int count = 0;
-	u64 node;
-
-	mdesc_for_each_node_by_name(md, node, "mblock")
-		count++;
-	if (!count)
-		return -ENOENT;
-
-	paddr = lmb_alloc(count * sizeof(struct mdesc_mblock),
-			  SMP_CACHE_BYTES);
-	if (!paddr)
-		return -ENOMEM;
-
-	mblocks = __va(paddr);
-	num_mblocks = count;
-
-	count = 0;
-	mdesc_for_each_node_by_name(md, node, "mblock") {
-		struct mdesc_mblock *m = &mblocks[count++];
-		const u64 *val;
-
-		val = mdesc_get_property(md, node, "base", NULL);
-		m->base = *val;
-		val = mdesc_get_property(md, node, "size", NULL);
-		m->size = *val;
-		val = mdesc_get_property(md, node,
-					 "address-congruence-offset", NULL);
-		m->offset = *val;
-
-		numadbg("MBLOCK[%d]: base[%lx] size[%lx] offset[%lx]\n",
-			count - 1, m->base, m->size, m->offset);
-	}
-
-	return 0;
-}
-
-static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
-					       u64 grp, cpumask_t *mask)
-{
-	u64 arc;
-
-	cpus_clear(*mask);
-
-	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
-		u64 target = mdesc_arc_target(md, arc);
-		const char *name = mdesc_node_name(md, target);
-		const u64 *id;
-
-		if (strcmp(name, "cpu"))
-			continue;
-		id = mdesc_get_property(md, target, "id", NULL);
-		if (*id < NR_CPUS)
-			cpu_set(*id, *mask);
-	}
-}
-
-static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
-{
-	int i;
-
-	for (i = 0; i < num_mlgroups; i++) {
-		struct mdesc_mlgroup *m = &mlgroups[i];
-		if (m->node == node)
-			return m;
-	}
-	return NULL;
-}
-
-static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
-				      int index)
-{
-	struct mdesc_mlgroup *candidate = NULL;
-	u64 arc, best_latency = ~(u64)0;
-	struct node_mem_mask *n;
-
-	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
-		u64 target = mdesc_arc_target(md, arc);
-		struct mdesc_mlgroup *m = find_mlgroup(target);
-		if (!m)
-			continue;
-		if (m->latency < best_latency) {
-			candidate = m;
-			best_latency = m->latency;
-		}
-	}
-	if (!candidate)
-		return -ENOENT;
-
-	if (num_node_masks != index) {
-		printk(KERN_ERR "Inconsistent NUMA state, "
-		       "index[%d] != num_node_masks[%d]\n",
-		       index, num_node_masks);
-		return -EINVAL;
-	}
-
-	n = &node_masks[num_node_masks++];
-
-	n->mask = candidate->mask;
-	n->val = candidate->match;
-
-	numadbg("NUMA NODE[%d]: mask[%lx] val[%lx] (latency[%lx])\n",
-		index, n->mask, n->val, candidate->latency);
-
-	return 0;
-}
-
-static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
-					 int index)
-{
-	cpumask_t mask;
-	int cpu;
-
-	numa_parse_mdesc_group_cpus(md, grp, &mask);
-
-	for_each_cpu_mask(cpu, mask)
-		numa_cpu_lookup_table[cpu] = index;
-	numa_cpumask_lookup_table[index] = mask;
-
-	if (numa_debug) {
-		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
-		for_each_cpu_mask(cpu, mask)
-			printk("%d ", cpu);
-		printk("]\n");
-	}
-
-	return numa_attach_mlgroup(md, grp, index);
-}
-
-static int __init numa_parse_mdesc(void)
-{
-	struct mdesc_handle *md = mdesc_grab();
-	int i, err, count;
-	u64 node;
-
-	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
-	if (node == MDESC_NODE_NULL) {
-		mdesc_release(md);
-		return -ENOENT;
-	}
-
-	err = grab_mblocks(md);
-	if (err < 0)
-		goto out;
-
-	err = grab_mlgroups(md);
-	if (err < 0)
-		goto out;
-
-	count = 0;
-	mdesc_for_each_node_by_name(md, node, "group") {
-		err = numa_parse_mdesc_group(md, node, count);
-		if (err < 0)
-			break;
-		count++;
-	}
-
-	add_node_ranges();
-
-	for (i = 0; i < num_node_masks; i++) {
-		allocate_node_data(i);
-		node_set_online(i);
-	}
-
-	err = 0;
-out:
-	mdesc_release(md);
-	return err;
-}
-
-static int __init numa_parse_jbus(void)
-{
-	unsigned long cpu, index;
-
-	/* NUMA node id is encoded in bits 36 and higher, and there is
-	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
-	 */
-	index = 0;
-	for_each_present_cpu(cpu) {
-		numa_cpu_lookup_table[cpu] = index;
-		numa_cpumask_lookup_table[index] = cpumask_of_cpu(cpu);
-		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
-		node_masks[index].val = cpu << 36UL;
-
-		index++;
-	}
-	num_node_masks = index;
-
-	add_node_ranges