-rw-r--r--   arch/powerpc/kernel/entry_64.S             |  3
-rw-r--r--   arch/powerpc/kernel/pci_32.c               |  5
-rw-r--r--   arch/powerpc/kernel/smp.c                  |  9
-rw-r--r--   arch/powerpc/mm/hash_low_64.S              |  6
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c            |  2
-rw-r--r--   arch/powerpc/mm/numa.c                     |  4
-rw-r--r--   arch/powerpc/mm/slb.c                      | 28
-rw-r--r--   arch/powerpc/platforms/cell/spufs/sched.c  |  3
-rw-r--r--   arch/powerpc/platforms/powermac/feature.c  |  6
-rw-r--r--   arch/powerpc/platforms/ps3/setup.c         |  2
-rw-r--r--   include/asm-powerpc/mmu-hash64.h           |  1
-rw-r--r--   include/asm-powerpc/pgtable-64k.h          |  8

12 files changed, 46 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9ef28da2c7f..952eba6701f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -389,8 +389,11 @@ BEGIN_FTR_SECTION
         ld      r9,PACA_SLBSHADOWPTR(r13)
         li      r12,0
         std     r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
+        eieio
         std     r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
+        eieio
         std     r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */
+        eieio
 
         slbie   r6
         slbie   r6              /* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index cd35c969bb2..04a3109ae3c 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -581,8 +581,11 @@ pcibios_assign_resources(void)
 
                 if ((r->flags & IORESOURCE_UNSET) && r->end &&
                     (!ppc_md.pcibios_enable_device_hook ||
                      !ppc_md.pcibios_enable_device_hook(dev, 1))) {
+                        int rc;
+
                         r->flags &= ~IORESOURCE_UNSET;
-                        pci_assign_resource(dev, idx);
+                        rc = pci_assign_resource(dev, idx);
+                        BUG_ON(rc);
                 }
         }
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 087c92f2a3e..1ea43160f54 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -212,11 +212,6 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
                 atomic_set(&data.finished, 0);
 
         spin_lock(&call_lock);
-        /* Must grab online cpu count with preempt disabled, otherwise
-         * it can change. */
-        num_cpus = num_online_cpus() - 1;
-        if (!num_cpus)
-                goto done;
 
         /* remove 'self' from the map */
         if (cpu_isset(smp_processor_id(), map))
@@ -224,7 +219,9 @@ int smp_call_function_map(void (*func) (void *info), void *info, int nonatomic,
 
         /* sanity check the map, remove any non-online processors. */
         cpus_and(map, map, cpu_online_map);
-        if (cpus_empty(map))
+
+        num_cpus = cpus_weight(map);
+        if (!num_cpus)
                 goto done;
 
         call_data = &data;
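
The smp.c change above only counts CPUs that will actually be signalled: 'self' is removed from the requested map, the map is ANDed with cpu_online_map, and the target count is then taken from the resulting map with cpus_weight() rather than from num_online_cpus() - 1. A minimal userspace sketch of that counting order, modelling a cpumask as a single 64-bit word (the helper names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Toy cpumask: one bit per CPU, up to 64 CPUs. */
typedef uint64_t cpumask_t;

static int weight(cpumask_t m)                  /* like cpus_weight() */
{
        int n = 0;
        while (m) {
                n += m & 1;
                m >>= 1;
        }
        return n;
}

/* Count how many other online CPUs the call will actually target. */
static int targets(cpumask_t requested, cpumask_t online, int self)
{
        requested &= ~((cpumask_t)1 << self);   /* remove 'self' from the map */
        requested &= online;                    /* drop non-online CPUs */
        return weight(requested);               /* may legitimately be zero */
}

int main(void)
{
        cpumask_t online = 0x0f;                /* CPUs 0-3 online */
        cpumask_t map = 0xf3;                   /* caller asked for 0, 1, 4-7 */

        printf("CPUs to notify: %d\n", targets(map, online, 0)); /* -> 1 */
        return 0;
}

Counting only after the map has been sanitised is what lets a request aimed solely at offline CPUs fall straight through to the "done" path without signalling anyone.
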
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 4762ff7c14d..35eabfb5072 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -472,10 +472,12 @@ _GLOBAL(htab_call_hpte_insert1)
         /* Now try secondary slot */
 
         /* real page number in r5, PTE RPN value + index */
-        rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
+        andis.  r0,r31,_PAGE_4K_PFN@h
+        srdi    r5,r31,PTE_RPN_SHIFT
+        bne-    3f
         sldi    r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT
         add     r5,r5,r25
-        sldi    r5,r5,HW_PAGE_SHIFT
+3:      sldi    r5,r5,HW_PAGE_SHIFT
 
         /* Calculate secondary group hash */
         andc    r0,r27,r28
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bc7b0cedae5..f1789578747 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -759,7 +759,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
                     mmu_psize_defs[mmu_vmalloc_psize].sllp) {
                         get_paca()->vmalloc_sllp =
                                 mmu_psize_defs[mmu_vmalloc_psize].sllp;
-                        slb_flush_and_rebolt();
+                        slb_vmalloc_update();
                 }
 #endif /* CONFIG_PPC_64K_PAGES */
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index de45aa82d97..c12adc3ddff 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -307,9 +307,9 @@ static void __init parse_drconf_memory(struct device_node *memory)
         const unsigned int *lm, *dm, *aa;
         unsigned int ls, ld, la;
         unsigned int n, aam, aalen;
-        unsigned long lmb_size, size;
+        unsigned long lmb_size, size, start;
         int nid, default_nid = 0;
-        unsigned int start, ai, flags;
+        unsigned int ai, flags;
 
         lm = of_get_property(memory, "ibm,lmb-size", &ls);
         dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 304375a7357..b0697017d0e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
         return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+                                     unsigned long flags,
                                      unsigned long entry)
 {
         /*
@@ -61,11 +62,11 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
          * updating it.
          */
         get_slb_shadow()->save_area[entry].esid = 0;
-        barrier();
-        get_slb_shadow()->save_area[entry].vsid = vsid;
-        barrier();
-        get_slb_shadow()->save_area[entry].esid = esid;
-
+        smp_wmb();
+        get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+        smp_wmb();
+        get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+        smp_wmb();
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
@@ -76,8 +77,7 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
          * we don't get a stale entry here if we get preempted by PHYP
          * between these two statements.
          */
-        slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-                          entry);
+        slb_shadow_update(ea, flags, entry);
 
         asm volatile("slbmte  %0,%1" :
                      : "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@ void slb_flush_and_rebolt(void)
                 ksp_esid_data &= ~SLB_ESID_V;
 
         /* Only third entry (stack) may change here so only resave that */
-        slb_shadow_update(ksp_esid_data,
-                          mk_vsid_data(ksp_esid_data, lflags), 2);
+        slb_shadow_update(get_paca()->kstack, lflags, 2);
 
         /* We need to do this all in asm, so we're sure we don't touch
          * the stack between the slbia and rebolting it. */
@@ -123,6 +122,15 @@ void slb_flush_and_rebolt(void)
                      : "memory");
 }
 
+void slb_vmalloc_update(void)
+{
+        unsigned long vflags;
+
+        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+        slb_shadow_update(VMALLOC_START, vflags, 1);
+        slb_flush_and_rebolt();
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
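
The slb.c rework above funnels every shadow-buffer write through one ordering: clear the ESID so the entry is invalid, write the new VSID, then write the new ESID, with a write barrier after each store so the hypervisor never observes a half-updated entry. A rough userspace model of that ordering, using C11 release fences in place of smp_wmb() (the types and names are simplified stand-ins, not the kernel's):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for one SLB shadow save-area entry. */
struct shadow_entry {
        _Atomic uint64_t esid;  /* the valid bit lives here; 0 == invalid */
        _Atomic uint64_t vsid;
};

static void shadow_update(struct shadow_entry *e, uint64_t esid, uint64_t vsid)
{
        /* Invalidate the entry before touching it... */
        atomic_store_explicit(&e->esid, 0, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);      /* stands in for smp_wmb() */

        /* ...fill in the new VSID... */
        atomic_store_explicit(&e->vsid, vsid, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);

        /* ...and only then make the entry valid again. */
        atomic_store_explicit(&e->esid, esid, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
}

int main(void)
{
        struct shadow_entry e = { 0, 0 };

        shadow_update(&e, 0x1000000000000500ULL, 0x4000000000000400ULL);
        printf("esid=%llx vsid=%llx\n",
               (unsigned long long)atomic_load(&e.esid),
               (unsigned long long)atomic_load(&e.vsid));
        return 0;
}

Because the entry is only marked valid again by the final ESID store, a reader racing with the update sees either the old entry or an invalid one, never a mix of old ESID and new VSID.
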
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 758a80ac080..c784edd40ea 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -351,7 +351,8 @@ static void aff_set_ref_point_location(struct spu_gang *gang)
                         lowest_offset = ctx->aff_offset;
         }
 
-        gang->aff_ref_spu = aff_ref_location(ctx, mem_aff, gs, lowest_offset);
+        gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
+                                                        lowest_offset);
 }
 
 static struct spu *ctx_location(struct spu *ref, int offset, int node)
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index f29705f8047..ba931be2175 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -826,13 +826,15 @@ core99_ata100_enable(struct device_node *node, long value)
 
         if (value) {
                 if (pci_device_from_OF_node(node, &pbus, &pid) == 0)
-                        pdev = pci_find_slot(pbus, pid);
+                        pdev = pci_get_bus_and_slot(pbus, pid);
                 if (pdev == NULL)
                         return 0;
                 rc = pci_enable_device(pdev);
+                if (rc == 0)
+                        pci_set_master(pdev);
+                pci_dev_put(pdev);
+
                 if (rc)
                         return rc;
-                pci_set_master(pdev);
         }
         return 0;
 }
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index aa05288de64..2952b22f1c8 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -109,7 +109,7 @@ static void ps3_panic(char *str)
 
 #if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
     defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
-static void prealloc(struct ps3_prealloc *p)
+static void __init prealloc(struct ps3_prealloc *p)
 {
         if (!p->size)
                 return;
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 695962f0205..3112ad14ad9 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -262,6 +262,7 @@ extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
+extern void slb_vmalloc_update(void);
 
 #endif /* __ASSEMBLY__ */
 
 /*
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 31cbd3d7fce..33ae9018fe7 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -49,12 +49,10 @@
 
 /* Shift to put page number into pte.
  *
- * That gives us a max RPN of 32 bits, which means a max of 48 bits
- * of addressable physical space.
- * We could get 3 more bits here by setting PTE_RPN_SHIFT to 29 but
- * 32 makes PTEs more readable for debugging for now :)
+ * That gives us a max RPN of 34 bits, which means a max of 50 bits
+ * of addressable physical space, or 46 bits for the special 4k PFNs.
  */
-#define PTE_RPN_SHIFT   (32)
+#define PTE_RPN_SHIFT   (30)
 #define PTE_RPN_MAX     (1UL << (64 - PTE_RPN_SHIFT))
 #define PTE_RPN_MASK    (~((1UL<<PTE_RPN_SHIFT)-1))
 
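
The arithmetic behind the new pgtable-64k.h comment can be checked directly: with PTE_RPN_SHIFT at 30 the RPN field occupies the top 64 - 30 = 34 bits of the PTE; an RPN normally names a 64K (2^16-byte) page, giving 34 + 16 = 50 bits of physical address, and 34 + 12 = 46 bits when the RPN holds a 4K PFN (_PAGE_4K_PFN). A small sketch that just evaluates those figures (the macro names below are local illustrations, not the kernel headers):

#include <stdio.h>

#define PTE_RPN_SHIFT   30      /* new value from the patch */
#define PAGE_SHIFT_64K  16      /* normal 64K page */
#define PAGE_SHIFT_4K   12      /* special _PAGE_4K_PFN case */

int main(void)
{
        int rpn_bits = 64 - PTE_RPN_SHIFT;

        printf("RPN field:            %d bits\n", rpn_bits);
        printf("physical space (64K): %d bits\n", rpn_bits + PAGE_SHIFT_64K);
        printf("physical space (4K):  %d bits\n", rpn_bits + PAGE_SHIFT_4K);
        /* Prints 34, 50 and 46 bits, matching the updated comment. */
        return 0;
}

The old shift of 32 gave only (64 - 32) + 16 = 48 addressable bits, which is the limit the removed comment text described.
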