Diffstat (limited to 'arch/powerpc/mm/hash_native_64.c')
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	84
1 file changed, 42 insertions(+), 42 deletions(-)
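Editorial note: this patch makes the native hash page table code endian-safe. The HPTE dwords (hptep->v and hptep->r) are laid out big-endian in memory by the hardware, so every raw load now goes through be64_to_cpu() and every store through cpu_to_be64(). Below is a minimal userspace sketch of the read-modify-write pattern the diff introduces in native_hpte_updatepp(); the accessor stubs, the standalone struct, and main() are illustrative stand-ins for the kernel definitions, assuming a little-endian host.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's be64 accessors: on a
 * little-endian host they byte-swap; on big-endian they would be no-ops. */
static inline uint64_t be64_to_cpu(uint64_t x) { return __builtin_bswap64(x); }
static inline uint64_t cpu_to_be64(uint64_t x) { return __builtin_bswap64(x); }

/* The HPTE layout is defined by the hardware and stored big-endian. */
struct hash_pte { uint64_t v; uint64_t r; };

/* Bit values as in the kernel's asm/mmu-hash64.h */
#define HPTE_R_PP	0x0000000000000003UL
#define HPTE_R_N	0x0000000000000004UL
#define HPTE_R_C	0x0000000000000080UL

/* The pattern native_hpte_updatepp() now uses: convert the stored dword
 * to CPU order, update the protection bits, convert back before storing. */
static void update_pp(struct hash_pte *hptep, unsigned long newpp)
{
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
}

int main(void)
{
	struct hash_pte pte = { .r = cpu_to_be64(0x100) };

	update_pp(&pte, 0x2);	/* set PP=2, clear N */
	printf("r = 0x%llx\n", (unsigned long long)be64_to_cpu(pte.r));
	return 0;
}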
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index c33d939120c..cf1d325eae8 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -35,7 +35,11 @@
 #define DBG_LOW(fmt...)
 #endif
 
+#ifdef __BIG_ENDIAN__
 #define HPTE_LOCK_BIT		3
+#else
+#define HPTE_LOCK_BIT		(56+3)
+#endif
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
@@ -78,17 +82,14 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
 		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
 		va |= penc << 12;
 		va |= ssize << 8;
-		/* Add AVAL part */
-		if (psize != apsize) {
-			/*
-			 * MPSS, 64K base page size and 16MB parge page size
-			 * We don't need all the bits, but rest of the bits
-			 * must be ignored by the processor.
-			 * vpn cover upto 65 bits of va. (0...65) and we need
-			 * 58..64 bits of va.
-			 */
-			va |= (vpn & 0xfe);
-		}
+		/*
+		 * AVAL bits:
+		 * We don't need all the bits, but rest of the bits
+		 * must be ignored by the processor.
+		 * vpn cover upto 65 bits of va. (0...65) and we need
+		 * 58..64 bits of va.
+		 */
+		va |= (vpn & 0xfe); /* AVAL */
 		va |= 1; /* L */
 		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
 			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -129,17 +130,14 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
 		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
 		va |= penc << 12;
 		va |= ssize << 8;
-		/* Add AVAL part */
-		if (psize != apsize) {
-			/*
-			 * MPSS, 64K base page size and 16MB parge page size
-			 * We don't need all the bits, but rest of the bits
-			 * must be ignored by the processor.
-			 * vpn cover upto 65 bits of va. (0...65) and we need
-			 * 58..64 bits of va.
-			 */
-			va |= (vpn & 0xfe);
-		}
+		/*
+		 * AVAL bits:
+		 * We don't need all the bits, but rest of the bits
+		 * must be ignored by the processor.
+		 * vpn cover upto 65 bits of va. (0...65) and we need
+		 * 58..64 bits of va.
+		 */
+		va |= (vpn & 0xfe);
 		va |= 1; /* L */
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
 			     : : "r"(va) : "memory");
@@ -172,7 +170,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
 
 static inline void native_lock_hpte(struct hash_pte *hptep)
 {
-	unsigned long *word = &hptep->v;
+	unsigned long *word = (unsigned long *)&hptep->v;
 
 	while (1) {
 		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
@@ -184,7 +182,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 
 static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
-	unsigned long *word = &hptep->v;
+	unsigned long *word = (unsigned long *)&hptep->v;
 
 	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
@@ -204,10 +202,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	}
 
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
-		if (! (hptep->v & HPTE_V_VALID)) {
+		if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
 			/* retry with lock held */
 			native_lock_hpte(hptep);
-			if (! (hptep->v & HPTE_V_VALID))
+			if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
 				break;
 			native_unlock_hpte(hptep);
 		}
@@ -226,14 +224,14 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 			i, hpte_v, hpte_r);
 	}
 
-	hptep->r = hpte_r;
+	hptep->r = cpu_to_be64(hpte_r);
 	/* Guarantee the second dword is visible before the valid bit */
 	eieio();
 	/*
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
 	 */
-	hptep->v = hpte_v;
+	hptep->v = cpu_to_be64(hpte_v);
 
 	__asm__ __volatile__ ("ptesync" : : : "memory");
 
@@ -254,12 +252,12 @@ static long native_hpte_remove(unsigned long hpte_group)
 
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
 		hptep = htab_address + hpte_group + slot_offset;
-		hpte_v = hptep->v;
+		hpte_v = be64_to_cpu(hptep->v);
 
 		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
 			/* retry with lock held */
 			native_lock_hpte(hptep);
-			hpte_v = hptep->v;
+			hpte_v = be64_to_cpu(hptep->v);
 			if ((hpte_v & HPTE_V_VALID)
 			    && !(hpte_v & HPTE_V_BOLTED))
 				break;
@@ -294,7 +292,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
 	native_lock_hpte(hptep);
 
-	hpte_v = hptep->v;
+	hpte_v = be64_to_cpu(hptep->v);
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
 	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -308,8 +306,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	} else {
 		DBG_LOW(" -> hit\n");
 		/* Update the HPTE */
-		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
+		hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) |
+			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)));
 	}
 	native_unlock_hpte(hptep);
 
@@ -334,7 +332,7 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
 		hptep = htab_address + slot;
-		hpte_v = hptep->v;
+		hpte_v = be64_to_cpu(hptep->v);
 
 		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
 			/* HPTE matches */
@@ -369,8 +367,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	hptep = htab_address + slot;
 
 	/* Update the HPTE */
-	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
-		(newpp & (HPTE_R_PP | HPTE_R_N));
+	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
+			~(HPTE_R_PP | HPTE_R_N)) |
+		(newpp & (HPTE_R_PP | HPTE_R_N)));
 	/*
 	 * Ensure it is out of the tlb too. Bolted entries base and
 	 * actual page size will be same.
@@ -392,7 +391,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 
 	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
 	native_lock_hpte(hptep);
-	hpte_v = hptep->v;
+	hpte_v = be64_to_cpu(hptep->v);
 
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -458,7 +457,7 @@ static void native_hugepage_invalidate(struct mm_struct *mm,
 		hptep = htab_address + slot;
 		want_v = hpte_encode_avpn(vpn, psize, ssize);
 		native_lock_hpte(hptep);
-		hpte_v = hptep->v;
+		hpte_v = be64_to_cpu(hptep->v);
 
 		/* Even if we miss, we need to invalidate the TLB */
 		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -519,11 +518,12 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 			int *psize, int *apsize, int *ssize, unsigned long *vpn)
 {
 	unsigned long avpn, pteg, vpi;
-	unsigned long hpte_v = hpte->v;
+	unsigned long hpte_v = be64_to_cpu(hpte->v);
+	unsigned long hpte_r = be64_to_cpu(hpte->r);
 	unsigned long vsid, seg_off;
 	int size, a_size, shift;
 	/* Look at the 8 bit LP value */
-	unsigned int lp = (hpte->r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
 
 	if (!(hpte_v & HPTE_V_LARGE)) {
 		size = MMU_PAGE_4K;
@@ -612,7 +612,7 @@ static void native_hpte_clear(void)
 		 * running, right? and for crash dump, we probably
 		 * don't want to wait for a maybe bad cpu.
 		 */
-		hpte_v = hptep->v;
+		hpte_v = be64_to_cpu(hptep->v);
 
 		/*
 		 * Call __tlbie() here rather than tlbie() since we
@@ -664,7 +664,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		hptep = htab_address + slot;
 		want_v = hpte_encode_avpn(vpn, psize, ssize);
 		native_lock_hpte(hptep);
-		hpte_v = hptep->v;
+		hpte_v = be64_to_cpu(hptep->v);
 		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
 			native_unlock_hpte(hptep);
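Editorial note on the locking hunks: native_lock_hpte() uses the generic bitops (test_and_set_bit_lock()), which operate on a raw unsigned long in CPU byte order, so once hptep->v is treated as big-endian the pointer needs the explicit cast seen above. The lock bit number also changes on little-endian: bit 3 of the big-endian dword lives in the byte at offset 7 of the field, and a raw little-endian load places that byte at register bits 56..63, hence HPTE_LOCK_BIT becomes (56+3). A small sketch of that mapping follows; it assumes a little-endian host and uses illustrative names only.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint8_t hpte_v[8] = {0};
	uint64_t word;

	/* Set architected bit 3 (value 0x8) of the big-endian dword:
	 * it lives in the least significant byte, at memory offset 7. */
	hpte_v[7] = 1u << 3;

	/* Raw load with no byte swap, as the bitops' ldarx would do. */
	memcpy(&word, hpte_v, sizeof(word));

	/* On a little-endian CPU the byte at offset 7 becomes bits 56..63,
	 * so the same physical bit appears at position 56 + 3 = 59. */
	printf("bit position seen by bitops: %d\n", __builtin_ctzll(word));
	return 0;
}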
