Diffstat (limited to 'arch/powerpc/mm/hash_low_32.S')
-rw-r--r--	arch/powerpc/mm/hash_low_32.S	314
1 file changed, 204 insertions(+), 110 deletions(-)
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index 12ccd7155ba..115347f74ce 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -1,8 +1,4 @@
 /*
- * arch/ppc/kernel/hashtable.S
- *
- * $Id: hashtable.S,v 1.6 1999/10/08 01:56:15 paulus Exp $
- *
  * PowerPC version
  * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
@@ -23,7 +19,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <asm/reg.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -33,55 +28,27 @@
 #include <asm/asm-offsets.h>
 
 #ifdef CONFIG_SMP
-	.comm	mmu_hash_lock,4
+	.section .bss
+	.align	2
+	.globl mmu_hash_lock
+mmu_hash_lock:
+	.space	4
 #endif /* CONFIG_SMP */
 
 /*
- * Sync CPUs with hash_page taking & releasing the hash
- * table lock
- */
-#ifdef CONFIG_SMP
-	.text
-_GLOBAL(hash_page_sync)
-	lis	r8,mmu_hash_lock@h
-	ori	r8,r8,mmu_hash_lock@l
-	lis	r0,0x0fff
-	b	10f
-11:	lwz	r6,0(r8)
-	cmpwi	0,r6,0
-	bne	11b
-10:	lwarx	r6,0,r8
-	cmpwi	0,r6,0
-	bne-	11b
-	stwcx.	r0,0,r8
-	bne-	10b
-	isync
-	eieio
-	li	r0,0
-	stw	r0,0(r8)
-	blr
-#endif
-
-/*
  * Load a PTE into the hash table, if possible.
  * The address is in r4, and r3 contains an access flag:
  * _PAGE_RW (0x400) if a write.
  * r9 contains the SRR1 value, from which we use the MSR_PR bit.
- * SPRG3 contains the physical address of the current task's thread.
+ * SPRG_THREAD contains the physical address of the current task's thread.
  *
  * Returns to the caller if the access is illegal or there is no
  * mapping for the address.  Otherwise it places an appropriate PTE
  * in the hash table and returns from the exception.
- * Uses r0, r3 - r8, ctr, lr.
+ * Uses r0, r3 - r8, r10, ctr, lr.
  */
 	.text
 _GLOBAL(hash_page)
-#ifdef CONFIG_PPC64BRIDGE
-	mfmsr	r0
-	clrldi	r0,r0,1		/* make sure it's in 32-bit mode */
-	MTMSRD(r0)
-	isync
-#endif
 	tophys(r7,0)			/* gets -KERNELBASE into r7 */
 #ifdef CONFIG_SMP
 	addis	r8,r7,mmu_hash_lock@h
@@ -101,7 +68,7 @@ _GLOBAL(hash_page)
 	/* Get PTE (linux-style) and check access */
 	lis	r0,KERNELBASE@h		/* check if kernel address */
 	cmplw	0,r4,r0
-	mfspr	r8,SPRN_SPRG3		/* current task's THREAD (phys) */
+	mfspr	r8,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
 	ori	r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
 	lwz	r5,PGDIR(r8)		/* virt page-table root */
 	blt+	112f			/* assume user more likely */
@@ -109,9 +76,15 @@ _GLOBAL(hash_page)
 	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
 	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:	add	r5,r5,r7		/* convert to phys addr */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
 	lwz	r8,0(r5)		/* get pmd entry */
 	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
+#else
+	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
+	lwzx	r8,r8,r5		/* Get L1 entry */
+	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
+#endif
 #ifdef CONFIG_SMP
 	beq-	hash_page_out		/* return if no mapping */
 #else
@@ -121,7 +94,11 @@ _GLOBAL(hash_page)
 	   to the address following the rfi. */
 	beqlr-
 #endif
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
+#else
+	rlwimi	r8,r4,23,20,28		/* compute pte address */
+#endif
 	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
 	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
@@ -130,9 +107,15 @@ _GLOBAL(hash_page)
 	 * because almost always, there won't be a permission violation
 	 * and there won't already be an HPTE, and thus we will have
 	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
+	 *
+	 * If PTE_64BIT is set, the low word is the flags word; use that
+	 * word for locking since it contains all the interesting bits.
 	 */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 retry:
-	lwarx	r6,0,r8			/* get linux-style pte */
+	lwarx	r6,0,r8			/* get linux-style pte, flag word */
 	andc.	r5,r3,r6		/* check access & ~permission */
 #ifdef CONFIG_SMP
 	bne-	hash_page_out		/* return if access not permitted */
@@ -140,6 +123,15 @@ retry:
 	bnelr-
 #endif
 	or	r5,r0,r6		/* set accessed/dirty bits */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	stwcx.	r5,0,r8			/* attempt to update PTE */
 	bne-	retry			/* retry if someone got there first */
@@ -192,7 +184,7 @@ _GLOBAL(add_hash_page)
 	add	r3,r3,r0		/* note create_hpte trims to 24 bits */
 
 #ifdef CONFIG_SMP
-	rlwinm	r8,r1,0,0,18		/* use cpu number to make tag */
+	CURRENT_THREAD_INFO(r8, r1)	/* use cpu number to make tag */
 	lwz	r8,TI_CPU(r8)		/* to go in mmu_hash_lock */
 	oris	r8,r8,12
 #endif /* CONFIG_SMP */
@@ -206,9 +198,9 @@ _GLOBAL(add_hash_page)
 	 * we can't take a hash table miss (assuming the code is
 	 * covered by a BAT).  -- paulus
 	 */
-	mfmsr	r10
+	mfmsr	r9
 	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
 	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
 	mtmsr	r0
 	SYNC_601
@@ -217,14 +209,14 @@ _GLOBAL(add_hash_page)
 	tophys(r7,0)
 
 #ifdef CONFIG_SMP
-	addis	r9,r7,mmu_hash_lock@ha
-	addi	r9,r9,mmu_hash_lock@l
-10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
+10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
 	cmpi	0,r0,0
 	bne-	11f
-	stwcx.	r8,0,r9
+	stwcx.	r8,0,r6
 	beq+	12f
-11:	lwz	r0,0(r9)
+11:	lwz	r0,0(r6)
 	cmpi	0,r0,0
 	beq	10b
 	b	11b
@@ -237,10 +229,24 @@ _GLOBAL(add_hash_page)
 	 * HPTE, so we just unlock and return.
 	 */
 	mr	r8,r5
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29
+#else
+	rlwimi	r8,r4,23,20,28
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 1:	lwarx	r6,0,r8
 	andi.	r0,r6,_PAGE_HASHPTE
 	bne	9f			/* if HASHPTE already set, done */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	ori	r5,r6,_PAGE_HASHPTE
 	stwcx.	r5,0,r8
 	bne-	1b
@@ -249,13 +255,15 @@ _GLOBAL(add_hash_page)
 
 9:
 #ifdef CONFIG_SMP
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
 	eieio
 	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	stw	r0,0(r6)		/* clear mmu_hash_lock */
 #endif
 
 	/* reenable interrupts and DR */
-	mtmsr	r10
+	mtmsr	r9
 	SYNC_601
 	isync
@@ -270,7 +278,8 @@ _GLOBAL(add_hash_page)
 * r5 contains the linux PTE, r6 contains the old value of the
 * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
 * offset to be added to addresses (0 if the MMU is on,
- * -KERNELBASE if it is off).
+ * -KERNELBASE if it is off).  r10 contains the upper half of
+ * the PTE if CONFIG_PTE_64BIT.
 * On SMP, the caller should have the mmu_hash_lock held.
 * We assume that the caller has (or will) set the _PAGE_HASHPTE
 * bit in the linux PTE in memory.  The value passed in r6 should
@@ -287,12 +296,12 @@ Hash_base = 0xc0180000
 Hash_bits = 12				/* e.g. 256kB hash table */
 Hash_msk = (((1 << Hash_bits) - 1) * 64)
 
-#ifndef CONFIG_PPC64BRIDGE
 /* defines for the PTE format for 32-bit PPCs */
-#define PTE_SIZE	8
+#define HPTE_SIZE	8
 #define PTEG_SIZE	64
 #define LG_PTEG_SIZE	6
 #define LDPTEu		lwzu
+#define LDPTE		lwz
 #define STPTE		stw
 #define CMPPTE		cmpw
 #define PTE_H		0x40
@@ -301,21 +310,6 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
 #define SET_V(r)	oris r,r,PTE_V@h
 #define CLR_V(r,t)	rlwinm r,r,0,1,31
 
-#else
-/* defines for the PTE format for 64-bit PPCs */
-#define PTE_SIZE	16
-#define PTEG_SIZE	128
-#define LG_PTEG_SIZE	7
-#define LDPTEu		ldu
-#define STPTE		std
-#define CMPPTE		cmpd
-#define PTE_H		2
-#define PTE_V		1
-#define TST_V(r)	andi. r,r,PTE_V
-#define SET_V(r)	ori r,r,PTE_V
-#define CLR_V(r,t)	li t,PTE_V; andc r,r,t
-#endif /* CONFIG_PPC64BRIDGE */
-
 #define HASH_LEFT	31-(LG_PTEG_SIZE+Hash_bits-1)
 #define HASH_RIGHT	31-LG_PTEG_SIZE
@@ -326,21 +320,20 @@ _GLOBAL(create_hpte)
 	and	r8,r8,r0		/* writable if _RW & _DIRTY */
 	rlwimi	r5,r5,32-1,30,30	/* _PAGE_USER -> PP msb */
 	rlwimi	r5,r5,32-2,31,31	/* _PAGE_USER -> PP lsb */
-	ori	r8,r8,0xe14		/* clear out reserved bits and M */
+	ori	r8,r8,0xe04		/* clear out reserved bits */
 	andc	r8,r5,r8		/* PP = user? (rw&dirty? 2: 3): 0 */
 BEGIN_FTR_SECTION
-	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+	rlwinm	r8,r8,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
+END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
+#ifdef CONFIG_PTE_64BIT
+	/* Put the XPN bits into the PTE */
+	rlwimi	r8,r10,8,20,22
+	rlwimi	r8,r10,2,29,29
+#endif
 
 	/* Construct the high word of the PPC-style PTE (r5) */
-#ifndef CONFIG_PPC64BRIDGE
 	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
 	rlwimi	r5,r4,10,26,31		/* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
-	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
-	sldi	r5,r3,12		/* shift vsid into position */
-	rlwimi	r5,r4,16,20,24		/* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
 	SET_V(r5)			/* set V (valid) bit */
 
 	/* Get the address of the primary PTE group in the hash table (r3) */
@@ -366,8 +359,8 @@ _GLOBAL(hash_page_patch_A)
 
 	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
 	mtctr	r0
-	addi	r4,r3,-PTE_SIZE
-1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
+	addi	r4,r3,-HPTE_SIZE
+1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
 	CMPPTE	0,r6,r5
 	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
 	beq+	found_slot
@@ -377,9 +370,9 @@ _GLOBAL(hash_page_patch_B)
 	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
 	xori	r4,r4,(-PTEG_SIZE & 0xffff)
-	addi	r4,r4,-PTE_SIZE
+	addi	r4,r4,-HPTE_SIZE
 	mtctr	r0
-2:	LDPTEu	r6,PTE_SIZE(r4)
+2:	LDPTEu	r6,HPTE_SIZE(r4)
 	CMPPTE	0,r6,r5
 	bdnzf	2,2b
 	beq+	found_slot
@@ -387,8 +380,8 @@ _GLOBAL(hash_page_patch_B)
 
 	/* Search the primary PTEG for an empty slot */
 10:	mtctr	r0
-	addi	r4,r3,-PTE_SIZE		/* search primary PTEG */
-1:	LDPTEu	r6,PTE_SIZE(r4)		/* get next PTE */
+	addi	r4,r3,-HPTE_SIZE	/* search primary PTEG */
+1:	LDPTEu	r6,HPTE_SIZE(r4)	/* get next PTE */
 	TST_V(r6)			/* test valid bit */
 	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
 	beq+	found_empty
@@ -404,9 +397,9 @@ _GLOBAL(hash_page_patch_C)
 	xoris	r4,r3,Hash_msk>>16	/* compute secondary hash */
 	xori	r4,r4,(-PTEG_SIZE & 0xffff)
-	addi	r4,r4,-PTE_SIZE
+	addi	r4,r4,-HPTE_SIZE
 	mtctr	r0
-2:	LDPTEu	r6,PTE_SIZE(r4)
+2:	LDPTEu	r6,HPTE_SIZE(r4)
 	TST_V(r6)
 	bdnzf	2,2b
 	beq+	found_empty
@@ -420,20 +413,37 @@ _GLOBAL(hash_page_patch_C)
 	 * and we know there is a definite (although small) speed
 	 * advantage to putting the PTE in the primary PTEG, we always
 	 * put the PTE in the primary PTEG.
+	 *
+	 * In addition, we skip any slot that is mapping kernel text in
+	 * order to avoid a deadlock when not using BAT mappings if
+	 * trying to hash in the kernel hash code itself after it has
+	 * already taken the hash table lock. This works in conjunction
+	 * with pre-faulting of the kernel text.
+	 *
+	 * If the hash table bucket is full of kernel text entries, we'll
+	 * lockup here but that shouldn't happen
 	 */
-	addis	r4,r7,next_slot@ha
+
+1:	addis	r4,r7,next_slot@ha	/* get next evict slot */
 	lwz	r6,next_slot@l(r4)
-	addi	r6,r6,PTE_SIZE
-	andi.	r6,r6,7*PTE_SIZE
+	addi	r6,r6,HPTE_SIZE		/* search for candidate */
+	andi.	r6,r6,7*HPTE_SIZE
 	stw	r6,next_slot@l(r4)
 	add	r4,r3,r6
+	LDPTE	r0,HPTE_SIZE/2(r4)	/* get PTE second word */
+	clrrwi	r0,r0,12
+	lis	r6,etext@h
+	ori	r6,r6,etext@l		/* get etext */
+	tophys(r6,r6)
+	cmpl	cr0,r0,r6		/* compare and try again */
+	blt	1b
 
 #ifndef CONFIG_SMP
 	/* Store PTE in PTEG */
 found_empty:
 	STPTE	r5,0(r4)
 found_slot:
-	STPTE	r8,PTE_SIZE/2(r4)
+	STPTE	r8,HPTE_SIZE/2(r4)
 
 #else /* CONFIG_SMP */
 /*
@@ -459,7 +469,7 @@ found_slot:
 	STPTE	r5,0(r4)
 	sync
 	TLBSYNC
-	STPTE	r8,PTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
+	STPTE	r8,HPTE_SIZE/2(r4)	/* put in correct RPN, WIMG, PP bits */
 	sync
 	SET_V(r5)
 	STPTE	r5,0(r4)		/* finally set V bit in PTE */
@@ -468,9 +478,15 @@ found_slot:
 	sync		/* make sure pte updates get to memory */
 	blr
 
-	.comm	next_slot,4
-	.comm	primary_pteg_full,4
-	.comm	htab_hash_searches,4
+	.section .bss
+	.align	2
+next_slot:
+	.space	4
+primary_pteg_full:
+	.space	4
+htab_hash_searches:
+	.space	4
+	.previous
 
 /*
  * Flush the entry for a particular page from the hash table.
@@ -500,14 +516,18 @@ _GLOBAL(flush_hash_pages)
 	isync
 
 	/* First find a PTE in the range that has _PAGE_HASHPTE set */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,22,20,29
-1:	lwz	r0,0(r5)
+#else
+	rlwimi	r5,r4,23,20,28
+#endif
+1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
 	cmpwi	cr1,r6,1
 	andi.	r0,r0,_PAGE_HASHPTE
 	bne	2f
 	ble	cr1,19f
 	addi	r4,r4,0x1000
-	addi	r5,r5,4
+	addi	r5,r5,PTE_SIZE
 	addi	r6,r6,-1
 	b	1b
@@ -518,20 +538,14 @@ _GLOBAL(flush_hash_pages)
 	add	r3,r3,r0		/* note code below trims to 24 bits */
 
 	/* Construct the high word of the PPC-style PTE (r11) */
-#ifndef CONFIG_PPC64BRIDGE
 	rlwinm	r11,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
 	rlwimi	r11,r4,10,26,31		/* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
-	clrlwi	r3,r3,8			/* reduce vsid to 24 bits */
-	sldi	r11,r3,12		/* shift vsid into position */
-	rlwimi	r11,r4,16,20,24		/* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
 	SET_V(r11)			/* set V (valid) bit */
 
 #ifdef CONFIG_SMP
 	addis	r9,r7,mmu_hash_lock@ha
 	addi	r9,r9,mmu_hash_lock@l
-	rlwinm	r8,r1,0,0,18
+	CURRENT_THREAD_INFO(r8, r1)
 	add	r8,r8,r7
 	lwz	r8,TI_CPU(r8)
 	oris	r8,r8,9
@@ -552,7 +566,10 @@ _GLOBAL(flush_hash_pages)
 	 * already clear, we're done (for this pte).  If not,
 	 * clear it (atomically) and proceed.  -- paulus.
 	 */
-33:	lwarx	r8,0,r5			/* fetch the pte */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r5,r5,PTE_FLAGS_OFFSET
+#endif
+33:	lwarx	r8,0,r5			/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f			/* done if HASHPTE is already clear */
 	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
@@ -569,8 +586,8 @@ _GLOBAL(flush_hash_patch_A)
 	/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
 	li	r0,8			/* PTEs/group */
 	mtctr	r0
-	addi	r12,r8,-PTE_SIZE
-1:	LDPTEu	r0,PTE_SIZE(r12)	/* get next PTE */
+	addi	r12,r8,-HPTE_SIZE
+1:	LDPTEu	r0,HPTE_SIZE(r12)	/* get next PTE */
 	CMPPTE	0,r0,r11
 	bdnzf	2,1b			/* loop while ctr != 0 && !cr0.eq */
 	beq+	3f
@@ -581,9 +598,9 @@ _GLOBAL(flush_hash_patch_B)
 	xoris	r12,r8,Hash_msk>>16	/* compute secondary hash */
 	xori	r12,r12,(-PTEG_SIZE & 0xffff)
-	addi	r12,r12,-PTE_SIZE
+	addi	r12,r12,-HPTE_SIZE
 	mtctr	r0
-2:	LDPTEu	r0,PTE_SIZE(r12)
+2:	LDPTEu	r0,HPTE_SIZE(r12)
 	CMPPTE	0,r0,r11
 	bdnzf	2,2b
 	xori	r11,r11,PTE_H		/* clear H again */
@@ -597,7 +614,7 @@ _GLOBAL(flush_hash_patch_B)
 8:	ble	cr1,9f			/* if all ptes checked */
 81:	addi	r6,r6,-1
-	addi	r5,r5,4			/* advance to next pte */
+	addi	r5,r5,PTE_SIZE
 	addi	r4,r4,0x1000
 	lwz	r0,0(r5)		/* check next pte */
 	cmpwi	cr1,r6,1
@@ -616,3 +633,80 @@
 	SYNC_601
 	isync
 	blr
+
+/*
+ * Flush an entry from the TLB
+ */
+_GLOBAL(_tlbie)
+#ifdef CONFIG_SMP
+	CURRENT_THREAD_INFO(r8, r1)
+	lwz	r8,TI_CPU(r8)
+	oris	r8,r8,11
+	mfmsr	r10
+	SYNC
+	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r0,0,28,26		/* clear DR */
+	mtmsr	r0
+	SYNC_601
+	isync
+	lis	r9,mmu_hash_lock@h
+	ori	r9,r9,mmu_hash_lock@l
+	tophys(r9,r9)
+10:	lwarx	r7,0,r9
+	cmpwi	0,r7,0
+	bne-	10b
+	stwcx.	r8,0,r9
+	bne-	10b
+	eieio
+	tlbie	r3
+	sync
+	TLBSYNC
+	li	r0,0
+	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	mtmsr	r10
+	SYNC_601
+	isync
+#else /* CONFIG_SMP */
+	tlbie	r3
+	sync
+#endif /* CONFIG_SMP */
+	blr
+
+/*
+ * Flush the entire TLB. 603/603e only
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_SMP)
+	CURRENT_THREAD_INFO(r8, r1)
+	lwz	r8,TI_CPU(r8)
+	oris	r8,r8,10
+	mfmsr	r10
+	SYNC
+	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r0,0,28,26		/* clear DR */
+	mtmsr	r0
+	SYNC_601
+	isync
+	lis	r9,mmu_hash_lock@h
+	ori	r9,r9,mmu_hash_lock@l
+	tophys(r9,r9)
+10:	lwarx	r7,0,r9
+	cmpwi	0,r7,0
+	bne-	10b
+	stwcx.	r8,0,r9
+	bne-	10b
+	sync
+	tlbia
+	sync
+	TLBSYNC
+	li	r0,0
+	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	mtmsr	r10
+	SYNC_601
+	isync
+#else /* CONFIG_SMP */
+	sync
+	tlbia
+	sync
+#endif /* CONFIG_SMP */
+	blr
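
The hash_page_patch_A/B sites patched above implement the classic 32-bit PowerPC hashed-page-table lookup. The C sketch below is illustrative only, not kernel code: the constants mirror Hash_bits and PTEG_SIZE from the diff, and the formula follows the architected primary hash (low-order VSID bits XORed with the page index), with the secondary hash being its one's complement, which is what patch_B's xoris/xori sequence computes in address form.

	#include <stdint.h>

	#define HPTE_SIZE  8			/* one hash-table PTE, as renamed in the diff */
	#define PTEG_SIZE  64			/* 8 HPTEs per PTE group */
	#define HASH_BITS  12			/* e.g. a 256kB hash table, as above */
	#define HASH_MASK  ((1u << HASH_BITS) - 1)

	/* Byte offset of the PTE group for (vsid, ea) within the hash table.
	 * Pass secondary != 0 for the fallback group searched by patch_B.
	 * Illustrative model of the architected hash, not the kernel's code. */
	static uint32_t pteg_offset(uint32_t vsid, uint32_t ea, int secondary)
	{
		uint32_t hash = (vsid & 0x7ffff) ^ ((ea >> 12) & 0xffff);
		if (secondary)
			hash = ~hash;
		return (hash & HASH_MASK) * PTEG_SIZE;
	}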
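
The CONFIG_PTE_64BIT hunks rest on one layout fact: with 36-bit physical addressing the Linux PTE is 8 bytes, and on a big-endian 32-bit core the flags sit in the low-order word, PTE_FLAGS_OFFSET bytes into the entry. The lwarx/stwcx. reservation is taken on that word alone; the upper word is loaded separately, and on SMP the subf/subi/lwzx sequence makes that load data-dependent on the lwarx result so the hardware cannot reorder it ahead of the locked read. A sketch of the assumed layout (the struct and field names are illustrative, not the kernel's definitions):

	#include <stddef.h>
	#include <stdint.h>

	/* Big-endian 64-bit Linux PTE as the assembly treats it: the high
	 * word carries the extended physical page number, the low word the
	 * _PAGE_* flag bits that the lwarx/stwcx. loop updates atomically.
	 * Illustrative struct; names are not the kernel's. */
	struct pte64 {
		uint32_t rpn_hi;	/* upper physical-address (XPN) bits */
		uint32_t flags;		/* _PAGE_PRESENT, _PAGE_HASHPTE, ... */
	};

	/* Matches a PTE_FLAGS_OFFSET of 4 for 64-bit PTEs; the offset is 0
	 * when the PTE is a single 32-bit word, so the #if guards above
	 * compile away the addi in that configuration. */
	#define MY_PTE_FLAGS_OFFSET offsetof(struct pte64, flags)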
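
The new eviction code in the full-PTEG case keeps the old round-robin cursor but skips victims that map kernel text, for the deadlock reason spelled out in the added comment. Modeled in C under stated assumptions (pick_victim and etext_phys are hypothetical names; etext_phys stands for the physical etext the assembly computes with tophys):

	#include <stdint.h>

	#define HPTE_SIZE 8

	static unsigned int next_slot;	/* round-robin cursor, kept in .bss above */

	/* Choose a victim among the 8 HPTEs of the primary group at 'pteg',
	 * skipping entries whose physical page lies below etext (kernel
	 * text). Like the assembly, this spins forever if every slot maps
	 * kernel text, which pre-faulting is meant to make impossible. */
	static uint32_t *pick_victim(uint32_t *pteg, uint32_t etext_phys)
	{
		for (;;) {
			next_slot = (next_slot + HPTE_SIZE) & (7 * HPTE_SIZE);
			uint32_t *slot = pteg + next_slot / sizeof(uint32_t);
			uint32_t rpn = slot[1] & ~0xfffu;	/* second HPTE word, page-aligned */
			if (rpn >= etext_phys)
				return slot;
		}
	}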
