Diffstat (limited to 'arch/powerpc/kvm/book3s_32_mmu_host.c')
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c  129
1 file changed, 83 insertions(+), 46 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 0b51ef872c1..678e7537049 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -19,7 +19,6 @@
  */
 
 #include <linux/kvm_host.h>
-#include <linux/hash.h>
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -77,7 +76,14 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return hash_64(gvsid, SID_MAP_BITS);
+	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
+		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
 }
 
@@ -86,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 	struct kvmppc_sid_map *map;
 	u16 sid_map_mask;
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -132,10 +138,11 @@ static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
 
 extern char etext[];
 
-int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
+int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
+			bool iswrite)
 {
 	pfn_t hpaddr;
-	u64 va;
+	u64 vpn;
 	u64 vsid;
 	struct kvmppc_sid_map *map;
 	volatile u32 *pteg;
@@ -145,13 +152,17 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	bool primary = false;
 	bool evict = false;
 	struct hpte_cache *pte;
+	int r = 0;
+	bool writable;
 
 	/* Get host physical address for gpa */
-	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
-	if (kvm_is_error_hva(hpaddr)) {
+	hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT,
+				   iswrite, &writable);
+	if (is_error_noslot_pfn(hpaddr)) {
 		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
 			orig_pte->eaddr);
-		return -EINVAL;
+		r = -EINVAL;
+		goto out;
 	}
 	hpaddr <<= PAGE_SHIFT;
 
@@ -165,8 +176,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	BUG_ON(!map);
 
 	vsid = map->host_vsid;
-	va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);
-
+	vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
+		((eaddr & ~ESID_MASK) >> VPN_SHIFT);
 next_pteg:
 	if (rr == 16) {
 		primary = !primary;
@@ -196,13 +207,16 @@ next_pteg:
 		(primary ? 0 : PTE_SEC);
 	pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;
 
-	if (orig_pte->may_write) {
+	if (orig_pte->may_write && writable) {
 		pteg1 |= PP_RWRW;
 		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
 	} else {
 		pteg1 |= PP_RWRX;
 	}
 
+	if (orig_pte->may_execute)
+		kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);
+
 	local_irq_disable();
 
 	if (pteg[rr]) {
@@ -229,21 +243,33 @@ next_pteg:
 
 	/* Now tell our Shadow PTE code about the new page */
 	pte = kvmppc_mmu_hpte_cache_next(vcpu);
+	if (!pte) {
+		kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+		r = -EAGAIN;
+		goto out;
+	}
 
 	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
 		    orig_pte->may_write ? 'w' : '-',
 		    orig_pte->may_execute ? 'x' : '-',
-		    orig_pte->eaddr, (ulong)pteg, va,
+		    orig_pte->eaddr, (ulong)pteg, vpn,
 		    orig_pte->vpage, hpaddr);
 
 	pte->slot = (ulong)&pteg[rr];
-	pte->host_va = va;
+	pte->host_vpn = vpn;
 	pte->pte = *orig_pte;
 	pte->pfn = hpaddr >> PAGE_SHIFT;
 
 	kvmppc_mmu_hpte_cache_map(vcpu, pte);
 
-	return 0;
+	kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
+out:
+	return r;
+}
+
+void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
+{
+	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
 }
 
 static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -253,7 +279,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	u16 sid_map_mask;
 	static int backwards_map = 0;
 
-	if (vcpu->arch.msr & MSR_PR)
+	if (kvmppc_get_msr(vcpu) & MSR_PR)
 		gvsid |= VSID_PR;
 
 	/* We might get collisions that trap in preceding order, so let's
@@ -269,18 +295,15 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
 	backwards_map = !backwards_map;
 
 	/* Uh-oh ... out of mappings. Let's flush! */
-	if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
-		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+	if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
+		vcpu_book3s->vsid_next = 0;
 		memset(vcpu_book3s->sid_map, 0,
 		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
 		kvmppc_mmu_pte_flush(vcpu, 0, 0);
 		kvmppc_mmu_flush_segments(vcpu);
 	}
-	map->host_vsid = vcpu_book3s->vsid_next;
-
-	/* Would have to be 111 to be completely aligned with the rest of
-	   Linux, but that is just way too little space! */
-	vcpu_book3s->vsid_next+=1;
+	map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
+	vcpu_book3s->vsid_next++;
 
 	map->guest_vsid = gvsid;
 	map->valid = true;
@@ -294,12 +317,14 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 	u64 gvsid;
 	u32 sr;
 	struct kvmppc_sid_map *map;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	int r = 0;
 
 	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
 		/* Invalidate an entry */
 		svcpu->sr[esid] = SR_INVALID;
-		return -ENOENT;
+		r = -ENOENT;
+		goto out;
 	}
 
 	map = find_sid_vsid(vcpu, gvsid);
@@ -312,55 +337,57 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
 
 	dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
 
-	return 0;
+out:
+	svcpu_put(svcpu);
+	return r;
 }
 
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
 	int i;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 
 	dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
 	for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
 		svcpu->sr[i] = SR_INVALID;
+
+	svcpu_put(svcpu);
 }
 
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
 {
+	int i;
+
 	kvmppc_mmu_hpte_destroy(vcpu);
 	preempt_disable();
-	__destroy_context(to_book3s(vcpu)->context_id);
+	for (i = 0; i < SID_CONTEXTS; i++)
+		__destroy_context(to_book3s(vcpu)->context_id[i]);
 	preempt_enable();
 }
 
 /* From mm/mmu_context_hash32.c */
-#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)
+#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
 
 int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int err;
 	ulong sdr1;
+	int i;
+	int j;
 
-	err = __init_new_context();
-	if (err < 0)
-		return -1;
-	vcpu3s->context_id = err;
-
-	vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
-	vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);
-
-#if 0 /* XXX still doesn't guarantee uniqueness */
-	/* We could collide with the Linux vsid space because the vsid
-	 * wraps around at 24 bits. We're safe if we do our own space
-	 * though, so let's always set the highest bit. */
+	for (i = 0; i < SID_CONTEXTS; i++) {
+		err = __init_new_context();
+		if (err < 0)
+			goto init_fail;
+		vcpu3s->context_id[i] = err;
 
-	vcpu3s->vsid_max |= 0x00800000;
-	vcpu3s->vsid_first |= 0x00800000;
-#endif
-	BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);
+		/* Remember context id for this combination */
+		for (j = 0; j < 16; j++)
+			vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
+	}
 
-	vcpu3s->vsid_next = vcpu3s->vsid_first;
+	vcpu3s->vsid_next = 0;
 
 	/* Remember where the HTAB is */
 	asm ( "mfsdr1 %0" : "=r"(sdr1) );
@@ -370,4 +397,14 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	kvmppc_mmu_hpte_init(vcpu);
 
 	return 0;
+
+init_fail:
+	for (j = 0; j < i; j++) {
+		if (!vcpu3s->context_id[j])
+			continue;
+
+		__destroy_context(to_book3s(vcpu)->context_id[j]);
+	}
+
+	return -1;
 }
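Note on the hash change above: kvmppc_sid_hash() drops the generic hash_64() (and with it the linux/hash.h include) in favor of a plain XOR fold. The 64-bit guest VSID is cut into SID_MAP_BITS-wide slices that are XORed together, so every bit of gvsid influences the bucket index without the 64-bit multiply hash_64() relies on, which is costly on a 32-bit host. The standalone sketch below reproduces the fold as a loop; the SID_MAP_BITS/SID_MAP_MASK values are assumptions mirroring the kernel headers, not part of this diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed values, mirroring the SID_MAP_* definitions in
 * arch/powerpc/include/asm/kvm_book3s.h (not shown in this diff). */
#define SID_MAP_BITS	9
#define SID_MAP_NUM	(1 << SID_MAP_BITS)
#define SID_MAP_MASK	(SID_MAP_NUM - 1)

/* XOR-fold a 64-bit guest VSID down to a SID_MAP_BITS-wide bucket index;
 * this loop computes the same value as the unrolled expression in the
 * patched kvmppc_sid_hash(). */
static uint16_t sid_hash(uint64_t gvsid)
{
	uint16_t hash = 0;
	int i;

	for (i = 0; i < 8; i++)
		hash ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
	return hash;
}

int main(void)
{
	/* Flipping bit 63 changes slice 7, so the bucket changes too:
	 * high bits are not silently truncated away. */
	printf("0x%03x\n", sid_hash(0x0000000012345678ULL));
	printf("0x%03x\n", sid_hash(0x8000000012345678ULL));
	return 0;
}

With SID_MAP_BITS = 9, slice 7 starts at bit 63 (9 * 7 = 63), so eight slices are enough to cover the whole 64-bit value; that is why the unrolled kernel version stops at SID_MAP_BITS * 7.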

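The VSID allocation rework deserves a similar unpacking: the old vsid_first/vsid_max window is replaced by a precomputed pool. kvmppc_mmu_init() allocates SID_CONTEXTS host MMU contexts, and each context contributes 16 VSIDs through the two-argument CTX_TO_VSID(c, id); create_sid_map() then hands them out round-robin via vsid_next and flushes shadow PTEs and segments when the pool wraps. A minimal sketch of the pool fill, assuming SID_CONTEXTS = 15 (consistent with VSID_POOL_SIZE bounding vsid_next in the diff) and a fake context id standing in for __init_new_context():

#include <stdint.h>
#include <stdio.h>

/* Assumed value: the diff only shows that SID_CONTEXTS contexts are
 * allocated and that VSID_POOL_SIZE bounds vsid_next. */
#define SID_CONTEXTS	15
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)

/* The extended macro from the diff: context c, sub-id 0..15, truncated
 * to the 24-bit VSID space. */
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

int main(void)
{
	uint32_t vsid_pool[VSID_POOL_SIZE];
	int i, j;

	/* Fill the pool the way kvmppc_mmu_init() now does; ctx is a
	 * stand-in for the id returned by __init_new_context(). */
	for (i = 0; i < SID_CONTEXTS; i++) {
		int ctx = 100 + i;

		for (j = 0; j < 16; j++)
			vsid_pool[(i * 16) + j] = CTX_TO_VSID(ctx, j);
	}

	/* create_sid_map() consumes these through vsid_next, wrapping and
	 * flushing shadow state once the pool is exhausted. */
	for (i = 0; i < 4; i++)
		printf("vsid_pool[%d] = 0x%06x\n", i, vsid_pool[i]);

	return 0;
}

The 897 * 16 stride per context is the one borrowed from mm/mmu_context_hash32.c (per the comment kept in the diff); the added id * 0x111 term fans each context out into 16 distinct VSIDs within the 24-bit space.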