Diffstat (limited to 'arch/powerpc')
27 files changed, 540 insertions, 462 deletions
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 2a9cd74a841..076327f2eff 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -8,9 +8,9 @@
  * On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
  * and therefore we can only deal with memory within this range
  */
-#define KEXEC_SOURCE_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
-#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
-#define KEXEC_CONTROL_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL)
+#define KEXEC_SOURCE_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_DESTINATION_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
+#define KEXEC_CONTROL_MEMORY_LIMIT (2 * 1024 * 1024 * 1024UL - 1)
 #else
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 6f74d93725a..8274a2d4392 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -115,7 +115,15 @@ extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data);
+
+extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
+extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
+extern int kvmppc_mmu_hpte_sysinit(void);
+extern void kvmppc_mmu_hpte_sysexit(void);
+
 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
 extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h
index 94f05de9ad0..c3d4f0518a6 100644
--- a/arch/powerpc/include/asm/kvm_fpu.h
+++ b/arch/powerpc/include/asm/kvm_fpu.h
@@ -22,24 +22,24 @@
 
 #include <linux/types.h>
 
-extern void fps_fres(struct thread_struct *t, u32 *dst, u32 *src1);
-extern void fps_frsqrte(struct thread_struct *t, u32 *dst, u32 *src1);
-extern void fps_fsqrts(struct thread_struct *t, u32 *dst, u32 *src1);
+extern void fps_fres(u64 *fpscr, u32 *dst, u32 *src1);
+extern void fps_frsqrte(u64 *fpscr, u32 *dst, u32 *src1);
+extern void fps_fsqrts(u64 *fpscr, u32 *dst, u32 *src1);
 
-extern void fps_fadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
-extern void fps_fdivs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
-extern void fps_fmuls(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
-extern void fps_fsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fdivs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fmuls(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
+extern void fps_fsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2);
 
-extern void fps_fmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 		       u32 *src3);
-extern void fps_fmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 		       u32 *src3);
-extern void fps_fnmadds(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fnmadds(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 			u32 *src3);
-extern void fps_fnmsubs(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fnmsubs(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 			u32 *src3);
-extern void fps_fsel(struct thread_struct *t, u32 *dst, u32 *src1, u32 *src2,
+extern void fps_fsel(u64 *fpscr, u32 *dst, u32 *src1, u32 *src2,
 		     u32 *src3);
 
 #define FPD_ONE_IN(name) extern void fpd_ ## name(u64 *fpscr, u32 *cr, \
@@ -82,4 +82,7 @@ FPD_THREE_IN(fmadd)
 FPD_THREE_IN(fnmsub)
 FPD_THREE_IN(fnmadd)
 
+extern void kvm_cvt_fd(u32 *from, u64 *to, u64 *fpscr);
+extern void kvm_cvt_df(u64 *from, u32 *to, u64 *fpscr);
+
 #endif
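The kexec.h hunk above turns the 2 GiB constants into the last addressable byte of the FSL-BookE 1:1 mapping. Assuming the limits are used as inclusive upper bounds, which the macro names and the `- 1` suggest but the comparison sites are not part of this diff, a standalone sketch of the off-by-one:

```c
#include <stdio.h>

/* Hypothetical demo: with an inclusive bound, the old value admits one
 * byte (0x80000000) that the first-2GiB 1:1 mapping does not cover. */
int main(void)
{
	unsigned long old = 2 * 1024 * 1024 * 1024UL;      /* 0x80000000 */
	unsigned long new = 2 * 1024 * 1024 * 1024UL - 1;  /* 0x7fffffff */
	unsigned long first_unmapped = 0x80000000UL;       /* byte just past the map */

	printf("old limit admits 0x%lx: %s\n", first_unmapped,
	       first_unmapped <= old ? "yes (wrong)" : "no");
	printf("new limit admits 0x%lx: %s\n", first_unmapped,
	       first_unmapped <= new ? "yes (wrong)" : "no");
	return 0;
}
```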
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0c9ad869dec..b0b23c007d6 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -35,10 +35,17 @@
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
 /* We don't currently support large pages. */
+#define KVM_HPAGE_GFN_SHIFT(x)	0
 #define KVM_NR_PAGE_SIZES	1
 #define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
 
-#define HPTEG_CACHE_NUM 1024
+#define HPTEG_CACHE_NUM			(1 << 15)
+#define HPTEG_HASH_BITS_PTE		13
+#define HPTEG_HASH_BITS_VPTE		13
+#define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
+#define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
+#define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
 
 struct kvm;
 struct kvm_run;
@@ -151,6 +158,9 @@ struct kvmppc_mmu {
 };
 
 struct hpte_cache {
+	struct hlist_node list_pte;
+	struct hlist_node list_vpte;
+	struct hlist_node list_vpte_long;
 	u64 host_va;
 	u64 pfn;
 	ulong slot;
@@ -282,8 +292,10 @@ struct kvm_vcpu_arch {
 	unsigned long pending_exceptions;
 
 #ifdef CONFIG_PPC_BOOK3S
-	struct hpte_cache hpte_cache[HPTEG_CACHE_NUM];
-	int hpte_cache_offset;
+	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
+	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
+	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+	int hpte_cache_count;
 #endif
 };
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 2102b214a87..0e398cfee2c 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -250,7 +250,9 @@ extern int hash_page(unsigned long ea, unsigned long access,
 		     unsigned long trap);
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 		     pte_t *ptep, unsigned long trap, int local, int ssize,
 		     unsigned int shift, unsigned int mmu_psize);
-
+extern void hash_failure_debug(unsigned long ea, unsigned long access,
+			       unsigned long vsid, unsigned long trap,
+			       int ssize, int psize, unsigned long pte);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 			     unsigned long pstart, unsigned long prot,
 			     int psize, int ssize);
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index a6297c67c3d..6c294acac84 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -515,11 +515,8 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
  * powers of 2 writes until it reaches sufficient alignment).
  *
  * Based on this we disable the IP header alignment in network drivers.
- * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
- * cacheline alignment of buffers.
  */
#define NET_IP_ALIGN	0
-#define NET_SKB_PAD	L1_CACHE_BYTES
 
 #define cmpxchg64(ptr, o, n)						\
   ({									\
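The kvm_host.h hunk replaces the linear hpte_cache array (and its hpte_cache_offset cursor) with three hash tables keyed by effective address, virtual page, and long virtual page, so shadow-PTE flushes walk one bucket instead of every cached entry. A sketch of the intended lookup pattern, modeled on the new book3s_mmu_hpte.c that this excerpt does not reach; the helper name and bucket predicate here are assumptions:

```c
#include <linux/hash.h>
#include <linux/kvm_host.h>

/* Hash an effective address down to HPTEG_HASH_BITS_PTE bits using the
 * kernel's multiplicative hash_64() from <linux/hash.h>. */
static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PAGE_SHIFT, HPTEG_HASH_BITS_PTE);
}

/* Flush all shadow PTEs for one guest page: only the matching bucket is
 * scanned, not all HPTEG_CACHE_NUM entries. The 2.6.35-era
 * hlist_for_each_entry_safe() still takes explicit node cursors. */
static void flush_shadow_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct hlist_head *list;
	struct hlist_node *node, *tmp;
	struct hpte_cache *pte;

	guest_ea &= ~0xfffUL;	/* page-align the key */
	list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	hlist_for_each_entry_safe(pte, node, tmp, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			kvmppc_mmu_invalidate_pte(vcpu, pte);
}
```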
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index 369872f6cf7..babcceecd2e 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -566,9 +566,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	 * Finally record data if requested.
 	 */
 	if (record) {
-		struct perf_sample_data data = {
-			.period	= event->hw.last_period,
-		};
+		struct perf_sample_data data;
+
+		perf_sample_data_init(&data, 0);
 
 		if (perf_event_overflow(event, nmi, &data, regs)) {
 			/*
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 3b4dcc82a4c..ab3e392ac63 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -101,10 +101,6 @@ EXPORT_SYMBOL(pci_dram_offset);
 EXPORT_SYMBOL(start_thread);
 EXPORT_SYMBOL(kernel_thread);
 
-#ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL_GPL(cvt_df);
-EXPORT_SYMBOL_GPL(cvt_fd);
-#endif
 EXPORT_SYMBOL(giveup_fpu);
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9d3953983fb..fed9bf6187d 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -414,7 +414,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 	u64 base, size, memblock_size;
 	unsigned int is_kexec_kdump = 0, rngs;
 
-	ls = of_get_flat_dt_prop(node, "ibm,memblock-size", &l);
+	ls = of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
 	if (ls == NULL || l < dt_root_size_cells * sizeof(__be32))
 		return 0;
 	memblock_size = dt_mem_next_cell(dt_root_size_cells, &ls);
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
index 6fc6328dc62..0167d53da30 100644
--- a/arch/powerpc/kernel/suspend.c
+++ b/arch/powerpc/kernel/suspend.c
@@ -3,7 +3,7 @@
  *
  * Distribute under GPLv2
  *
- * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
+ * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 812312542e5..9b9b5cdea84 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -316,7 +316,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
 	gfn = gpaddr >> PAGE_SHIFT;
 	new_page = gfn_to_page(vcpu->kvm, gfn);
 	if (is_error_page(new_page)) {
-		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn);
+		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
+		       (unsigned long long)gfn);
 		kvm_release_page_clean(new_page);
 		return;
 	}
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index ff436066bf7..d45c818a384 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -45,6 +45,7 @@ kvm-book3s_64-objs := \
 	book3s.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
+	book3s_mmu_hpte.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
@@ -57,6 +58,7 @@ kvm-book3s_32-objs := \
 	book3s.o \
 	book3s_emulate.o \
 	book3s_interrupts.o \
+	book3s_mmu_hpte.o \
 	book3s_32_mmu_host.o \
 	book3s_32_mmu.o
 kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
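Back on the perf_event_fsl_emb.c hunk: perf_sample_data_init() exists so callers set up only the fields the overflow path actually reads, rather than zero-initializing the whole perf_sample_data on the stack as the old designated initializer forced. A sketch of the resulting call pattern; the handling of .period is not visible in the hunk and is flagged below:

```c
#include <linux/perf_event.h>

/* Sketch of the call pattern after the change (not the file's verbatim
 * code): perf_sample_data_init(&data, addr) fills the always-consumed
 * fields, so the large struct is not fully zeroed on the stack. */
static void overflow_example(struct perf_event *event, int nmi,
			     struct pt_regs *regs)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0);
	/* The old initializer also set .period = event->hw.last_period;
	 * whether an equivalent assignment follows is not shown in the
	 * hunk above. */

	if (perf_event_overflow(event, nmi, &data, regs)) {
		/* interrupts throttled: the elided context stops the
		 * counter here */
	}
}
```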
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b998abf1a63..a3cef30d1d4 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1047,8 +1047,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	vcpu_load(vcpu);
-
 	regs->pc = kvmppc_get_pc(vcpu);
 	regs->cr = kvmppc_get_cr(vcpu);
 	regs->ctr = kvmppc_get_ctr(vcpu);
@@ -1069,8 +1067,6 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -1078,8 +1074,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
 	int i;
 
-	vcpu_load(vcpu);
-
 	kvmppc_set_pc(vcpu, regs->pc);
 	kvmppc_set_cr(vcpu, regs->cr);
 	kvmppc_set_ctr(vcpu, regs->ctr);
@@ -1099,8 +1093,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
 		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -1110,8 +1102,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
-	vcpu_load(vcpu);
-
 	sregs->pvr = vcpu->arch.pvr;
 
 	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
@@ -1131,8 +1121,6 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -1142,8 +1130,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
 	int i;
 
-	vcpu_load(vcpu);
-
 	kvmppc_set_pvr(vcpu, sregs->pvr);
 
 	vcpu3s->sdr1 = sregs->u.s.sdr1;
@@ -1171,8 +1157,6 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	/* Flush the MMU after messing with the segments */
 	kvmppc_mmu_pte_flush(vcpu, 0, 0);
 
-	vcpu_put(vcpu);
-
 	return 0;
 }
 
@@ -1309,12 +1293,17 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
-	struct thread_struct ext_bkp;
+	double fpr[32][TS_FPRWIDTH];
+	unsigned int fpscr;
+	int fpexc_mode;
 #ifdef CONFIG_ALTIVEC
-	bool save_vec = current->thread.used_vr;
+	vector128 vr[32];
+	vector128 vscr;
+	unsigned long uninitialized_var(vrsave);
+	int used_vr;
 #endif
 #ifdef CONFIG_VSX
-	bool save_vsx = current->thread.used_vsr;
+	int used_vsr;
 #endif
 	ulong ext_msr;
 
@@ -1327,27 +1316,27 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	/* Save FPU state in stack */
 	if (current->thread.regs->msr & MSR_FP)
 		giveup_fpu(current);
-	memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
-	ext_bkp.fpscr = current->thread.fpscr;
-	ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
+	fpscr = current->thread.fpscr.val;
+	fpexc_mode = current->thread.fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
 	/* Save Altivec state in stack */
-	if (save_vec) {
+	used_vr = current->thread.used_vr;
+	if (used_vr) {
 		if (current->thread.regs->msr & MSR_VEC)
 			giveup_altivec(current);
-		memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
-		ext_bkp.vscr = current->thread.vscr;
-		ext_bkp.vrsave = current->thread.vrsave;
+		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
+		vscr = current->thread.vscr;
+		vrsave = current->thread.vrsave;
 	}
-	ext_bkp.used_vr = current->thread.used_vr;
 #endif
 
 #ifdef CONFIG_VSX
 	/* Save VSX state in stack */
-	if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+	used_vsr = current->thread.used_vsr;
+	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
 		__giveup_vsx(current);
-	ext_bkp.used_vsr = current->thread.used_vsr;
 #endif
 
 	/* Remember the MSR with disabled extensions */
@@ -1372,22 +1361,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	kvmppc_giveup_ext(vcpu, MSR_VSX);
 
 	/* Restore FPU state from stack */
-	memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
-	current->thread.fpscr = ext_bkp.fpscr;
-	current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
+	current->thread.fpscr.val = fpscr;
+	current->thread.fpexc_mode = fpexc_mode;
 
 #ifdef CONFIG_ALTIVEC
 	/* Restore Altivec state from stack */
-	if (save_vec && current->thread.used_vr) {
-		memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
-		current->thread.vscr = ext_bkp.vscr;
-		current->thread.vrsave= ext_bkp.vrsave;
+	if (used_vr && current->thread.used_vr) {
+		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
+		current->thread.vscr = vscr;
+		current->thread.vrsave = vrsave;
 	}
-	current->thread.used_vr = ext_bkp.used_vr;
+	current->thread.used_vr = used_vr;
 #endif
 
 #ifdef CONFIG_VSX
-	current->thread.used_vsr = ext_bkp.used_vsr;
+	current->thread.used_vsr = used_vsr;
 #endif
 
 	return ret;
@@ -1395,12 +1384,22 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 static int kvmppc_book3s_init(void)
 {
-	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
-			THIS_MODULE);
+	int r;
+
+	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
+		     THIS_MODULE);
+
+	if (r)
+		return r;
+
+	r = kvmppc_mmu_hpte_sysinit();
+
+	return r;
 }
 
 static void kvmppc_book3s_exit(void)
 {
+	kvmppc_mmu_hpte_sysexit();
 	kvm_exit();
 }
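The vcpu_load()/vcpu_put() pairs disappear from the register and sregs accessors because the generic vcpu ioctl path of this kernel generation brackets every arch callback itself. A simplified sketch of that dispatcher shape (not the verbatim virt/kvm/kvm_main.c):

```c
#include <linux/kvm_host.h>

/* Sketch: the single load/put now lives in the common dispatcher, so
 * arch handlers like kvm_arch_vcpu_ioctl_get_regs() run with the vcpu
 * already loaded and must not load it a second time. */
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	struct kvm_regs kvm_regs;	/* real code allocates this */
	long r;

	vcpu_load(vcpu);		/* pins the vcpu for the whole ioctl */
	switch (ioctl) {
	case KVM_GET_REGS:
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
		/* copy_to_user() of kvm_regs elided */
		break;
	default:
		r = -EINVAL;
	}
	vcpu_put(vcpu);
	return r;
}
```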
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 0b10503c8a4..3292d76101d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -354,10 +354,10 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 		*vsid = VSID_REAL_DR | gvsid;
 		break;
 	case MSR_DR|MSR_IR:
-		if (!sr->valid)
-			return -1;
-
-		*vsid = sr->vsid;
+		if (sr->valid)
+			*vsid = sr->vsid;
+		else
+			*vsid = VSID_BAT | gvsid;
 		break;
 	default:
 		BUG();
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 0bb66005338..0b51ef872c1 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -19,6 +19,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/hash.h>
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -57,139 +58,26 @@ static ulong htab;
 static u32 htabmask;
 
-static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
+void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
 	volatile u32 *pteg;
 
-	dprintk_mmu("KVM: Flushing SPTE: 0x%llx (0x%llx) -> 0x%llx\n",
-		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
-
+	/* Remove from host HTAB */
 	pteg = (u32*)pte->slot;
-
 	pteg[0] = 0;
+
+	/* And make sure it's gone from the TLB too */
 	asm volatile ("sync");
 	asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
 	asm volatile ("sync");
 	asm volatile ("tlbsync");
-
-	pte->host_va = 0;
-
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-}
-
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%x & 0x%x\n",
-		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_ea &= ea_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.eaddr & ea_mask) == guest_ea) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-
-	/* Doing a complete flush -> start from scratch */
-	if (!ea_mask)
-		vcpu->arch.hpte_cache_offset = 0;
-}
-
-void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_vp &= vp_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.vpage & vp_mask) == guest_vp) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-}
-
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.raddr >= pa_start) &&
-		    (pte->pte.raddr < pa_end)) {
-			invalidate_pte(vcpu, pte);
-		}
-	}
-}
-
-struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
-{
-	int i;
-	u64 guest_vp;
-
-	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
-	for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if (pte->pte.vpage == guest_vp)
-			return &pte->pte;
-	}
-
-	return NULL;
-}
-
-static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
-	return vcpu->arch.hpte_cache_offset++;
 }
 
 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+	return hash_64(gvsid, SID_MAP_BITS);
 }
@@ -256,7 +144,6 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
 	register int rr = 0;
 	bool primary = false;
 	bool evict = false;
-	int hpte_id;
 	struct hpte_cache *pte;
 
 	/* Get host physical address for gpa */
@@ -341,8 +228,7 @@ next_pteg:
 
 	/* Now tell our Shadow PTE code about the new page */
 
-	hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
-	pte = &vcpu->arch.hpte_cache[hpte_id];
+	pte = kvmppc_mmu_hpte_cache_next(vcpu);
 
 	dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
 		    orig_pte->may_write ? 'w' : '-',
@@ -355,6 +241,8 @@ next_pteg:
 	pte->pte = *orig_pte;
 	pte->pfn = hpaddr >> PAGE_SHIFT;
 
+	kvmppc_mmu_hpte_cache_map(vcpu, pte);
+
 	return 0;
 }
@@ -439,7 +327,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 
 void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 {
-	kvmppc_mmu_pte_flush(vcpu, 0, 0);
+	kvmppc_mmu_hpte_destroy(vcpu);
 	preempt_disable();
 	__destroy_context(to_book3s(vcpu)->context_id);
 	preempt_enable();
@@ -479,5 +367,7 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
 	htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
 	htab = (ulong)__va(sdr1 & 0xffff0000);
 
+	kvmppc_mmu_hpte_init(vcpu);
+
 	return 0;
 }
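Both host-MMU files swap the hand-rolled shift-and-XOR fold in kvmppc_sid_hash() for hash_64(). A standalone demo of what hash_64() computes in this kernel era (multiplicative hashing with a 64-bit golden-ratio prime), with SID_MAP_BITS = 9 inferred from the "512 gvsid->hvsid entries" comment (an assumption as far as this diff shows):

```c
#include <stdint.h>
#include <stdio.h>

#define SID_MAP_BITS 9	/* 512-entry map, per the comment in the diff */

/* The 2.6.35-era hash_64() from <linux/hash.h>, reproduced so this
 * sketch builds in userspace. */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

static uint16_t sid_hash(uint64_t gvsid)
{
	return (uint16_t)((gvsid * GOLDEN_RATIO_PRIME_64)
			  >> (64 - SID_MAP_BITS));
}

int main(void)
{
	uint64_t v;

	/* Consecutive guest VSIDs scatter across the map instead of
	 * clustering the way the old shift-and-XOR fold could. */
	for (v = 0; v < 8; v++)
		printf("gvsid %llu -> slot %u\n",
		       (unsigned long long)v, sid_hash(v));
	return 0;
}
```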
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e4b5744977f..384179a5002 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -20,6 +20,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <linux/hash.h>
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
@@ -46,135 +47,20 @@
 #define dprintk_slb(a, ...) do { } while(0)
 #endif
 
-static void invalidate_pte(struct hpte_cache *pte)
+void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
 {
-	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
-		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);
-
 	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
 			       MMU_PAGE_4K, MMU_SEGSIZE_256M, false);
-	pte->host_va = 0;
-
-	if (pte->pte.may_write)
-		kvm_release_pfn_dirty(pte->pfn);
-	else
-		kvm_release_pfn_clean(pte->pfn);
-}
-
-void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
-		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_ea &= ea_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.eaddr & ea_mask) == guest_ea) {
-			invalidate_pte(pte);
-		}
-	}
-
-	/* Doing a complete flush -> start from scratch */
-	if (!ea_mask)
-		vcpu->arch.hpte_cache_offset = 0;
-}
-
-void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
-		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	guest_vp &= vp_mask;
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.vpage & vp_mask) == guest_vp) {
-			invalidate_pte(pte);
-		}
-	}
-}
-
-void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
-{
-	int i;
-
-	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx & 0x%lx\n",
-		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
-	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);
-
-	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if ((pte->pte.raddr >= pa_start) &&
-		    (pte->pte.raddr < pa_end)) {
-			invalidate_pte(pte);
-		}
-	}
-}
-
-struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
-{
-	int i;
-	u64 guest_vp;
-
-	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
-	for (i=0; i<vcpu->arch.hpte_cache_offset; i++) {
-		struct hpte_cache *pte;
-
-		pte = &vcpu->arch.hpte_cache[i];
-		if (!pte->host_va)
-			continue;
-
-		if (pte->pte.vpage == guest_vp)
-			return &pte->pte;
-	}
-
-	return NULL;
-}
-
-static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-
-	return vcpu->arch.hpte_cache_offset++;
 }
 
 /* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
  * a hash, so we don't waste cycles on looping */
 static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 {
-	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
-		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
+	return hash_64(gvsid, SID_MAP_BITS);
 }
-
 static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
 {
 	struct kvmppc_sid_map *map;
@@ -273,8 +159,7 @@ map_again:
 		attempt++;
 		goto map_again;
 	} else {
-		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
-		struct hpte_cache *pte = &
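The diff is truncated here, before reaching the new book3s_mmu_hpte.c that the Makefile hunk adds. For orientation, a plausible shape for the kvmppc_mmu_hpte_init() it declares, implied by the hlist_head arrays added to kvm_vcpu_arch above (a sketch, not that file's actual body):

```c
#include <linux/kvm_host.h>
#include <linux/string.h>

/* Sketch: prepare the three per-vcpu hash tables declared in
 * kvm_host.h. An empty hlist_head is just a NULL 'first' pointer, so
 * zeroing the arrays is equivalent to INIT_HLIST_HEAD on each bucket. */
int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	memset(vcpu->arch.hpte_hash_pte, 0,
	       sizeof(vcpu->arch.hpte_hash_pte));
	memset(vcpu->arch.hpte_hash_vpte, 0,
	       sizeof(vcpu->arch.hpte_hash_vpte));
	memset(vcpu->arch.hpte_hash_vpte_long, 0,
	       sizeof(vcpu->arch.hpte_hash_vpte_long));

	vcpu->arch.hpte_cache_count = 0;	/* replaces hpte_cache_offset */
	return 0;
}
```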