author     Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 14:35:31 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2012-03-28 14:35:31 -0700
commit     2e7580b0e75d771d93e24e681031a165b1d31071 (patch)
tree       d9449702609eeaab28913a43b5a4434667e09d43 /arch/powerpc/kvm
parent     d25413efa9536e2f425ea45c7720598035c597bc (diff)
parent     cf9eeac46350b8b43730b7dc5e999757bed089a4 (diff)
Merge branch 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Avi Kivity:
 "Changes include timekeeping improvements, support for assigning host
  PCI devices that share interrupt lines, s390 user-controlled guests, a
  large ppc update, and random fixes."

This is with the sign-offs fixed; hopefully next merge window we won't
have rebased commits.

* 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (130 commits)
  KVM: Convert intx_mask_lock to spin lock
  KVM: x86: fix kvm_write_tsc() TSC matching thinko
  x86: kvmclock: abstract save/restore sched_clock_state
  KVM: nVMX: Fix erroneous exception bitmap check
  KVM: Ignore the writes to MSR_K7_HWCR(3)
  KVM: MMU: make use of ->root_level in reset_rsvds_bits_mask
  KVM: PMU: add proper support for fixed counter 2
  KVM: PMU: Fix raw event check
  KVM: PMU: warn when pin control is set in eventsel msr
  KVM: VMX: Fix delayed load of shared MSRs
  KVM: use correct tlbs dirty type in cmpxchg
  KVM: Allow host IRQ sharing for assigned PCI 2.3 devices
  KVM: Ensure all vcpus are consistent with in-kernel irqchip settings
  KVM: x86 emulator: Allow PM/VM86 switch during task switch
  KVM: SVM: Fix CPL updates
  KVM: x86 emulator: VM86 segments must have DPL 3
  KVM: x86 emulator: Fix task switch privilege checks
  arch/powerpc/kvm/book3s_hv.c: included linux/sched.h twice
  KVM: x86 emulator: correctly mask pmc index bits in RDPMC instruction emulation
  KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
  ...
Diffstat (limited to 'arch/powerpc/kvm')
 -rw-r--r--  arch/powerpc/kvm/Kconfig                 |    1
 -rw-r--r--  arch/powerpc/kvm/book3s.c                |   57
 -rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c    |   21
 -rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c    |   66
 -rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c      |  919
 -rw-r--r--  arch/powerpc/kvm/book3s_emulate.c        |    8
 -rw-r--r--  arch/powerpc/kvm/book3s_hv.c             |  465
 -rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c     |  209
 -rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c      |  835
 -rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  |  176
 -rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c |    9
 -rw-r--r--  arch/powerpc/kvm/book3s_pr.c             |  178
 -rw-r--r--  arch/powerpc/kvm/booke.c                 |  150
 -rw-r--r--  arch/powerpc/kvm/booke.h                 |    4
 -rw-r--r--  arch/powerpc/kvm/booke_emulate.c         |   23
 -rw-r--r--  arch/powerpc/kvm/booke_interrupts.S      |   18
 -rw-r--r--  arch/powerpc/kvm/e500.c                  |   32
 -rw-r--r--  arch/powerpc/kvm/e500_emulate.c          |   38
 -rw-r--r--  arch/powerpc/kvm/e500_tlb.c              |  775
 -rw-r--r--  arch/powerpc/kvm/e500_tlb.h              |   80
 -rw-r--r--  arch/powerpc/kvm/emulate.c               |   61
 -rw-r--r--  arch/powerpc/kvm/powerpc.c               |  148
 -rw-r--r--  arch/powerpc/kvm/trace.h                 |   62
23 files changed, 3315 insertions(+), 1020 deletions(-)
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 78133deb4b6..8f64709ae33 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -69,6 +69,7 @@ config KVM_BOOK3S_64
config KVM_BOOK3S_64_HV
bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
depends on KVM_BOOK3S_64
+ select MMU_NOTIFIER
---help---
Support running unmodified book3s_64 guest kernels in
virtual machines on POWER7 and PPC970 processors that have
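
The new 'select MMU_NOTIFIER' is what lets HV KVM learn about host-side
unmappings of guest memory. As a rough sketch (not taken from this diff;
the hook name and shape follow the generic KVM MMU-notifier arch
interface of this era), the generic notifier glue ends up calling an
arch hook along these lines:

	/*
	 * Sketch only: kvm_unmap_hva() is the arch-side hook the generic
	 * MMU-notifier code invokes when the host unmaps a host virtual
	 * address backing guest memory; the body here is a placeholder.
	 */
	int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
	{
		/* look up the gfn(s) behind hva and invalidate the matching HPTEs */
		return 0;
	}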
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index e41ac6f7dcf..7d54f4ed6d9 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
return true;
}
-void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
+void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -423,10 +423,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg1 = vcpu->arch.shared->sprg1;
regs->sprg2 = vcpu->arch.shared->sprg2;
regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg4 = vcpu->arch.sprg4;
- regs->sprg5 = vcpu->arch.sprg5;
- regs->sprg6 = vcpu->arch.sprg6;
- regs->sprg7 = vcpu->arch.sprg7;
+ regs->sprg4 = vcpu->arch.shared->sprg4;
+ regs->sprg5 = vcpu->arch.shared->sprg5;
+ regs->sprg6 = vcpu->arch.shared->sprg6;
+ regs->sprg7 = vcpu->arch.shared->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -450,10 +450,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.shared->sprg1 = regs->sprg1;
vcpu->arch.shared->sprg2 = regs->sprg2;
vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.sprg4 = regs->sprg4;
- vcpu->arch.sprg5 = regs->sprg5;
- vcpu->arch.sprg6 = regs->sprg6;
- vcpu->arch.sprg7 = regs->sprg7;
+ vcpu->arch.shared->sprg4 = regs->sprg4;
+ vcpu->arch.shared->sprg5 = regs->sprg5;
+ vcpu->arch.shared->sprg6 = regs->sprg6;
+ vcpu->arch.shared->sprg7 = regs->sprg7;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -477,41 +477,10 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return 0;
}
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+void kvmppc_decrementer_func(unsigned long data)
{
- struct kvm_memory_slot *memslot;
- struct kvm_vcpu *vcpu;
- ulong ga, ga_end;
- int is_dirty = 0;
- int r;
- unsigned long n;
-
- mutex_lock(&kvm->slots_lock);
-
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
- if (r)
- goto out;
-
- /* If nothing is dirty, don't bother messing with page tables. */
- if (is_dirty) {
- memslot = id_to_memslot(kvm->memslots, log->slot);
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
- ga = memslot->base_gfn << PAGE_SHIFT;
- ga_end = ga + (memslot->npages << PAGE_SHIFT);
-
- kvm_for_each_vcpu(n, vcpu, kvm)
- kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
-
- n = kvm_dirty_bitmap_bytes(memslot);
- memset(memslot->dirty_bitmap, 0, n);
- }
-
- r = 0;
-out:
- mutex_unlock(&kvm->slots_lock);
- return r;
+ kvmppc_core_queue_dec(vcpu);
+ kvm_vcpu_kick(vcpu);
}
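
kvmppc_decrementer_func() now just queues the decrementer exception and
kicks the vcpu; its (unsigned long data) signature is the classic
tasklet/timer callback shape. A minimal sketch of how such a callback is
typically attached (illustrative only; the tasklet field name is an
assumption, not shown in this diff):

	#include <linux/interrupt.h>

	static void example_init_dec_callback(struct kvm_vcpu *vcpu)
	{
		/* pass the vcpu through the opaque 'data' word */
		tasklet_init(&vcpu->arch.tasklet,	/* field name assumed */
			     kvmppc_decrementer_func,
			     (unsigned long)vcpu);
	}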
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 9fecbfbce77..f922c29bb23 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -151,13 +151,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
bool primary = false;
bool evict = false;
struct hpte_cache *pte;
+ int r = 0;
/* Get host physical address for gpa */
hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
if (is_error_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
orig_pte->eaddr);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
hpaddr <<= PAGE_SHIFT;
@@ -249,7 +251,8 @@ next_pteg:
kvmppc_mmu_hpte_cache_map(vcpu, pte);
- return 0;
+out:
+ return r;
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -297,12 +300,14 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
u64 gvsid;
u32 sr;
struct kvmppc_sid_map *map;
- struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ int r = 0;
if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
/* Invalidate an entry */
svcpu->sr[esid] = SR_INVALID;
- return -ENOENT;
+ r = -ENOENT;
+ goto out;
}
map = find_sid_vsid(vcpu, gvsid);
@@ -315,17 +320,21 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
- return 0;
+out:
+ svcpu_put(svcpu);
+ return r;
}
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
int i;
- struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
svcpu->sr[i] = SR_INVALID;
+
+ svcpu_put(svcpu);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
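
The recurring change in this file is mechanical: every to_svcpu() user
becomes an svcpu_get()/svcpu_put() pair with a single exit label, so the
shadow-vcpu reference is dropped on every path. A minimal sketch of the
pattern (the check is hypothetical; the two helpers are the ones
introduced by this series):

	static int example_svcpu_user(struct kvm_vcpu *vcpu)
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		int r = 0;

		if (svcpu->sr[0] == SR_INVALID) {	/* hypothetical check */
			r = -ENOENT;
			goto out;			/* never return before the put */
		}
		/* ... use svcpu ... */
	out:
		svcpu_put(svcpu);			/* always balance the get */
		return r;
	}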
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index fa2f08434ba..6f87f39a1ac 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -88,12 +88,14 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
int vflags = 0;
int attempt = 0;
struct kvmppc_sid_map *map;
+ int r = 0;
/* Get host physical address for gpa */
hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
if (is_error_pfn(hpaddr)) {
printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
hpaddr <<= PAGE_SHIFT;
hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
@@ -110,7 +112,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
vsid, orig_pte->eaddr);
WARN_ON(true);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
vsid = map->host_vsid;
@@ -131,8 +134,10 @@ map_again:
/* In case we tried normal mapping already, let's nuke old entries */
if (attempt > 1)
- if (ppc_md.hpte_remove(hpteg) < 0)
- return -1;
+ if (ppc_md.hpte_remove(hpteg) < 0) {
+ r = -1;
+ goto out;
+ }
ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
@@ -162,7 +167,8 @@ map_again:
kvmppc_mmu_hpte_cache_map(vcpu, pte);
}
- return 0;
+out:
+ return r;
}
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -207,25 +213,30 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
int i;
int max_slb_size = 64;
int found_inval = -1;
int r;
- if (!to_svcpu(vcpu)->slb_max)
- to_svcpu(vcpu)->slb_max = 1;
+ if (!svcpu->slb_max)
+ svcpu->slb_max = 1;
/* Are we overwriting? */
- for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
- if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
+ for (i = 1; i < svcpu->slb_max; i++) {
+ if (!(svcpu->slb[i].esid & SLB_ESID_V))
found_inval = i;
- else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
- return i;
+ else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
+ r = i;
+ goto out;
+ }
}
/* Found a spare entry that was invalidated before */
- if (found_inval > 0)
- return found_inval;
+ if (found_inval > 0) {
+ r = found_inval;
+ goto out;
+ }
/* No spare invalid entry, so create one */
@@ -233,30 +244,35 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
max_slb_size = mmu_slb_size;
/* Overflowing -> purge */
- if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
+ if ((svcpu->slb_max) == max_slb_size)
kvmppc_mmu_flush_segments(vcpu);
- r = to_svcpu(vcpu)->slb_max;
- to_svcpu(vcpu)->slb_max++;
+ r = svcpu->slb_max;
+ svcpu->slb_max++;
+out:
+ svcpu_put(svcpu);
return r;
}
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
u64 esid = eaddr >> SID_SHIFT;
u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
u64 slb_vsid = SLB_VSID_USER;
u64 gvsid;
int slb_index;
struct kvmppc_sid_map *map;
+ int r = 0;
slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
/* Invalidate an entry */
- to_svcpu(vcpu)->slb[slb_index].esid = 0;
- return -ENOENT;
+ svcpu->slb[slb_index].esid = 0;
+ r = -ENOENT;
+ goto out;
}
map = find_sid_vsid(vcpu, gvsid);
@@ -269,18 +285,22 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
slb_vsid &= ~SLB_VSID_KP;
slb_esid |= slb_index;
- to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
- to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;
+ svcpu->slb[slb_index].esid = slb_esid;
+ svcpu->slb[slb_index].vsid = slb_vsid;
trace_kvm_book3s_slbmte(slb_vsid, slb_esid);
- return 0;
+out:
+ svcpu_put(svcpu);
+ return r;
}
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
- to_svcpu(vcpu)->slb_max = 1;
- to_svcpu(vcpu)->slb[0].esid = 0;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ svcpu->slb_max = 1;
+ svcpu->slb[0].esid = 0;
+ svcpu_put(svcpu);
}
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
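
The same get/put conversion is applied in this file; the other behaviour
worth spelling out is the slot choice in kvmppc_mmu_next_segment():
reuse a slot that already maps the ESID, otherwise recycle an
invalidated slot, otherwise append, purging all segments first when the
shadow SLB is full. A standalone sketch of that policy (structure and
field names simplified; slot 0 is left untouched as in the code above):

	static int pick_slb_slot(struct shadow_slb *s, int size, unsigned long esid)
	{
		int i, found_inval = -1;

		for (i = 1; i < s->slb_max; i++) {
			if (!(s->slb[i].esid & SLB_ESID_V))
				found_inval = i;		/* remember a reusable slot */
			else if ((s->slb[i].esid & ESID_MASK) == esid)
				return i;			/* ESID already present */
		}
		if (found_inval > 0)
			return found_inval;			/* recycle an invalidated slot */
		if (s->slb_max == size)
			s->slb_max = 1;				/* full: everything gets flushed */
		return s->slb_max++;				/* append a new entry */
	}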
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index bc3a2ea9421..ddc485a529f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -23,6 +23,7 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
@@ -33,15 +34,6 @@
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
-/* For now use fixed-size 16MB page table */
-#define HPT_ORDER 24
-#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
-#define HPT_HASH_MASK (HPT_NPTEG - 1)
-
-/* Pages in the VRMA are 16MB pages */
-#define VRMA_PAGE_ORDER 24
-#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
-
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
#define NR_LPIDS (LPID_RSVD + 1)
@@ -51,21 +43,41 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
{
unsigned long hpt;
unsigned long lpid;
+ struct revmap_entry *rev;
+ struct kvmppc_linear_info *li;
+
+ /* Allocate guest's hashed page table */
+ li = kvm_alloc_hpt();
+ if (li) {
+ /* using preallocated memory */
+ hpt = (ulong)li->base_virt;
+ kvm->arch.hpt_li = li;
+ } else {
+ /* using dynamic memory */
+ hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
+ __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
+ }
- hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
- HPT_ORDER - PAGE_SHIFT);
if (!hpt) {
pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
return -ENOMEM;
}
kvm->arch.hpt_virt = hpt;
+ /* Allocate reverse map array */
+ rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
+ if (!rev) {
+ pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
+ goto out_freehpt;
+ }
+ kvm->arch.revmap = rev;
+
+ /* Allocate the guest's logical partition ID */
do {
lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
if (lpid >= NR_LPIDS) {
pr_err("kvm_alloc_hpt: No LPIDs free\n");
- free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
- return -ENOMEM;
+ goto out_freeboth;
}
} while (test_and_set_bit(lpid, lpid_inuse));
@@ -74,37 +86,64 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
return 0;
+
+ out_freeboth:
+ vfree(rev);
+ out_freehpt:
+ free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+ return -ENOMEM;
}
void kvmppc_free_hpt(struct kvm *kvm)
{
clear_bit(kvm->arch.lpid, lpid_inuse);
- free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+ vfree(kvm->arch.revmap);
+ if (kvm->arch.hpt_li)
+ kvm_release_hpt(kvm->arch.hpt_li);
+ else
+ free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+}
+
+/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
+static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
+{
+ return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
+}
+
+/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
+static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
+{
+ return (pgsize == 0x10000) ? 0x1000 : 0;
}
-void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
+void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
+ unsigned long porder)
{
unsigned long i;
- unsigned long npages = kvm->arch.ram_npages;
- unsigned long pfn;
- unsigned long *hpte;
- unsigned long hash;
- struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo;
+ unsigned long npages;
+ unsigned long hp_v, hp_r;
+ unsigned long addr, hash;
+ unsigned long psize;
+ unsigned long hp0, hp1;
+ long ret;
- if (!pginfo)
- return;
+ psize = 1ul << porder;
+ npages = memslot->npages >> (porder - PAGE_SHIFT);
/* VRMA can't be > 1TB */
- if (npages > 1ul << (40 - kvm->arch.ram_porder))
- npages = 1ul << (40 - kvm->arch.ram_porder);
+ if (npages > 1ul << (40 - porder))
+ npages = 1ul << (40 - porder);
/* Can't use more than 1 HPTE per HPTEG */
if (npages > HPT_NPTEG)
npages = HPT_NPTEG;
+ hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
+ HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
+ hp1 = hpte1_pgsize_encoding(psize) |
+ HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;
+
for (i = 0; i < npages; ++i) {
- pfn = pginfo[i].pfn;
- if (!pfn)
- break;
+ addr = i << porder;
/* can't use hpt_hash since va > 64 bits */
hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
/*
@@ -113,15 +152,15 @@ void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
* at most one HPTE per HPTEG, we just assume entry 7
* is available and use it.
*/
- hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7));
- hpte += 7 * 2;
- /* HPTE low word - RPN, protection, etc. */
- hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C |
- HPTE_R_M | PP_RWXX;
- wmb();
- hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
- (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED |
- HPTE_V_LARGE | HPTE_V_VALID;
+ hash = (hash << 3) + 7;
+ hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
+ hp_r = hp1 | addr;
+ ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
+ if (ret != H_SUCCESS) {
+ pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
+ addr, ret);
+ break;
+ }
}
}
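
The two pgsize helpers added above only distinguish three cases, which
is easy to spell out. A throwaway check (not kernel code; assumes
HPTE_V_LARGE as defined by the hash-MMU headers) of what they return for
the page sizes HV KVM cares about:

	static void check_pgsize_encodings(void)
	{
		/* 4K: base page size, no large-page bits anywhere */
		BUG_ON(hpte0_pgsize_encoding(0x1000)    != 0);
		BUG_ON(hpte1_pgsize_encoding(0x1000)    != 0);
		/* 64K: HPTE_V_LARGE in the first dword, 0x1000 in the second */
		BUG_ON(hpte0_pgsize_encoding(0x10000)   != HPTE_V_LARGE);
		BUG_ON(hpte1_pgsize_encoding(0x10000)   != 0x1000);
		/* 16M: HPTE_V_LARGE only */
		BUG_ON(hpte0_pgsize_encoding(0x1000000) != HPTE_V_LARGE);
		BUG_ON(hpte1_pgsize_encoding(0x1000000) != 0);
	}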
@@ -158,10 +197,814 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}
+/*
+ * This is called to get a reference to a guest page if there isn't
+ * one already in the kvm->arch.slot_phys[][] arrays.
+ */
+static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
+ struct kvm_memory_slot *memslot,
+ unsigned long psize)
+{
+ unsigned long start;
+ long np, err;
+ struct page *page, *hpage, *pages[1];
+ unsigned long s, pgsize;
+ unsigned long *physp;
+ unsigned int is_io, got, pgorder;
+ struct vm_area_struct *vma;
+ unsigned long pfn, i, npages;
+
+ physp = kvm->arch.slot_phys[memslot->id];
+ if (!physp)
+ return -EINVAL;
+ if (physp[gfn - memslot->base_gfn])
+ return 0;
+
+ is_io = 0;
+ got = 0;
+ page = NULL;
+ pgsize = psize;
+ err = -EINVAL;
+ start = gfn_to_hva_memslot(memslot, gfn);
+
+ /* Instantiate and get the page we want access to */
+ np = get_user_pages_fast(start, 1, 1, pages);
+ if (np != 1) {
+ /* Look up the vma for the page */
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, start);
+ if (!vma || vma->vm_start > start ||
+ start + psize > vma->vm_end ||
+ !(vma->vm_flags & VM_PFNMAP))
+ goto up_err;
+ is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
+ pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+ /* check alignment of pfn vs. requested page size */
+ if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
+ goto up_err;
+ up_read(&current->mm->mmap_sem);
+
+ } else {
+ page = pages[0];
+ got = KVMPPC_GOT_PAGE;
+
+ /* See if this is a large page */
+ s = PAGE_SIZE;
+ if (PageHuge(page)) {
+ hpage = compound_head(page);
+ s <<= compound_order(hpage);
+ /* Get the whole large page if slot alignment is ok */
+ if (s > psize && slot_is_aligned(memslot, s) &&
+ !(memslot->userspace_addr & (s - 1))) {
+ start &= ~(s - 1);
+ pgsize = s;
+ page = hpage;
+ }
+ }
+ if (s < psize)
+ goto out;
+ pfn = page_to_pfn(page);
+ }
+
+ npages = pgsize >> PAGE_SHIFT;
+ pgorder = __ilog2(npages);
+ physp += (gfn - memslot->base_gfn) & ~(npages - 1);
+ spin_lock(&kvm->arch.slot_phys_lock);
+ for (i = 0; i < npages; ++i) {
+ if (!physp[i]) {
+ physp[i] = ((pfn + i) << PAGE_SHIFT) +
+ got + is_io + pgorder;
+ got = 0;
+ }
+ }
+ spin_unlock(&kvm->arch.slot_phys_lock);
+ err = 0;
+
+ out:
+ if (got) {
+ if (PageHuge(page))
+ page = compound_head(page);
+ put_page(page);
+ }
+ return err;
+
+ up_err:
+ up_read(&current->mm->mmap_sem);
+ return err;
+}
+
+/*
+ * We come here on a H_ENTER call from the guest when we are not
+ * using mmu notifiers and we don't have the requested page pinned
+ * already.
+ */
+long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
+ long pte_index, unsigned long pteh, unsigned long ptel)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long psize, gpa, gfn;
+ struct kvm_memory_slot *memslot;
+ long ret;
+
+ if (kvm->arch.using_mmu_notifiers)
+ goto do_insert;
+
+ psize = hpte_page_size(pteh, ptel);
+ if (!psize)
+ return H_PARAMETER;
+
+ pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
+
+ /* Find the memslot (if any) for this address */
+ gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
+ gfn = gpa >> PAGE_SHIFT;
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
+ if (!slot_is_aligned(memslot, psize))
+ return H_PARAMETER;
+ if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
+ return H_PARAMETER;
+ }
+
+ do_insert:
+ /* Protect linux PTE lookup from page table destruction */
+ rcu_read_lock_sched(); /* this disables preemption too */
+ vcpu->arch.pgdir = current->mm->pgd;
+ ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
+ rcu_read_unlock_sched();
+ if (ret == H_TOO_HARD) {
+ /* this can't happen */
+ pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
+ ret = H_RESOURCE; /* or something */
+ }
+ return ret;
+
+}
+
+static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
+ gva_t eaddr)
+{
+ u64 mask;
+ int i;
+
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
+ continue;
+
+ if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
+ mask = ESID_MASK_1T;
+ else
+ mask = ESID_MASK;
+
+ if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
+ return &vcpu->arch.slb[i];
+ }
+ return NULL;
+}
+
+static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
+ unsigned long ea)
+{
+ unsigned long ra_mask;
+
+ ra_mask = hpte_page_size(v, r) - 1;
+ return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
+}
+
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *gpte, bool data)
+ struct kvmppc_pte *gpte, bool data)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvmppc_slb *slbe;
+ unsigned long slb_v;
+ unsigned long pp, key;
+ unsigned long v, gr;
+ unsigned long *hptep;
+ int index;
+ int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+
+ /* Get SLB entry */
+ if (virtmode) {
+ slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
+ if (!slbe)
+ return -EINVAL;
+ slb_v = slbe->origv;
+ } else {
+ /* real mode access */
+ slb_v = vcpu->kvm->arch.vrma_slb_v;
+ }
+
+ /* Find the HPTE in the hash table */
+ index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
+ HPTE_V_VALID | HPTE_V_ABSENT);
+ if (index < 0)
+ return -ENOENT;
+ hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+ v = hptep[0] & ~HPTE_V_HVLOCK;
+ gr = kvm->arch.revmap[index].guest_rpte;
+
+ /* Unlock the HPTE */
+ asm volatile("lwsync" : : : "memory");
+ hptep[0] = v;
+
+ gpte->eaddr = eaddr;
+ gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
+
+ /* Get PP bits and key for permission check */
+ pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
+ key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+ key &= slb_v;
+
+ /* Calculate permissions */
+ gpte->may_read = hpte_read_permission(pp, key);
+ gpte->may_write = hpte_write_permission(pp, key);
+ gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
+
+ /* Storage key permission check for POWER7 */
+ if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
+ int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
+ if (amrfield & 1)
+ gpte->may_read = 0;
+ if (amrfield & 2)
+ gpte->may_write = 0;
+ }
+
+ /* Get the guest physical address */
+ gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
+ return 0;
+}
+
+/*
+ * Quick test for whether an instruction is a load or a store.
+ * If the instruction is a load or a store, then this will indicate
+ * which it is, at least on server processors. (Embedded processors
+ * have some external PID instructions that don't follow the rule
+ * embodied here.) If the instruction isn't a load or store, then
+ * this doesn't return anything useful.
+ */
+static int instruction_is_store(unsigned int instr)
+{
+ unsigned int mask;
+
+ mask = 0x10000000;
+ if ((instr & 0xfc000000) == 0x7c000000)
+ mask = 0x100; /* major opcode 31 */
+ return (instr & mask) != 0;
+}
+
+static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned long gpa, int is_store)
+{
+ int ret;
+ u32 last_inst;
+ unsigned long srr0 = kvmppc_get_pc(vcpu);
+
+ /* We try to load the last instruction. We don't let
+ * emulate_instruction do it as it doesn't check what
+ * kvmppc_ld returns.
+ * If we fail, we just return to the guest and try executing it again.
+ */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
+ ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
+ if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
+ return RESUME_GUEST;
+ vcpu->arch.last_inst = last_inst;
+ }
+
+ /*
+ * WARNING: We do not know for sure whether the instruction we just
+ * read from memory is the same that caused the fault in the first
+ * place. If the instruction we read is neither a load nor a store,
+ * then it can't access memory, so we don't need to worry about
+ * enforcing access permissions. So, assuming it is a load or
+ * store, we just check that its direction (load or store) is
+ * consistent with the original fault, since that's what we
+ * checked the access permissions against. If there is a mismatch
+ * we just return and retry the instruction.
+ */
+
+ if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
+ return RESUME_GUEST;
+
+ /*
+ * Emulated accesses are emulated by looking at the hash for
+ * translation once, then performing the access later. The
+ * translation could be invalidated in the meantime in which
+ * point performing the subsequent memory access on the old
+ * physical address could possibly be a security hole for the
+ * guest (but not the host).
+ *
+ * This is less of an issue for MMIO stores since they aren't
+ * globally visible. It could be an issue for MMIO loads to
+ * a certain extent but we'll ignore it for now.
+ */
+
+ vcpu->arch.paddr_accessed = gpa;
+ return kvmppc_emulate_mmio(run, vcpu);
+}
+
+int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned long ea, unsigned long dsisr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long *hptep, hpte[3], r;
+ unsigned long mmu_seq, psize, pte_size;
+ unsigned long gfn, hva, pfn;
+ struct kvm_memory_slot *memslot;
+ unsigned long *rmap;
+ struct revmap_entry *rev;
+ struct page *page, *pages[1];
+ long index, ret, npages;
+ unsigned long is_io;
+ unsigned int writing, write_ok;
+ struct vm_area_struct *vma;
+ unsigned long rcbits;
+
+ /*
+ * Real-mode code has already searched the HPT and found the
+ * entry we're interested in. Lock the entry and check that
+ * it hasn't changed. If it has, just return and re-execute the
+ * instruction.
+ */
+ if (ea != vcpu->arch.pgfault_addr)
+ return RESUME_GUEST;
+ index = vcpu->arch.pgfault_index;
+ hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
+ rev = &kvm->arch.revmap[index];
+ preempt_disable();
+ while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
+ cpu_relax();
+ hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
+ hpte[1] = hptep[1];
+ hpte[2] = r = rev->guest_rpte;
+ asm volatile("lwsync" : : : "memory");
+ hptep[0] = hpte[0];
+ preempt_enable();
+
+ if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
+ hpte[1] != vcpu->arch.pgfault_hpte[1])
+ return RESUME_GUEST;
+
+ /* Translate the logical address and get the page */
+ psize = hpte_page_size(hpte[0], r);
+ gfn = hpte_rpn(r, psize);
+ memslot = gfn_to_memslot(kvm, gfn);
+
+ /* No memslot means it's an emulated MMIO region */
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
+ unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
+ dsisr & DSISR_ISSTORE);
+ }
+
+ if (!kvm->arch.using_mmu_notifiers)
+ return -EFAULT; /* should never get here */
+
+ /* used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_notifier