Diffstat (limited to 'arch/powerpc/include/asm/kvm_ppc.h')
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 209
 1 file changed, 177 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a26c2..9c89cdd067a 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
- int is_bigendian);
+ int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
- int is_bigendian);
+ int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
- u64 val, unsigned int bytes, int is_bigendian);
+ u64 val, unsigned int bytes,
+ int is_default_endian);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
struct kvm_vcpu *vcpu);
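The renamed is_default_endian flag tells the MMIO emulation whether the access
follows the guest's current default byte order (1) or the byte-reversed order
(0), instead of hard-coding big endian. A minimal caller sketch, modelled on
the in-kernel instruction emulator; the OP_31_XOP_* case labels and the
emulated/rt locals are assumed surrounding context, not part of this diff:

	case OP_31_XOP_LWZX:
		/* plain load: use the guest's default endianness */
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;
	case OP_31_XOP_LWBRX:
		/* byte-reversed load: the opposite of the default */
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
		break;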
@@ -106,13 +107,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
- ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
- ulong *val);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void);
@@ -135,17 +129,19 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
- struct kvm_allocate_rma *rma);
+extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -177,6 +173,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
+union kvmppc_one_reg {
+ u32 wval;
+ u64 dval;
+ vector128 vval;
+ u64 vsxval[2];
+ struct {
+ u64 addr;
+ u64 length;
+ } vpaval;
+};
+
+struct kvmppc_ops {
+ struct module *owner;
+ int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+ int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+ int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+ int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val);
+ void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+ void (*vcpu_put)(struct kvm_vcpu *vcpu);
+ void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+ void (*vcpu_free)(struct kvm_vcpu *vcpu);
+ int (*check_requests)(struct kvm_vcpu *vcpu);
+ int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+ void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+ int (*prepare_memory_region)(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_userspace_memory_region *mem);
+ void (*commit_memory_region)(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old);
+ int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+ int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+ unsigned long end);
+ int (*age_hva)(struct kvm *kvm, unsigned long hva);
+ int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+ void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+ void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+ void (*free_memslot)(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont);
+ int (*create_memslot)(struct kvm_memory_slot *slot,
+ unsigned long npages);
+ int (*init_vm)(struct kvm *kvm);
+ void (*destroy_vm)(struct kvm *kvm);
+ int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+ int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int inst, int *advance);
+ int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+ int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+ void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+ long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+ unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+ return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
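A backend selects itself by filling in a kvmppc_ops table and publishing it
through one of the pointers above; is_kvmppc_hv_enabled() then reduces to a
pointer comparison against the per-VM kvm->arch.kvm_ops. A minimal
registration sketch for an HV backend; the kvm_ops_hv name and the *_hv
handler names are illustrative assumptions:

static struct kvmppc_ops kvm_ops_hv = {
	.get_sregs	= kvm_arch_vcpu_ioctl_get_sregs_hv,
	.set_sregs	= kvm_arch_vcpu_ioctl_set_sregs_hv,
	.vcpu_create	= kvmppc_core_vcpu_create_hv,
	.vcpu_run	= kvmppc_vcpu_run_hv,
	.fast_vcpu_kick	= kvmppc_fast_vcpu_kick_hv,
	/* ... remaining callbacks ... */
};

static int kvmppc_book3s_init_hv(void)
{
	/* sketch: publish the ops table at module init time */
	kvm_ops_hv.owner = THIS_MODULE;
	kvmppc_hv_ops = &kvm_ops_hv;
	return 0;
}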
/*
* Cuts out inst bits numbered according to the spec, i.e. the leftmost
* bit is bit zero. Both boundary bits are included.
@@ -210,17 +272,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
return r;
}
-union kvmppc_one_reg {
- u32 wval;
- u64 dval;
- vector128 vval;
- u64 vsxval[2];
- struct {
- u64 addr;
- u64 length;
- } vpaval;
-};
-
#define one_reg_size(id) \
(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
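Worked example: for a KVM_REG_SIZE_U64 register id, the size field
((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT) is 3, so one_reg_size(id)
is 1ul << 3 = 8 bytes; a KVM_REG_SIZE_U32 id likewise yields 4.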
@@ -245,10 +296,10 @@ union kvmppc_one_reg {
__v; \
})
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +311,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
struct openpic;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
@@ -269,10 +320,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
static inline u32 kvmppc_get_xics_latch(void)
{
- u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+ u32 xirr;
+ xirr = get_paca()->kvm_hstate.saved_xirr;
get_paca()->kvm_hstate.saved_xirr = 0;
-
return xirr;
}
@@ -281,7 +332,14 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
paca[cpu].kvm_hstate.host_ipi = host_ipi;
}
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+ vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
+
+extern void kvm_hv_vm_activated(void);
+extern void kvm_hv_vm_deactivated(void);
+extern bool kvm_hv_mode_active(void);
#else
static inline void __init kvm_cma_reserve(void)
@@ -302,6 +360,9 @@ static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
kvm_vcpu_kick(vcpu);
}
+
+static inline bool kvm_hv_mode_active(void) { return false; }
+
#endif
#ifdef CONFIG_KVM_XICS
@@ -395,6 +456,84 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
}
/*
+ * Shared struct helpers. The shared struct can be little or big endian,
+ * depending on the guest endianness, so expose endian-aware accessors
+ * for all of its fields.
+ */
+static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ /* Only Book3S_64 PR supports bi-endian for now */
+ return vcpu->arch.shared_big_endian;
+#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
+ /* Book3s_64 HV on little endian is always little endian */
+ return false;
+#else
+ return true;
+#endif
+}
+
+#define SHARED_WRAPPER_GET(reg, size) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ if (kvmppc_shared_big_endian(vcpu)) \
+ return be##size##_to_cpu(vcpu->arch.shared->reg); \
+ else \
+ return le##size##_to_cpu(vcpu->arch.shared->reg); \
+} \
+
+#define SHARED_WRAPPER_SET(reg, size) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ if (kvmppc_shared_big_endian(vcpu)) \
+ vcpu->arch.shared->reg = cpu_to_be##size(val); \
+ else \
+ vcpu->arch.shared->reg = cpu_to_le##size(val); \
+} \
+
+#define SHARED_WRAPPER(reg, size) \
+ SHARED_WRAPPER_GET(reg, size) \
+ SHARED_WRAPPER_SET(reg, size) \
+
+SHARED_WRAPPER(critical, 64)
+SHARED_WRAPPER(sprg0, 64)
+SHARED_WRAPPER(sprg1, 64)
+SHARED_WRAPPER(sprg2, 64)
+SHARED_WRAPPER(sprg3, 64)
+SHARED_WRAPPER(srr0, 64)
+SHARED_WRAPPER(srr1, 64)
+SHARED_WRAPPER(dar, 64)
+SHARED_WRAPPER_GET(msr, 64)
+static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ vcpu->arch.shared->msr = cpu_to_be64(val);
+ else
+ vcpu->arch.shared->msr = cpu_to_le64(val);
+}
+SHARED_WRAPPER(dsisr, 32)
+SHARED_WRAPPER(int_pending, 32)
+SHARED_WRAPPER(sprg4, 64)
+SHARED_WRAPPER(sprg5, 64)
+SHARED_WRAPPER(sprg6, 64)
+SHARED_WRAPPER(sprg7, 64)
+
+static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ return be32_to_cpu(vcpu->arch.shared->sr[nr]);
+ else
+ return le32_to_cpu(vcpu->arch.shared->sr[nr]);
+}
+
+static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
+ else
+ vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
+}
+
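For reference, SHARED_WRAPPER(srr0, 64) above expands to roughly the following
accessor pair (a sketch of the preprocessor output):

static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be64_to_cpu(vcpu->arch.shared->srr0);
	else
		return le64_to_cpu(vcpu->arch.shared->srr0);
}

static inline void kvmppc_set_srr0(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->srr0 = cpu_to_be64(val);
	else
		vcpu->arch.shared->srr0 = cpu_to_le64(val);
}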
+/*
* Please call after prepare_to_enter. This function puts the lazy EE and
* IRQ disabled tracking state back to normal mode, without actually
* enabling interrupts.
@@ -404,6 +543,12 @@ static inline void kvmppc_fix_ee_before_entry(void)
trace_hardirqs_on();
#ifdef CONFIG_PPC64
+ /*
+ * To avoid races, the caller must have gone directly from having
+ * interrupts fully-enabled to hard-disabled.
+ */
+ WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+
/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
local_paca->soft_enabled = 1;
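A sketch of the calling convention the WARN_ON above enforces, modelled on
the booke entry path; the __kvmppc_vcpu_run() call and the s/ret locals are
assumed context:

	s = kvmppc_prepare_to_enter(vcpu);	/* hard-disables interrupts */
	if (s <= 0)
		return s;
	/* interrupts went straight from fully enabled to hard-disabled */
	kvmppc_fix_ee_before_entry();
	ret = __kvmppc_vcpu_run(run, vcpu);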
@@ -425,7 +570,7 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
msr_64bit = MSR_SF;
#endif
- if (!(vcpu->arch.shared->msr & msr_64bit))
+ if (!(kvmppc_get_msr(vcpu) & msr_64bit))
ea = (uint32_t)ea;
return ea;