-rw-r--r--  Documentation/kvm/api.txt         | 10
-rw-r--r--  arch/ia64/kvm/vcpu.h              |  9
-rw-r--r--  arch/ia64/kvm/vmm.c               |  4
-rw-r--r--  arch/ia64/kvm/vtlb.c              |  2
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c  | 22
-rw-r--r--  arch/x86/include/asm/kvm.h        |  4
-rw-r--r--  arch/x86/kvm/lapic.c              |  1
-rw-r--r--  arch/x86/kvm/paging_tmpl.h        | 18
-rw-r--r--  arch/x86/kvm/x86.c                | 12
-rw-r--r--  virt/kvm/assigned-dev.c           |  6
-rw-r--r--  virt/kvm/kvm_main.c               |  5

11 files changed, 59 insertions(+), 34 deletions(-)
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index e1a11416102..2811e452f75 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
 		__u8 pad;
 	} nmi;
 	__u32 sipi_vector;
-	__u32 flags;   /* must be zero */
+	__u32 flags;
 };
 
 4.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.
 
 See KVM_GET_VCPU_EVENTS for the data structure.
 
+Fields that may be modified asynchronously by running VCPUs can be excluded
+from the update. These fields are nmi.pending and sipi_vector. Keep the
+corresponding bits in the flags field cleared to suppress overwriting the
+current in-kernel state. The bits are:
+
+KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
+KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+
 
 5. The kvm_run structure
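
For illustration (not part of the patch): a minimal userspace sketch of the
selective update this documentation describes. vcpu_fd is assumed to be an
open KVM vcpu file descriptor; the function name and the choice of modified
field are hypothetical:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Round-trip the event state but leave the kernel's nmi.pending
	 * and sipi_vector untouched by clearing their valid bits. */
	static int update_events_preserving_async(int vcpu_fd)
	{
		struct kvm_vcpu_events events;

		if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
			return -1;

		/* ... modify userspace-owned fields, e.g. events.nmi.masked ... */

		/* Cleared valid bits mean "do not transfer" on set. */
		events.flags &= ~(KVM_VCPUEVENT_VALID_NMI_PENDING |
				  KVM_VCPUEVENT_VALID_SIPI_VECTOR);

		return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
	}

Since KVM_GET_VCPU_EVENTS now reports both valid bits as set, clearing them
before KVM_SET_VCPU_EVENTS is what suppresses the overwrite.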
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 360724d3ae6..988911b4cc7 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define _vmm_raw_spin_lock(x)	do {}while(0)
 #define _vmm_raw_spin_unlock(x) do {}while(0)
 #else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
 #define _vmm_raw_spin_lock(x)						\
 	do {								\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 
 #define _vmm_raw_spin_unlock(x)				\
 	do { barrier();					\
-		((spinlock_t *)x)->raw_lock.lock = 0; }	\
+		((vmm_spinlock_t *)x)->lock = 0; }	\
 	while (0)
 #endif
 
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
 enum {
 	I_TLB = 1,
 	D_TLB = 2
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f4b4c899bb6..7a62f75778c 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
 	return ;
 }
 
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_lock(lock);
 }
 
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_unlock(lock);
 }
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 20b3852f7a6..4332f7ee520 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 {
 	u64 i, dirty_pages = 1;
 	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
 	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
 
 	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 5598f88f142..e4beeb371a7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -390,6 +390,26 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 {
 	u64 rb = 0, rs = 0;
 
+	/*
+	 * According to Book3 2.01 mtsrin is implemented as:
+	 *
+	 * The SLB entry specified by (RB)32:35 is loaded from register
+	 * RS, as follows.
+	 *
+	 * SLBE Bit	Source			SLB Field
+	 *
+	 * 0:31		0x0000_0000		ESID-0:31
+	 * 32:35	(RB)32:35		ESID-32:35
+	 * 36		0b1			V
+	 * 37:61	0x00_0000 || 0b0	VSID-0:24
+	 * 62:88	(RS)37:63		VSID-25:51
+	 * 89:91	(RS)33:35		Ks Kp N
+	 * 92		(RS)36			L ((RS)36 must be 0b0)
+	 * 93		0b0			C
+	 */
+
+	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);
+
 	/* ESID = srnum */
 	rb |= (srnum & 0xf) << 28;
 	/* Set the valid bit */
@@ -400,7 +420,7 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 	/* VSID = VSID */
 	rs |= (value & 0xfffffff) << 12;
 	/* flags = flags */
-	rs |= ((value >> 27) & 0xf) << 9;
+	rs |= ((value >> 28) & 0x7) << 9;
 
 	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
 }
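
Why the flag extraction changed (an illustrative, hypothetical helper, not
part of the patch): per the table quoted above, the Ks/Kp/N keys come from
(RS)33:35. IBM bit numbering counts from the MSB, so bits 33:35 of a 64-bit
value are bits 30:28 counting from the least significant bit. The old
expression ((value >> 27) & 0xf) extracted four bits, 33:36, pulling in the
L bit and shifting the keys out of place; the fix extracts exactly the three
key bits:

	/* sr_key_bits() is a hypothetical name; the patch open-codes this.
	 * (RS)33:35 in IBM numbering == bits 30:28 from the LSB. */
	static inline unsigned long sr_key_bits(unsigned long value)
	{
		return (value >> 28) & 0x7;	/* Ks Kp N */
	}

With it, the fixed line would read: rs |= sr_key_bits(value) << 9;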
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 950df434763..f46b79f6c16 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
 	__u8  reserved[31];
 };
 
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING	0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR	0x00000002
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
 	struct {
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cd60c0bd1b3..3063a0c4858 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	hrtimer_cancel(&apic->lapic_timer.timer);
 	update_divide_count(apic);
 	start_apic_timer(apic);
+	apic->irr_pending = true;
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a6017132fba..58a0f1e8859 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct kvm_shadow_walk_iterator iterator;
-	pt_element_t gpte;
-	gpa_t pte_gpa = -1;
 	int level;
 	u64 *sptep;
 	int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 		if (level == PT_PAGE_TABLE_LEVEL  ||
 		    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
 		    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-			struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-			pte_gpa = (sp->gfn << PAGE_SHIFT);
-			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
 			if (is_shadow_present_pte(*sptep)) {
 				rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	if (need_flush)
 		kvm_flush_remote_tlbs(vcpu->kvm);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-
-	if (pte_gpa == -1)
-		return;
-	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-				  sizeof(pt_element_t)))
-		return;
-	if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-		if (mmu_topup_memory_caches(vcpu))
-			return;
-		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-				  sizeof(pt_element_t), 0);
-	}
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d068966fb2..6651dbf5867 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
-	events->flags = 0;
+	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
 	vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
-	if (events->flags)
+	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
 		return -EINVAL;
 
 	vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		kvm_pic_clear_isr_ack(vcpu->kvm);
 
 	vcpu->arch.nmi_injected = events->nmi.injected;
-	vcpu->arch.nmi_pending = events->nmi.pending;
+	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+		vcpu->arch.nmi_pending = events->nmi.pending;
 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-	vcpu->arch.sipi_vector = events->sipi_vector;
+	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+		vcpu->arch.sipi_vector = events->sipi_vector;
 
 	vcpu_put(vcpu);
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index fd9c097b760..f73de631e3e 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -508,8 +508,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
 
-	down_read(&kvm->slots_lock);
 	mutex_lock(&kvm->lock);
+	down_read(&kvm->slots_lock);
 
 	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
 				      assigned_dev->assigned_dev_id);
@@ -573,8 +573,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	}
 
 out:
-	mutex_unlock(&kvm->lock);
 	up_read(&kvm->slots_lock);
+	mutex_unlock(&kvm->lock);
 	return r;
 out_list_del:
 	list_del(&match->list);
@@ -585,8 +585,8 @@ out_put:
 	pci_dev_put(dev);
 out_free:
 	kfree(match);
-	mutex_unlock(&kvm->lock);
 	up_read(&kvm->slots_lock);
+	mutex_unlock(&kvm->lock);
 	return r;
 }
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b5af8816761..a944be392d6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -64,7 +64,7 @@ MODULE_LICENSE("GPL");
 /*
  * Ordering of locks:
  *
- *	kvm->slots_lock --> kvm->lock --> kvm->irq_lock
+ *	kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
 DEFINE_SPINLOCK(kvm_lock);
@@ -406,8 +406,11 @@ static struct kvm *kvm_create_vm(void)
 out:
 	return kvm;
 
+#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
+    (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
 out_err:
 	hardware_disable_all();
+#endif
 out_err_nodisable:
 	kfree(kvm);
 	return ERR_PTR(r);
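
For illustration (not part of the patch): the nesting that all paths must
follow after the reordering in assigned-dev.c and the updated lock-ordering
comment above, sketched for any code that needs both locks; kvm stands for a
valid struct kvm pointer:

	/* kvm->lock is now the outer lock, kvm->slots_lock the inner one;
	 * release in reverse order of acquisition. */
	mutex_lock(&kvm->lock);
	down_read(&kvm->slots_lock);
	/* ... work that needs the VM lock and a stable memslot view ... */
	up_read(&kvm->slots_lock);
	mutex_unlock(&kvm->lock);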