author     Linus Torvalds <torvalds@linux-foundation.org>  2014-05-02 09:26:09 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-05-02 09:26:09 -0700
commit     e7e6d2a4a1aecb087c0f522d8d131a0252691398 (patch)
tree       dc81a974f3a8510c88f3e40427be68c8a03a5727
parent     b28e4f08d43beca4ac8ba2f768f4aaa8b056c4cf (diff)
parent     a5a5aef451430dbd48f5bf32029b8b98b690074d (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:

 - Fix for a Haswell regression in nested virtualization, introduced
   during the merge window.

 - A fix from Oleg to async page faults.

 - A bunch of small ARM changes.

 - A trivial patch to use the new MSI-X API introduced during the
   merge window.

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: ARM: vgic: Fix the overlap check action about setting the GICD & GICC base address.
  KVM: arm/arm64: vgic: fix GICD_ICFGR register accesses
  KVM: async_pf: mm->mm_count can not pin apf->mm
  KVM: ARM: vgic: Fix sgi dispatch problem
  MAINTAINERS: co-maintainance of KVM/{arm,arm64}
  arm: KVM: fix possible misalignment of PGDs and bounce page
  KVM: x86: Check for host supported fields in shadow vmcs
  kvm: Use pci_enable_msix_exact() instead of pci_enable_msix()
  ARM: KVM: disable KVM in Kconfig on big-endian systems
-rw-r--r--  MAINTAINERS               5
-rw-r--r--  arch/arm/kvm/Kconfig      2
-rw-r--r--  arch/arm/kvm/mmu.c       15
-rw-r--r--  arch/x86/kvm/vmx.c       53
-rw-r--r--  virt/kvm/arm/vgic.c      15
-rw-r--r--  virt/kvm/assigned-dev.c   3
-rw-r--r--  virt/kvm/async_pf.c       8
7 files changed, 70 insertions, 31 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index ea44a57f790..6bef70b614c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5114,14 +5114,19 @@ F: drivers/s390/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR ARM
M: Christoffer Dall <christoffer.dall@linaro.org>
+M: Marc Zyngier <marc.zyngier@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
W: http://systems.cs.columbia.edu/projects/kvm-arm
S: Supported
F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
+F: virt/kvm/arm/
+F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M: Christoffer Dall <christoffer.dall@linaro.org>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a..4be5bb150bd 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,7 +23,7 @@ config KVM
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
- depends on ARM_VIRT_EXT && ARM_LPAE
+ depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
---help---
Support hosting virtualized guest machines. You will also
need to select one or more of the processor modules below.
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 80bb1e6c2c2..16f804938b8 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
if (boot_hyp_pgd) {
unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- kfree(boot_hyp_pgd);
+ free_pages((unsigned long)boot_hyp_pgd, pgd_order);
boot_hyp_pgd = NULL;
}
if (hyp_pgd)
unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- kfree(init_bounce_page);
+ free_page((unsigned long)init_bounce_page);
init_bounce_page = NULL;
mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
- kfree(hyp_pgd);
+ free_pages((unsigned long)hyp_pgd, pgd_order);
hyp_pgd = NULL;
}
@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
phys_addr_t phys_base;
- init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
if (!init_bounce_page) {
kvm_err("Couldn't allocate HYP init bounce page\n");
err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
(unsigned long)phys_base);
}
- hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
- boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+ hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+ boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
if (!hyp_pgd || !boot_hyp_pgd) {
kvm_err("Hyp mode PGD not allocated\n");
err = -ENOMEM;
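
[The mmu.c hunks above all serve one fix: the HYP-mode PGDs and the init bounce page must be naturally aligned, and kmalloc()/kzalloc() only guarantee the slab minimum alignment, so a PGD spanning multiple pages could land misaligned. The buddy allocator, by contrast, returns order-n blocks aligned to their own size. A minimal sketch of the pattern the patch adopts; the helper name is hypothetical:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: get_order() rounds the PGD size up to a
     * power-of-two number of pages, and __get_free_pages() returns a
     * block naturally aligned to PAGE_SIZE << order -- an alignment
     * guarantee kzalloc() does not make. */
    static pgd_t *alloc_hyp_pgd_sketch(void)
    {
            unsigned int order = get_order(PTRS_PER_PGD * sizeof(pgd_t));

            return (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
    }

The same reasoning drives the bounce-page change: __get_free_page() hands back a whole, page-aligned page, where kmalloc(PAGE_SIZE, ...) need not.]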
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1f68c583192..33e8c028842 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
[number##_HIGH] = VMCS12_OFFSET(name)+4
-static const unsigned long shadow_read_only_fields[] = {
+static unsigned long shadow_read_only_fields[] = {
/*
* We do NOT shadow fields that are modified when L0
* traps and emulates any vmx instruction (e.g. VMPTRLD,
@@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = {
GUEST_LINEAR_ADDRESS,
GUEST_PHYSICAL_ADDRESS
};
-static const int max_shadow_read_only_fields =
+static int max_shadow_read_only_fields =
ARRAY_SIZE(shadow_read_only_fields);
-static const unsigned long shadow_read_write_fields[] = {
+static unsigned long shadow_read_write_fields[] = {
GUEST_RIP,
GUEST_RSP,
GUEST_CR0,
@@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = {
HOST_FS_SELECTOR,
HOST_GS_SELECTOR
};
-static const int max_shadow_read_write_fields =
+static int max_shadow_read_write_fields =
ARRAY_SIZE(shadow_read_write_fields);
static const unsigned short vmcs_field_to_offset_table[] = {
@@ -3009,6 +3009,41 @@ static void free_kvm_area(void)
}
}
+static void init_vmcs_shadow_fields(void)
+{
+ int i, j;
+
+ /* No checks for read only fields yet */
+
+ for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+ switch (shadow_read_write_fields[i]) {
+ case GUEST_BNDCFGS:
+ if (!vmx_mpx_supported())
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ if (j < i)
+ shadow_read_write_fields[j] =
+ shadow_read_write_fields[i];
+ j++;
+ }
+ max_shadow_read_write_fields = j;
+
+ /* shadowed fields guest access without vmexit */
+ for (i = 0; i < max_shadow_read_write_fields; i++) {
+ clear_bit(shadow_read_write_fields[i],
+ vmx_vmwrite_bitmap);
+ clear_bit(shadow_read_write_fields[i],
+ vmx_vmread_bitmap);
+ }
+ for (i = 0; i < max_shadow_read_only_fields; i++)
+ clear_bit(shadow_read_only_fields[i],
+ vmx_vmread_bitmap);
+}
+
static __init int alloc_kvm_area(void)
{
int cpu;
@@ -3039,6 +3074,8 @@ static __init int hardware_setup(void)
enable_vpid = 0;
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs)
+ init_vmcs_shadow_fields();
if (!cpu_has_vmx_ept() ||
!cpu_has_vmx_ept_4levels()) {
@@ -8803,14 +8840,6 @@ static int __init vmx_init(void)
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
- /* shadowed read/write fields */
- for (i = 0; i < max_shadow_read_write_fields; i++) {
- clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
- clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
- }
- /* shadowed read only fields */
- for (i = 0; i < max_shadow_read_only_fields; i++)
- clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
/*
* Allow direct access to the PC debug port (it is often used for I/O
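
[The new init_vmcs_shadow_fields() exists so the shadow-field tables can be trimmed at hardware_setup() time to what the host actually supports -- here, dropping GUEST_BNDCFGS when MPX is absent -- which is also why the tables and their max_* lengths lose their const. The filter is a standard in-place compaction; a self-contained sketch with hypothetical names:

    #include <linux/types.h>

    /* Keep only the entries accepted by @keep, packing survivors to
     * the front of the array; returns the new logical length, which
     * replaces the original ARRAY_SIZE()-derived count. */
    static int compact_fields(unsigned long *fields, int n,
                              bool (*keep)(unsigned long field))
    {
            int i, j;

            for (i = j = 0; i < n; i++) {
                    if (!keep(fields[i]))
                            continue;               /* unsupported: drop it */
                    if (j < i)
                            fields[j] = fields[i];  /* shift survivor left */
                    j++;
            }
            return j;
    }

With the tables filtered up front, the vmread/vmwrite bitmap clearing moves out of vmx_init() so only host-supported fields are ever marked shadowed.]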
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 47b29834a6b..56ff9bebb57 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
u32 val;
u32 *reg;
- offset >>= 1;
reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset);
+ vcpu->vcpu_id, offset >> 1);
- if (offset & 2)
+ if (offset & 4)
val = *reg >> 16;
else
val = *reg & 0xffff;
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
vgic_reg_access(mmio, &val, offset,
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
if (mmio->is_write) {
- if (offset < 4) {
+ if (offset < 8) {
*reg = ~0U; /* Force PPIs/SGIs to 1 */
return false;
}
val = vgic_cfg_compress(val);
- if (offset & 2) {
+ if (offset & 4) {
*reg &= 0xffff;
*reg |= val << 16;
} else {
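
[The constant changes in the two handle_mmio_cfg_reg() hunks above follow from the register layout: each 32-bit GICD_ICFGR register holds 2 configuration bits for 16 interrupts, and the vgic compresses that to 1 bit per interrupt, so two consecutive 4-byte guest registers share one 32-bit bitmap word. The byte offset should therefore be halved only when indexing the compressed bitmap (offset >> 1), while the halfword select (offset & 4) and the SGI/PPI guard (offset < 8, covering GICD_ICFGR0/1) must test the unshifted offset; the old code shifted first and then tested (offset & 2) and (offset < 4), mixing pre- and post-shift units. A worked sketch, with a hypothetical helper name:

    /* Guest access at byte offset 8 (GICD_ICFGR2, IRQs 32-47):
     *   compressed offset = 8 >> 1 = 4  -> second 32-bit bitmap word
     *   halfword select   = 8 & 4  = 0  -> low 16 bits of that word */
    static u32 icfgr_halfword(u32 offset, u32 packed)
    {
            return (offset & 4) ? (packed >> 16) : (packed & 0xffff);
    }
]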
@@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
case 0:
if (!target_cpus)
return;
+ break;
case 1:
target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
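
[The added break in vgic_dispatch_sgi() fixes a switch fall-through: with target-list filter 0 (use the CPU mask supplied in the register), control fell into case 1 and replaced target_cpus with the everyone-but-self broadcast mask. A worked example with hypothetical values:

    /* vcpu_id = 0, nrcpus = 4, guest requests an SGI to CPU 2 only:
     *   case 0 leaves target_cpus = 0b0100 (the intended target);
     *   falling through, case 1 computed
     *     ((1 << 4) - 1) & ~(1 << 0) & 0xff = 0b1110,
     *   turning the targeted SGI into a broadcast to CPUs 1-3.
     * The break preserves the 0b0100 mask. */
]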
@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
if (addr + size < addr)
return -EINVAL;
+ *ioaddr = addr;
ret = vgic_ioaddr_overlap(kvm);
if (ret)
- return ret;
- *ioaddr = addr;
+ *ioaddr = VGIC_ADDR_UNDEF;
+
return ret;
}
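
[This reordering is the overlap fix named in the shortlog: previously vgic_ioaddr_overlap() ran before the candidate base was stored in *ioaddr, so the GICD/GICC comparison saw the stale value (typically VGIC_ADDR_UNDEF, which the check skips) and could not report a conflict. The fixed flow is tentative-write, validate, roll back; a hypothetical failing sequence under the old code:

    /* Old behaviour (hypothetical addresses):
     *   set GICD base = 0x10000000  -> accepted
     *   set GICC base = 0x10000000  -> overlap check ran while
     *                                  vgic_cpu_base was still
     *                                  VGIC_ADDR_UNDEF, so the
     *                                  clash went undetected.
     * New behaviour: *ioaddr holds the candidate during the check and
     * is reset to VGIC_ADDR_UNDEF if the check fails. */
]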
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 8db43701016..bf06577fea5 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -395,7 +395,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
if (dev->entries_nr == 0)
return r;
- r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+ r = pci_enable_msix_exact(dev->dev,
+ dev->host_msix_entries, dev->entries_nr);
if (r)
return r;
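
[The MSI-X change is the API migration from the merge window: pci_enable_msix() had a tri-state return (0 on success, a negative errno, or a positive count meaning "only this many vectors are available, retry with fewer"), and this caller never handled the positive case. pci_enable_msix_exact() either enables exactly the requested number of vectors or fails with a negative errno, so the existing if (r) return r; becomes a complete error check. Sketch of the contract, using the names from the diff:

    /* 0: exactly entries_nr vectors enabled; < 0: none enabled.
     * A positive "partial success" return can no longer occur. */
    r = pci_enable_msix_exact(dev->dev, dev->host_msix_entries,
                              dev->entries_nr);
    if (r)
            return r;
]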
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 10df100c451..06e6401d6ef 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work)
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- mmdrop(mm);
+ mmput(mm);
kvm_put_kvm(vcpu->kvm);
}
@@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
flush_work(&work->work);
#else
if (cancel_work_sync(&work->work)) {
- mmdrop(work->mm);
+ mmput(work->mm);
kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
}
@@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
work->addr = hva;
work->arch = *arch;
work->mm = current->mm;
- atomic_inc(&work->mm->mm_count);
+ atomic_inc(&work->mm->mm_users);
kvm_get_kvm(work->vcpu->kvm);
/* this can't really happen otherwise gfn_to_pfn_async
@@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
return 1;
retry_sync:
kvm_put_kvm(work->vcpu->kvm);
- mmdrop(work->mm);
+ mmput(work->mm);
kmem_cache_free(async_pf_cache, work);
return 0;
}
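
[Finally, the async_pf change swaps which of the mm_struct's two reference counts pins apf->mm. mm_count (released with mmdrop()) only keeps the struct mm_struct allocated; it does not stop exit_mmap() from tearing down the VMAs and page tables when the last user exits, so the worker's get_user_pages() could run against a dead address space. mm_users (released with mmput()) pins the address space itself. A sketch of the distinction as it stood in 3.15, before the mmget()/mmgrab() wrappers existed -- hence the raw atomic_inc():

    struct mm_struct *mm = current->mm;

    atomic_inc(&mm->mm_users); /* pin VMAs and page tables (pre-mmget()) */
    /* ... worker later does use_mm(mm) + get_user_pages(...) ... */
    mmput(mm);                 /* last mm_users drop runs exit_mmap() */

    /* By contrast, atomic_inc(&mm->mm_count) + mmdrop() -- the old
     * pairing -- keeps only the struct itself alive, so the guest's
     * hva mappings could already be gone when the work item ran. */
]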