Diffstat (limited to 'drivers/kvm/kvm_main.c')
-rw-r--r-- | drivers/kvm/kvm_main.c | 3628
1 files changed, 0 insertions, 3628 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
deleted file mode 100644
index 47c10b8f89b..00000000000
--- a/drivers/kvm/kvm_main.c
+++ /dev/null
@@ -1,3628 +0,0 @@
-/*
- * Kernel-based Virtual Machine driver for Linux
- *
- * This module enables machines with Intel VT-x extensions to run virtual
- * machines without emulation or binary translation.
- *
- * Copyright (C) 2006 Qumranet, Inc.
- *
- * Authors:
- * Avi Kivity <avi@qumranet.com>
- * Yaniv Kamay <yaniv@qumranet.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#include "kvm.h"
-#include "x86_emulate.h"
-#include "segment_descriptor.h"
-#include "irq.h"
-
-#include <linux/kvm.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/percpu.h>
-#include <linux/gfp.h>
-#include <linux/mm.h>
-#include <linux/miscdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/reboot.h>
-#include <linux/debugfs.h>
-#include <linux/highmem.h>
-#include <linux/file.h>
-#include <linux/sysdev.h>
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/smp.h>
-#include <linux/anon_inodes.h>
-#include <linux/profile.h>
-
-#include <asm/processor.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/desc.h>
-
-MODULE_AUTHOR("Qumranet");
-MODULE_LICENSE("GPL");
-
-static DEFINE_SPINLOCK(kvm_lock);
-static LIST_HEAD(vm_list);
-
-static cpumask_t cpus_hardware_enabled;
-
-struct kvm_x86_ops *kvm_x86_ops;
-struct kmem_cache *kvm_vcpu_cache;
-EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
-
-static __read_mostly struct preempt_ops kvm_preempt_ops;
-
-#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
-
-static struct kvm_stats_debugfs_item {
-	const char *name;
-	int offset;
-	struct dentry *dentry;
-} debugfs_entries[] = {
-	{ "pf_fixed", STAT_OFFSET(pf_fixed) },
-	{ "pf_guest", STAT_OFFSET(pf_guest) },
-	{ "tlb_flush", STAT_OFFSET(tlb_flush) },
-	{ "invlpg", STAT_OFFSET(invlpg) },
-	{ "exits", STAT_OFFSET(exits) },
-	{ "io_exits", STAT_OFFSET(io_exits) },
-	{ "mmio_exits", STAT_OFFSET(mmio_exits) },
-	{ "signal_exits", STAT_OFFSET(signal_exits) },
-	{ "irq_window", STAT_OFFSET(irq_window_exits) },
-	{ "halt_exits", STAT_OFFSET(halt_exits) },
-	{ "halt_wakeup", STAT_OFFSET(halt_wakeup) },
-	{ "request_irq", STAT_OFFSET(request_irq_exits) },
-	{ "irq_exits", STAT_OFFSET(irq_exits) },
-	{ "light_exits", STAT_OFFSET(light_exits) },
-	{ "efer_reload", STAT_OFFSET(efer_reload) },
-	{ NULL }
-};
-
-static struct dentry *debugfs_dir;
-
-#define MAX_IO_MSRS 256
-
-#define CR0_RESERVED_BITS \
-	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
-			| X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
-			| X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
-#define CR4_RESERVED_BITS \
-	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
-			| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
-			| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
-			| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
-
-#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-#define EFER_RESERVED_BITS 0xfffffffffffff2fe
-
-#ifdef CONFIG_X86_64
-// LDT or TSS descriptor in the GDT. 16 bytes.
-struct segment_descriptor_64 { - struct segment_descriptor s; - u32 base_higher; - u32 pad_zero; -}; - -#endif - -static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, - unsigned long arg); - -unsigned long segment_base(u16 selector) -{ - struct descriptor_table gdt; - struct segment_descriptor *d; - unsigned long table_base; - typedef unsigned long ul; - unsigned long v; - - if (selector == 0) - return 0; - - asm ("sgdt %0" : "=m"(gdt)); - table_base = gdt.base; - - if (selector & 4) { /* from ldt */ - u16 ldt_selector; - - asm ("sldt %0" : "=g"(ldt_selector)); - table_base = segment_base(ldt_selector); - } - d = (struct segment_descriptor *)(table_base + (selector & ~7)); - v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24); -#ifdef CONFIG_X86_64 - if (d->system == 0 - && (d->type == 2 || d->type == 9 || d->type == 11)) - v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32; -#endif - return v; -} -EXPORT_SYMBOL_GPL(segment_base); - -static inline int valid_vcpu(int n) -{ - return likely(n >= 0 && n < KVM_MAX_VCPUS); -} - -void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) -{ - if (!vcpu->fpu_active || vcpu->guest_fpu_loaded) - return; - - vcpu->guest_fpu_loaded = 1; - fx_save(&vcpu->host_fx_image); - fx_restore(&vcpu->guest_fx_image); -} -EXPORT_SYMBOL_GPL(kvm_load_guest_fpu); - -void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) -{ - if (!vcpu->guest_fpu_loaded) - return; - - vcpu->guest_fpu_loaded = 0; - fx_save(&vcpu->guest_fx_image); - fx_restore(&vcpu->host_fx_image); -} -EXPORT_SYMBOL_GPL(kvm_put_guest_fpu); - -/* - * Switches to specified vcpu, until a matching vcpu_put() - */ -static void vcpu_load(struct kvm_vcpu *vcpu) -{ - int cpu; - - mutex_lock(&vcpu->mutex); - cpu = get_cpu(); - preempt_notifier_register(&vcpu->preempt_notifier); - kvm_x86_ops->vcpu_load(vcpu, cpu); - put_cpu(); -} - -static void vcpu_put(struct kvm_vcpu *vcpu) -{ - preempt_disable(); - kvm_x86_ops->vcpu_put(vcpu); - preempt_notifier_unregister(&vcpu->preempt_notifier); - preempt_enable(); - mutex_unlock(&vcpu->mutex); -} - -static void ack_flush(void *_completed) -{ -} - -void kvm_flush_remote_tlbs(struct kvm *kvm) -{ - int i, cpu; - cpumask_t cpus; - struct kvm_vcpu *vcpu; - - cpus_clear(cpus); - for (i = 0; i < KVM_MAX_VCPUS; ++i) { - vcpu = kvm->vcpus[i]; - if (!vcpu) - continue; - if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests)) - continue; - cpu = vcpu->cpu; - if (cpu != -1 && cpu != raw_smp_processor_id()) - cpu_set(cpu, cpus); - } - smp_call_function_mask(cpus, ack_flush, NULL, 1); -} - -int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) -{ - struct page *page; - int r; - - mutex_init(&vcpu->mutex); - vcpu->cpu = -1; - vcpu->mmu.root_hpa = INVALID_PAGE; - vcpu->kvm = kvm; - vcpu->vcpu_id = id; - if (!irqchip_in_kernel(kvm) || id == 0) - vcpu->mp_state = VCPU_MP_STATE_RUNNABLE; - else - vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED; - init_waitqueue_head(&vcpu->wq); - - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) { - r = -ENOMEM; - goto fail; - } - vcpu->run = page_address(page); - - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) { - r = -ENOMEM; - goto fail_free_run; - } - vcpu->pio_data = page_address(page); - - r = kvm_mmu_create(vcpu); - if (r < 0) - goto fail_free_pio_data; - - return 0; - -fail_free_pio_data: - free_page((unsigned long)vcpu->pio_data); -fail_free_run: - free_page((unsigned long)vcpu->run); -fail: - return -ENOMEM; -} -EXPORT_SYMBOL_GPL(kvm_vcpu_init); - -void kvm_vcpu_uninit(struct kvm_vcpu 
*vcpu) -{ - kvm_mmu_destroy(vcpu); - if (vcpu->apic) - hrtimer_cancel(&vcpu->apic->timer.dev); - kvm_free_apic(vcpu->apic); - free_page((unsigned long)vcpu->pio_data); - free_page((unsigned long)vcpu->run); -} -EXPORT_SYMBOL_GPL(kvm_vcpu_uninit); - -static struct kvm *kvm_create_vm(void) -{ - struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); - - if (!kvm) - return ERR_PTR(-ENOMEM); - - kvm_io_bus_init(&kvm->pio_bus); - mutex_init(&kvm->lock); - INIT_LIST_HEAD(&kvm->active_mmu_pages); - kvm_io_bus_init(&kvm->mmio_bus); - spin_lock(&kvm_lock); - list_add(&kvm->vm_list, &vm_list); - spin_unlock(&kvm_lock); - return kvm; -} - -/* - * Free any memory in @free but not in @dont. - */ -static void kvm_free_physmem_slot(struct kvm_memory_slot *free, - struct kvm_memory_slot *dont) -{ - int i; - - if (!dont || free->phys_mem != dont->phys_mem) - if (free->phys_mem) { - for (i = 0; i < free->npages; ++i) - if (free->phys_mem[i]) - __free_page(free->phys_mem[i]); - vfree(free->phys_mem); - } - - if (!dont || free->dirty_bitmap != dont->dirty_bitmap) - vfree(free->dirty_bitmap); - - free->phys_mem = NULL; - free->npages = 0; - free->dirty_bitmap = NULL; -} - -static void kvm_free_physmem(struct kvm *kvm) -{ - int i; - - for (i = 0; i < kvm->nmemslots; ++i) - kvm_free_physmem_slot(&kvm->memslots[i], NULL); -} - -static void free_pio_guest_pages(struct kvm_vcpu *vcpu) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i) - if (vcpu->pio.guest_pages[i]) { - __free_page(vcpu->pio.guest_pages[i]); - vcpu->pio.guest_pages[i] = NULL; - } -} - -static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) -{ - vcpu_load(vcpu); - kvm_mmu_unload(vcpu); - vcpu_put(vcpu); -} - -static void kvm_free_vcpus(struct kvm *kvm) -{ - unsigned int i; - - /* - * Unpin any mmu pages first. - */ - for (i = 0; i < KVM_MAX_VCPUS; ++i) - if (kvm->vcpus[i]) - kvm_unload_vcpu_mmu(kvm->vcpus[i]); - for (i = 0; i < KVM_MAX_VCPUS; ++i) { - if (kvm->vcpus[i]) { - kvm_x86_ops->vcpu_free(kvm->vcpus[i]); - kvm->vcpus[i] = NULL; - } - } - -} - -static void kvm_destroy_vm(struct kvm *kvm) -{ - spin_lock(&kvm_lock); - list_del(&kvm->vm_list); - spin_unlock(&kvm_lock); - kvm_io_bus_destroy(&kvm->pio_bus); - kvm_io_bus_destroy(&kvm->mmio_bus); - kfree(kvm->vpic); - kfree(kvm->vioapic); - kvm_free_vcpus(kvm); - kvm_free_physmem(kvm); - kfree(kvm); -} - -static int kvm_vm_release(struct inode *inode, struct file *filp) -{ - struct kvm *kvm = filp->private_data; - - kvm_destroy_vm(kvm); - return 0; -} - -static void inject_gp(struct kvm_vcpu *vcpu) -{ - kvm_x86_ops->inject_gp(vcpu, 0); -} - -/* - * Load the pae pdptrs. Return true is they are all valid. 
- */ -static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) -{ - gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; - unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; - int i; - u64 *pdpt; - int ret; - struct page *page; - u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)]; - - mutex_lock(&vcpu->kvm->lock); - page = gfn_to_page(vcpu->kvm, pdpt_gfn); - if (!page) { - ret = 0; - goto out; - } - - pdpt = kmap_atomic(page, KM_USER0); - memcpy(pdpte, pdpt+offset, sizeof(pdpte)); - kunmap_atomic(pdpt, KM_USER0); - - for (i = 0; i < ARRAY_SIZE(pdpte); ++i) { - if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) { - ret = 0; - goto out; - } - } - ret = 1; - - memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs)); -out: - mutex_unlock(&vcpu->kvm->lock); - - return ret; -} - -void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) -{ - if (cr0 & CR0_RESERVED_BITS) { - printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n", - cr0, vcpu->cr0); - inject_gp(vcpu); - return; - } - - if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) { - printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n"); - inject_gp(vcpu); - return; - } - - if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) { - printk(KERN_DEBUG "set_cr0: #GP, set PG flag " - "and a clear PE flag\n"); - inject_gp(vcpu); - return; - } - - if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { -#ifdef CONFIG_X86_64 - if ((vcpu->shadow_efer & EFER_LME)) { - int cs_db, cs_l; - - if (!is_pae(vcpu)) { - printk(KERN_DEBUG "set_cr0: #GP, start paging " - "in long mode while PAE is disabled\n"); - inject_gp(vcpu); - return; - } - kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); - if (cs_l) { - printk(KERN_DEBUG "set_cr0: #GP, start paging " - "in long mode while CS.L == 1\n"); - inject_gp(vcpu); - return; - - } - } else -#endif - if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) { - printk(KERN_DEBUG "set_cr0: #GP, pdptrs " - "reserved bits\n"); - inject_gp(vcpu); - return; - } - - } - - kvm_x86_ops->set_cr0(vcpu, cr0); - vcpu->cr0 = cr0; - - mutex_lock(&vcpu->kvm->lock); - kvm_mmu_reset_context(vcpu); - mutex_unlock(&vcpu->kvm->lock); - return; -} -EXPORT_SYMBOL_GPL(set_cr0); - -void lmsw(struct kvm_vcpu *vcpu, unsigned long msw) -{ - set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f)); -} -EXPORT_SYMBOL_GPL(lmsw); - -void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) -{ - if (cr4 & CR4_RESERVED_BITS) { - printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n"); - inject_gp(vcpu); - return; - } - - if (is_long_mode(vcpu)) { - if (!(cr4 & X86_CR4_PAE)) { - printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while " - "in long mode\n"); - inject_gp(vcpu); - return; - } - } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE) - && !load_pdptrs(vcpu, vcpu->cr3)) { - printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n"); - inject_gp(vcpu); - return; - } - - if (cr4 & X86_CR4_VMXE) { - printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n"); - inject_gp(vcpu); - return; - } - kvm_x86_ops->set_cr4(vcpu, cr4); - vcpu->cr4 = cr4; - mutex_lock(&vcpu->kvm->lock); - kvm_mmu_reset_context(vcpu); - mutex_unlock(&vcpu->kvm->lock); -} -EXPORT_SYMBOL_GPL(set_cr4); - -void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) -{ - if (is_long_mode(vcpu)) { - if (cr3 & CR3_L_MODE_RESERVED_BITS) { - printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n"); - inject_gp(vcpu); - return; - } - } else { - if (is_pae(vcpu)) { - if (cr3 & CR3_PAE_RESERVED_BITS) { - printk(KERN_DEBUG - "set_cr3: #GP, reserved bits\n"); - inject_gp(vcpu); - return; - } - if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) { - 
printk(KERN_DEBUG "set_cr3: #GP, pdptrs " - "reserved bits\n"); - inject_gp(vcpu); - return; - } - } else { - if (cr3 & CR3_NONPAE_RESERVED_BITS) { - printk(KERN_DEBUG - "set_cr3: #GP, reserved bits\n"); - inject_gp(vcpu); - return; - } - } - } - - mutex_lock(&vcpu->kvm->lock); - /* - * Does the new cr3 value map to physical memory? (Note, we - * catch an invalid cr3 even in real-mode, because it would - * cause trouble later on when we turn on paging anyway.) - * - * A real CPU would silently accept an invalid cr3 and would - * attempt to use it - with largely undefined (and often hard - * to debug) behavior on the guest side. - */ - if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT))) - inject_gp(vcpu); - else { - vcpu->cr3 = cr3; - vcpu->mmu.new_cr3(vcpu); - } - mutex_unlock(&vcpu->kvm->lock); -} -EXPORT_SYMBOL_GPL(set_cr3); - -void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) -{ - if (cr8 & CR8_RESERVED_BITS) { - printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8); - inject_gp(vcpu); - return; - } - if (irqchip_in_kernel(vcpu->kvm)) - kvm_lapic_set_tpr(vcpu, cr8); - else - vcpu->cr8 = cr8; -} -EXPORT_SYMBOL_GPL(set_cr8); - -unsigned long get_cr8(struct kvm_vcpu *vcpu) -{ - if (irqchip_in_kernel(vcpu->kvm)) - return kvm_lapic_get_cr8(vcpu); - else - return vcpu->cr8; -} -EXPORT_SYMBOL_GPL(get_cr8); - -u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) -{ - if (irqchip_in_kernel(vcpu->kvm)) - return vcpu->apic_base; - else - return vcpu->apic_base; -} -EXPORT_SYMBOL_GPL(kvm_get_apic_base); - -void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) -{ - /* TODO: reserve bits check */ - if (irqchip_in_kernel(vcpu->kvm)) - kvm_lapic_set_base(vcpu, data); - else - vcpu->apic_base = data; -} -EXPORT_SYMBOL_GPL(kvm_set_apic_base); - -void fx_init(struct kvm_vcpu *vcpu) -{ - unsigned after_mxcsr_mask; - - /* Initialize guest FPU by resetting ours and saving into guest's */ - preempt_disable(); - fx_save(&vcpu->host_fx_image); - fpu_init(); - fx_save(&vcpu->guest_fx_image); - fx_restore(&vcpu->host_fx_image); - preempt_enable(); - - vcpu->cr0 |= X86_CR0_ET; - after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space); - vcpu->guest_fx_image.mxcsr = 0x1f80; - memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask, - 0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask); -} -EXPORT_SYMBOL_GPL(fx_init); - -/* - * Allocate some memory and give it an address in the guest physical address - * space. - * - * Discontiguous memory is allowed, mostly for framebuffers. - */ -static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, - struct kvm_memory_region *mem) -{ - int r; - gfn_t base_gfn; - unsigned long npages; - unsigned long i; - struct kvm_memory_slot *memslot; - struct kvm_memory_slot old, new; - - r = -EINVAL; - /* General sanity checks */ - if (mem->memory_size & (PAGE_SIZE - 1)) - goto out; - if (mem->guest_phys_addr & (PAGE_SIZE - 1)) - goto out; - if (mem->slot >= KVM_MEMORY_SLOTS) - goto out; - if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr) - goto out; - - memslot = &kvm->memslots[mem->slot]; - base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; - npages = mem->memory_size >> PAGE_SHIFT; - - if (!npages) - mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES; - - mutex_lock(&kvm->lock); - - new = old = *memslot; - - new.base_gfn = base_gfn; - new.npages = npages; - new.flags = mem->flags; - - /* Disallow changing a memory slot's size. 
*/ - r = -EINVAL; - if (npages && old.npages && npages != old.npages) - goto out_unlock; - - /* Check for overlaps */ - r = -EEXIST; - for (i = 0; i < KVM_MEMORY_SLOTS; ++i) { - struct kvm_memory_slot *s = &kvm->memslots[i]; - - if (s == memslot) - continue; - if (!((base_gfn + npages <= s->base_gfn) || - (base_gfn >= s->base_gfn + s->npages))) - goto out_unlock; - } - - /* Deallocate if slot is being removed */ - if (!npages) - new.phys_mem = NULL; - - /* Free page dirty bitmap if unneeded */ - if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES)) - new.dirty_bitmap = NULL; - - r = -ENOMEM; - - /* Allocate if a slot is being created */ - if (npages && !new.phys_mem) { - new.phys_mem = vmalloc(npages * sizeof(struct page *)); - - if (!new.phys_mem) - goto out_unlock; - - memset(new.phys_mem, 0, npages * sizeof(struct page *)); - for (i = 0; i < npages; ++i) { - new.phys_mem[i] = alloc_page(GFP_HIGHUSER - | __GFP_ZERO); - if (!new.phys_mem[i]) - goto out_unlock; - set_page_private(new.phys_mem[i],0); - } - } - - /* Allocate page dirty bitmap if needed */ - if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) { - unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8; - - new.dirty_bitmap = vmalloc(dirty_bytes); - if (!new.dirty_bitmap) - goto out_unlock; - memset(new.dirty_bitmap, 0, dirty_bytes); - } - - if (mem->slot >= kvm->nmemslots) - kvm->nmemslots = mem->slot + 1; - - *memslot = new; - - kvm_mmu_slot_remove_write_access(kvm, mem->slot); - kvm_flush_remote_tlbs(kvm); - - mutex_unlock(&kvm->lock); - - kvm_free_physmem_slot(&old, &new); - return 0; - -out_unlock: - mutex_unlock(&kvm->lock); - kvm_free_physmem_slot(&new, &old); -out: - return r; -} - -/* - * Get (and clear) the dirty memory log for a memory slot. - */ -static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, - struct kvm_dirty_log *log) -{ - struct kvm_memory_slot *memslot; - int r, i; - int n; - unsigned long any = 0; - - mutex_lock(&kvm->lock); - - r = -EINVAL; - if (log->slot >= KVM_MEMORY_SLOTS) - goto out; - - memslot = &kvm->memslots[log->slot]; - r = -ENOENT; - if (!memslot->dirty_bitmap) - goto out; - - n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; - - for (i = 0; !any && i < n/sizeof(long); ++i) - any = memslot->dirty_bitmap[i]; - - r = -EFAULT; - if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) - goto out; - - /* If nothing is dirty, don't bother messing with page tables. */ - if (any) { - kvm_mmu_slot_remove_write_access(kvm, log->slot); - kvm_flush_remote_tlbs(kvm); - memset(memslot->dirty_bitmap, 0, n); - } - - r = 0; - -out: - mutex_unlock(&kvm->lock); - return r; -} - -/* - * Set a new alias region. Aliases map a portion of physical memory into - * another portion. This is useful for memory windows, for example the PC - * VGA region. 
- */ -static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm, - struct kvm_memory_alias *alias) -{ - int r, n; - struct kvm_mem_alias *p; - - r = -EINVAL; - /* General sanity checks */ - if (alias->memory_size & (PAGE_SIZE - 1)) - goto out; - if (alias->guest_phys_addr & (PAGE_SIZE - 1)) - goto out; - if (alias->slot >= KVM_ALIAS_SLOTS) - goto out; - if (alias->guest_phys_addr + alias->memory_size - < alias->guest_phys_addr) - goto out; - if (alias->target_phys_addr + alias->memory_size - < alias->target_phys_addr) - goto out; - - mutex_lock(&kvm->lock); - - p = &kvm->aliases[alias->slot]; - p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT; - p->npages = alias->memory_size >> PAGE_SHIFT; - p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT; - - for (n = KVM_ALIAS_SLOTS; n > 0; --n) - if (kvm->aliases[n - 1].npages) - break; - kvm->naliases = n; - - kvm_mmu_zap_all(kvm); - - mutex_unlock(&kvm->lock); - - return 0; - -out: - return r; -} - -static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) -{ - int r; - - r = 0; - switch (chip->chip_id) { - case KVM_IRQCHIP_PIC_MASTER: - memcpy (&chip->chip.pic, - &pic_irqchip(kvm)->pics[0], - sizeof(struct kvm_pic_state)); - break; - case KVM_IRQCHIP_PIC_SLAVE: - memcpy (&chip->chip.pic, - &pic_irqchip(kvm)->pics[1], - sizeof(struct kvm_pic_state)); - break; - case KVM_IRQCHIP_IOAPIC: - memcpy (&chip->chip.ioapic, - ioapic_irqchip(kvm), - sizeof(struct kvm_ioapic_state)); - break; - default: - r = -EINVAL; - break; - } - return r; -} - -static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip) -{ - int r; - - r = 0; - switch (chip->chip_id) { - case KVM_IRQCHIP_PIC_MASTER: - memcpy (&pic_irqchip(kvm)->pics[0], - &chip->chip.pic, - sizeof(struct kvm_pic_state)); - break; - case KVM_IRQCHIP_PIC_SLAVE: - memcpy (&pic_irqchip(kvm)->pics[1], - &chip->chip.pic, - sizeof(struct kvm_pic_state)); - break; - case KVM_IRQCHIP_IOAPIC: - memcpy (ioapic_irqchip(kvm), - &chip->chip.ioapic, - sizeof(struct kvm_ioapic_state)); - break; - default: - r = -EINVAL; - break; - } - kvm_pic_update_irq(pic_irqchip(kvm)); - return r; -} - -static gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) -{ - int i; - struct kvm_mem_alias *alias; - - for (i = 0; i < kvm->naliases; ++i) { - alias = &kvm->aliases[i]; - if (gfn >= alias->base_gfn - && gfn < alias->base_gfn + alias->npages) - return alias->target_gfn + gfn - alias->base_gfn; - } - return gfn; -} - -static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn) -{ - int i; - - for (i = 0; i < kvm->nmemslots; ++i) { - struct kvm_memory_slot *memslot = &kvm->memslots[i]; - - if (gfn >= memslot->base_gfn - && gfn < memslot->base_gfn + memslot->npages) - return memslot; - } - return NULL; -} - -struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) -{ - gfn = unalias_gfn(kvm, gfn); - return __gfn_to_memslot(kvm, gfn); -} - -struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) -{ - struct kvm_memory_slot *slot; - - gfn = unalias_gfn(kvm, gfn); - slot = __gfn_to_memslot(kvm, gfn); - if (!slot) - return NULL; - return slot->phys_mem[gfn - slot->base_gfn]; -} -EXPORT_SYMBOL_GPL(gfn_to_page); - -/* WARNING: Does not work on aliased pages. 
*/ -void mark_page_dirty(struct kvm *kvm, gfn_t gfn) -{ - struct kvm_memory_slot *memslot; - - memslot = __gfn_to_memslot(kvm, gfn); - if (memslot && memslot->dirty_bitmap) { - unsigned long rel_gfn = gfn - memslot->base_gfn; - - /* avoid RMW */ - if (!test_bit(rel_gfn, memslot->dirty_bitmap)) - set_bit(rel_gfn, memslot->dirty_bitmap); - } -} - -int emulator_read_std(unsigned long addr, - void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu) -{ - void *data = val; - - while (bytes) { - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); - unsigned offset = addr & (PAGE_SIZE-1); - unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset); - unsigned long pfn; - struct page *page; - void *page_virt; - - if (gpa == UNMAPPED_GVA) - return X86EMUL_PROPAGATE_FAULT; - pfn = gpa >> PAGE_SHIFT; - page = gfn_to_page(vcpu->kvm, pfn); - if (!page) - return X86EMUL_UNHANDLEABLE; - page_virt = kmap_atomic(page, KM_USER0); - - memcpy(data, page_virt + offset, tocopy); - - kunmap_atomic(page_virt, KM_USER0); - - bytes -= tocopy; - data += tocopy; - addr += tocopy; - } - - return X86EMUL_CONTINUE; -} -EXPORT_SYMBOL_GPL(emulator_read_std); - -static int emulator_write_std(unsigned long addr, - const void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu) -{ - pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes); - return X86EMUL_UNHANDLEABLE; -} - -/* - * Only apic need an MMIO device hook, so shortcut now.. - */ -static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu, - gpa_t addr) -{ - struct kvm_io_device *dev; - - if (vcpu->apic) { - dev = &vcpu->apic->dev; - if (dev->in_range(dev, addr)) - return dev; - } - return NULL; -} - -static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu, - gpa_t addr) -{ - struct kvm_io_device *dev; - - dev = vcpu_find_pervcpu_dev(vcpu, addr); - if (dev == NULL) - dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr); - return dev; -} - -static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu, - gpa_t addr) -{ - return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr); -} - -static int emulator_read_emulated(unsigned long addr, - void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu) -{ - struct kvm_io_device *mmio_dev; - gpa_t gpa; - - if (vcpu->mmio_read_completed) { - memcpy(val, vcpu->mmio_data, bytes); - vcpu->mmio_read_completed = 0; - return X86EMUL_CONTINUE; - } else if (emulator_read_std(addr, val, bytes, vcpu) - == X86EMUL_CONTINUE) - return X86EMUL_CONTINUE; - - gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); - if (gpa == UNMAPPED_GVA) - return X86EMUL_PROPAGATE_FAULT; - - /* - * Is this MMIO handled locally? 
- */ - mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); - if (mmio_dev) { - kvm_iodevice_read(mmio_dev, gpa, bytes, val); - return X86EMUL_CONTINUE; - } - - vcpu->mmio_needed = 1; - vcpu->mmio_phys_addr = gpa; - vcpu->mmio_size = bytes; - vcpu->mmio_is_write = 0; - - return X86EMUL_UNHANDLEABLE; -} - -static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, - const void *val, int bytes) -{ - struct page *page; - void *virt; - - if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT)) - return 0; - page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT); - if (!page) - return 0; - mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT); - virt = kmap_atomic(page, KM_USER0); - kvm_mmu_pte_write(vcpu, gpa, val, bytes); - memcpy(virt + offset_in_page(gpa), val, bytes); - kunmap_atomic(virt, KM_USER0); - return 1; -} - -static int emulator_write_emulated_onepage(unsigned long addr, - const void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu) -{ - struct kvm_io_device *mmio_dev; - gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr); - - if (gpa == UNMAPPED_GVA) { - kvm_x86_ops->inject_page_fault(vcpu, addr, 2); - return X86EMUL_PROPAGATE_FAULT; - } - - if (emulator_write_phys(vcpu, gpa, val, bytes)) - return X86EMUL_CONTINUE; - - /* - * Is this MMIO handled locally? - */ - mmio_dev = vcpu_find_mmio_dev(vcpu, gpa); - if (mmio_dev) { - kvm_iodevice_write(mmio_dev, gpa, bytes, val); - return X86EMUL_CONTINUE; - } - - vcpu->mmio_needed = 1; - vcpu->mmio_phys_addr = gpa; - vcpu->mmio_size = bytes; - vcpu->mmio_is_write = 1; - memcpy(vcpu->mmio_data, val, bytes); - - return X86EMUL_CONTINUE; -} - -int emulator_write_emulated(unsigned long addr, - const void *val, - unsigned int bytes, - struct kvm_vcpu *vcpu) -{ - /* Crossing a page boundary? */ - if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { - int rc, now; - - now = -addr & ~PAGE_MASK; - rc = emulator_write_emulated_onepage(addr, val, now, vcpu); - if (rc != X86EMUL_CONTINUE) - return rc; - addr += now; - val += now; - bytes -= now; - } - return emulator_write_emulated_onepage(addr, val, bytes, vcpu); -} -EXPORT_SYMBOL_GPL(emulator_write_emulated); - -static int emulator_cmpxchg_emulated(unsigned long addr, - const void *old, - const void *new, - unsigned int bytes, - struct kvm_vcpu *vcpu) -{ - static int reported; - - if (!reported) { - reported = 1; - printk(KERN_WARNING "kvm: emulating exchange as write\n"); - } - return emulator_write_emulated(addr, new, bytes, vcpu); -} - -static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) -{ - return kvm_x86_ops->get_segment_base(vcpu, seg); -} - -int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address) -{ - return X86EMUL_CONTINUE; -} - -int emulate_clts(struct kvm_vcpu *vcpu) -{ - kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS); - return X86EMUL_CONTINUE; -} - -int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest) -{ - struct kvm_vcpu *vcpu = ctxt->vcpu; - - switch (dr) { - case 0 ... 3: - *dest = kvm_x86_ops->get_dr(vcpu, dr); - return X86EMUL_CONTINUE; - default: - pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr); - return X86EMUL_UNHANDLEABLE; - } -} - -int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) -{ - unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? 
~0ULL : ~0U; - int exception; - - kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception); - if (exception) { - /* FIXME: better handling */ - return X86EMUL_UNHANDLEABLE; - } - return X86EMUL_CONTINUE; -} - -void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) -{ - static int reported; - u8 opcodes[4]; - unsigned long rip = vcpu->rip; - unsigned long rip_linear; - - rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS); - - if (reported) - return; - - emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu); - - printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n", - context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]); - reported = 1; -} -EXPORT_SYMBOL_GPL(kvm_report_emulation_failure); - -struct x86_emulate_ops emulate_ops = { - .read_std = emulator_read_std, - .write_std = emulator_write_std, - .read_emulated = emulator_read_emulated, - .write_emulated = emulator_write_emulated, - .cmpxchg_emulated = emulator_cmpxchg_emulated, -}; - -int emulate_instruction(struct kvm_vcpu *vcpu, - struct kvm_run *run, - unsigned long cr2, - u16 error_code) -{ - struct x86_emulate_ctxt emulate_ctxt; - int r; - int cs_db, cs_l; - - vcpu->mmio_fault_cr2 = cr2; - kvm_x86_ops->cache_regs(vcpu); - - kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); - - emulate_ctxt.vcpu = vcpu; - emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu); - emulate_ctxt.cr2 = cr2; |