Diffstat (limited to 'arch/mips/kvm/kvm_tlb.c')
-rw-r--r--  arch/mips/kvm/kvm_tlb.c | 206
1 file changed, 38 insertions(+), 168 deletions(-)
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index c777dd36d4a..8a5a700ad8d 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -10,7 +10,6 @@
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
@@ -25,6 +24,7 @@
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/tlb.h>
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
@@ -35,9 +35,6 @@
#define PRIx64 "llx"
-/* Use VZ EntryHi.EHINV to invalidate TLB entries */
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
-
atomic_t kvm_mips_instance;
EXPORT_SYMBOL(kvm_mips_instance);
@@ -147,30 +144,6 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
}
}
-void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
-{
- int i;
- volatile struct kvm_mips_tlb tlb;
-
- printk("Shadow TLBs:\n");
- for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
- tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i];
- printk("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
- i, tlb.tlb_hi);
- printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
- (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo0 >> 3) & 7);
- printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
- (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
- }
-}
-
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
int srcu_idx, err = 0;
@@ -249,26 +222,19 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
return -1;
}
- if (idx < 0) {
- idx = read_c0_random() % current_cpu_data.tlbsize;
- write_c0_index(idx);
- mtc0_tlbw_hazard();
- }
write_c0_entrylo0(entrylo0);
write_c0_entrylo1(entrylo1);
mtc0_tlbw_hazard();
- tlb_write_indexed();
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
tlbw_use_hazard();
-#ifdef DEBUG
- if (debug) {
- kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
- "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
- vcpu->arch.pc, idx, read_c0_entryhi(),
- read_c0_entrylo0(), read_c0_entrylo1());
- }
-#endif
+ kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+ vcpu->arch.pc, idx, read_c0_entryhi(),
+ read_c0_entrylo0(), read_c0_entrylo1());
/* Flush D-cache */
if (flush_dcache_mask) {
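
The hunk above stops emulating a random TLB write in software (read_c0_random() % tlbsize followed by an indexed write) and instead lets the hardware tlbwr instruction pick the victim slot when no index is supplied; it also drops the #ifdef DEBUG guard, relying on kvm_debug()'s own dynamic-debug gating. A minimal user-space sketch of the resulting control flow follows; the write_c0_*, tlb_write_* and *_hazard stubs stand in for the real MIPS CP0 accessor macros and are illustrative assumptions, not kernel code:

#include <stdio.h>

static void write_c0_entrylo0(unsigned long v) { printf("entrylo0 <- %#lx\n", v); }
static void write_c0_entrylo1(unsigned long v) { printf("entrylo1 <- %#lx\n", v); }
static void mtc0_tlbw_hazard(void) { /* hazard barrier: no-op in the model */ }
static void tlbw_use_hazard(void)  { /* hazard barrier: no-op in the model */ }
static void tlb_write_random(void)  { puts("tlbwr: hardware chooses the slot"); }
static void tlb_write_indexed(void) { puts("tlbwi: write at c0_index"); }

/* After the patch, a negative index means "let tlbwr pick the entry". */
static void host_tlb_write(int idx, unsigned long lo0, unsigned long lo1)
{
	write_c0_entrylo0(lo0);
	write_c0_entrylo1(lo1);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();	/* replaces: read_c0_random() % tlbsize + tlbwi */
	else
		tlb_write_indexed();
	tlbw_use_hazard();
}

int main(void)
{
	host_tlb_write(-1, 0x1f, 0x3f);	/* miss: no existing entry to replace */
	host_tlb_write(12, 0x1f, 0x3f);	/* hit: overwrite matching entry 12 */
	return 0;
}
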
@@ -375,11 +341,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
mtc0_tlbw_hazard();
tlbw_use_hazard();
-#ifdef DEBUG
kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
read_c0_entrylo0(), read_c0_entrylo1());
-#endif
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
@@ -427,10 +391,8 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
-#ifdef DEBUG
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
tlb->tlb_lo0, tlb->tlb_lo1);
-#endif
return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
tlb->tlb_mask);
@@ -451,10 +413,8 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
}
}
-#ifdef DEBUG
kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
__func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
-#endif
return index;
}
@@ -488,9 +448,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
local_irq_restore(flags);
-#ifdef DEBUG
kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
-#endif
return idx;
}
@@ -535,12 +493,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
local_irq_restore(flags);
-#ifdef DEBUG
- if (idx > 0) {
+ if (idx > 0)
kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
- (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
- }
-#endif
+ (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
return 0;
}
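
The invalidation path now prints the user ASID through kvm_mips_get_user_asid() rather than open-coding a lookup in vcpu->arch.asid_map. The helper itself is not part of this hunk; in kernels of this vintage it masks the current CPU's guest user ASID, roughly as in the standalone sketch below. The struct layout and the 8-bit ASID_MASK here are simplified stand-ins, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

#define ASID_MASK 0xff				/* assumed: 8-bit ASIDs */

struct vcpu_arch { uint32_t guest_user_asid[1]; };	/* one entry per CPU */
struct kvm_vcpu  { struct vcpu_arch arch; };

static int smp_processor_id(void) { return 0; }		/* single-CPU stub */

/* Hedged reconstruction: mask the current CPU's guest user ASID. */
static uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}

int main(void)
{
	struct kvm_vcpu v = { .arch = { .guest_user_asid = { 0x1a5 } } };
	printf("user asid: %#x\n", (unsigned)kvm_mips_get_user_asid(&v));	/* 0xa5 */
	return 0;
}
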
@@ -657,70 +612,6 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
-void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu)
-{
- unsigned long flags;
- unsigned long old_entryhi;
- unsigned long old_pagemask;
- int entry = 0;
- int cpu = smp_processor_id();
-
- local_irq_save(flags);
-
- old_entryhi = read_c0_entryhi();
- old_pagemask = read_c0_pagemask();
-
- for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
- write_c0_index(entry);
- mtc0_tlbw_hazard();
- tlb_read();
- tlbw_use_hazard();
-
- vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi();
- vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0();
- vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1();
- vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask();
- }
-
- write_c0_entryhi(old_entryhi);
- write_c0_pagemask(old_pagemask);
- mtc0_tlbw_hazard();
-
- local_irq_restore(flags);
-
-}
-
-void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu)
-{
- unsigned long flags;
- unsigned long old_ctx;
- int entry;
- int cpu = smp_processor_id();
-
- local_irq_save(flags);
-
- old_ctx = read_c0_entryhi();
-
- for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
- write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi);
- mtc0_tlbw_hazard();
- write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0);
- write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
-
- write_c0_index(entry);
- mtc0_tlbw_hazard();
-
- tlb_write_indexed();
- tlbw_use_hazard();
- }
-
- tlbw_use_hazard();
- write_c0_entryhi(old_ctx);
- mtc0_tlbw_hazard();
- local_irq_restore(flags);
-}
-
-
void kvm_local_flush_tlb_all(void)
{
unsigned long flags;
@@ -749,28 +640,21 @@ void kvm_local_flush_tlb_all(void)
local_irq_restore(flags);
}
-void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu)
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu: Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
- int cpu, entry;
-
- for_each_possible_cpu(cpu) {
- for (entry = 0; entry < current_cpu_data.tlbsize; entry++) {
- vcpu->arch.shadow_tlb[cpu][entry].tlb_hi =
- UNIQUE_ENTRYHI(entry);
- vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0;
- vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0;
- vcpu->arch.shadow_tlb[cpu][entry].tlb_mask =
- read_c0_pagemask();
-#ifdef DEBUG
- kvm_debug
- ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n",
- cpu, entry,
- vcpu->arch.shadow_tlb[cpu][entry].tlb_hi,
- vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0,
- vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1);
-#endif
- }
- }
+ if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+ hrtimer_restart(&vcpu->arch.comparecount_timer);
}
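
hrtimer_cancel() returns whether the timer was active when it was cancelled, so the conditional restart above re-arms the timer on the new CPU only if it was actually running; a stopped timer stays stopped. A toy model of that contract, with hypothetical toy_* helpers standing in for the hrtimer API:

#include <stdbool.h>
#include <stdio.h>

struct toy_timer { bool active; int cpu; };

static bool toy_cancel(struct toy_timer *t)
{
	bool was_active = t->active;
	t->active = false;		/* cancelled either way */
	return was_active;
}

static void toy_restart(struct toy_timer *t, int cpu)
{
	t->active = true;
	t->cpu = cpu;			/* re-arms on the CPU doing the restart */
}

static void migrate_count(struct toy_timer *t, int new_cpu)
{
	if (toy_cancel(t))		/* mirrors: if (hrtimer_cancel(...)) hrtimer_restart(...) */
		toy_restart(t, new_cpu);
}

int main(void)
{
	struct toy_timer t = { .active = true, .cpu = 0 };
	migrate_count(&t, 3);
	printf("active=%d cpu=%d\n", t.active, t.cpu);	/* active=1 cpu=3 */
	return 0;
}
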
/* Restore ASID once we are scheduled back after preemption */
@@ -779,9 +663,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
unsigned long flags;
int newasid = 0;
-#ifdef DEBUG
kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-#endif
/* Allocate new kernel and user ASIDs if needed */
@@ -797,26 +679,24 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->arch.guest_user_mm.context.asid[cpu];
newasid++;
- kvm_info("[%d]: cpu_context: %#lx\n", cpu,
- cpu_context(cpu, current->mm));
- kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
- cpu, vcpu->arch.guest_kernel_asid[cpu]);
- kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
- vcpu->arch.guest_user_asid[cpu]);
+ kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+ cpu_context(cpu, current->mm));
+ kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+ cpu, vcpu->arch.guest_kernel_asid[cpu]);
+ kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+ vcpu->arch.guest_user_asid[cpu]);
}
if (vcpu->arch.last_sched_cpu != cpu) {
- kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
- vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
- }
-
- /* Only reload shadow host TLB if new ASIDs haven't been allocated */
-#if 0
- if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) {
- kvm_mips_flush_host_tlb(0);
- kvm_shadow_tlb_load(vcpu);
+ kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+ vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+ /*
+ * Migrate the timer interrupt to the current CPU so that it
+ * always interrupts the guest and synchronously triggers a
+ * guest timer interrupt.
+ */
+ kvm_mips_migrate_count(vcpu);
}
-#endif
if (!newasid) {
/* If we preempted while the guest was executing, then reload the pre-empted ASID */
@@ -863,12 +743,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
vcpu->arch.preempt_entryhi = read_c0_entryhi();
vcpu->arch.last_sched_cpu = cpu;
-#if 0
- if ((atomic_read(&kvm_mips_instance) > 1)) {
- kvm_shadow_tlb_put(vcpu);
- }
-#endif
-
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
ASID_VERSION_MASK)) {
kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
@@ -930,10 +804,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL(kvm_local_flush_tlb_all);
-EXPORT_SYMBOL(kvm_shadow_tlb_put);
EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_init_shadow_tlb);
EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
@@ -941,8 +813,6 @@ EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
-EXPORT_SYMBOL(kvm_shadow_tlb_load);
-EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs);
EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
EXPORT_SYMBOL(kvm_get_inst);
EXPORT_SYMBOL(kvm_arch_vcpu_load);