Diffstat (limited to 'arch/ia64/kvm/vtlb.c')
-rw-r--r--	arch/ia64/kvm/vtlb.c | 57 +++++++++++++++++++++++++--------------------------------
1 file changed, 25 insertions(+), 32 deletions(-)
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 6b6307a3bd5..a7869f8f49a 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -135,7 +135,7 @@ struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
u64 rid;
rid = vcpu_get_rr(vcpu, va);
- rid = rid & RR_RID_MASK;;
+ rid = rid & RR_RID_MASK;
if (type == D_TLB) {
if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
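Besides dropping the stray double semicolon, this hunk shows the fast path: __vtr_lookup() only walks the dtrs array when vcpu_quick_region_check() says the VA's region has a translation register at all. A minimal sketch of that filter, assuming the conventional ia64 layout where the top three VA bits select one of eight regions; the real vcpu_quick_region_check() macro lives in the kvm/ia64 headers:

	/* Sketch only: va[63:61] names one of eight ia64 regions, and
	 * tr_regions keeps one bit per region that holds a TR entry. */
	static inline int quick_region_check_sketch(unsigned long tr_regions, u64 va)
	{
		return tr_regions & (1UL << (va >> 61));
	}

A set bit means at least one translation register covers that region, so the linear scan of dtrs/itrs is only paid when it can succeed.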
@@ -164,11 +164,11 @@ static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
unsigned long ps, gpaddr;
ps = itir_ps(itir);
+ rr.val = ia64_get_rr(ifa);
- gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
- (ifa & ((1UL << ps) - 1));
+ gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
+ (ifa & ((1UL << ps) - 1));
- rr.val = ia64_get_rr(ifa);
head = (struct thash_data *)ia64_thash(ifa);
head->etag = INVALID_TI_TAG;
ia64_mf();
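Beyond hoisting the rr.val read ahead of the gpaddr computation, the context around this hunk shows vhpt_insert()'s lock-free update protocol: the hash head's tag is forced to INVALID_TI_TAG and a memory fence (ia64_mf()) is issued before the entry is rewritten. A reduced sketch of that protocol; the struct layout beyond etag is illustrative, and the final publish step is not visible in this hunk but follows the same pattern:

	/* Illustrative stand-in for struct thash_data; only etag mirrors
	 * the real layout, and INVALID_TI_TAG's value is an assumption. */
	struct vhpt_entry_sketch {
		u64 page_flags;
		u64 itir;
		u64 etag;
		u64 gpaddr;
	};

	static void vhpt_entry_update_sketch(struct vhpt_entry_sketch *head,
					     u64 pte, u64 itir, u64 gpaddr, u64 tag)
	{
		head->etag = INVALID_TI_TAG;	/* take the entry out of service */
		ia64_mf();			/* order the kill before the rewrite */
		head->page_flags = pte;
		head->itir = itir;
		head->gpaddr = gpaddr;
		ia64_mf();
		head->etag = tag;		/* publish the completed entry */
	}

A hardware walker racing with the update sees either the old entry or an invalid tag, never a half-written mapping.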
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
u64 i, dirty_pages = 1;
u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
- spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+ vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
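The dirty-log lock changes type from the kernel's spinlock_t to vmm_spinlock_t. The VMM module runs in an isolated address space where the kernel spinlock implementation (and its lockdep/debug machinery) cannot be used, so the lock handed over by physical address must be a self-contained VMM type. A hypothetical minimal shape for such a lock; the real definition in the kvm/ia64 headers may differ:

	/* Hypothetical minimal VMM-side spinlock, for illustration only. */
	typedef struct {
		volatile unsigned int lock;	/* 0 = free, 1 = held */
	} vmm_spinlock_t_sketch;

	static inline void vmm_spin_lock_sketch(vmm_spinlock_t_sketch *lp)
	{
		while (__sync_lock_test_and_set(&lp->lock, 1))
			while (lp->lock)
				;	/* spin on plain reads between attempts */
	}

	static inline void vmm_spin_unlock_sketch(vmm_spinlock_t_sketch *lp)
	{
		__sync_lock_release(&lp->lock);
	}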
@@ -210,6 +210,7 @@ void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic();
ia64_itc(type, va, phy_pte, itir_ps(itir));
+ paravirt_dv_serialize_data();
ia64_set_psr(psr);
}
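thash_vhpt_insert() now issues paravirt_dv_serialize_data() after the ia64_itc() insert. On ia64 the .serialize.data directive makes data-dependency serialization explicit to the toolchain, and the paravirt wrapper exists so a hypervisor port can substitute its own behavior. A sketch of the presumed native expansion, assuming the wrapper forwards straight to the GCC intrinsic:

	/* Presumed native case: the paravirt hook collapses to what
	 * ia64_dv_serialize_data() emits.  Paravirtualized ports may
	 * route this through pv ops instead. */
	static inline void paravirt_dv_serialize_data_sketch(void)
	{
		asm volatile (".serialize.data");
	}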
@@ -253,8 +254,9 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
"(p7) st8 [%2]=r9;;"
"ssm psr.ic;;"
"srlz.d;;"
- /* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
- : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+ "ssm psr.i;;"
+ "srlz.d;;"
+ : "=&r"(ret) : "r"(iha), "r"(pte) : "memory");
return ret;
}
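Two distinct fixes land in guest_vhpt_lookup(): interrupts are re-enabled with ssm psr.i (each ssm followed by srlz.d to serialize the PSR update), and the output constraint gains an early clobber, "=&r", because the asm writes ret before its last read of iha and pte. Without the &, GCC may assign the output and an input to the same register. A hypothetical reduced example of that bug class:

	/* Hypothetical example: the asm writes its output before the last
	 * read of an input, so '&' must keep them in distinct registers. */
	static inline unsigned long zero_then_add(unsigned long in)
	{
		unsigned long out;
		asm ("mov %0 = r0;;\n\t"	/* output written first... */
		     "add %0 = %0, %1;;"	/* ...input still read afterwards */
		     : "=&r"(out)		/* early clobber prevents aliasing */
		     : "r"(in));
		return out;
	}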
@@ -412,16 +414,14 @@ u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
/*
* Purge overlap TCs and then insert the new entry to emulate itc ops.
- * Notes: Only TC entry can purge and insert.
- * 1 indicates this is MMIO
+ * Notes: Only TC entry can purge and insert.
*/
-int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
u64 ifa, int type)
{
u64 ps;
u64 phy_pte, io_mask, index;
union ia64_rr vrr, mrr;
- int ret = 0;
ps = itir_ps(itir);
vrr.val = vcpu_get_rr(v, ifa);
@@ -441,35 +441,29 @@ int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
phy_pte &= ~_PAGE_MA_MASK;
}
- if (pte & VTLB_PTE_IO)
- ret = 1;
-
vtlb_purge(v, ifa, ps);
vhpt_purge(v, ifa, ps);
- if (ps == mrr.ps) {
- if (!(pte&VTLB_PTE_IO)) {
- vhpt_insert(phy_pte, itir, ifa, pte);
- } else {
- vtlb_insert(v, pte, itir, ifa);
- vcpu_quick_region_set(VMX(v, tc_regions), ifa);
- }
- } else if (ps > mrr.ps) {
+ if ((ps != mrr.ps) || (pte & VTLB_PTE_IO)) {
vtlb_insert(v, pte, itir, ifa);
vcpu_quick_region_set(VMX(v, tc_regions), ifa);
- if (!(pte&VTLB_PTE_IO))
- vhpt_insert(phy_pte, itir, ifa, pte);
- } else {
+ }
+ if (pte & VTLB_PTE_IO)
+ return;
+
+ if (ps >= mrr.ps)
+ vhpt_insert(phy_pte, itir, ifa, pte);
+ else {
u64 psr;
phy_pte &= ~PAGE_FLAGS_RV_MASK;
psr = ia64_clear_ic();
ia64_itc(type, ifa, phy_pte, ps);
+ paravirt_dv_serialize_data();
ia64_set_psr(psr);
}
if (!(pte&VTLB_PTE_IO))
mark_pages_dirty(v, pte, ps);
- return ret;
}
/*
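Note the contract change in thash_purge_and_insert(): the old int return (the removed "1 indicates this is MMIO" note) is gone, so a caller that branched on the return value now has to test the PTE bit itself. A hypothetical call site sketching that caller-side adjustment:

	/* Hypothetical caller: with the void return, the MMIO distinction
	 * comes from the PTE bit rather than the return value. */
	static void emulate_itc_sketch(struct kvm_vcpu *v, u64 pte, u64 itir,
				       u64 ifa, int type)
	{
		thash_purge_and_insert(v, pte, itir, ifa, type);
		if (pte & VTLB_PTE_IO) {
			/* MMIO mapping: only a vTLB entry was inserted;
			 * the access itself takes the I/O emulation path */
		}
	}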
@@ -509,7 +503,6 @@ void thash_purge_all(struct kvm_vcpu *v)
local_flush_tlb_all();
}
-
/*
* Lookup the hash table and its collision chain to find an entry
* covering this address rid:va or the entry.
@@ -517,7 +510,6 @@ void thash_purge_all(struct kvm_vcpu *v)
* INPUT:
* in: TLB format for both VHPT & TLB.
*/
-
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
struct thash_data *cch;
@@ -526,7 +518,7 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
struct thash_cb *hcb = &v->arch.vtlb;
- cch = __vtr_lookup(v, va, is_data);;
+ cch = __vtr_lookup(v, va, is_data);
if (cch)
return cch;
@@ -547,7 +539,6 @@ struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
return NULL;
}
-
/*
* Initialize internal control data before service.
*/
@@ -573,6 +564,10 @@ void thash_init(struct thash_cb *hcb, u64 sz)
u64 kvm_get_mpt_entry(u64 gpfn)
{
u64 *base = (u64 *) KVM_P2M_BASE;
+
+ if (gpfn >= (KVM_P2M_SIZE >> 3))
+ panic_vm(current_vcpu, "Invalid gpfn =%lx\n", gpfn);
+
return *(base + gpfn);
}
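The new bounds check in kvm_get_mpt_entry() derives the entry count from the table size: each P2M entry is a u64, eight bytes, so the table holds KVM_P2M_SIZE >> 3 entries and any gpfn at or beyond that index would read past the end. For illustration only (the real KVM_P2M_SIZE comes from the kvm/ia64 headers), a 16 MB table would hold 16 MB / 8 B = 2M entries, i.e. cover 2M guest page frames:

	/* Standalone version of the same check; sizes are illustrative. */
	#define EXAMPLE_P2M_SIZE	(16UL << 20)		/* assumed 16 MB table */
	#define EXAMPLE_P2M_NENTRIES	(EXAMPLE_P2M_SIZE >> 3)	/* one u64 per entry   */

	static inline int gpfn_in_range_sketch(u64 gpfn)
	{
		return gpfn < EXAMPLE_P2M_NENTRIES;	/* index stays inside table */
	}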
@@ -589,7 +584,6 @@ u64 kvm_gpa_to_mpa(u64 gpa)
return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}
-
/*
* Fetch guest bundle code.
* INPUT:
@@ -631,7 +625,6 @@ int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
return IA64_NO_FAULT;
}
-
void kvm_init_vhpt(struct kvm_vcpu *v)
{
v->arch.vhpt.num = VHPT_NUM_ENTRIES;