Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--  arch/x86/kvm/emulate.c | 1247
1 file changed, 916 insertions, 331 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4dade6ac082..5ac0bb465ed 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -33,6 +33,7 @@
#include <asm/kvm_emulate.h>
#include "x86.h"
+#include "tss.h"
/*
* Opcode effective-address decode tables.
@@ -50,6 +51,8 @@
#define DstReg (2<<1) /* Register operand. */
#define DstMem (3<<1) /* Memory operand. */
#define DstAcc (4<<1) /* Destination Accumulator */
+#define DstDI (5<<1) /* Destination is in ES:(E)DI */
+#define DstMem64 (6<<1) /* 64bit memory operand */
#define DstMask (7<<1)
/* Source operand type. */
#define SrcNone (0<<4) /* No source operand. */
@@ -63,6 +66,7 @@
#define SrcOne (7<<4) /* Implied '1' */
#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
#define SrcImmU (9<<4) /* Immediate operand, unsigned */
+#define SrcSI (0xa<<4) /* Source is in the DS:RSI */
#define SrcMask (0xf<<4)
/* Generic ModRM decode. */
#define ModRM (1<<8)
@@ -85,6 +89,9 @@
#define Src2ImmByte (2<<29)
#define Src2One (3<<29)
#define Src2Imm16 (4<<29)
+#define Src2Mem16 (5<<29) /* Used for Ep encoding. First argument has to be
+ in memory and the second argument is located
+ immediately after the first one in memory. */
#define Src2Mask (7<<29)
enum {
@@ -147,8 +154,8 @@ static u32 opcode_table[256] = {
0, 0, 0, 0,
/* 0x68 - 0x6F */
SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
+ DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
+ SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
/* 0x70 - 0x77 */
SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
@@ -173,12 +180,12 @@ static u32 opcode_table[256] = {
/* 0xA0 - 0xA7 */
ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
- ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | String, ImplicitOps | String,
+ ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
+ ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
/* 0xA8 - 0xAF */
- 0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
- ByteOp | ImplicitOps | String, ImplicitOps | String,
+ 0, 0, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
+ ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
+ ByteOp | DstDI | String, DstDI | String,
/* 0xB0 - 0xB7 */
ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
@@ -204,13 +211,13 @@ static u32 opcode_table[256] = {
0, 0, 0, 0, 0, 0, 0, 0,
/* 0xE0 - 0xE7 */
0, 0, 0, 0,
- ByteOp | SrcImmUByte, SrcImmUByte,
- ByteOp | SrcImmUByte, SrcImmUByte,
+ ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
+ ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
/* 0xE8 - 0xEF */
SrcImm | Stack, SrcImm | ImplicitOps,
SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
- SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
+ SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
+ SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
/* 0xF0 - 0xF7 */
0, 0, 0, 0,
ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
@@ -343,7 +350,8 @@ static u32 group_table[] = {
[Group5*8] =
DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
SrcMem | ModRM | Stack, 0,
- SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
+ SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
+ SrcMem | ModRM | Stack, 0,
[Group7*8] =
0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
SrcNone | ModRM | DstMem | Mov, 0,
@@ -353,14 +361,14 @@ static u32 group_table[] = {
DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
[Group9*8] =
- 0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
+ 0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
};
static u32 group2_table[] = {
[Group7*8] =
- SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM,
+ SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
SrcNone | ModRM | DstMem | Mov, 0,
- SrcMem16 | ModRM | Mov, 0,
+ SrcMem16 | ModRM | Mov | Priv, 0,
[Group9*8] =
0, 0, 0, 0, 0, 0, 0, 0,
};
@@ -562,7 +570,7 @@ static u32 group2_table[] = {
#define insn_fetch(_type, _size, _eip) \
({ unsigned long _x; \
rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size)); \
- if (rc != 0) \
+ if (rc != X86EMUL_CONTINUE) \
goto done; \
(_eip) += (_size); \
(_type)_x; \
@@ -638,40 +646,40 @@ static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
- unsigned long linear, u8 *dest)
+ unsigned long eip, u8 *dest)
{
struct fetch_cache *fc = &ctxt->decode.fetch;
int rc;
- int size;
+ int size, cur_size;
- if (linear < fc->start || linear >= fc->end) {
- size = min(15UL, PAGE_SIZE - offset_in_page(linear));
- rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
- if (rc)
+ if (eip == fc->end) {
+ cur_size = fc->end - fc->start;
+ size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
+ rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
+ size, ctxt->vcpu, NULL);
+ if (rc != X86EMUL_CONTINUE)
return rc;
- fc->start = linear;
- fc->end = linear + size;
+ fc->end += size;
}
- *dest = fc->data[linear - fc->start];
- return 0;
+ *dest = fc->data[eip - fc->start];
+ return X86EMUL_CONTINUE;
}
static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
unsigned long eip, void *dest, unsigned size)
{
- int rc = 0;
+ int rc;
/* x86 instructions are limited to 15 bytes. */
- if (eip + size - ctxt->decode.eip_orig > 15)
+ if (eip + size - ctxt->eip > 15)
return X86EMUL_UNHANDLEABLE;
- eip += ctxt->cs_base;
while (size--) {
rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
/*
@@ -702,7 +710,7 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
*address = 0;
rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
ctxt->vcpu, NULL);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
ctxt->vcpu, NULL);
@@ -782,7 +790,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c = &ctxt->decode;
u8 sib;
int index_reg = 0, base_reg = 0, scale;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
if (c->rex_prefix) {
c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */
@@ -895,7 +903,7 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
switch (c->ad_bytes) {
case 2:
@@ -916,14 +924,18 @@ int
x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, group;
- /* Shadow copy of register state. Committed on successful emulation. */
+ /* we cannot decode an insn before we have completed the previous rep insn */
+ WARN_ON(ctxt->restart);
+
+ /* Shadow copy of register state. Committed on successful emulation. */
memset(c, 0, sizeof(struct decode_cache));
- c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu);
+ c->eip = ctxt->eip;
+ c->fetch.start = c->fetch.end = c->eip;
ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
@@ -1015,11 +1027,6 @@ done_prefixes:
}
}
- if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
- kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");
- return -1;
- }
-
if (c->d & Group) {
group = c->d & GroupMask;
c->modrm = insn_fetch(u8, 1, c->eip);
@@ -1046,7 +1053,7 @@ done_prefixes:
rc = decode_modrm(ctxt, ops);
else if (c->d & MemAbs)
rc = decode_abs(ctxt, ops);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
goto done;
if (!c->has_seg_override)
@@ -1057,6 +1064,10 @@ done_prefixes:
if (c->ad_bytes != 8)
c->modrm_ea = (u32)c->modrm_ea;
+
+ if (c->rip_relative)
+ c->modrm_ea += c->eip;
+
/*
* Decode and fetch the source operand: register, memory
* or immediate.
@@ -1091,6 +1102,8 @@ done_prefixes:
break;
}
c->src.type = OP_MEM;
+ c->src.ptr = (unsigned long *)c->modrm_ea;
+ c->src.val = 0;
break;
case SrcImm:
case SrcImmU:
@@ -1139,6 +1152,14 @@ done_prefixes:
c->src.bytes = 1;
c->src.val = 1;
break;
+ case SrcSI:
+ c->src.type = OP_MEM;
+ c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->src.ptr = (unsigned long *)
+ register_address(c, seg_override_base(ctxt, c),
+ c->regs[VCPU_REGS_RSI]);
+ c->src.val = 0;
+ break;
}
/*
@@ -1168,6 +1189,12 @@ done_prefixes:
c->src2.bytes = 1;
c->src2.val = 1;
break;
+ case Src2Mem16:
+ c->src2.type = OP_MEM;
+ c->src2.bytes = 2;
+ c->src2.ptr = (unsigned long *)(c->modrm_ea + c->src.bytes);
+ c->src2.val = 0;
+ break;
}
/* Decode and fetch the destination operand: register or memory. */
@@ -1180,6 +1207,7 @@ done_prefixes:
c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
break;
case DstMem:
+ case DstMem64:
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.type = OP_REG;
@@ -1188,12 +1216,24 @@ done_prefixes:
break;
}
c->dst.type = OP_MEM;
+ c->dst.ptr = (unsigned long *)c->modrm_ea;
+ if ((c->d & DstMask) == DstMem64)
+ c->dst.bytes = 8;
+ else
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->dst.val = 0;
+ if (c->d & BitOp) {
+ unsigned long mask = ~(c->dst.bytes * 8 - 1);
+
+ c->dst.ptr = (void *)c->dst.ptr +
+ (c->src.val & mask) / 8;
+ }
break;
case DstAcc:
c->dst.type = OP_REG;
- c->dst.bytes = c->op_bytes;
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = &c->regs[VCPU_REGS_RAX];
- switch (c->op_bytes) {
+ switch (c->dst.bytes) {
case 1:
c->dst.val = *(u8 *)c->dst.ptr;
break;
@@ -1203,18 +1243,248 @@ done_prefixes:
case 4:
c->dst.val = *(u32 *)c->dst.ptr;
break;
+ case 8:
+ c->dst.val = *(u64 *)c->dst.ptr;
+ break;
}
c->dst.orig_val = c->dst.val;
break;
+ case DstDI:
+ c->dst.type = OP_MEM;
+ c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+ c->dst.ptr = (unsigned long *)
+ register_address(c, es_base(ctxt),
+ c->regs[VCPU_REGS_RDI]);
+ c->dst.val = 0;
+ break;
}
- if (c->rip_relative)
- c->modrm_ea += c->eip;
-
done:
return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
+static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ unsigned int size, unsigned short port,
+ void *dest)
+{
+ struct read_cache *rc = &ctxt->decode.io_read;
+
+ if (rc->pos == rc->end) { /* refill pio read ahead */
+ struct decode_cache *c = &ctxt->decode;
+ unsigned int in_page, n;
+ unsigned int count = c->rep_prefix ?
+ address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
+ in_page = (ctxt->eflags & EFLG_DF) ?
+ offset_in_page(c->regs[VCPU_REGS_RDI]) :
+ PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
+ n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
+ count);
+ if (n == 0)
+ n = 1;
+ rc->pos = rc->end = 0;
+ if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
+ return 0;
+ rc->end = n * size;
+ }
+
+ memcpy(dest, rc->data + rc->pos, size);
+ rc->pos += size;
+ return 1;
+}
+
+static u32 desc_limit_scaled(struct desc_struct *desc)
+{
+ u32 limit = get_desc_limit(desc);
+
+ return desc->g ? (limit << 12) | 0xfff : limit;
+}
+
+static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_ptr *dt)
+{
+ if (selector & 1 << 2) {
+ struct desc_struct desc;
+ memset (dt, 0, sizeof *dt);
+ if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
+ return;
+
+ dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
+ dt->address = get_desc_base(&desc);
+ } else
+ ops->get_gdt(dt, ctxt->vcpu);
+}
+
+/* allowed just for 8 bytes segments */
+static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_struct *desc)
+{
+ struct desc_ptr dt;
+ u16 index = selector >> 3;
+ int ret;
+ u32 err;
+ ulong addr;
+
+ get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+ if (dt.size < index * 8 + 7) {
+ kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ addr = dt.address + index * 8;
+ ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+ return ret;
+}
+
+/* allowed just for 8 bytes segments */
+static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, struct desc_struct *desc)
+{
+ struct desc_ptr dt;
+ u16 index = selector >> 3;
+ u32 err;
+ ulong addr;
+ int ret;
+
+ get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+ if (dt.size < index * 8 + 7) {
+ kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+
+ addr = dt.address + index * 8;
+ ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT)
+ kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+ return ret;
+}
+
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 selector, int seg)
+{
+ struct desc_struct seg_desc;
+ u8 dpl, rpl, cpl;
+ unsigned err_vec = GP_VECTOR;
+ u32 err_code = 0;
+ bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+ int ret;
+
+ memset(&seg_desc, 0, sizeof seg_desc);
+
+ if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
+ || ctxt->mode == X86EMUL_MODE_REAL) {
+ /* set real mode segment descriptor */
+ set_desc_base(&seg_desc, selector << 4);
+ set_desc_limit(&seg_desc, 0xffff);
+ seg_desc.type = 3;
+ seg_desc.p = 1;
+ seg_desc.s = 1;
+ goto load;
+ }
+
+ /* NULL selector is not valid for TR, CS and SS */
+ if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+ && null_selector)
+ goto exception;
+
+ /* TR should be in GDT only */
+ if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+ goto exception;
+
+ if (null_selector) /* for NULL selector skip all following checks */
+ goto load;
+
+ ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ err_code = selector & 0xfffc;
+ err_vec = GP_VECTOR;
+
+ /* can't load system descriptor into segment selector */
+ if (seg <= VCPU_SREG_GS && !seg_desc.s)
+ goto exception;
+
+ if (!seg_desc.p) {
+ err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+ goto exception;
+ }
+
+ rpl = selector & 3;
+ dpl = seg_desc.dpl;
+ cpl = ops->cpl(ctxt->vcpu);
+
+ switch (seg) {
+ case VCPU_SREG_SS:
+ /*
+ * segment is not a writable data segment or segment
+ * selector's RPL != CPL or segment's DPL != CPL
+ */
+ if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
+ goto exception;
+ break;
+ case VCPU_SREG_CS:
+ if (!(seg_desc.type & 8))
+ goto exception;
+
+ if (seg_desc.type & 4) {
+ /* conforming */
+ if (dpl > cpl)
+ goto exception;
+ } else {
+ /* nonconforming */
+ if (rpl > cpl || dpl != cpl)
+ goto exception;
+ }
+ /* CS(RPL) <- CPL */
+ selector = (selector & 0xfffc) | cpl;
+ break;
+ case VCPU_SREG_TR:
+ if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
+ goto exception;
+ break;
+ case VCPU_SREG_LDTR:
+ if (seg_desc.s || seg_desc.type != 2)
+ goto exception;
+ break;
+ default: /* DS, ES, FS, or GS */
+ /*
+ * segment is not a data or readable code segment or
+ * ((segment is a data or nonconforming code segment)
+ * and (both RPL and CPL > DPL))
+ */
+ if ((seg_desc.type & 0xa) == 0x8 ||
+ (((seg_desc.type & 0xc) != 0xc) &&
+ (rpl > dpl && cpl > dpl)))
+ goto exception;
+ break;
+ }
+
+ if (seg_desc.s) {
+ /* mark segment as accessed */
+ seg_desc.type |= 1;
+ ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ }
+load:
+ ops->set_segment_selector(selector, seg, ctxt->vcpu);
+ ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
+ return X86EMUL_CONTINUE;
+exception:
+ kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
+ return X86EMUL_PROPAGATE_FAULT;
+}
+
static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
{
struct decode_cache *c = &ctxt->decode;
@@ -1251,7 +1521,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
+ int cpl = ops->cpl(ctxt->vcpu);
rc = emulate_pop(ctxt, ops, &val, len);
if (rc != X86EMUL_CONTINUE)
@@ -1306,10 +1576,10 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
int rc;
rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
+ rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
return rc;
}
@@ -1332,7 +1602,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
+ int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
while (reg >= VCPU_REGS_RAX) {
@@ -1343,7 +1613,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
}
rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
- if (rc != 0)
+ if (rc != X86EMUL_CONTINUE)
break;
--reg;
}
@@ -1354,12 +1624,8 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc;
- rc = emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
- if (rc != 0)
- return rc;
- return 0;
+ return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
}
static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
@@ -1395,7 +1661,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- int rc = 0;
switch (c->modrm_reg) {
case 0 ... 1: /* test */
@@ -1408,11 +1673,9 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
emulate_1op("neg", c->dst, ctxt->eflags);
break;
default:
- DPRINTF("Cannot emulate %02x\n", c->b);
- rc = X86EMUL_UNHANDLEABLE;
- break;
+ return 0;
}
- return rc;
+ return 1;
}
static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
@@ -1442,20 +1705,14 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
emulate_push(ctxt);
break;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
- struct x86_emulate_ops *ops,
- unsigned long memop)
+ struct x86_emulate_ops *ops)
{
struct decode_cache *c = &ctxt->decode;
- u64 old, new;
- int rc;
-
- rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- return rc;
+ u64 old = c->dst.orig_val;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
@@ -1463,17 +1720,13 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
-
} else {
- new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+ c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
(u32) c->regs[VCPU_REGS_RBX];
- rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
- if (rc != X86EMUL_CONTINUE)
- return rc;
ctxt->eflags |= EFLG_ZF;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
@@ -1484,14 +1737,14 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
unsigned long cs;
rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
if (c->op_bytes == 4)
c->eip = (u32)c->eip;
rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
- if (rc)
+ if (rc != X86EMUL_CONTINUE)
return rc;
- rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
+ rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
return rc;
}
@@ -1544,7 +1797,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
default:
break;
}
- return 0;
+ return X86EMUL_CONTINUE;
}
static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
@@ -1598,8 +1851,11 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
u64 msr_data;
/* syscall is not available in real mode */
- if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86)
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->mode == X86EMUL_MODE_REAL ||
+ ctxt->mode == X86EMUL_MODE_VM86) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1649,14 +1905,16 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL) {
kvm_inject_gp(ctxt->vcpu, 0);
- return X86EMUL_UNHANDLEABLE;
+ return X86EMUL_PROPAGATE_FAULT;
}
/* XXX sysenter/sysexit have not been tested in 64bit mode.
* Therefore, we inject an #UD.
*/
- if (ctxt->mode == X86EMUL_MODE_PROT64)
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->mode == X86EMUL_MODE_PROT64) {
+ kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+ return X86EMUL_PROPAGATE_FAULT;
+ }
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1711,7 +1969,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86) {
kvm_inject_gp(ctxt->vcpu, 0);
- return X86EMUL_UNHANDLEABLE;
+ return X86EMUL_PROPAGATE_FAULT;
}
setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1756,7 +2014,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
@@ -1764,7 +2023,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
- return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
+ return ops->cpl(ctxt->vcpu) > iopl;
}
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
@@ -1801,22 +2060,419 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops,
u16 port, u16 len)
{
- if (emulator_bad_iopl(ctxt))
+ if (emulator_bad_iopl(ctxt, ops))
if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
return false;
return true;
}
+static u32 get_cached_descriptor_base(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ int seg)
+{
+ struct desc_struct desc;
+ if (ops->get_cached_descriptor(&desc, seg, ctxt->vcpu))
+ return get_desc_base(&desc);
+ else
+ return ~0;
+}
+
+static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_16 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ tss->ip = c->eip;
+ tss->flag = ctxt->eflags;
+ tss->ax = c->regs[VCPU_REGS_RAX];
+ tss->cx = c->regs[VCPU_REGS_RCX];
+ tss->dx = c->regs[VCPU_REGS_RDX];
+ tss->bx = c->regs[VCPU_REGS_RBX];
+ tss->sp = c->regs[VCPU_REGS_RSP];
+ tss->bp = c->regs[VCPU_REGS_RBP];
+ tss->si = c->regs[VCPU_REGS_RSI];
+ tss->di = c->regs[VCPU_REGS_RDI];
+
+ tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+ tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+ tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+ tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+ tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_16 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int ret;
+
+ c->eip = tss->ip;
+ ctxt->eflags = tss->flag | 2;
+ c->regs[VCPU_REGS_RAX] = tss->ax;
+ c->regs[VCPU_REGS_RCX] = tss->cx;
+ c->regs[VCPU_REGS_RDX] = tss->dx;
+ c->regs[VCPU_REGS_RBX] = tss->bx;
+ c->regs[VCPU_REGS_RSP] = tss->sp;
+ c->regs[VCPU_REGS_RBP] = tss->bp;
+ c->regs[VCPU_REGS_RSI] = tss->si;
+ c->regs[VCPU_REGS_RDI] = tss->di;
+
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
+ ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+ ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+ ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ return X86EMUL_CONTINUE;
+}
+
+static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+{
+ struct tss_segment_16 tss_seg;
+ int ret;
+ u32 err, new_tss_base = get_desc_base(new_desc);
+
+ ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ save_state_to_tss16(ctxt, ops, &tss_seg);
+
+ ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+ ret = ops->write_std(new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link,
+ ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+ }
+
+ return load_state_from_tss16(ctxt, ops, &tss_seg);
+}
+
+static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_32 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+
+ tss->cr3 = ops->get_cr(3, ctxt->vcpu);
+ tss->eip = c->eip;
+ tss->eflags = ctxt->eflags;
+ tss->eax = c->regs[VCPU_REGS_RAX];
+ tss->ecx = c->regs[VCPU_REGS_RCX];
+ tss->edx = c->regs[VCPU_REGS_RDX];
+ tss->ebx = c->regs[VCPU_REGS_RBX];
+ tss->esp = c->regs[VCPU_REGS_RSP];
+ tss->ebp = c->regs[VCPU_REGS_RBP];
+ tss->esi = c->regs[VCPU_REGS_RSI];
+ tss->edi = c->regs[VCPU_REGS_RDI];
+
+ tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+ tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+ tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+ tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+ tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
+ tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
+ tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ struct tss_segment_32 *tss)
+{
+ struct decode_cache *c = &ctxt->decode;
+ int ret;
+
+ ops->set_cr(3, tss->cr3, ctxt->vcpu);
+ c->eip = tss->eip;
+ ctxt->eflags = tss->eflags | 2;
+ c->regs[VCPU_REGS_RAX] = tss->eax;
+ c->regs[VCPU_REGS_RCX] = tss->ecx;
+ c->regs[VCPU_REGS_RDX] = tss->edx;
+ c->regs[VCPU_REGS_RBX] = tss->ebx;
+ c->regs[VCPU_REGS_RSP] = tss->esp;
+ c->regs[VCPU_REGS_RBP] = tss->ebp;
+ c->regs[VCPU_REGS_RSI] = tss->esi;
+ c->regs[VCPU_REGS_RDI] = tss->edi;
+
+ /*
+ * SDM says that segment selectors are loaded before segment
+ * descriptors
+ */
+ ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
+ ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+ ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+ ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+ ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
+ ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
+
+ /*
+ * Now load segment descriptors. If a fault happens at this stage
+ * it is handled in the context of the new task
+ */
+ ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+
+ return X86EMUL_CONTINUE;
+}
+
+static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, u16 old_tss_sel,
+ ulong old_tss_base, struct desc_struct *new_desc)
+{
+ struct tss_segment_32 tss_seg;
+ int ret;
+ u32 err, new_tss_base = get_desc_base(new_desc);
+
+ ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ save_state_to_tss32(ctxt, ops, &tss_seg);
+
+ ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+ return ret;
+ }
+
+ ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+ &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+
+ if (old_tss_sel != 0xffff) {
+ tss_seg.prev_task_link = old_tss_sel;
+
+ ret = ops->write_std(new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link,
+ ctxt->vcpu, &err);
+ if (ret == X86EMUL_PROPAGATE_FAULT) {
+ /* FIXME: need to provide precise fault address */
+ kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+ return ret;
+ }
+ }
+
+ return load_state_from_tss32(ctxt, ops, &tss_seg);
+}
+
+static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
+ struct x86_emulate_ops *ops,
+ u16 tss_selector, int reason,
+ bool has_error_code, u32 error_code)
+{
+ struct desc_struct curr_tss_desc, next_tss_desc;
+ int ret;
+ u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
+ ulong old_tss_base =
+ get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
+ u32 desc_limit;
+
+ /* FIXME: old_tss_base == ~0 ? */
+
+ ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
+ ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+ if (ret != X86EMUL_CONTINUE)