Commit d40a6898 authored by Paolo Bonzini

KVM: emulate: protect checks on ctxt->d by a common "if (unlikely())"

There are several checks for "peculiar" aspects of instructions in both
x86_decode_insn and x86_emulate_insn.  Group them together, and guard
them with a single "if" that lets the processor quickly skip them all.
Make this more effective by adding two more flag bits that say whether the
.intercept and .check_perm fields are valid.  We will reuse these
flags later to avoid initializing fields of the emulate_ctxt struct.

This skims about 30 cycles off each emulated instruction, which is
approximately a 3% improvement.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e24186e0
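
For readers outside the KVM tree, here is a minimal, self-contained sketch of the pattern the patch applies: rarely-set descriptor flag bits mark which optional fields are valid, and the hot path tests all of them at once behind a single unlikely() branch. Every name below (insn_desc, HAS_INTERCEPT, and so on) is invented for illustration and is not the kernel's.

/*
 * Illustrative sketch only -- not the KVM emulator code.
 * A descriptor carries flag bits; two of them merely say that optional
 * fields are valid.  The hot path tests all the "peculiar" bits in one
 * grouped check, so the common case takes a single predicted-not-taken
 * branch instead of several independent tests.
 */
#include <stdio.h>
#include <stdint.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

#define HAS_INTERCEPT  (1ull << 0)   /* .intercept field is valid  */
#define HAS_CHECK_PERM (1ull << 1)   /* .check_perm field is valid */
#define IS_PRIV        (1ull << 2)   /* needs a privilege check    */

struct insn_desc {
        uint64_t flags;
        int intercept;               /* only meaningful if HAS_INTERCEPT  */
        int (*check_perm)(void);     /* only meaningful if HAS_CHECK_PERM */
};

static int emulate(const struct insn_desc *d)
{
        /* One grouped test: the fast path falls straight through. */
        if (unlikely(d->flags & (HAS_INTERCEPT | HAS_CHECK_PERM | IS_PRIV))) {
                if (d->flags & HAS_INTERCEPT)
                        printf("intercept %d\n", d->intercept);
                if ((d->flags & HAS_CHECK_PERM) && d->check_perm() != 0)
                        return -1;   /* permission denied */
                if (d->flags & IS_PRIV)
                        printf("privilege check\n");
        }
        /* ... common emulation work ... */
        return 0;
}

int main(void)
{
        struct insn_desc plain  = { .flags = 0 };                 /* fast path  */
        struct insn_desc traced = { .flags = HAS_INTERCEPT, .intercept = 7 };

        emulate(&plain);
        emulate(&traced);
        return 0;
}

The benefit of the grouped test is that one well-predicted branch replaces several individual checks on the common path, and fields guarded by a validity bit only need to be read (or initialized) when that bit is set.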
@@ -162,6 +162,8 @@
 #define NoWrite     ((u64)1 << 45)  /* No writeback */
 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
 #define NoMod       ((u64)1 << 47)  /* Mod field is ignored */
+#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
+#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
 
 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
@@ -3546,9 +3548,9 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 }
 
 #define D(_y) { .flags = (_y) }
-#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
-#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
-                      .check_perm = (_p) }
+#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
+#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
+                      .intercept = x86_intercept_##_i, .check_perm = (_p) }
 #define N    D(NotImpl)
 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
@@ -3557,10 +3559,10 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
 #define II(_f, _e, _i) \
-        { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
+        { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
 #define IIP(_f, _e, _i, _p) \
-        { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
-          .check_perm = (_p) }
+        { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
+          .intercept = x86_intercept_##_i, .check_perm = (_p) }
 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
 
 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
@@ -4393,6 +4395,13 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
                 return EMULATION_FAILED;
 
         ctxt->execute = opcode.u.execute;
+
+        if (unlikely(ctxt->d &
+                     (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+                /*
+                 * These are copied unconditionally here, and checked unconditionally
+                 * in x86_emulate_insn.
+                 */
         ctxt->check_perm = opcode.check_perm;
         ctxt->intercept = opcode.intercept;
@@ -4416,6 +4425,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
                         ctxt->op_bytes = 16;
                 else if (ctxt->d & Mmx)
                         ctxt->op_bytes = 8;
+        }
 
         /* ModRM and SIB bytes. */
         if (ctxt->d & ModRM) {
@@ -4549,6 +4559,8 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
                         goto done;
         }
 
+        if (unlikely(ctxt->d &
+                     (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
         if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
                         (ctxt->d & Undefined)) {
                 rc = emulate_ud(ctxt);
@@ -4620,6 +4632,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
                                 goto done;
                         }
                 }
+        }
 
         if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
                 rc = segmented_read(ctxt, ctxt->src.addr.mem,