From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: Richard Henderson <richard.henderson@linaro.org>
Subject: [PULL 05/24] target/i386: group common checks in the decoding phase
Date: Thu, 26 Oct 2023 01:26:58 +0200 [thread overview]
Message-ID: <20231025232718.89428-6-pbonzini@redhat.com> (raw)
In-Reply-To: <20231025232718.89428-1-pbonzini@redhat.com>
In preparation for adding more similar checks, move the VEX.L=0 check
and several X86_SPECIAL_* checks to a new field, where each bit represents
a common check on unused bits, or a restriction on the processor mode.
Likewise, many SVM intercepts can be checked during the decoding phase,
the main exceptions being the selective CR0 write, MSR, and IOIO intercepts.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/tcg/decode-new.c.inc | 85 ++++++++++++++++++++++++--------
target/i386/tcg/decode-new.h | 29 ++++++++---
target/i386/tcg/emit.c.inc | 8 ---
3 files changed, 85 insertions(+), 37 deletions(-)
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index ec5d260b7ea..25c1dae55a4 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -90,8 +90,6 @@
X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
#define cpuid(feat) .cpuid = X86_FEAT_##feat,
-#define i64 .special = X86_SPECIAL_i64,
-#define o64 .special = X86_SPECIAL_o64,
#define xchg .special = X86_SPECIAL_Locked,
#define mmx .special = X86_SPECIAL_MMX,
#define zext0 .special = X86_SPECIAL_ZExtOp0,
@@ -114,6 +112,9 @@
#define vex12 .vex_class = 12,
#define vex13 .vex_class = 13,
+#define chk(a) .check = X86_CHECK_##a,
+#define svm(a) .intercept = SVM_EXIT_##a,
+
#define avx2_256 .vex_special = X86_VEX_AVX2_256,
#define P_00 1
@@ -161,8 +162,8 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
};
static const X86OpEntry group15_mem[8] = {
- [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5),
- [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5),
+ [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128)),
+ [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128)),
};
uint8_t modrm = get_modrm(s, env);
@@ -1590,6 +1591,12 @@ static bool validate_vex(DisasContext *s, X86DecodedInsn *decode)
if (s->flags & HF_EM_MASK) {
goto illegal;
}
+
+ if (e->check & X86_CHECK_VEX128) {
+ if (s->vex_l) {
+ goto illegal;
+ }
+ }
return true;
nm_exception:
@@ -1775,6 +1782,25 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
goto illegal_op;
}
+ /* Checks that result in #UD come first. */
+ if (decode.e.check) {
+ if (decode.e.check & X86_CHECK_i64) {
+ if (CODE64(s)) {
+ goto illegal_op;
+ }
+ }
+ if (decode.e.check & X86_CHECK_o64) {
+ if (!CODE64(s)) {
+ goto illegal_op;
+ }
+ }
+ if (decode.e.check & X86_CHECK_prot) {
+ if (!PE(s) || VM86(s)) {
+ goto illegal_op;
+ }
+ }
+ }
+
switch (decode.e.special) {
case X86_SPECIAL_None:
break;
@@ -1785,23 +1811,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
}
break;
- case X86_SPECIAL_ProtMode:
- if (!PE(s) || VM86(s)) {
- goto illegal_op;
- }
- break;
-
- case X86_SPECIAL_i64:
- if (CODE64(s)) {
- goto illegal_op;
- }
- break;
- case X86_SPECIAL_o64:
- if (!CODE64(s)) {
- goto illegal_op;
- }
- break;
-
case X86_SPECIAL_ZExtOp0:
assert(decode.op[0].unit == X86_OP_INT);
if (!decode.op[0].has_ea) {
@@ -1831,6 +1840,37 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
if (!validate_vex(s, &decode)) {
return;
}
+
+ /*
+ * Checks that result in #GP or VMEXIT come second. Intercepts are
+ * generally checked after non-memory exceptions (i.e. before all
+ * exceptions if there is no memory operand). Exceptions are
+ * vm86 checks (INTn, IRET, PUSHF/POPF), RSM and XSETBV (!).
+ *
+ * RSM and XSETBV will be handled in the gen_* functions
+ * instead of using chk().
+ */
+ if (decode.e.check & X86_CHECK_cpl0) {
+ if (CPL(s) != 0) {
+ goto gp_fault;
+ }
+ }
+ if (decode.e.intercept && unlikely(GUEST(s))) {
+ gen_helper_svm_check_intercept(tcg_env,
+ tcg_constant_i32(decode.e.intercept));
+ }
+ if (decode.e.check) {
+ if ((decode.e.check & X86_CHECK_vm86_iopl) && VM86(s)) {
+ if (IOPL(s) < 3) {
+ goto gp_fault;
+ }
+ } else if (decode.e.check & X86_CHECK_cpl_iopl) {
+ if (IOPL(s) < CPL(s)) {
+ goto gp_fault;
+ }
+ }
+ }
+
if (decode.e.special == X86_SPECIAL_MMX &&
!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) {
gen_helper_enter_mmx(tcg_env);
@@ -1857,6 +1897,9 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
gen_writeback(s, &decode, 0, s->T0);
}
return;
+ gp_fault:
+ gen_exception_gpf(s);
+ return;
illegal_op:
gen_illegal_opcode(s);
return;
diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h
index 9be8a6e65fd..bbc9aea940d 100644
--- a/target/i386/tcg/decode-new.h
+++ b/target/i386/tcg/decode-new.h
@@ -131,15 +131,30 @@ typedef enum X86OpUnit {
X86_OP_MMX, /* address in either s->ptrX or s->A0 depending on has_ea */
} X86OpUnit;
+typedef enum X86InsnCheck {
+ /* Illegal or exclusive to 64-bit mode */
+ X86_CHECK_i64 = 1,
+ X86_CHECK_o64 = 2,
+
+ /* Fault outside protected mode */
+ X86_CHECK_prot = 4,
+
+ /* Privileged instruction checks */
+ X86_CHECK_cpl0 = 8,
+ X86_CHECK_vm86_iopl = 16,
+ X86_CHECK_cpl_iopl = 32,
+ X86_CHECK_iopl = X86_CHECK_cpl_iopl | X86_CHECK_vm86_iopl,
+
+ /* Fault if VEX.L=1 */
+ X86_CHECK_VEX128 = 64,
+} X86InsnCheck;
+
typedef enum X86InsnSpecial {
X86_SPECIAL_None,
/* Always locked if it has a memory operand (XCHG) */
X86_SPECIAL_Locked,
- /* Fault outside protected mode */
- X86_SPECIAL_ProtMode,
-
/*
* Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw
* in the manual.
@@ -158,10 +173,6 @@ typedef enum X86InsnSpecial {
* become P/P/Q/N, and size "x" becomes "q".
*/
X86_SPECIAL_MMX,
-
- /* Illegal or exclusive to 64-bit mode */
- X86_SPECIAL_i64,
- X86_SPECIAL_o64,
} X86InsnSpecial;
/*
@@ -224,7 +235,9 @@ struct X86OpEntry {
X86CPUIDFeature cpuid:8;
unsigned vex_class:8;
X86VEXSpecial vex_special:8;
- uint16_t valid_prefix:16;
+ unsigned valid_prefix:16;
+ unsigned check:16;
+ unsigned intercept:8;
bool is_decode:1;
};
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 16085a19d7a..82da5488d47 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -1236,10 +1236,6 @@ static void gen_INSERTQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec
static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- if (s->vex_l) {
- gen_illegal_opcode(s);
- return;
- }
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1);
gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
}
@@ -1886,10 +1882,6 @@ static void gen_VAESKEYGEN(DisasContext *s, CPUX86State *env, X86DecodedInsn *de
static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- if (s->vex_l) {
- gen_illegal_opcode(s);
- return;
- }
gen_helper_update_mxcsr(tcg_env);
tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
}
--
2.41.0
next prev parent reply other threads:[~2023-10-25 23:29 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-25 23:26 [PULL 00/24] x86, KVM changes for 2023-10-26 Paolo Bonzini
2023-10-25 23:26 ` [PULL 01/24] tests/tcg: fix out-of-bounds access in test-avx Paolo Bonzini
2023-10-25 23:26 ` [PULL 02/24] target/i386: implement SHA instructions Paolo Bonzini
2023-10-25 23:26 ` [PULL 03/24] tests/tcg/i386: initialize more registers in test-avx Paolo Bonzini
2023-10-25 23:26 ` [PULL 04/24] tests/tcg/i386: test-avx: add test cases for SHA new instructions Paolo Bonzini
2023-10-25 23:26 ` Paolo Bonzini [this message]
2023-10-25 23:26 ` [PULL 06/24] target/i386: validate VEX.W for AVX instructions Paolo Bonzini
2023-10-25 23:27 ` [PULL 07/24] target/i386: check CPUID_PAE to determine 36 bit processor address space Paolo Bonzini
2023-10-25 23:27 ` [PULL 08/24] kvm: remove unnecessary stub Paolo Bonzini
2023-10-25 23:27 ` [PULL 09/24] kvm: require KVM_CAP_INTERNAL_ERROR_DATA Paolo Bonzini
2023-10-25 23:27 ` [PULL 10/24] kvm: require KVM_CAP_SIGNAL_MSI Paolo Bonzini
2023-10-25 23:27 ` [PULL 11/24] kvm: require KVM_IRQFD for kernel irqchip Paolo Bonzini
2023-10-25 23:27 ` [PULL 12/24] " Paolo Bonzini
2023-10-25 23:27 ` [PULL 13/24] kvm: drop reference to KVM_CAP_PCI_2_3 Paolo Bonzini
2023-10-25 23:27 ` [PULL 14/24] kvm: assume that many ioeventfds can be created Paolo Bonzini
2023-10-25 23:27 ` [PULL 15/24] kvm: require KVM_CAP_IOEVENTFD and KVM_CAP_IOEVENTFD_ANY_LENGTH Paolo Bonzini
2023-10-25 23:27 ` [PULL 16/24] kvm: unify listeners for PIO address space Paolo Bonzini
2023-10-25 23:27 ` [PULL 17/24] kvm: i386: move KVM_CAP_IRQ_ROUTING detection to kvm_arch_required_capabilities Paolo Bonzini
2023-10-25 23:27 ` [PULL 18/24] kvm: i386: require KVM_CAP_DEBUGREGS Paolo Bonzini
2023-10-25 23:27 ` [PULL 19/24] kvm: i386: require KVM_CAP_XSAVE Paolo Bonzini
2023-10-25 23:27 ` [PULL 20/24] kvm: i386: require KVM_CAP_SET_VCPU_EVENTS and KVM_CAP_X86_ROBUST_SINGLESTEP Paolo Bonzini
2023-10-25 23:27 ` [PULL 21/24] kvm: i386: require KVM_CAP_MCE Paolo Bonzini
2023-10-25 23:27 ` [PULL 22/24] kvm: i386: require KVM_CAP_ADJUST_CLOCK Paolo Bonzini
2023-10-25 23:27 ` [PULL 23/24] kvm: i386: require KVM_CAP_SET_IDENTITY_MAP_ADDR Paolo Bonzini
2023-10-25 23:27 ` [PULL 24/24] kvm: i8254: require KVM_CAP_PIT2 and KVM_CAP_PIT_STATE2 Paolo Bonzini
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231025232718.89428-6-pbonzini@redhat.com \
--to=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=richard.henderson@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).