* [PATCH 01/10] kvm: split out instruction structure from decoding method.
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 02/10] kvm: split out instruction decode from emulation Rusty Russell
` (9 subsequent siblings)
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
Add a new 'struct arm_insn' to represent the decoded instruction; the
decoding logic belongs in a separate structure (arm_decode).
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 120 +++++++++++++++++++++++++++---------------------
1 file changed, 68 insertions(+), 52 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 30124cb..e4fc12b 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -288,34 +288,37 @@ out:
/******************************************************************************
* Load-Store instruction emulation
*****************************************************************************/
+enum SRType {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+ SRType_RRX
+};
-struct arm_instr {
- /* Instruction decoding */
- u32 opc;
- u32 opc_mask;
-
+struct arm_insn {
/* Decoding for the register write back */
bool register_form;
u32 imm;
- u8 Rm;
u8 type;
+ u8 Rt, Rn, Rm;
u8 shift_n;
/* Common decoding */
u8 len;
bool sign_extend;
bool w;
+};
+
+struct arm_decode {
+ /* Instruction decoding */
+ u32 opc;
+ u32 opc_mask;
bool (*decode)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr, struct arm_instr *ai);
-};
+ unsigned long instr, struct arm_insn *ai);
-enum SRType {
- SRType_LSL,
- SRType_LSR,
- SRType_ASR,
- SRType_ROR,
- SRType_RRX
+ struct arm_insn template;
};
/* Modelled after DecodeImmShift() in the ARM ARM */
@@ -383,7 +386,7 @@ u32 shift(u32 value, u8 N, enum SRType type, u8 amount, bool carry_in)
}
static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr, const struct arm_instr *ai)
+ unsigned long instr, const struct arm_insn *ai)
{
u8 Rt = (instr >> 12) & 0xf;
u8 Rn = (instr >> 16) & 0xf;
@@ -431,7 +434,7 @@ static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
}
static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr, struct arm_instr *ai)
+ unsigned long instr, struct arm_insn *ai)
{
u8 A = (instr >> 25) & 1;
@@ -449,7 +452,7 @@ static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
}
static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr, struct arm_instr *ai)
+ unsigned long instr, struct arm_insn *ai)
{
mmio->is_write = ai->w;
mmio->len = ai->len;
@@ -476,56 +479,69 @@ static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
* loads and stores as their encodings mandate the W bit set and the P bit
* clear.
*/
-static const struct arm_instr arm_instr[] = {
+static const struct arm_decode arm_decode[] = {
/**************** Load/Store Word and Byte **********************/
/* Store word with writeback */
- { .opc = 0x04000000, .opc_mask = 0x0c500000, .len = 4, .w = true,
- .sign_extend = false, .decode = decode_arm_ls },
+ { .opc = 0x04000000, .opc_mask = 0x0c500000,
+ .decode = decode_arm_ls,
+ .template = { .len = 4, .w = true, .sign_extend = false, }, },
/* Store byte with writeback */
- { .opc = 0x04400000, .opc_mask = 0x0c500000, .len = 1, .w = true,
- .sign_extend = false, .decode = decode_arm_ls },
+ { .opc = 0x04400000, .opc_mask = 0x0c500000,
+ .decode = decode_arm_ls,
+ .template = { .len = 1, .w = true, .sign_extend = false, }, },
/* Load word with writeback */
- { .opc = 0x04100000, .opc_mask = 0x0c500000, .len = 4, .w = false,
- .sign_extend = false, .decode = decode_arm_ls },
+ { .opc = 0x04100000, .opc_mask = 0x0c500000,
+ .decode = decode_arm_ls,
+ .template = { .len = 4, .w = false, .sign_extend = false, }, },
/* Load byte with writeback */
- { .opc = 0x04500000, .opc_mask = 0x0c500000, .len = 1, .w = false,
- .sign_extend = false, .decode = decode_arm_ls },
+ { .opc = 0x04500000, .opc_mask = 0x0c500000,
+ .decode = decode_arm_ls,
+ .template = { .len = 1, .w = false, .sign_extend = false, }, },
/*************** Extra load/store instructions ******************/
/* Store halfword with writeback */
- { .opc = 0x000000b0, .opc_mask = 0x0c1000f0, .len = 2, .w = true,
- .sign_extend = false, .decode = decode_arm_extra },
+ { .opc = 0x000000b0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = true, .sign_extend = false, }, },
/* Load halfword with writeback */
- { .opc = 0x001000b0, .opc_mask = 0x0c1000f0, .len = 2, .w = false,
- .sign_extend = false, .decode = decode_arm_extra },
-
+ { .opc = 0x001000b0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = false, .sign_extend = false, }, },
/* Load dual with writeback */
- { .opc = 0x000000d0, .opc_mask = 0x0c1000f0, .len = 8, .w = false,
- .sign_extend = false, .decode = decode_arm_extra },
+ { .opc = 0x000000d0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 8, .w = false, .sign_extend = false, }, },
/* Load signed byte with writeback */
- { .opc = 0x001000d0, .opc_mask = 0x0c1000f0, .len = 1, .w = false,
- .sign_extend = true, .decode = decode_arm_extra },
+ { .opc = 0x001000d0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 1, .w = false, .sign_extend = true, }, },
/* Store dual with writeback */
- { .opc = 0x000000f0, .opc_mask = 0x0c1000f0, .len = 8, .w = true,
- .sign_extend = false, .decode = decode_arm_extra },
+ { .opc = 0x000000f0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 8, .w = true, .sign_extend = false, }, },
/* Load signed halfword with writeback */
- { .opc = 0x001000f0, .opc_mask = 0x0c1000f0, .len = 2, .w = false,
- .sign_extend = true, .decode = decode_arm_extra },
+ { .opc = 0x001000f0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = false, .sign_extend = true, }, },
/* Store halfword unprivileged */
- { .opc = 0x002000b0, .opc_mask = 0x0f3000f0, .len = 2, .w = true,
- .sign_extend = false, .decode = decode_arm_extra },
+ { .opc = 0x002000b0, .opc_mask = 0x0f3000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = true, .sign_extend = false, }, },
/* Load halfword unprivileged */
- { .opc = 0x003000b0, .opc_mask = 0x0f3000f0, .len = 2, .w = false,
- .sign_extend = false, .decode = decode_arm_extra },
+ { .opc = 0x003000b0, .opc_mask = 0x0f3000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = false, .sign_extend = false, }, },
/* Load signed byte unprivileged */
- { .opc = 0x003000d0, .opc_mask = 0x0f3000f0, .len = 1, .w = false,
- .sign_extend = true , .decode = decode_arm_extra },
+ { .opc = 0x003000d0, .opc_mask = 0x0f3000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 1, .w = false, .sign_extend = true, }, },
/* Load signed halfword unprivileged */
- { .opc = 0x003000d0, .opc_mask = 0x0f3000f0, .len = 2, .w = false,
- .sign_extend = true , .decode = decode_arm_extra },
+ { .opc = 0x003000d0, .opc_mask = 0x0f3000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = false, .sign_extend = true, }, },
};
static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, unsigned long instr,
@@ -533,11 +549,11 @@ static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, unsigned long instr,
{
int i;
- for (i = 0; i < ARRAY_SIZE(arm_instr); i++) {
- const struct arm_instr *ai = &arm_instr[i];
- if ((instr & ai->opc_mask) == ai->opc) {
- struct arm_instr ai_copy = *ai;
- return ai->decode(vcpu, mmio, instr, &ai_copy);
+ for (i = 0; i < ARRAY_SIZE(arm_decode); i++) {
+ const struct arm_decode *d = &arm_decode[i];
+ if ((instr & d->opc_mask) == d->opc) {
+ struct arm_insn ai = d->template;
+ return d->decode(vcpu, mmio, instr, &ai);
}
}
return false;
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 02/10] kvm: split out instruction decode from emulation.
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
2012-10-24 11:25 ` [PATCH 01/10] kvm: split out instruction structure from decoding method Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 03/10] kvm: split out instruction decode from emulation (thumb instructions) Rusty Russell
` (8 subsequent siblings)
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
This adds more fields which we need to store in the struct arm_insn,
so execute() knows what to do.
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 80 ++++++++++++++++++++++++++----------------------
1 file changed, 43 insertions(+), 37 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index e4fc12b..05b534f 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -303,11 +303,12 @@ struct arm_insn {
u8 type;
u8 Rt, Rn, Rm;
u8 shift_n;
+ u32 offset_addr;
/* Common decoding */
u8 len;
bool sign_extend;
- bool w;
+ bool w, W, U, P;
};
struct arm_decode {
@@ -315,7 +316,7 @@ struct arm_decode {
u32 opc;
u32 opc_mask;
- bool (*decode)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ bool (*decode)(struct kvm_vcpu *vcpu,
unsigned long instr, struct arm_insn *ai);
struct arm_insn template;
@@ -385,31 +386,24 @@ u32 shift(u32 value, u8 N, enum SRType type, u8 amount, bool carry_in)
return value & mask;
}
-static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr, const struct arm_insn *ai)
+static bool decode_arm_wb(struct kvm_vcpu *vcpu, unsigned long instr,
+ struct arm_insn *ai)
{
- u8 Rt = (instr >> 12) & 0xf;
- u8 Rn = (instr >> 16) & 0xf;
- u8 W = (instr >> 21) & 1;
- u8 U = (instr >> 23) & 1;
- u8 P = (instr >> 24) & 1;
- u32 base_addr = *vcpu_reg(vcpu, Rn);
- u32 offset_addr, offset;
+ u32 base_addr, offset;
- /*
- * Technically this is allowed in certain circumstances,
- * but we don't support it.
- */
- if (Rt == 15 || Rn == 15)
- return false;
+ ai->Rt = (instr >> 12) & 0xf;
+ ai->Rn = (instr >> 16) & 0xf;
+ ai->W = (instr >> 21) & 1;
+ ai->U = (instr >> 23) & 1;
+ ai->P = (instr >> 24) & 1;
+
+ base_addr = *vcpu_reg(vcpu, ai->Rn);
- if (P && !W) {
+ if (ai->P && !ai->W) {
kvm_err("Decoding operation with valid ISV?\n");
return false;
}
- vcpu->arch.mmio.rd = Rt;
-
if (ai->register_form) {
/* Register operation */
enum SRType s_type;
@@ -425,46 +419,56 @@ static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
}
/* Handle Writeback */
- if (U)
- offset_addr = base_addr + offset;
+ if (ai->U)
+ ai->offset_addr = base_addr + offset;
else
- offset_addr = base_addr - offset;
- *vcpu_reg(vcpu, Rn) = offset_addr;
+ ai->offset_addr = base_addr - offset;
return true;
}
-static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+static bool decode_arm_ls(struct kvm_vcpu *vcpu,
unsigned long instr, struct arm_insn *ai)
{
u8 A = (instr >> 25) & 1;
- mmio->is_write = ai->w;
- mmio->len = ai->len;
- vcpu->arch.mmio.sign_extend = false;
-
ai->register_form = A;
ai->imm = instr & 0xfff;
ai->Rm = instr & 0xf;
ai->type = (instr >> 5) & 0x3;
ai->shift_n = (instr >> 7) & 0x1f;
- return decode_arm_wb(vcpu, mmio, instr, ai);
+ return decode_arm_wb(vcpu, instr, ai);
}
-static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+static bool decode_arm_extra(struct kvm_vcpu *vcpu,
unsigned long instr, struct arm_insn *ai)
{
- mmio->is_write = ai->w;
- mmio->len = ai->len;
- vcpu->arch.mmio.sign_extend = ai->sign_extend;
-
ai->register_form = !((instr >> 22) & 1);
ai->imm = ((instr >> 4) & 0xf0) | (instr & 0xf);
ai->Rm = instr & 0xf;
ai->type = 0; /* SRType_LSL */
ai->shift_n = 0;
- return decode_arm_wb(vcpu, mmio, instr, ai);
+ return decode_arm_wb(vcpu, instr, ai);
+}
+
+static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ const struct arm_insn *ai)
+{
+ /*
+ * Technically this is allowed in certain circumstances,
+ * but we don't support it.
+ */
+ if (ai->Rt == 15 || ai->Rn == 15)
+ return false;
+
+ mmio->is_write = ai->w;
+ mmio->len = ai->len;
+ vcpu->arch.mmio.sign_extend = ai->sign_extend;
+
+ vcpu->arch.mmio.rd = ai->Rt;
+ *vcpu_reg(vcpu, ai->Rn) = ai->offset_addr;
+ return true;
}
/*
@@ -553,7 +557,9 @@ static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, unsigned long instr,
const struct arm_decode *d = &arm_decode[i];
if ((instr & d->opc_mask) == d->opc) {
struct arm_insn ai = d->template;
- return d->decode(vcpu, mmio, instr, &ai);
+ if (!d->decode(vcpu, instr, &ai))
+ return false;
+ return execute(vcpu, mmio, &ai);
}
}
return false;
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 03/10] kvm: split out instruction decode from emulation (thumb instructions).
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
2012-10-24 11:25 ` [PATCH 01/10] kvm: split out instruction structure from decoding method Rusty Russell
2012-10-24 11:25 ` [PATCH 02/10] kvm: split out instruction decode from emulation Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 04/10] kvm: completely separate decoding from execution Rusty Russell
` (7 subsequent siblings)
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
Add thumb info to struct arm_insn, and use that to store decode
information for later handling in execute_thumb().
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 120 +++++++++++++++++++++++++++++-------------------
1 file changed, 74 insertions(+), 46 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 05b534f..882db33 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -301,7 +301,7 @@ struct arm_insn {
bool register_form;
u32 imm;
u8 type;
- u8 Rt, Rn, Rm;
+ u8 Rt, Rn, Rm, Rd;
u8 shift_n;
u32 offset_addr;
@@ -309,6 +309,21 @@ struct arm_insn {
u8 len;
bool sign_extend;
bool w, W, U, P;
+
+ /* Thumb encoding */
+ bool is_thumb, is_thumb32;
+ union {
+ struct {
+ u8 opcode;
+ u8 mask;
+ } t16;
+
+ struct {
+ u8 op1;
+ u8 op2;
+ u8 op2_mask;
+ } t32;
+ };
};
struct arm_decode {
@@ -565,7 +580,7 @@ static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, unsigned long instr,
return false;
}
-struct thumb_instr {
+struct thumb_decode {
bool is32;
union {
@@ -581,86 +596,95 @@ struct thumb_instr {
} t32;
};
- bool (*decode)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr, const struct thumb_instr *ti);
+ bool (*decode)(struct kvm_vcpu *vcpu,
+ unsigned long instr, struct arm_insn *ti);
};
-static bool decode_thumb_wb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr)
+static bool decode_thumb_wb(struct kvm_vcpu *vcpu,
+ unsigned long instr, struct arm_insn *ti)
{
- bool P = (instr >> 10) & 1;
- bool U = (instr >> 9) & 1;
u8 imm8 = instr & 0xff;
u32 offset_addr = vcpu->arch.hxfar;
- u8 Rn = (instr >> 16) & 0xf;
-
- vcpu->arch.mmio.rd = (instr >> 12) & 0xf;
- if (kvm_vcpu_reg_is_pc(vcpu, Rn))
- return false;
+ ti->P = (instr >> 10) & 1;
+ ti->U = (instr >> 9) & 1;
+ ti->Rn = (instr >> 16) & 0xf;
+ ti->Rd = (instr >> 12) & 0xf;
/* Handle Writeback */
- if (!P && U)
- *vcpu_reg(vcpu, Rn) = offset_addr + imm8;
- else if (!P && !U)
- *vcpu_reg(vcpu, Rn) = offset_addr - imm8;
+ if (!ti->P && ti->U)
+ ti->offset_addr = offset_addr + imm8;
+ else if (!ti->P && !ti->U)
+ ti->offset_addr = offset_addr - imm8;
return true;
}
-static bool decode_thumb_str(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr, const struct thumb_instr *ti)
+static bool decode_thumb_str(struct kvm_vcpu *vcpu,
+ unsigned long instr, struct arm_insn *ti)
{
u8 op1 = (instr >> (16 + 5)) & 0x7;
u8 op2 = (instr >> 6) & 0x3f;
- mmio->is_write = true;
- vcpu->arch.mmio.sign_extend = false;
+ ti->W = true;
+ ti->sign_extend = false;
switch (op1) {
- case 0x0: mmio->len = 1; break;
- case 0x1: mmio->len = 2; break;
- case 0x2: mmio->len = 4; break;
+ case 0x0: ti->len = 1; break;
+ case 0x1: ti->len = 2; break;
+ case 0x2: ti->len = 4; break;
default:
return false; /* Only register write-back versions! */
}
if ((op2 & 0x24) == 0x24) {
/* STRB (immediate, thumb, W=1) */
- return decode_thumb_wb(vcpu, mmio, instr);
+ return decode_thumb_wb(vcpu, instr, ti);
}
return false;
}
-static bool decode_thumb_ldr(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- unsigned long instr, const struct thumb_instr *ti)
+static bool decode_thumb_ldr(struct kvm_vcpu *vcpu,
+ unsigned long instr, struct arm_insn *ti)
{
u8 op1 = (instr >> (16 + 7)) & 0x3;
u8 op2 = (instr >> 6) & 0x3f;
- mmio->is_write = false;
+ ti->W = false;
switch (ti->t32.op2 & 0x7) {
- case 0x1: mmio->len = 1; break;
- case 0x3: mmio->len = 2; break;
- case 0x5: mmio->len = 4; break;
+ case 0x1: ti->len = 1; break;
+ case 0x3: ti->len = 2; break;
+ case 0x5: ti->len = 4; break;
}
if (op1 == 0x0)
- vcpu->arch.mmio.sign_extend = false;
+ ti->sign_extend = false;
else if (op1 == 0x2 && (ti->t32.op2 & 0x7) != 0x5)
- vcpu->arch.mmio.sign_extend = true;
+ ti->sign_extend = true;
else
return false; /* Only register write-back versions! */
if ((op2 & 0x24) == 0x24) {
/* LDR{S}X (immediate, thumb, W=1) */
- return decode_thumb_wb(vcpu, mmio, instr);
+ return decode_thumb_wb(vcpu, instr, ti);
}
return false;
}
+static bool execute_thumb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ const struct arm_insn *ti)
+{
+ if (kvm_vcpu_reg_is_pc(vcpu, ti->Rn))
+ return false;
+
+ if (!ti->P)
+ *vcpu_reg(vcpu, ti->Rn) = ti->offset_addr;
+ vcpu->arch.mmio.sign_extend = ti->sign_extend;
+ return true;
+}
+
/*
* We only support instruction decoding for valid reasonable MMIO operations
* where trapping them do not provide sufficient information in the HSR (no
@@ -673,7 +697,7 @@ static bool decode_thumb_ldr(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
* - any load/store dual
* - anything with the PC as the dest register
*/
-static const struct thumb_instr thumb_instr[] = {
+static const struct thumb_decode thumb_decode[] = {
/**************** 32-bit Thumb instructions **********************/
/* Store single data item: Op1 == 11, Op2 == 000xxx0 */
{ .is32 = true, .t32 = { 3, 0x00, 0x71}, decode_thumb_str },
@@ -686,15 +710,17 @@ static const struct thumb_instr thumb_instr[] = {
};
-
static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
struct kvm_exit_mmio *mmio)
{
- bool is32 = is_wide_instruction(instr);
- bool is16 = !is32;
- struct thumb_instr tinstr; /* re-use to pass on already decoded info */
+ struct arm_insn tinstr; /* re-use to pass on already decoded info */
+ bool is16;
int i;
+ tinstr.is_thumb = true;
+ tinstr.is_thumb32 = is_wide_instruction(instr);
+
+ is16 = !tinstr.is_thumb32;
if (is16) {
tinstr.t16.opcode = (instr >> 10) & 0x3f;
} else {
@@ -702,22 +728,24 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
tinstr.t32.op2 = (instr >> (16 + 4)) & 0x7f;
}
- for (i = 0; i < ARRAY_SIZE(thumb_instr); i++) {
- const struct thumb_instr *ti = &thumb_instr[i];
- if (ti->is32 != is32)
+ for (i = 0; i < ARRAY_SIZE(thumb_decode); i++) {
+ const struct thumb_decode *td = &thumb_decode[i];
+ if (td->is32 != tinstr.is_thumb32)
continue;
if (is16) {
- if ((tinstr.t16.opcode & ti->t16.mask) != ti->t16.opcode)
+ if ((tinstr.t16.opcode & td->t16.mask) != td->t16.opcode)
continue;
} else {
- if (ti->t32.op1 != tinstr.t32.op1)
+ if (td->t32.op1 != tinstr.t32.op1)
continue;
- if ((ti->t32.op2_mask & tinstr.t32.op2) != ti->t32.op2)
+ if ((td->t32.op2_mask & tinstr.t32.op2) != td->t32.op2)
continue;
}
- return ti->decode(vcpu, mmio, instr, &tinstr);
+ if (!td->decode(vcpu, instr, &tinstr))
+ return false;
+ return execute_thumb(vcpu, mmio, &tinstr);
}
return false;
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 04/10] kvm: completely separate decoding from execution.
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
` (2 preceding siblings ...)
2012-10-24 11:25 ` [PATCH 03/10] kvm: split out instruction decode from emulation (thumb instructions) Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 05/10] kvm: move instruction copying inside kvm_decode() Rusty Russell
` (6 subsequent siblings)
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
Now we simply call kvm_decode_ls(), and if it succeeds, call execute() to
handle the instruction.
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 131 ++++++++++++++++++++++++++----------------------
1 file changed, 70 insertions(+), 61 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 882db33..6ebf0ff 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -467,25 +467,6 @@ static bool decode_arm_extra(struct kvm_vcpu *vcpu,
return decode_arm_wb(vcpu, instr, ai);
}
-static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- const struct arm_insn *ai)
-{
- /*
- * Technically this is allowed in certain circumstances,
- * but we don't support it.
- */
- if (ai->Rt == 15 || ai->Rn == 15)
- return false;
-
- mmio->is_write = ai->w;
- mmio->len = ai->len;
- vcpu->arch.mmio.sign_extend = ai->sign_extend;
-
- vcpu->arch.mmio.rd = ai->Rt;
- *vcpu_reg(vcpu, ai->Rn) = ai->offset_addr;
- return true;
-}
-
/*
* The encodings in this table assumes that a fault was generated where the
* ISV field in the HSR was clear, and the decoding information was invalid,
@@ -563,18 +544,16 @@ static const struct arm_decode arm_decode[] = {
.template = { .len = 2, .w = false, .sign_extend = true, }, },
};
-static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, unsigned long instr,
- struct kvm_exit_mmio *mmio)
+static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu,
+ u32 instr, struct arm_insn *ai)
{
int i;
for (i = 0; i < ARRAY_SIZE(arm_decode); i++) {
const struct arm_decode *d = &arm_decode[i];
if ((instr & d->opc_mask) == d->opc) {
- struct arm_insn ai = d->template;
- if (!d->decode(vcpu, instr, &ai))
- return false;
- return execute(vcpu, mmio, &ai);
+ *ai = d->template;
+ return d->decode(vcpu, instr, ai);
}
}
return false;
@@ -673,18 +652,6 @@ static bool decode_thumb_ldr(struct kvm_vcpu *vcpu,
return false;
}
-static bool execute_thumb(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
- const struct arm_insn *ti)
-{
- if (kvm_vcpu_reg_is_pc(vcpu, ti->Rn))
- return false;
-
- if (!ti->P)
- *vcpu_reg(vcpu, ti->Rn) = ti->offset_addr;
- vcpu->arch.mmio.sign_extend = ti->sign_extend;
- return true;
-}
-
/*
* We only support instruction decoding for valid reasonable MMIO operations
* where trapping them do not provide sufficient information in the HSR (no
@@ -710,47 +677,86 @@ static const struct thumb_decode thumb_decode[] = {
};
-static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
- struct kvm_exit_mmio *mmio)
+static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu,
+ u32 instr, struct arm_insn *ti)
{
- struct arm_insn tinstr; /* re-use to pass on already decoded info */
bool is16;
int i;
- tinstr.is_thumb = true;
- tinstr.is_thumb32 = is_wide_instruction(instr);
+ ti->is_thumb = true;
+ ti->is_thumb32 = is_wide_instruction(instr);
- is16 = !tinstr.is_thumb32;
+ is16 = !ti->is_thumb32;
if (is16) {
- tinstr.t16.opcode = (instr >> 10) & 0x3f;
+ ti->t16.opcode = (instr >> 10) & 0x3f;
} else {
- tinstr.t32.op1 = (instr >> (16 + 11)) & 0x3;
- tinstr.t32.op2 = (instr >> (16 + 4)) & 0x7f;
+ ti->t32.op1 = (instr >> (16 + 11)) & 0x3;
+ ti->t32.op2 = (instr >> (16 + 4)) & 0x7f;
}
for (i = 0; i < ARRAY_SIZE(thumb_decode); i++) {
const struct thumb_decode *td = &thumb_decode[i];
- if (td->is32 != tinstr.is_thumb32)
+ if (td->is32 != ti->is_thumb32)
continue;
if (is16) {
- if ((tinstr.t16.opcode & td->t16.mask) != td->t16.opcode)
+ if ((ti->t16.opcode & td->t16.mask) != td->t16.opcode)
continue;
} else {
- if (td->t32.op1 != tinstr.t32.op1)
+ if (td->t32.op1 != ti->t32.op1)
continue;
- if ((td->t32.op2_mask & tinstr.t32.op2) != td->t32.op2)
+ if ((td->t32.op2_mask & ti->t32.op2) != td->t32.op2)
continue;
}
- if (!td->decode(vcpu, instr, &tinstr))
- return false;
- return execute_thumb(vcpu, mmio, &tinstr);
+ return td->decode(vcpu, instr, ti);
}
return false;
}
+static int kvm_decode_ls(struct kvm_vcpu *vcpu, u32 instr, u32 psr,
+ struct arm_insn *ai)
+{
+ bool is_thumb = !!(psr & PSR_T_BIT);
+
+ if (!is_thumb && !kvm_decode_arm_ls(vcpu, instr, ai))
+ return -ENOENT;
+ else if (is_thumb && !kvm_decode_thumb_ls(vcpu, instr, ai))
+ return -ENOENT;
+
+ return 0;
+}
+
+static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
+ const struct arm_insn *ai)
+{
+ if (ai->is_thumb) {
+ if (kvm_vcpu_reg_is_pc(vcpu, ai->Rn))
+ return false;
+
+ if (!ai->P)
+ *vcpu_reg(vcpu, ai->Rn) = ai->offset_addr;
+ vcpu->arch.mmio.sign_extend = ai->sign_extend;
+ return true;
+ }
+
+ /*
+ * Technically this is allowed in certain circumstances,
+ * but we don't support it.
+ */
+ if (ai->Rt == 15 || ai->Rn == 15)
+ return false;
+
+ mmio->is_write = ai->w;
+ mmio->len = ai->len;
+ vcpu->arch.mmio.sign_extend = ai->sign_extend;
+
+ vcpu->arch.mmio.rd = ai->Rt;
+ *vcpu_reg(vcpu, ai->Rn) = ai->offset_addr;
+ return true;
+}
+
/**
* kvm_emulate_mmio_ls - emulates load/store instructions made to I/O memory
* @vcpu: The vcpu pointer
@@ -770,8 +776,9 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, unsigned long instr,
int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
- bool is_thumb;
+ bool is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
unsigned long instr = 0;
+ struct arm_insn insn;
trace_kvm_mmio_emulate(*vcpu_pc(vcpu), instr, *vcpu_cpsr(vcpu));
@@ -780,17 +787,19 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return 1;
mmio->phys_addr = fault_ipa;
- is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
- if (!is_thumb && !kvm_decode_arm_ls(vcpu, instr, mmio)) {
- kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=0)"
+
+ if (kvm_decode_ls(vcpu, instr, *vcpu_cpsr(vcpu), &insn) != 0) {
+ kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=%i)"
"pc: %#08x)\n",
- instr, *vcpu_cpsr(vcpu), *vcpu_pc(vcpu));
+ instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
return 1;
- } else if (is_thumb && !kvm_decode_thumb_ls(vcpu, instr, mmio)) {
- kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=1)"
+ }
+
+ if (!execute(vcpu, mmio, &insn)) {
+ kvm_debug("Unable to execute inst: %#08lx (cpsr: %#08x (T=%i)"
"pc: %#08x)\n",
- instr, *vcpu_cpsr(vcpu), *vcpu_pc(vcpu));
+ instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
return 1;
}
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 05/10] kvm: move instruction copying inside kvm_decode()
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
` (3 preceding siblings ...)
2012-10-24 11:25 ` [PATCH 04/10] kvm: completely separate decoding from execution Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 06/10] kvm: cleanup use of instr Rusty Russell
` (5 subsequent siblings)
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
Now kvm_decode_ls (renamed to kvm_decode) does the copying; we
generalize copy_current_insn() to copy_from_guest(), though it's used
the same way.
This means stopping the kvm threads twice for a wide thumb instruction,
but we've already declared this a v. slow path.
In order for the caller to print out what instruction we couldn't
decode, we add a 'u32 instr' field to struct arm_insn, and always put
the raw instruction in that.
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 95 ++++++++++++++++++++++++++----------------------
1 file changed, 52 insertions(+), 43 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 6ebf0ff..a6af2a5 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -231,13 +231,14 @@ static void do_nothing(void *info)
* Fortunately this is so rare (we don't usually need the instruction), we
* can go very slowly and noone will mind.
*/
-static bool copy_current_insn(struct kvm_vcpu *vcpu, unsigned long *instr)
+static int copy_from_guest(struct kvm_vcpu *vcpu,
+ void *dest,
+ unsigned long gva,
+ size_t len)
{
int i;
bool ret;
struct kvm_vcpu *v;
- bool is_thumb;
- size_t instr_len;
/* Don't cross with IPIs in kvm_main.c */
spin_lock(&vcpu->kvm->mmu_lock);
@@ -256,33 +257,16 @@ static bool copy_current_insn(struct kvm_vcpu *vcpu, unsigned long *instr)
smp_call_function_single(v->cpu, do_nothing, NULL, 1);
}
-
- is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
- instr_len = (is_thumb) ? 2 : 4;
-
- BUG_ON(!is_thumb && *vcpu_pc(vcpu) & 0x3);
-
/* Now guest isn't running, we can va->pa map and copy atomically. */
- ret = copy_from_guest_va(vcpu, instr, *vcpu_pc(vcpu), instr_len,
- vcpu_mode_priv(vcpu));
- if (!ret)
- goto out;
-
- /* A 32-bit thumb2 instruction can actually go over a page boundary! */
- if (is_thumb && is_wide_instruction(*instr)) {
- *instr = *instr << 16;
- ret = copy_from_guest_va(vcpu, instr, *vcpu_pc(vcpu) + 2, 2,
- vcpu_mode_priv(vcpu));
- }
+ ret = copy_from_guest_va(vcpu, dest, gva, len, vcpu_mode_priv(vcpu));
-out:
/* Release them all. */
kvm_for_each_vcpu(i, v, vcpu->kvm)
v->arch.pause = false;
spin_unlock(&vcpu->kvm->mmu_lock);
- return ret;
+ return ret ? 0 : -EFAULT;
}
/******************************************************************************
@@ -297,6 +281,9 @@ enum SRType {
};
struct arm_insn {
+ /* Encoded instruction. */
+ u32 instr;
+
/* Decoding for the register write back */
bool register_form;
u32 imm;
@@ -545,14 +532,16 @@ static const struct arm_decode arm_decode[] = {
};
static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu,
- u32 instr, struct arm_insn *ai)
+ unsigned long instr, struct arm_insn *ai)
{
int i;
for (i = 0; i < ARRAY_SIZE(arm_decode); i++) {
const struct arm_decode *d = &arm_decode[i];
if ((instr & d->opc_mask) == d->opc) {
- *ai = d->template;
+ ai->len = d->template.len;
+ ai->w = d->template.w;
+ ai->sign_extend = d->template.sign_extend;
return d->decode(vcpu, instr, ai);
}
}
@@ -678,7 +667,7 @@ static const struct thumb_decode thumb_decode[] = {
static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu,
- u32 instr, struct arm_insn *ti)
+ unsigned long instr, struct arm_insn *ti)
{
bool is16;
int i;
@@ -715,14 +704,38 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu,
return false;
}
-static int kvm_decode_ls(struct kvm_vcpu *vcpu, u32 instr, u32 psr,
- struct arm_insn *ai)
+static int kvm_decode(struct kvm_vcpu *vcpu, unsigned long pc, u32 psr,
+ struct arm_insn *ai)
{
bool is_thumb = !!(psr & PSR_T_BIT);
+ unsigned int instr_len = is_thumb ? 2 : 4;
+ int err;
+
+ BUG_ON(!is_thumb && (pc & 0x3));
- if (!is_thumb && !kvm_decode_arm_ls(vcpu, instr, ai))
+ /* Zero out high bits for thumb case, and so it's set on error. */
+ ai->instr = 0;
+
+ /* Now guest isn't running, we can va->pa map and copy atomically. */
+ err = copy_from_guest(vcpu, &ai->instr, pc, instr_len);
+ if (err)
+ return err;
+
+ /*
+ * Is it a 32 bit thumb instruction? Can't get it all in 1 go, since
+ * it can actually go over a page boundary.
+ */
+ if (is_thumb && is_wide_instruction(ai->instr)) {
+ ai->instr = ai->instr << 16;
+ err = copy_from_guest(vcpu, &ai->instr,
+ pc + instr_len, instr_len);
+ if (err)
+ return err;
+ }
+
+ if (!is_thumb && !kvm_decode_arm_ls(vcpu, ai->instr, ai))
return -ENOENT;
- else if (is_thumb && !kvm_decode_thumb_ls(vcpu, instr, ai))
+ else if (is_thumb && !kvm_decode_thumb_ls(vcpu, ai->instr, ai))
return -ENOENT;
return 0;
@@ -777,29 +790,25 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
bool is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
- unsigned long instr = 0;
struct arm_insn insn;
-
- trace_kvm_mmio_emulate(*vcpu_pc(vcpu), instr, *vcpu_cpsr(vcpu));
-
- /* If it fails (SMP race?), we reenter guest for it to retry. */
- if (!copy_current_insn(vcpu, &instr))
- return 1;
+ int err;
mmio->phys_addr = fault_ipa;
- if (kvm_decode_ls(vcpu, instr, *vcpu_cpsr(vcpu), &insn) != 0) {
- kvm_debug("Unable to decode inst: %#08lx (cpsr: %#08x (T=%i)"
- "pc: %#08x)\n",
- instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
+ err = kvm_decode(vcpu, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu), &insn);
+ if (err) {
+ kvm_debug("Unable to decode inst: %#08x (cpsr: %#08x (T=%i)"
+ "pc: %#08x) - %i\n",
+ insn.instr, *vcpu_cpsr(vcpu), is_thumb,
+ *vcpu_pc(vcpu), err);
kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
return 1;
}
if (!execute(vcpu, mmio, &insn)) {
- kvm_debug("Unable to execute inst: %#08lx (cpsr: %#08x (T=%i)"
+ kvm_debug("Unable to execute inst: %#08x (cpsr: %#08x (T=%i)"
"pc: %#08x)\n",
- instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
+ insn.instr, *vcpu_cpsr(vcpu), is_thumb, *vcpu_pc(vcpu));
kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
return 1;
}
@@ -808,7 +817,7 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* The MMIO instruction is emulated and should not be re-executed
* in the guest.
*/
- kvm_skip_instr(vcpu, is_wide_instruction(instr));
+ kvm_skip_instr(vcpu, insn.is_thumb32);
return 0;
}
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 06/10] kvm: cleanup use of instr.
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
` (4 preceding siblings ...)
2012-10-24 11:25 ` [PATCH 05/10] kvm: move instruction copying inside kvm_decode() Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 07/10] kvm: clean up use of is_wide_instruction() Rusty Russell
` (4 subsequent siblings)
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
Now that we store instr inside struct arm_insn, we don't need to hand it
around internally.
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 100 ++++++++++++++++++++++--------------------------
1 file changed, 45 insertions(+), 55 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index a6af2a5..d619ad8 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -318,8 +318,7 @@ struct arm_decode {
u32 opc;
u32 opc_mask;
- bool (*decode)(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ai);
+ bool (*decode)(struct kvm_vcpu *vcpu, struct arm_insn *ai);
struct arm_insn template;
};
@@ -388,16 +387,15 @@ u32 shift(u32 value, u8 N, enum SRType type, u8 amount, bool carry_in)
return value & mask;
}
-static bool decode_arm_wb(struct kvm_vcpu *vcpu, unsigned long instr,
- struct arm_insn *ai)
+static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct arm_insn *ai)
{
u32 base_addr, offset;
- ai->Rt = (instr >> 12) & 0xf;
- ai->Rn = (instr >> 16) & 0xf;
- ai->W = (instr >> 21) & 1;
- ai->U = (instr >> 23) & 1;
- ai->P = (instr >> 24) & 1;
+ ai->Rt = (ai->instr >> 12) & 0xf;
+ ai->Rn = (ai->instr >> 16) & 0xf;
+ ai->W = (ai->instr >> 21) & 1;
+ ai->U = (ai->instr >> 23) & 1;
+ ai->P = (ai->instr >> 24) & 1;
base_addr = *vcpu_reg(vcpu, ai->Rn);
@@ -428,30 +426,28 @@ static bool decode_arm_wb(struct kvm_vcpu *vcpu, unsigned long instr,
return true;
}
-static bool decode_arm_ls(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ai)
+static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai)
{
- u8 A = (instr >> 25) & 1;
+ u8 A = (ai->instr >> 25) & 1;
ai->register_form = A;
- ai->imm = instr & 0xfff;
- ai->Rm = instr & 0xf;
- ai->type = (instr >> 5) & 0x3;
- ai->shift_n = (instr >> 7) & 0x1f;
+ ai->imm = ai->instr & 0xfff;
+ ai->Rm = ai->instr & 0xf;
+ ai->type = (ai->instr >> 5) & 0x3;
+ ai->shift_n = (ai->instr >> 7) & 0x1f;
- return decode_arm_wb(vcpu, instr, ai);
+ return decode_arm_wb(vcpu, ai);
}
-static bool decode_arm_extra(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ai)
+static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct arm_insn *ai)
{
- ai->register_form = !((instr >> 22) & 1);
- ai->imm = ((instr >> 4) & 0xf0) | (instr & 0xf);
- ai->Rm = instr & 0xf;
+ ai->register_form = !((ai->instr >> 22) & 1);
+ ai->imm = ((ai->instr >> 4) & 0xf0) | (ai->instr & 0xf);
+ ai->Rm = ai->instr & 0xf;
ai->type = 0; /* SRType_LSL */
ai->shift_n = 0;
- return decode_arm_wb(vcpu, instr, ai);
+ return decode_arm_wb(vcpu, ai);
}
/*
@@ -531,18 +527,17 @@ static const struct arm_decode arm_decode[] = {
.template = { .len = 2, .w = false, .sign_extend = true, }, },
};
-static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ai)
+static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai)
{
int i;
for (i = 0; i < ARRAY_SIZE(arm_decode); i++) {
const struct arm_decode *d = &arm_decode[i];
- if ((instr & d->opc_mask) == d->opc) {
+ if ((ai->instr & d->opc_mask) == d->opc) {
ai->len = d->template.len;
ai->w = d->template.w;
ai->sign_extend = d->template.sign_extend;
- return d->decode(vcpu, instr, ai);
+ return d->decode(vcpu, ai);
}
}
return false;
@@ -564,20 +559,18 @@ struct thumb_decode {
} t32;
};
- bool (*decode)(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ti);
+ bool (*decode)(struct kvm_vcpu *vcpu, struct arm_insn *ti);
};
-static bool decode_thumb_wb(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ti)
+static bool decode_thumb_wb(struct kvm_vcpu *vcpu, struct arm_insn *ti)
{
- u8 imm8 = instr & 0xff;
+ u8 imm8 = ti->instr & 0xff;
u32 offset_addr = vcpu->arch.hxfar;
- ti->P = (instr >> 10) & 1;
- ti->U = (instr >> 9) & 1;
- ti->Rn = (instr >> 16) & 0xf;
- ti->Rd = (instr >> 12) & 0xf;
+ ti->P = (ti->instr >> 10) & 1;
+ ti->U = (ti->instr >> 9) & 1;
+ ti->Rn = (ti->instr >> 16) & 0xf;
+ ti->Rd = (ti->instr >> 12) & 0xf;
/* Handle Writeback */
if (!ti->P && ti->U)
@@ -587,11 +580,10 @@ static bool decode_thumb_wb(struct kvm_vcpu *vcpu,
return true;
}
-static bool decode_thumb_str(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ti)
+static bool decode_thumb_str(struct kvm_vcpu *vcpu, struct arm_insn *ti)
{
- u8 op1 = (instr >> (16 + 5)) & 0x7;
- u8 op2 = (instr >> 6) & 0x3f;
+ u8 op1 = (ti->instr >> (16 + 5)) & 0x7;
+ u8 op2 = (ti->instr >> 6) & 0x3f;
ti->W = true;
ti->sign_extend = false;
@@ -606,17 +598,16 @@ static bool decode_thumb_str(struct kvm_vcpu *vcpu,
if ((op2 & 0x24) == 0x24) {
/* STRB (immediate, thumb, W=1) */
- return decode_thumb_wb(vcpu, instr, ti);
+ return decode_thumb_wb(vcpu, ti);
}
return false;
}
-static bool decode_thumb_ldr(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ti)
+static bool decode_thumb_ldr(struct kvm_vcpu *vcpu, struct arm_insn *ti)
{
- u8 op1 = (instr >> (16 + 7)) & 0x3;
- u8 op2 = (instr >> 6) & 0x3f;
+ u8 op1 = (ti->instr >> (16 + 7)) & 0x3;
+ u8 op2 = (ti->instr >> 6) & 0x3f;
ti->W = false;
@@ -635,7 +626,7 @@ static bool decode_thumb_ldr(struct kvm_vcpu *vcpu,
if ((op2 & 0x24) == 0x24) {
/* LDR{S}X (immediate, thumb, W=1) */
- return decode_thumb_wb(vcpu, instr, ti);
+ return decode_thumb_wb(vcpu, ti);
}
return false;
@@ -666,21 +657,20 @@ static const struct thumb_decode thumb_decode[] = {
};
-static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu,
- unsigned long instr, struct arm_insn *ti)
+static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, struct arm_insn *ti)
{
bool is16;
int i;
ti->is_thumb = true;
- ti->is_thumb32 = is_wide_instruction(instr);
+ ti->is_thumb32 = is_wide_instruction(ti->instr);
is16 = !ti->is_thumb32;
if (is16) {
- ti->t16.opcode = (instr >> 10) & 0x3f;
+ ti->t16.opcode = (ti->instr >> 10) & 0x3f;
} else {
- ti->t32.op1 = (instr >> (16 + 11)) & 0x3;
- ti->t32.op2 = (instr >> (16 + 4)) & 0x7f;
+ ti->t32.op1 = (ti->instr >> (16 + 11)) & 0x3;
+ ti->t32.op2 = (ti->instr >> (16 + 4)) & 0x7f;
}
for (i = 0; i < ARRAY_SIZE(thumb_decode); i++) {
@@ -698,7 +688,7 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu,
continue;
}
- return td->decode(vcpu, instr, ti);
+ return td->decode(vcpu, ti);
}
return false;
@@ -733,9 +723,9 @@ static int kvm_decode(struct kvm_vcpu *vcpu, unsigned long pc, u32 psr,
return err;
}
- if (!is_thumb && !kvm_decode_arm_ls(vcpu, ai->instr, ai))
+ if (!is_thumb && !kvm_decode_arm_ls(vcpu, ai))
return -ENOENT;
- else if (is_thumb && !kvm_decode_thumb_ls(vcpu, ai->instr, ai))
+ else if (is_thumb && !kvm_decode_thumb_ls(vcpu, ai))
return -ENOENT;
return 0;
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 07/10] kvm: clean up use of is_wide_instruction()
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
` (5 preceding siblings ...)
2012-10-24 11:25 ` [PATCH 06/10] kvm: cleanup use of instr Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 08/10] kvm: avoid using vcpu_cpsr() by passing down PSR Rusty Russell
` (3 subsequent siblings)
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
We determine whether we're in thumb mode and whether it's a wide instruction
when we copy it in, so just initialize is_thumb and is_thumb32 there.
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 44 ++++++++++++++++++++++----------------------
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d619ad8..ca24828 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -662,9 +662,6 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, struct arm_insn *ti)
bool is16;
int i;
- ti->is_thumb = true;
- ti->is_thumb32 = is_wide_instruction(ti->instr);
-
is16 = !ti->is_thumb32;
if (is16) {
ti->t16.opcode = (ti->instr >> 10) & 0x3f;
@@ -697,11 +694,13 @@ static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, struct arm_insn *ti)
static int kvm_decode(struct kvm_vcpu *vcpu, unsigned long pc, u32 psr,
struct arm_insn *ai)
{
- bool is_thumb = !!(psr & PSR_T_BIT);
- unsigned int instr_len = is_thumb ? 2 : 4;
int err;
+ unsigned int instr_len;
+
+ ai->is_thumb = psr & PSR_T_BIT;
+ instr_len = ai->is_thumb ? 2 : 4;
- BUG_ON(!is_thumb && (pc & 0x3));
+ BUG_ON(!ai->is_thumb && (pc & 0x3));
/* Zero out high bits for thumb case, and so it's set on error. */
ai->instr = 0;
@@ -711,24 +710,25 @@ static int kvm_decode(struct kvm_vcpu *vcpu, unsigned long pc, u32 psr,
if (err)
return err;
- /*
- * Is it a 32 bit thumb instruction? Can't get it all in 1 go, since
- * it can actually go over a page boundary.
- */
- if (is_thumb && is_wide_instruction(ai->instr)) {
- ai->instr = ai->instr << 16;
- err = copy_from_guest(vcpu, &ai->instr,
- pc + instr_len, instr_len);
- if (err)
- return err;
+ if (ai->is_thumb) {
+ /*
+ * Is it a 32 bit thumb instruction? Can't get it all
+ * in 1 go, since it can actually go over a page
+ * boundary.
+ */
+ ai->is_thumb32 = is_wide_instruction(ai->instr);
+
+ if (ai->is_thumb32) {
+ ai->instr = ai->instr << 16;
+ err = copy_from_guest(vcpu, &ai->instr,
+ pc + instr_len, instr_len);
+ if (err)
+ return err;
+ }
+ /*
+ * The table decoders return bool (true == decoded);
+ * kvm_decode() returns 0 on success, -ENOENT on failure,
+ * and the caller treats any non-zero value as an error.
+ */
+ return kvm_decode_thumb_ls(vcpu, ai) ? 0 : -ENOENT;
}
- if (!is_thumb && !kvm_decode_arm_ls(vcpu, ai))
- return -ENOENT;
- else if (is_thumb && !kvm_decode_thumb_ls(vcpu, ai))
- return -ENOENT;
-
- return 0;
+ return kvm_decode_arm_ls(vcpu, ai) ? 0 : -ENOENT;
}
static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 08/10] kvm: avoid using vcpu_cpsr() by passing down PSR.
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
` (6 preceding siblings ...)
2012-10-24 11:25 ` [PATCH 07/10] kvm: clean up use of is_wide_instruction() Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 09/10] kvm: avoid reference vcpu->arch.hxfar by making thumb offset_addr relative Rusty Russell
` (2 subsequent siblings)
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
We need the C bit, so hand down the psr through the callchain.
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index ca24828..5ac4cf7 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -318,7 +318,7 @@ struct arm_decode {
u32 opc;
u32 opc_mask;
- bool (*decode)(struct kvm_vcpu *vcpu, struct arm_insn *ai);
+ bool (*decode)(struct kvm_vcpu *vcpu, struct arm_insn *ai, u32 psr);
struct arm_insn template;
};
@@ -387,7 +387,7 @@ u32 shift(u32 value, u8 N, enum SRType type, u8 amount, bool carry_in)
return value & mask;
}
-static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct arm_insn *ai)
+static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct arm_insn *ai, u32 psr)
{
u32 base_addr, offset;
@@ -408,7 +408,7 @@ static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct arm_insn *ai)
/* Register operation */
enum SRType s_type;
u8 shift_n;
- bool c_bit = *vcpu_cpsr(vcpu) & PSR_C_BIT;
+ bool c_bit = psr & PSR_C_BIT;
u32 s_reg = *vcpu_reg(vcpu, ai->Rm);
s_type = decode_imm_shift(ai->type, ai->shift_n, &shift_n);
@@ -426,7 +426,7 @@ static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct arm_insn *ai)
return true;
}
-static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai)
+static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai, u32 psr)
{
u8 A = (ai->instr >> 25) & 1;
@@ -436,10 +436,11 @@ static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai)
ai->type = (ai->instr >> 5) & 0x3;
ai->shift_n = (ai->instr >> 7) & 0x1f;
- return decode_arm_wb(vcpu, ai);
+ return decode_arm_wb(vcpu, ai, psr);
}
-static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct arm_insn *ai)
+static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct arm_insn *ai,
+ u32 psr)
{
ai->register_form = !((ai->instr >> 22) & 1);
ai->imm = ((ai->instr >> 4) & 0xf0) | (ai->instr & 0xf);
@@ -447,7 +448,7 @@ static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct arm_insn *ai)
ai->type = 0; /* SRType_LSL */
ai->shift_n = 0;
- return decode_arm_wb(vcpu, ai);
+ return decode_arm_wb(vcpu, ai, psr);
}
/*
@@ -527,7 +528,8 @@ static const struct arm_decode arm_decode[] = {
.template = { .len = 2, .w = false, .sign_extend = true, }, },
};
-static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai)
+static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai,
+ u32 psr)
{
int i;
@@ -537,7 +539,7 @@ static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai)
ai->len = d->template.len;
ai->w = d->template.w;
ai->sign_extend = d->template.sign_extend;
- return d->decode(vcpu, ai);
+ return d->decode(vcpu, ai, psr);
}
}
return false;
@@ -728,7 +730,7 @@ static int kvm_decode(struct kvm_vcpu *vcpu, unsigned long pc, u32 psr,
return kvm_decode_thumb_ls(vcpu, ai);
}
- return kvm_decode_arm_ls(vcpu, ai);
+ return kvm_decode_arm_ls(vcpu, ai, psr);
}
static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 09/10] kvm: avoid reference vcpu->arch.hxfar by making thumb offset_addr relative
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
` (7 preceding siblings ...)
2012-10-24 11:25 ` [PATCH 08/10] kvm: avoid using vcpu_cpsr() by passing down PSR Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 11:25 ` [PATCH 10/10] opcode: move generic instruction decode out of KVM Rusty Russell
2012-10-24 16:27 ` RFC: Move kvm's instruction decoding into generic code Dave Martin
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
For generic code we won't know the hdfar, so make the offset_addr relative
in thumb mode.
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/kvm/emulate.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 5ac4cf7..c0014e1 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -567,18 +567,17 @@ struct thumb_decode {
static bool decode_thumb_wb(struct kvm_vcpu *vcpu, struct arm_insn *ti)
{
u8 imm8 = ti->instr & 0xff;
- u32 offset_addr = vcpu->arch.hxfar;
ti->P = (ti->instr >> 10) & 1;
ti->U = (ti->instr >> 9) & 1;
ti->Rn = (ti->instr >> 16) & 0xf;
ti->Rd = (ti->instr >> 12) & 0xf;
- /* Handle Writeback */
+ /* Handle Writeback: offset_addr relative to fault address. */
if (!ti->P && ti->U)
- ti->offset_addr = offset_addr + imm8;
+ ti->offset_addr = imm8;
else if (!ti->P && !ti->U)
- ti->offset_addr = offset_addr - imm8;
+ ti->offset_addr = -imm8;
return true;
}
@@ -740,8 +739,10 @@ static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
if (kvm_vcpu_reg_is_pc(vcpu, ai->Rn))
return false;
- if (!ai->P)
- *vcpu_reg(vcpu, ai->Rn) = ai->offset_addr;
+ if (!ai->P) {
+ *vcpu_reg(vcpu, ai->Rn)
+ = vcpu->arch.hxfar + ai->offset_addr;
+ }
vcpu->arch.mmio.sign_extend = ai->sign_extend;
return true;
}
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* [PATCH 10/10] opcode: move generic instruction decode out of KVM.
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
` (8 preceding siblings ...)
2012-10-24 11:25 ` [PATCH 09/10] kvm: avoid reference vcpu->arch.hxfar by making thumb offset_addr relative Rusty Russell
@ 2012-10-24 11:25 ` Rusty Russell
2012-10-24 16:27 ` RFC: Move kvm's instruction decoding into generic code Dave Martin
10 siblings, 0 replies; 28+ messages in thread
From: Rusty Russell @ 2012-10-24 11:25 UTC (permalink / raw)
To: Will Deacon
Cc: Christoffer Dall, kvm@vger.kernel.org, dave.martin, Rusty Russell
From: Rusty Russell <rusty.russell@linaro.org>
We add some callbacks and use them instead of using struct kvm_vcpu or
vcpu_reg() directly. We also remove the kvm_ prefixes, and call the
top-level function decode_insn() instead of kvm_decode().
Signed-off-by: Rusty Russell <rusty.russell@linaro.org>
---
arch/arm/include/asm/opcodes.h | 38 ++++
arch/arm/kernel/opcodes.c | 438 +++++++++++++++++++++++++++++++++++++
arch/arm/kvm/emulate.c | 473 +---------------------------------------
3 files changed, 484 insertions(+), 465 deletions(-)
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
index 74e211a..7ceb7fb 100644
--- a/arch/arm/include/asm/opcodes.h
+++ b/arch/arm/include/asm/opcodes.h
@@ -11,6 +11,44 @@
#ifndef __ASSEMBLY__
extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
+
+/*
+ * Decoded state of a single ARM/Thumb load/store instruction, filled in
+ * by decode_insn() and its table-driven helpers.
+ */
+struct arm_insn {
+ /* Encoded instruction. */
+ u32 instr;
+
+ /* Decoding for the register write back */
+ bool register_form;
+ u32 imm;
+ u8 type;
+ u8 Rt, Rn, Rm, Rd;
+ u8 shift_n;
+ u32 offset_addr;
+
+ /* Common decoding */
+ u8 len;
+ bool sign_extend;
+ bool w, W, U, P;
+
+ /* Thumb encoding */
+ bool is_thumb, is_thumb32;
+ union {
+ struct {
+ u8 opcode;
+ u8 mask;
+ } t16;
+
+ struct {
+ u8 op1;
+ u8 op2;
+ u8 op2_mask;
+ } t32;
+ };
+};
+
+/*
+ * Fetch the instruction at @pc via @copy_from and decode it into @ai.
+ * @get_regval reads register values for write-back computation; @priv
+ * is passed through to both callbacks. Returns a negative errno if the
+ * instruction cannot be fetched.
+ */
+int decode_insn(struct arm_insn *ai, unsigned long pc, u32 psr,
+ int (*copy_from)(void *dst, unsigned long addr, size_t, void *),
+ u32 (*get_regval)(int reg, void *),
+ void *priv);
#endif
#define ARM_OPCODE_CONDTEST_FAIL 0
diff --git a/arch/arm/kernel/opcodes.c b/arch/arm/kernel/opcodes.c
index f8179c6..b6c7940 100644
--- a/arch/arm/kernel/opcodes.c
+++ b/arch/arm/kernel/opcodes.c
@@ -70,3 +70,441 @@ asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr)
return ret;
}
EXPORT_SYMBOL_GPL(arm_check_condition);
+
+/******************************************************************************
+ * Load-Store instruction emulation
+ *****************************************************************************/
+enum SRType {
+ SRType_LSL,
+ SRType_LSR,
+ SRType_ASR,
+ SRType_ROR,
+ SRType_RRX
+};
+
+struct arm_decode {
+ /* Instruction decoding */
+ u32 opc;
+ u32 opc_mask;
+
+ bool (*decode)(struct arm_insn *ai, u32 psr,
+ u32 (*get_regval)(int reg, void *priv), void *priv);
+
+ struct arm_insn template;
+};
+
+/*
+ * Modelled after DecodeImmShift() in the ARM ARM.
+ *
+ * Maps the 2-bit immediate-shift 'type' field and 5-bit 'imm5' to a
+ * shift kind, storing the effective shift amount in *amount (imm5 == 0
+ * encodes a 32-bit shift for LSR/ASR, and RRX with amount 1 for ROR).
+ */
+enum SRType decode_imm_shift(u8 type, u8 imm5, u8 *amount)
+{
+ switch (type) {
+ case 0x0:
+ *amount = imm5;
+ return SRType_LSL;
+ case 0x1:
+ *amount = (imm5 == 0) ? 32 : imm5;
+ return SRType_LSR;
+ case 0x2:
+ *amount = (imm5 == 0) ? 32 : imm5;
+ return SRType_ASR;
+ case 0x3:
+ if (imm5 == 0) {
+ *amount = 1;
+ return SRType_RRX;
+ } else {
+ *amount = imm5;
+ return SRType_ROR;
+ }
+ }
+
+ /* Unreachable when callers mask 'type' to 2 bits; *amount unset here. */
+ return SRType_LSL;
+}
+
+/*
+ * Modelled after Shift() in the ARM ARM: shift the N-bit 'value' by
+ * 'amount' using the given shift type and return the result masked to
+ * N bits. carry_in is only consulted for RRX.
+ */
+u32 shift(u32 value, u8 N, enum SRType type, u8 amount, bool carry_in)
+{
+ /* (1 << 32) is undefined behaviour in C, so special-case N == 32. */
+ u32 mask = (N == 32) ? ~(u32)0 : ((1U << N) - 1);
+ s32 svalue = (s32)value;
+
+ BUG_ON(N > 32);
+ BUG_ON(type == SRType_RRX && amount != 1);
+ BUG_ON(amount > N);
+
+ if (amount == 0)
+ return value;
+
+ switch (type) {
+ case SRType_LSL:
+ value <<= amount;
+ break;
+ case SRType_LSR:
+ value >>= amount;
+ break;
+ case SRType_ASR:
+ /*
+ * Replicate the sign bit of the N-bit value before the
+ * arithmetic shift; for N == 32 svalue already carries it
+ * (and shifting by N would be undefined behaviour).
+ */
+ if (N < 32 && (value & (1U << (N - 1))))
+ svalue |= ((-1UL) << N);
+ value = svalue >> amount;
+ break;
+ case SRType_ROR:
+ value = (value >> amount) | (value << (N - amount));
+ break;
+ case SRType_RRX: {
+ u32 C = (carry_in) ? 1 : 0;
+ value = (value >> 1) | (C << (N - 1));
+ break;
+ }
+ }
+
+ return value & mask;
+}
+
+/*
+ * Decode the write-back fields common to the ARM load/store encodings:
+ * extract Rt/Rn/W/U/P from ai->instr and compute ai->offset_addr from
+ * the base register (read via get_regval) plus or minus the offset.
+ *
+ * Returns false for P=1,W=0 forms, which should have had valid ISV
+ * decode information in the HSR and therefore never reach this path.
+ */
+static bool decode_arm_wb(struct arm_insn *ai, u32 psr,
+ u32 (*get_regval)(int reg, void *priv), void *priv)
+{
+ u32 base_addr, offset;
+
+ ai->Rt = (ai->instr >> 12) & 0xf;
+ ai->Rn = (ai->instr >> 16) & 0xf;
+ ai->W = (ai->instr >> 21) & 1;
+ ai->U = (ai->instr >> 23) & 1;
+ ai->P = (ai->instr >> 24) & 1;
+
+ base_addr = get_regval(ai->Rn, priv);
+
+ if (ai->P && !ai->W) {
+ pr_err("Decoding operation with valid ISV?\n");
+ return false;
+ }
+
+ if (ai->register_form) {
+ /* Register operation */
+ enum SRType s_type;
+ u8 shift_n;
+ bool c_bit = psr & PSR_C_BIT;
+ u32 s_reg = get_regval(ai->Rm, priv);
+
+ s_type = decode_imm_shift(ai->type, ai->shift_n, &shift_n);
+ /*
+ * NOTE(review): N=5 masks the shifted offset to 5 bits,
+ * but a register offset is 32 bits wide — confirm against
+ * the Shift() usage in the ARM ARM.
+ */
+ offset = shift(s_reg, 5, s_type, shift_n, c_bit);
+ } else {
+ /* Immediate operation */
+ offset = ai->imm;
+ }
+
+ /* Handle Writeback */
+ if (ai->U)
+ ai->offset_addr = base_addr + offset;
+ else
+ ai->offset_addr = base_addr - offset;
+ return true;
+}
+
+/*
+ * Decode an ARM load/store word/byte: bit 25 (A) selects register vs.
+ * immediate offset form; then fill in the common write-back fields.
+ */
+static bool decode_arm_ls(struct arm_insn *ai, u32 psr,
+ u32 (*get_regval)(int reg, void *priv), void *priv)
+{
+ u8 A = (ai->instr >> 25) & 1;
+
+ ai->register_form = A;
+ ai->imm = ai->instr & 0xfff;
+ ai->Rm = ai->instr & 0xf;
+ ai->type = (ai->instr >> 5) & 0x3;
+ ai->shift_n = (ai->instr >> 7) & 0x1f;
+
+ return decode_arm_wb(ai, psr, get_regval, priv);
+}
+
+/*
+ * Decode an ARM "extra" load/store (halfword, dual, signed byte): the
+ * immediate is split across two 4-bit fields, bit 22 clear selects the
+ * register form, and these encodings never carry an immediate shift.
+ */
+static bool decode_arm_extra(struct arm_insn *ai, u32 psr,
+ u32 (*get_regval)(int reg, void *priv), void *priv)
+{
+ ai->register_form = !((ai->instr >> 22) & 1);
+ ai->imm = ((ai->instr >> 4) & 0xf0) | (ai->instr & 0xf);
+ ai->Rm = ai->instr & 0xf;
+ ai->type = 0; /* SRType_LSL */
+ ai->shift_n = 0;
+
+ return decode_arm_wb(ai, psr, get_regval, priv);
+}
+
+/*
+ * The encodings in this table assume that a fault was generated where the
+ * ISV field in the HSR was clear, and the decoding information was invalid,
+ * which means that a register write-back occurred, the PC was used as the
+ * destination or a load/store multiple operation was used. Since the latter
+ * two cases are crazy for MMIO on the guest side, we simply inject a fault
+ * when this happens and support the common case.
+ *
+ * We treat unprivileged loads and stores of words and bytes like all other
+ * loads and stores as their encodings mandate the W bit set and the P bit
+ * clear.
+ */
+static const struct arm_decode arm_decode[] = {
+ /**************** Load/Store Word and Byte **********************/
+ /* Store word with writeback */
+ { .opc = 0x04000000, .opc_mask = 0x0c500000,
+ .decode = decode_arm_ls,
+ .template = { .len = 4, .w = true, .sign_extend = false, }, },
+ /* Store byte with writeback */
+ { .opc = 0x04400000, .opc_mask = 0x0c500000,
+ .decode = decode_arm_ls,
+ .template = { .len = 1, .w = true, .sign_extend = false, }, },
+ /* Load word with writeback */
+ { .opc = 0x04100000, .opc_mask = 0x0c500000,
+ .decode = decode_arm_ls,
+ .template = { .len = 4, .w = false, .sign_extend = false, }, },
+ /* Load byte with writeback */
+ { .opc = 0x04500000, .opc_mask = 0x0c500000,
+ .decode = decode_arm_ls,
+ .template = { .len = 1, .w = false, .sign_extend = false, }, },
+
+ /*************** Extra load/store instructions ******************/
+
+ /* Store halfword with writeback */
+ { .opc = 0x000000b0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = true, .sign_extend = false, }, },
+ /* Load halfword with writeback */
+ { .opc = 0x001000b0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = false, .sign_extend = false, }, },
+ /* Load dual with writeback */
+ { .opc = 0x000000d0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 8, .w = false, .sign_extend = false, }, },
+ /* Load signed byte with writeback */
+ { .opc = 0x001000d0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 1, .w = false, .sign_extend = true, }, },
+
+ /* Store dual with writeback */
+ { .opc = 0x000000f0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 8, .w = true, .sign_extend = false, }, },
+ /* Load signed halfword with writeback */
+ { .opc = 0x001000f0, .opc_mask = 0x0c1000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = false, .sign_extend = true, }, },
+
+ /* Store halfword unprivileged */
+ { .opc = 0x002000b0, .opc_mask = 0x0f3000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = true, .sign_extend = false, }, },
+ /* Load halfword unprivileged */
+ { .opc = 0x003000b0, .opc_mask = 0x0f3000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = false, .sign_extend = false, }, },
+ /* Load signed byte unprivileged */
+ { .opc = 0x003000d0, .opc_mask = 0x0f3000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 1, .w = false, .sign_extend = true, }, },
+ /* Load signed halfword unprivileged (LDRSHT: bits [7:4] == 0xf) */
+ { .opc = 0x003000f0, .opc_mask = 0x0f3000f0,
+ .decode = decode_arm_extra,
+ .template = { .len = 2, .w = false, .sign_extend = true, }, },
+};
+
+/*
+ * Look up ai->instr in the ARM decode table; on the first match, copy
+ * the entry's access length/width/sign-extension template into *ai and
+ * run its decode callback. Returns false if no entry matches.
+ */
+static bool decode_arm(struct arm_insn *ai, u32 psr,
+ u32 (*get_regval)(int reg, void *), void *priv)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(arm_decode); i++) {
+ const struct arm_decode *d = &arm_decode[i];
+ if ((ai->instr & d->opc_mask) == d->opc) {
+ ai->len = d->template.len;
+ ai->w = d->template.w;
+ ai->sign_extend = d->template.sign_extend;
+ return d->decode(ai, psr, get_regval, priv);
+ }
+ }
+ return false;
+}
+
+/*
+ * Table entry for matching Thumb load/store encodings: 16-bit entries
+ * match on the masked top opcode bits, 32-bit entries on op1 plus a
+ * masked op2. 'decode' finishes extracting fields into the arm_insn.
+ */
+struct thumb_decode {
+ bool is32;
+
+ union {
+ struct {
+ u8 opcode;
+ u8 mask;
+ } t16;
+
+ struct {
+ u8 op1;
+ u8 op2;
+ u8 op2_mask;
+ } t32;
+ };
+
+ bool (*decode)(struct arm_insn *ti);
+};
+
+/*
+ * Extract the common Thumb-2 write-back fields (P/U/Rn/Rd) and the
+ * 8-bit immediate. offset_addr holds the signed offset relative to the
+ * fault address; the caller adds the base when applying write-back.
+ */
+static bool decode_thumb_wb(struct arm_insn *ti)
+{
+ u8 imm8 = ti->instr & 0xff;
+
+ ti->P = (ti->instr >> 10) & 1;
+ ti->U = (ti->instr >> 9) & 1;
+ ti->Rn = (ti->instr >> 16) & 0xf;
+ ti->Rd = (ti->instr >> 12) & 0xf;
+
+ /* Handle Writeback: offset_addr relative to fault address. */
+ if (!ti->P && ti->U)
+ ti->offset_addr = imm8;
+ else if (!ti->P && !ti->U)
+ ti->offset_addr = -imm8;
+ return true;
+}
+
+/*
+ * Decode a 32-bit Thumb store: op1 selects the access size (byte,
+ * halfword, word); only the register write-back form (op2 pattern
+ * 1xx1xx) is supported — everything else returns false.
+ */
+static bool decode_thumb_str(struct arm_insn *ti)
+{
+ u8 op1 = (ti->instr >> (16 + 5)) & 0x7;
+ u8 op2 = (ti->instr >> 6) & 0x3f;
+
+ ti->W = true;
+ ti->sign_extend = false;
+
+ switch (op1) {
+ case 0x0: ti->len = 1; break;
+ case 0x1: ti->len = 2; break;
+ case 0x2: ti->len = 4; break;
+ default:
+ return false; /* Only register write-back versions! */
+ }
+
+ if ((op2 & 0x24) == 0x24) {
+ /* STRB (immediate, thumb, W=1) */
+ return decode_thumb_wb(ti);
+ }
+
+ return false;
+}
+
+/*
+ * Decode a 32-bit Thumb load: the low bits of op2 select the access
+ * size, op1 distinguishes zero- vs. sign-extending loads; only the
+ * register write-back form (op2 pattern 1xx1xx) is supported.
+ *
+ * NOTE(review): the size switch has no default, so ti->len is left
+ * unchanged for op2 & 0x7 not in {1, 3, 5}; this relies on the
+ * thumb_decode table only dispatching those patterns — confirm.
+ */
+static bool decode_thumb_ldr(struct arm_insn *ti)
+{
+ u8 op1 = (ti->instr >> (16 + 7)) & 0x3;
+ u8 op2 = (ti->instr >> 6) & 0x3f;
+
+ ti->W = false;
+
+ switch (ti->t32.op2 & 0x7) {
+ case 0x1: ti->len = 1; break;
+ case 0x3: ti->len = 2; break;
+ case 0x5: ti->len = 4; break;
+ }
+
+ if (op1 == 0x0)
+ ti->sign_extend = false;
+ else if (op1 == 0x2 && (ti->t32.op2 & 0x7) != 0x5)
+ ti->sign_extend = true;
+ else
+ return false; /* Only register write-back versions! */
+
+ if ((op2 & 0x24) == 0x24) {
+ /* LDR{S}X (immediate, thumb, W=1) */
+ return decode_thumb_wb(ti);
+ }
+
+ return false;
+}
+
+/*
+ * We only support instruction decoding for valid reasonable MMIO operations
+ * where trapping them do not provide sufficient information in the HSR (no
+ * 16-bit Thumb instructions provide register writeback that we care about).
+ *
+ * The following instruction types are NOT supported for MMIO operations
+ * despite the HSR not containing decode info:
+ * - any Load/Store multiple
+ * - any load/store exclusive
+ * - any load/store dual
+ * - anything with the PC as the dest register
+ */
+static const struct thumb_decode thumb_decode[] = {
+ /**************** 32-bit Thumb instructions **********************/
+ /* Store single data item: Op1 == 11, Op2 == 000xxx0 */
+ { .is32 = true, .t32 = { 3, 0x00, 0x71}, decode_thumb_str },
+ /* Load byte: Op1 == 11, Op2 == 00xx001 */
+ { .is32 = true, .t32 = { 3, 0x01, 0x67}, decode_thumb_ldr },
+ /* Load halfword: Op1 == 11, Op2 == 00xx011 */
+ { .is32 = true, .t32 = { 3, 0x03, 0x67}, decode_thumb_ldr },
+ /* Load word: Op1 == 11, Op2 == 00xx101 */
+ { .is32 = true, .t32 = { 3, 0x05, 0x67}, decode_thumb_ldr },
+};
+
+
+/*
+ * Dispatch a Thumb instruction through the thumb_decode table. Splits
+ * out the t16 opcode or t32 op1/op2 match fields from ti->instr (the
+ * 32-bit form was shifted so the first halfword is in the high bits),
+ * then runs the first matching entry's decode callback.
+ */
+static bool decode_thumb(struct arm_insn *ti)
+{
+ bool is16;
+ int i;
+
+ is16 = !ti->is_thumb32;
+ if (is16) {
+ ti->t16.opcode = (ti->instr >> 10) & 0x3f;
+ } else {
+ ti->t32.op1 = (ti->instr >> (16 + 11)) & 0x3;
+ ti->t32.op2 = (ti->instr >> (16 + 4)) & 0x7f;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(thumb_decode); i++) {
+ const struct thumb_decode *td = &thumb_decode[i];
+ if (td->is32 != ti->is_thumb32)
+ continue;
+
+ if (is16) {
+ if ((ti->t16.opcode & td->t16.mask) != td->t16.opcode)
+ continue;
+ } else {
+ if (td->t32.op1 != ti->t32.op1)
+ continue;
+ if ((td->t32.op2_mask & ti->t32.op2) != td->t32.op2)
+ continue;
+ }
+
+ return td->decode(ti);
+ }
+
+ return false;
+}
+
+/*
+ * Fetch the instruction at @pc via @copy_from and decode it into @ai.
+ * @get_regval reads register values for write-back computation; @priv
+ * is passed through to both callbacks. Returns 0 on success, a negative
+ * errno from @copy_from on fetch failure, or -ENOENT if the instruction
+ * is not one we can decode.
+ */
+int decode_insn(struct arm_insn *ai, unsigned long pc, u32 psr,
+ int (*copy_from)(void *dst, unsigned long addr, size_t, void *),
+ u32 (*get_regval)(int reg, void *),
+ void *priv)
+{
+ int err;
+ unsigned int instr_len;
+
+ /*
+ * ai is a pointer: clear the pointed-to struct, not the pointer
+ * itself. This also zeroes ai->instr, keeping the high bits clear
+ * for the 16-bit thumb case and leaving it 0 on error.
+ */
+ memset(ai, 0, sizeof(*ai));
+
+ ai->is_thumb = psr & PSR_T_BIT;
+ instr_len = ai->is_thumb ? 2 : 4;
+
+ BUG_ON(!ai->is_thumb && (pc & 0x3));
+
+ /* Now guest isn't running, we can va->pa map and copy atomically. */
+ err = copy_from(&ai->instr, pc, instr_len, priv);
+ if (err)
+ return err;
+
+ if (ai->is_thumb) {
+ /*
+ * Is it a 32 bit thumb instruction? Can't get it all
+ * in 1 go, since it can actually go over a page
+ * boundary.
+ */
+ ai->is_thumb32 = is_wide_instruction(ai->instr);
+
+ if (ai->is_thumb32) {
+ ai->instr = ai->instr << 16;
+ err = copy_from(&ai->instr, pc + instr_len, instr_len,
+ priv);
+ if (err)
+ return err;
+ }
+ /* Decoders return bool; convert to the 0/-ENOENT contract. */
+ return decode_thumb(ai) ? 0 : -ENOENT;
+ }
+
+ return decode_arm(ai, psr, get_regval, priv) ? 0 : -ENOENT;
+}
+EXPORT_SYMBOL_GPL(decode_insn);
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index c0014e1..eeebd33 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -20,6 +20,7 @@
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
+#include <asm/opcodes.h>
#include <trace/events/kvm.h>
#include "trace.h"
@@ -231,11 +232,10 @@ static void do_nothing(void *info)
* Fortunately this is so rare (we don't usually need the instruction), we
* can go very slowly and noone will mind.
*/
-static int copy_from_guest(struct kvm_vcpu *vcpu,
- void *dest,
- unsigned long gva,
- size_t len)
+static int copy_from_guest(void *dest, unsigned long gva, size_t len,
+ void *_vcpu)
{
+ struct kvm_vcpu *vcpu = _vcpu;
int i;
bool ret;
struct kvm_vcpu *v;
@@ -269,467 +269,9 @@ static int copy_from_guest(struct kvm_vcpu *vcpu,
return ret ? 0 : -EFAULT;
}
-/******************************************************************************
- * Load-Store instruction emulation
- *****************************************************************************/
-enum SRType {
- SRType_LSL,
- SRType_LSR,
- SRType_ASR,
- SRType_ROR,
- SRType_RRX
-};
-
-struct arm_insn {
- /* Encoded instruction. */
- u32 instr;
-
- /* Decoding for the register write back */
- bool register_form;
- u32 imm;
- u8 type;
- u8 Rt, Rn, Rm, Rd;
- u8 shift_n;
- u32 offset_addr;
-
- /* Common decoding */
- u8 len;
- bool sign_extend;
- bool w, W, U, P;
-
- /* Thumb encoding */
- bool is_thumb, is_thumb32;
- union {
- struct {
- u8 opcode;
- u8 mask;
- } t16;
-
- struct {
- u8 op1;
- u8 op2;
- u8 op2_mask;
- } t32;
- };
-};
-
-struct arm_decode {
- /* Instruction decoding */
- u32 opc;
- u32 opc_mask;
-
- bool (*decode)(struct kvm_vcpu *vcpu, struct arm_insn *ai, u32 psr);
-
- struct arm_insn template;
-};
-
-/* Modelled after DecodeImmShift() in the ARM ARM */
-enum SRType decode_imm_shift(u8 type, u8 imm5, u8 *amount)
-{
- switch (type) {
- case 0x0:
- *amount = imm5;
- return SRType_LSL;
- case 0x1:
- *amount = (imm5 == 0) ? 32 : imm5;
- return SRType_LSR;
- case 0x2:
- *amount = (imm5 == 0) ? 32 : imm5;
- return SRType_ASR;
- case 0x3:
- if (imm5 == 0) {
- *amount = 1;
- return SRType_RRX;
- } else {
- *amount = imm5;
- return SRType_ROR;
- }
- }
-
- return SRType_LSL;
-}
-
-/* Modelled after Shift() in the ARM ARM */
-u32 shift(u32 value, u8 N, enum SRType type, u8 amount, bool carry_in)
+static u32 get_guest_reg(int reg, void *_vcpu)
{
- u32 mask = (1 << N) - 1;
- s32 svalue = (s32)value;
-
- BUG_ON(N > 32);
- BUG_ON(type == SRType_RRX && amount != 1);
- BUG_ON(amount > N);
-
- if (amount == 0)
- return value;
-
- switch (type) {
- case SRType_LSL:
- value <<= amount;
- break;
- case SRType_LSR:
- value >>= amount;
- break;
- case SRType_ASR:
- if (value & (1 << (N - 1)))
- svalue |= ((-1UL) << N);
- value = svalue >> amount;
- break;
- case SRType_ROR:
- value = (value >> amount) | (value << (N - amount));
- break;
- case SRType_RRX: {
- u32 C = (carry_in) ? 1 : 0;
- value = (value >> 1) | (C << (N - 1));
- break;
- }
- }
-
- return value & mask;
-}
-
-static bool decode_arm_wb(struct kvm_vcpu *vcpu, struct arm_insn *ai, u32 psr)
-{
- u32 base_addr, offset;
-
- ai->Rt = (ai->instr >> 12) & 0xf;
- ai->Rn = (ai->instr >> 16) & 0xf;
- ai->W = (ai->instr >> 21) & 1;
- ai->U = (ai->instr >> 23) & 1;
- ai->P = (ai->instr >> 24) & 1;
-
- base_addr = *vcpu_reg(vcpu, ai->Rn);
-
- if (ai->P && !ai->W) {
- kvm_err("Decoding operation with valid ISV?\n");
- return false;
- }
-
- if (ai->register_form) {
- /* Register operation */
- enum SRType s_type;
- u8 shift_n;
- bool c_bit = psr & PSR_C_BIT;
- u32 s_reg = *vcpu_reg(vcpu, ai->Rm);
-
- s_type = decode_imm_shift(ai->type, ai->shift_n, &shift_n);
- offset = shift(s_reg, 5, s_type, shift_n, c_bit);
- } else {
- /* Immediate operation */
- offset = ai->imm;
- }
-
- /* Handle Writeback */
- if (ai->U)
- ai->offset_addr = base_addr + offset;
- else
- ai->offset_addr = base_addr - offset;
- return true;
-}
-
-static bool decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai, u32 psr)
-{
- u8 A = (ai->instr >> 25) & 1;
-
- ai->register_form = A;
- ai->imm = ai->instr & 0xfff;
- ai->Rm = ai->instr & 0xf;
- ai->type = (ai->instr >> 5) & 0x3;
- ai->shift_n = (ai->instr >> 7) & 0x1f;
-
- return decode_arm_wb(vcpu, ai, psr);
-}
-
-static bool decode_arm_extra(struct kvm_vcpu *vcpu, struct arm_insn *ai,
- u32 psr)
-{
- ai->register_form = !((ai->instr >> 22) & 1);
- ai->imm = ((ai->instr >> 4) & 0xf0) | (ai->instr & 0xf);
- ai->Rm = ai->instr & 0xf;
- ai->type = 0; /* SRType_LSL */
- ai->shift_n = 0;
-
- return decode_arm_wb(vcpu, ai, psr);
-}
-
-/*
- * The encodings in this table assumes that a fault was generated where the
- * ISV field in the HSR was clear, and the decoding information was invalid,
- * which means that a register write-back occurred, the PC was used as the
- * destination or a load/store multiple operation was used. Since the latter
- * two cases are crazy for MMIO on the guest side, we simply inject a fault
- * when this happens and support the common case.
- *
- * We treat unpriviledged loads and stores of words and bytes like all other
- * loads and stores as their encodings mandate the W bit set and the P bit
- * clear.
- */
-static const struct arm_decode arm_decode[] = {
- /**************** Load/Store Word and Byte **********************/
- /* Store word with writeback */
- { .opc = 0x04000000, .opc_mask = 0x0c500000,
- .decode = decode_arm_ls,
- .template = { .len = 4, .w = true, .sign_extend = false, }, },
- /* Store byte with writeback */
- { .opc = 0x04400000, .opc_mask = 0x0c500000,
- .decode = decode_arm_ls,
- .template = { .len = 1, .w = true, .sign_extend = false, }, },
- /* Load word with writeback */
- { .opc = 0x04100000, .opc_mask = 0x0c500000,
- .decode = decode_arm_ls,
- .template = { .len = 4, .w = false, .sign_extend = false, }, },
- /* Load byte with writeback */
- { .opc = 0x04500000, .opc_mask = 0x0c500000,
- .decode = decode_arm_ls,
- .template = { .len = 1, .w = false, .sign_extend = false, }, },
-
- /*************** Extra load/store instructions ******************/
-
- /* Store halfword with writeback */
- { .opc = 0x000000b0, .opc_mask = 0x0c1000f0,
- .decode = decode_arm_extra,
- .template = { .len = 2, .w = true, .sign_extend = false, }, },
- /* Load halfword with writeback */
- { .opc = 0x001000b0, .opc_mask = 0x0c1000f0,
- .decode = decode_arm_extra,
- .template = { .len = 2, .w = false, .sign_extend = false, }, },
- /* Load dual with writeback */
- { .opc = 0x000000d0, .opc_mask = 0x0c1000f0,
- .decode = decode_arm_extra,
- .template = { .len = 8, .w = false, .sign_extend = false, }, },
- /* Load signed byte with writeback */
- { .opc = 0x001000d0, .opc_mask = 0x0c1000f0,
- .decode = decode_arm_extra,
- .template = { .len = 1, .w = false, .sign_extend = true, }, },
-
- /* Store dual with writeback */
- { .opc = 0x000000f0, .opc_mask = 0x0c1000f0,
- .decode = decode_arm_extra,
- .template = { .len = 8, .w = true, .sign_extend = false, }, },
- /* Load signed halfword with writeback */
- { .opc = 0x001000f0, .opc_mask = 0x0c1000f0,
- .decode = decode_arm_extra,
- .template = { .len = 2, .w = false, .sign_extend = true, }, },
-
- /* Store halfword unprivileged */
- { .opc = 0x002000b0, .opc_mask = 0x0f3000f0,
- .decode = decode_arm_extra,
- .template = { .len = 2, .w = true, .sign_extend = false, }, },
- /* Load halfword unprivileged */
- { .opc = 0x003000b0, .opc_mask = 0x0f3000f0,
- .decode = decode_arm_extra,
- .template = { .len = 2, .w = false, .sign_extend = false, }, },
- /* Load signed byte unprivileged */
- { .opc = 0x003000d0, .opc_mask = 0x0f3000f0,
- .decode = decode_arm_extra,
- .template = { .len = 1, .w = false, .sign_extend = true, }, },
- /* Load signed halfword unprivileged */
- { .opc = 0x003000d0, .opc_mask = 0x0f3000f0,
- .decode = decode_arm_extra,
- .template = { .len = 2, .w = false, .sign_extend = true, }, },
-};
-
-static bool kvm_decode_arm_ls(struct kvm_vcpu *vcpu, struct arm_insn *ai,
- u32 psr)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(arm_decode); i++) {
- const struct arm_decode *d = &arm_decode[i];
- if ((ai->instr & d->opc_mask) == d->opc) {
- ai->len = d->template.len;
- ai->w = d->template.w;
- ai->sign_extend = d->template.sign_extend;
- return d->decode(vcpu, ai, psr);
- }
- }
- return false;
-}
-
-struct thumb_decode {
- bool is32;
-
- union {
- struct {
- u8 opcode;
- u8 mask;
- } t16;
-
- struct {
- u8 op1;
- u8 op2;
- u8 op2_mask;
- } t32;
- };
-
- bool (*decode)(struct kvm_vcpu *vcpu, struct arm_insn *ti);
-};
-
-static bool decode_thumb_wb(struct kvm_vcpu *vcpu, struct arm_insn *ti)
-{
- u8 imm8 = ti->instr & 0xff;
-
- ti->P = (ti->instr >> 10) & 1;
- ti->U = (ti->instr >> 9) & 1;
- ti->Rn = (ti->instr >> 16) & 0xf;
- ti->Rd = (ti->instr >> 12) & 0xf;
-
- /* Handle Writeback: offset_addr relative to fault address. */
- if (!ti->P && ti->U)
- ti->offset_addr = imm8;
- else if (!ti->P && !ti->U)
- ti->offset_addr = -imm8;
- return true;
-}
-
-static bool decode_thumb_str(struct kvm_vcpu *vcpu, struct arm_insn *ti)
-{
- u8 op1 = (ti->instr >> (16 + 5)) & 0x7;
- u8 op2 = (ti->instr >> 6) & 0x3f;
-
- ti->W = true;
- ti->sign_extend = false;
-
- switch (op1) {
- case 0x0: ti->len = 1; break;
- case 0x1: ti->len = 2; break;
- case 0x2: ti->len = 4; break;
- default:
- return false; /* Only register write-back versions! */
- }
-
- if ((op2 & 0x24) == 0x24) {
- /* STRB (immediate, thumb, W=1) */
- return decode_thumb_wb(vcpu, ti);
- }
-
- return false;
-}
-
-static bool decode_thumb_ldr(struct kvm_vcpu *vcpu, struct arm_insn *ti)
-{
- u8 op1 = (ti->instr >> (16 + 7)) & 0x3;
- u8 op2 = (ti->instr >> 6) & 0x3f;
-
- ti->W = false;
-
- switch (ti->t32.op2 & 0x7) {
- case 0x1: ti->len = 1; break;
- case 0x3: ti->len = 2; break;
- case 0x5: ti->len = 4; break;
- }
-
- if (op1 == 0x0)
- ti->sign_extend = false;
- else if (op1 == 0x2 && (ti->t32.op2 & 0x7) != 0x5)
- ti->sign_extend = true;
- else
- return false; /* Only register write-back versions! */
-
- if ((op2 & 0x24) == 0x24) {
- /* LDR{S}X (immediate, thumb, W=1) */
- return decode_thumb_wb(vcpu, ti);
- }
-
- return false;
-}
-
-/*
- * We only support instruction decoding for valid reasonable MMIO operations
- * where trapping them do not provide sufficient information in the HSR (no
- * 16-bit Thumb instructions provide register writeback that we care about).
- *
- * The following instruciton types are NOT supported for MMIO operations
- * despite the HSR not containing decode info:
- * - any Load/Store multiple
- * - any load/store exclusive
- * - any load/store dual
- * - anything with the PC as the dest register
- */
-static const struct thumb_decode thumb_decode[] = {
- /**************** 32-bit Thumb instructions **********************/
- /* Store single data item: Op1 == 11, Op2 == 000xxx0 */
- { .is32 = true, .t32 = { 3, 0x00, 0x71}, decode_thumb_str },
- /* Load byte: Op1 == 11, Op2 == 00xx001 */
- { .is32 = true, .t32 = { 3, 0x01, 0x67}, decode_thumb_ldr },
- /* Load halfword: Op1 == 11, Op2 == 00xx011 */
- { .is32 = true, .t32 = { 3, 0x03, 0x67}, decode_thumb_ldr },
- /* Load word: Op1 == 11, Op2 == 00xx101 */
- { .is32 = true, .t32 = { 3, 0x05, 0x67}, decode_thumb_ldr },
-};
-
-
-static bool kvm_decode_thumb_ls(struct kvm_vcpu *vcpu, struct arm_insn *ti)
-{
- bool is16;
- int i;
-
- is16 = !ti->is_thumb32;
- if (is16) {
- ti->t16.opcode = (ti->instr >> 10) & 0x3f;
- } else {
- ti->t32.op1 = (ti->instr >> (16 + 11)) & 0x3;
- ti->t32.op2 = (ti->instr >> (16 + 4)) & 0x7f;
- }
-
- for (i = 0; i < ARRAY_SIZE(thumb_decode); i++) {
- const struct thumb_decode *td = &thumb_decode[i];
- if (td->is32 != ti->is_thumb32)
- continue;
-
- if (is16) {
- if ((ti->t16.opcode & td->t16.mask) != td->t16.opcode)
- continue;
- } else {
- if (td->t32.op1 != ti->t32.op1)
- continue;
- if ((td->t32.op2_mask & ti->t32.op2) != td->t32.op2)
- continue;
- }
-
- return td->decode(vcpu, ti);
- }
-
- return false;
-}
-
-static int kvm_decode(struct kvm_vcpu *vcpu, unsigned long pc, u32 psr,
- struct arm_insn *ai)
-{
- int err;
- unsigned int instr_len;
-
- ai->is_thumb = psr & PSR_T_BIT;
- instr_len = ai->is_thumb ? 2 : 4;
-
- BUG_ON(!ai->is_thumb && (pc & 0x3));
-
- /* Zero out high bits for thumb case, and so it's set on error. */
- ai->instr = 0;
-
- /* Now guest isn't running, we can va->pa map and copy atomically. */
- err = copy_from_guest(vcpu, &ai->instr, pc, instr_len);
- if (err)
- return err;
-
- if (ai->is_thumb) {
- /*
- * Is it a 32 bit thumb instruction? Can't get it all
- * in 1 go, since it can actually go over a page
- * boundary.
- */
- ai->is_thumb32 = is_wide_instruction(ai->instr);
-
- if (ai->is_thumb32) {
- ai->instr = ai->instr << 16;
- err = copy_from_guest(vcpu, &ai->instr,
- pc + instr_len, instr_len);
- if (err)
- return err;
- }
- return kvm_decode_thumb_ls(vcpu, ai);
- }
-
- return kvm_decode_arm_ls(vcpu, ai, psr);
+ return *vcpu_reg(_vcpu, reg);
}
static bool execute(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
@@ -788,7 +330,8 @@ int kvm_emulate_mmio_ls(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
mmio->phys_addr = fault_ipa;
- err = kvm_decode(vcpu, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu), &insn);
+ err = decode_insn(&insn, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu),
+ copy_from_guest, get_guest_reg, vcpu);
if (err) {
kvm_debug("Unable to decode inst: %#08x (cpsr: %#08x (T=%i)"
"pc: %#08x) - %i\n",
--
1.7.10.4
^ permalink raw reply related [flat|nested] 28+ messages in thread* Re: RFC: Move kvm's instruction decoding into generic code.
2012-10-24 11:25 ` RFC: Move kvm's instruction decoding into generic code Rusty Russell
` (9 preceding siblings ...)
2012-10-24 11:25 ` [PATCH 10/10] opcode: move generic instruction decode out of KVM Rusty Russell
@ 2012-10-24 16:27 ` Dave Martin
10 siblings, 0 replies; 28+ messages in thread
From: Dave Martin @ 2012-10-24 16:27 UTC (permalink / raw)
To: Rusty Russell; +Cc: Will Deacon, Christoffer Dall, kvm@vger.kernel.org
On Wed, Oct 24, 2012 at 09:55:13PM +1030, Rusty Russell wrote:
> Compile-tested only, but shows what it looks like (ugly).
>
> I just used the existing code and didn't crack open my ARM books to
> determine if there's a better way. The struct names are pretty vague,
> too, as a result.
>
Thanks (to you and Christoffer) for having a stab at this.
I'll try to respond to these patches, but I have a recursive panic
going on here so it may take a bit of time...
Cheers
---Dave
^ permalink raw reply [flat|nested] 28+ messages in thread