* [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits
@ 2014-11-06 9:15 Nadav Amit
2014-11-06 14:10 ` Paolo Bonzini
2014-11-07 17:37 ` Radim Krčmář
0 siblings, 2 replies; 7+ messages in thread
From: Nadav Amit @ 2014-11-06 9:15 UTC (permalink / raw)
To: pbonzini; +Cc: kvm, Nadav Amit
As we run out of bits in the KVM emulator instruction flags, we can merge
together the Mmx/Sse/Avx bits. These bits are mutually exclusive (i.e., each
instruction is either MMX, SSE, AVX, or none), so we can save one bit in the
flags by merging them.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
---
arch/x86/kvm/emulate.c | 44 ++++++++++++++++++++++++++++----------------
1 file changed, 28 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index cd2029b..f98ead7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -123,7 +123,6 @@
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
-#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
@@ -155,9 +154,11 @@
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
-#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
-#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
-#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
+#define Sse ((u64)2 << 40) /* SSE Vector instruction */
+#define Avx ((u64)3 << 40) /* Advanced Vector Extensions */
+#define OpExtMask ((u64)3 << 40)
+#define Aligned ((u64)1 << 42) /* Explicitly aligned (e.g. MOVDQA) */
+#define Unaligned ((u64)1 << 43) /* Explicitly unaligned (e.g. MOVDQU) */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
@@ -1082,18 +1083,19 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
unsigned reg = ctxt->modrm_reg;
+ u64 op_ext = ctxt->d & OpExtMask;
if (!(ctxt->d & ModRM))
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
- if (ctxt->d & Sse) {
+ if (op_ext == Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
- if (ctxt->d & Mmx) {
+ if (op_ext == Mmx) {
reg &= 7;
op->type = OP_MM;
op->bytes = 8;
@@ -1122,6 +1124,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
int index_reg, base_reg, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
+ u64 op_ext = ctxt->d & OpExtMask;
ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
@@ -1137,14 +1140,15 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt->d & ByteOp);
- if (ctxt->d & Sse) {
+
+ if (op_ext == Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
- if (ctxt->d & Mmx) {
+ if (op_ext == Mmx) {
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = ctxt->modrm_rm & 7;
@@ -4555,7 +4559,9 @@ done_prefixes:
return EMULATION_FAILED;
if (unlikely(ctxt->d &
- (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch))) {
+ (NotImpl|Stack|Op3264|OpExtMask|Intercept|CheckPerm|NearBranch))) {
+ u64 op_ext = ctxt->d & OpExtMask;
+
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
@@ -4580,9 +4586,9 @@ done_prefixes:
ctxt->op_bytes = 4;
}
- if (ctxt->d & Sse)
+ if (op_ext == Sse)
ctxt->op_bytes = 16;
- else if (ctxt->d & Mmx)
+ else if (op_ext == Mmx)
ctxt->op_bytes = 8;
}
@@ -4728,25 +4734,31 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
}
if (unlikely(ctxt->d &
- (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
+ (No64|Undefined|OpExtMask|Intercept|CheckPerm|Priv|Prot|
+ String))) {
+ u64 op_ext = ctxt->d & OpExtMask;
+
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
(ctxt->d & Undefined)) {
rc = emulate_ud(ctxt);
goto done;
}
- if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
- || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
+ if (((op_ext == Sse || op_ext == Mmx) &&
+ ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
+ || ((op_ext == Sse) &&
+ !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
- if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+ if ((op_ext == Sse || op_ext == Mmx) &&
+ (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
- if (ctxt->d & Mmx) {
+ if (op_ext == Mmx) {
rc = flush_pending_x87_faults(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
--
1.9.1
^ permalink raw reply related [flat|nested] 7+ messages in thread* Re: [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits
2014-11-06 9:15 [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits Nadav Amit
@ 2014-11-06 14:10 ` Paolo Bonzini
2014-11-06 18:52 ` Nadav Amit
2014-11-07 17:37 ` Radim Krčmář
1 sibling, 1 reply; 7+ messages in thread
From: Paolo Bonzini @ 2014-11-06 14:10 UTC (permalink / raw)
To: Nadav Amit; +Cc: kvm
On 06/11/2014 10:15, Nadav Amit wrote:
> As we run out of bits in the KVM emulator instruction flags, we can merge
> together the Mmx/Sse/Avx bits. These bits are mutual exclusive (i.e., each
> instruction is either MMX, SSE, AVX, or none), so we can save one bit in the
> flags by merging them.
Do we need the Avx bit at all? Currently it is a dup of Unaligned, and
I think we can just reuse Unaligned. If we see VEX, we just do "ctxt->d
|= Unaligned".
AVX instructions are just tweaks of the operand length and the alignment
restrictions of SSE instructions, there is nothing really special about
them.
Paolo
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits
2014-11-06 14:10 ` Paolo Bonzini
@ 2014-11-06 18:52 ` Nadav Amit
2014-11-06 21:51 ` Paolo Bonzini
0 siblings, 1 reply; 7+ messages in thread
From: Nadav Amit @ 2014-11-06 18:52 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: Nadav Amit, kvm
> On Nov 6, 2014, at 16:10, Paolo Bonzini <pbonzini@redhat.com> wrote:
>
>
>
> On 06/11/2014 10:15, Nadav Amit wrote:
>> As we run out of bits in the KVM emulator instruction flags, we can merge
>> together the Mmx/Sse/Avx bits. These bits are mutual exclusive (i.e., each
>> instruction is either MMX, SSE, AVX, or none), so we can save one bit in the
>> flags by merging them.
>
> Do we need the Avx bit at all? Currently it is a dup of Unaligned, and
> I think we can just reuse Unaligned. If we see VEX, we just do "ctxt->d
> |= Unaligned".
>
> AVX instructions are just tweaks of the operand length and the alignment
> restrictions of SSE instructions, there is nothing really special about
> them.
Hmm… I do not think this is the case.
AVX instructions have some things in common, which are currently not implemented (since no instruction is marked as AVX), but should be if anyone implements the emulation of AVX instructions:
1. They should cause #UD if CR4.OSXSAVE=0 or XCR0[2:1] != 3, or CPUID[1].AVX = 0.
2. They should cause #NM if CR0.TS = 1 (like SSE/MMX)
3. They work on YMM registers (256-bit long; unlike SSE/MMX)
The first case should usually be less interesting if #UD actually happens - since #UD takes precedence over the exit. The second one surely needs to be checked. The third one determines the register type.
Nadav
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits
2014-11-06 18:52 ` Nadav Amit
@ 2014-11-06 21:51 ` Paolo Bonzini
0 siblings, 0 replies; 7+ messages in thread
From: Paolo Bonzini @ 2014-11-06 21:51 UTC (permalink / raw)
To: Nadav Amit; +Cc: Nadav Amit, kvm
On 06/11/2014 19:52, Nadav Amit wrote:
>
>> On Nov 6, 2014, at 16:10, Paolo Bonzini <pbonzini@redhat.com> wrote:
>>
>>
>>
>> On 06/11/2014 10:15, Nadav Amit wrote:
>>> As we run out of bits in the KVM emulator instruction flags, we can merge
>>> together the Mmx/Sse/Avx bits. These bits are mutual exclusive (i.e., each
>>> instruction is either MMX, SSE, AVX, or none), so we can save one bit in the
>>> flags by merging them.
>>
>> Do we need the Avx bit at all? Currently it is a dup of Unaligned, and
>> I think we can just reuse Unaligned. If we see VEX, we just do "ctxt->d
>> |= Unaligned".
>>
>> AVX instructions are just tweaks of the operand length and the alignment
>> restrictions of SSE instructions, there is nothing really special about
>> them.
>
> Hmm… I do not think this is the case.
> AVX instruction have some things in common, which are currently not
> implemented (since no instruction is marked as AVX), but should be if
> anyone implements the emulation of AVX instructions:
>
> 1. They should cause #UD if CR4.OSXSAVE=0 or XCR0[2:1] != 3, or CPUID[1].AVX = 0.
> 2. They should cause #NM if CR0.TS = 1 (like SSE/MMX)
> 3. They work on YMM registers (256-bit long; unlike SSE/MMX)
4. They always accept unaligned arguments, with some exceptions.
But encoding-wise, (1) and (3) are determined by the VEX prefix I think.
Regarding (3), there are also 128-bit AVX instruction, e.g. vmovpd
xmm0, xmm1. I'm not sure if those also cause #UD for case (1). And as
you said, (2) is common to SSE and AVX.
From what I understand, AVX instructions are basically SSE instructions
as far as decoding is concerned, only there is always a VEX prefix and
there is never a 66/F2/F3 prefix.
Paolo
^ permalink raw reply [flat|nested] 7+ messages in thread
* Re: [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits
2014-11-06 9:15 [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits Nadav Amit
2014-11-06 14:10 ` Paolo Bonzini
@ 2014-11-07 17:37 ` Radim Krčmář
2014-11-07 17:39 ` Paolo Bonzini
1 sibling, 1 reply; 7+ messages in thread
From: Radim Krčmář @ 2014-11-07 17:37 UTC (permalink / raw)
To: Nadav Amit; +Cc: pbonzini, kvm
2014-11-06 11:15+0200, Nadav Amit:
> As we run out of bits in the KVM emulator instruction flags, we can merge
> together the Mmx/Sse/Avx bits. These bits are mutual exclusive (i.e., each
> instruction is either MMX, SSE, AVX, or none), so we can save one bit in the
> flags by merging them.
>
> Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
> ---
It looks like Avx behaves a bit differently than legacy Sse, so having
it exclusive is better.
I'd make changes, but the behavior doesn't look wrong now, so
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
> arch/x86/kvm/emulate.c | 44 ++++++++++++++++++++++++++++----------------
> 1 file changed, 28 insertions(+), 16 deletions(-)
>
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index cd2029b..f98ead7 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -123,7 +123,6 @@
> #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
> #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
> #define Escape (5<<15) /* Escape to coprocessor instruction */
> -#define Sse (1<<18) /* SSE Vector instruction */
(I liked that Paolo moved something to the empty spot.)
> /* Generic ModRM decode. */
> #define ModRM (1<<19)
> /* Destination is only written; never read. */
> @@ -155,9 +154,11 @@
> #define Src2GS (OpGS << Src2Shift)
> #define Src2Mask (OpMask << Src2Shift)
> #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
> -#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
> -#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
> -#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define OpExtShift 40
> +#define Sse ((u64)2 << 40) /* SSE Vector instruction */
> +#define Avx ((u64)3 << 40) /* Advanced Vector Extensions */
(Precedents set OpExt{None,Mmx,Sse,Avx}.)
> +#define OpExtMask ((u64)3 << 40)
(Wouldn't Ext be enough?)
> +#define Aligned ((u64)1 << 42) /* Explicitly aligned (e.g. MOVDQA) */
> +#define Unaligned ((u64)1 << 43) /* Explicitly unaligned (e.g. MOVDQU) */
> #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
> #define NoWrite ((u64)1 << 45) /* No writeback */
> #define SrcWrite ((u64)1 << 46) /* Write back src operand */
> @@ -1082,18 +1083,19 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
> struct operand *op)
> {
> unsigned reg = ctxt->modrm_reg;
> + u64 op_ext = ctxt->d & OpExtMask;
(We kept it inline.)
>
> if (!(ctxt->d & ModRM))
> reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
>
> - if (ctxt->d & Sse) {
> + if (op_ext == Sse) {
> op->type = OP_XMM;
> op->bytes = 16;
> op->addr.xmm = reg;
> read_sse_reg(ctxt, &op->vec_val, reg);
> return;
> }
> - if (ctxt->d & Mmx) {
> + if (op_ext == Mmx) {
> reg &= 7;
> op->type = OP_MM;
> op->bytes = 8;
[...]
> @@ -1137,14 +1140,15 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
> - if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
> + if ((op_ext == Sse || op_ext == Mmx) &&
It could be just op_ext here -- Avx doesn't differ.
> + (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
> rc = emulate_nm(ctxt);
> goto done;
> }
>
> - if (ctxt->d & Mmx) {
> + if (op_ext == Mmx) {
> rc = flush_pending_x87_faults(ctxt);
> if (rc != X86EMUL_CONTINUE)
> goto done;
> --
> 1.9.1
>
> --
> To unsubscribe from this list: send the line "unsubscribe kvm" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 7+ messages in thread* Re: [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits
2014-11-07 17:37 ` Radim Krčmář
@ 2014-11-07 17:39 ` Paolo Bonzini
2014-11-07 17:49 ` Radim Krčmář
0 siblings, 1 reply; 7+ messages in thread
From: Paolo Bonzini @ 2014-11-07 17:39 UTC (permalink / raw)
To: Radim Krčmář, Nadav Amit; +Cc: kvm
On 07/11/2014 18:37, Radim Krčmář wrote:
> 2014-11-06 11:15+0200, Nadav Amit:
>> As we run out of bits in the KVM emulator instruction flags, we can merge
>> together the Mmx/Sse/Avx bits. These bits are mutual exclusive (i.e., each
>> instruction is either MMX, SSE, AVX, or none), so we can save one bit in the
>> flags by merging them.
>>
>> Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
>> ---
>
> It looks that Avx behaves a bit differently that legacy Sse, so having
> it exclusive is better.
>
> I'd make changes, but the behavior doesn't look wrong now, so
Thanks for the review, Radim.
I think we have no clear idea of what Avx would do (I have one---same as
Sse but make VEX prefix mandatory, see VBROADCASTSS---but I'm not sure
it's the right one either). Let's keep these patches on hold.
Paolo
> Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
>
>> arch/x86/kvm/emulate.c | 44 ++++++++++++++++++++++++++++----------------
>> 1 file changed, 28 insertions(+), 16 deletions(-)
>>
>> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
>> index cd2029b..f98ead7 100644
>> --- a/arch/x86/kvm/emulate.c
>> +++ b/arch/x86/kvm/emulate.c
>> @@ -123,7 +123,6 @@
>> #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
>> #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
>> #define Escape (5<<15) /* Escape to coprocessor instruction */
>> -#define Sse (1<<18) /* SSE Vector instruction */
>
> (I liked that Paolo moved something to the empty spot.)
>
>> /* Generic ModRM decode. */
>> #define ModRM (1<<19)
>> /* Destination is only written; never read. */
>> @@ -155,9 +154,11 @@
>> #define Src2GS (OpGS << Src2Shift)
>> #define Src2Mask (OpMask << Src2Shift)
>> #define Mmx ((u64)1 << 40) /* MMX Vector instruction */
>> -#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
>> -#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
>> -#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
>
> #define OpExtShift 40
>
>> +#define Sse ((u64)2 << 40) /* SSE Vector instruction */
>> +#define Avx ((u64)3 << 40) /* Advanced Vector Extensions */
>
> (Precedents set OpExt{None,Mmx,Sse,Avx}.)
>
>> +#define OpExtMask ((u64)3 << 40)
>
> (Wouldn't Ext be enough?)
>
>> +#define Aligned ((u64)1 << 42) /* Explicitly aligned (e.g. MOVDQA) */
>> +#define Unaligned ((u64)1 << 43) /* Explicitly unaligned (e.g. MOVDQU) */
>> #define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
>> #define NoWrite ((u64)1 << 45) /* No writeback */
>> #define SrcWrite ((u64)1 << 46) /* Write back src operand */
>> @@ -1082,18 +1083,19 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
>> struct operand *op)
>> {
>> unsigned reg = ctxt->modrm_reg;
>> + u64 op_ext = ctxt->d & OpExtMask;
>
> (We kept it inline.)
>
>>
>> if (!(ctxt->d & ModRM))
>> reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
>>
>> - if (ctxt->d & Sse) {
>> + if (op_ext == Sse) {
>> op->type = OP_XMM;
>> op->bytes = 16;
>> op->addr.xmm = reg;
>> read_sse_reg(ctxt, &op->vec_val, reg);
>> return;
>> }
>> - if (ctxt->d & Mmx) {
>> + if (op_ext == Mmx) {
>> reg &= 7;
>> op->type = OP_MM;
>> op->bytes = 8;
> [...]
>> @@ -1137,14 +1140,15 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
>> - if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
>> + if ((op_ext == Sse || op_ext == Mmx) &&
>
> It could be just op_ext here -- Avx doesn't differ.
>
>> + (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
>> rc = emulate_nm(ctxt);
>> goto done;
>> }
>>
>> - if (ctxt->d & Mmx) {
>> + if (op_ext == Mmx) {
>> rc = flush_pending_x87_faults(ctxt);
>> if (rc != X86EMUL_CONTINUE)
>> goto done;
>> --
>> 1.9.1
>>
>> --
>> To unsubscribe from this list: send the line "unsubscribe kvm" in
>> the body of a message to majordomo@vger.kernel.org
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply [flat|nested] 7+ messages in thread* Re: [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits
2014-11-07 17:39 ` Paolo Bonzini
@ 2014-11-07 17:49 ` Radim Krčmář
0 siblings, 0 replies; 7+ messages in thread
From: Radim Krčmář @ 2014-11-07 17:49 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: Nadav Amit, kvm
2014-11-07 18:39+0100, Paolo Bonzini:
> I think we have no clear idea of what Avx would do (I have one---same as
> Sse but make VEX prefix mandatory, see VBROADCASTSS---but I'm not sure
> it's the right one either). Let's keep these patches on hold.
Implementing Avx first makes sense, we don't want Knuth's optimizations.
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2014-11-07 17:49 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-11-06 9:15 [PATCH] KVM: x86: Save bits by merging Mmx/Sse/Avx bits Nadav Amit
2014-11-06 14:10 ` Paolo Bonzini
2014-11-06 18:52 ` Nadav Amit
2014-11-06 21:51 ` Paolo Bonzini
2014-11-07 17:37 ` Radim Krčmář
2014-11-07 17:39 ` Paolo Bonzini
2014-11-07 17:49 ` Radim Krčmář
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox