* [PATCH] KVM: arm/arm64: Allow usercopy to vcpu->arch.ctxt and arm64 debug
  [not found] <20171020232525.7387-1-pbonzini@redhat.com>
From: Christoffer Dall @ 2017-10-21 18:45 UTC
To: linux-arm-kernel

We do direct user-access copying to the kvm_cpu_context structure
embedded in the kvm_vcpu_arch structure, and to the vcpu debug register
state.  Everything else (timer, PMU, vgic) goes through a temporary
indirection.

Fixing all accesses to kvm_cpu_context is massively invasive, and we'd
like to avoid that, so we tell kvm_init_usercopy to whitelist accesses
to our context structure.

The debug system register accesses on arm64 are modified to work through
an indirection instead.

Cc: kernel-hardening@lists.openwall.com
Cc: Kees Cook <keescook@chromium.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
---
This fixes KVM/ARM on today's linux-next with CONFIG_HARDENED_USERCOPY.

The patch is based on linux-next plus Paolo's x86 patch which introduces
kvm_init_usercopy.  Not sure how this needs to get merged, but it would
potentially make sense for Paolo to put together a set of the patches
needed for this.

Thanks,
-Christoffer

 arch/arm64/kvm/sys_regs.c | 36 ++++++++++++++++++++----------------
 virt/kvm/arm/arm.c        |  5 ++++-
 2 files changed, 24 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e070d3baf9f..cdf47a9108fe 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -293,19 +293,20 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 r;
 
-	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(&r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
+	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = r;
 	return 0;
 }
 
 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 r = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 
-	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_to_user(uaddr, &r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -335,10 +336,11 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 r;
 
-	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(&r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
+	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = r;
 
 	return 0;
 }
@@ -346,9 +348,9 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 r = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 
-	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_to_user(uaddr, &r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -379,19 +381,20 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	__u64 r;
 
-	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(&r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
+	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = r;
 	return 0;
 }
 
 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	__u64 r = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 
-	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_to_user(uaddr, &r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
@@ -421,19 +424,20 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+	__u64 r;
 
-	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_from_user(&r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
+	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = r;
 	return 0;
 }
 
 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+	__u64 r = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 
-	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (copy_to_user(uaddr, &r, KVM_REG_SIZE(reg->id)) != 0)
 		return -EFAULT;
 	return 0;
 }
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index b9f68e4add71..639e388678ff 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1502,7 +1502,10 @@ void kvm_arch_exit(void)
 
 static int arm_init(void)
 {
-	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	int rc = kvm_init_usercopy(NULL, sizeof(struct kvm_vcpu), 0,
+				   offsetof(struct kvm_vcpu_arch, ctxt),
+				   sizeof_field(struct kvm_vcpu_arch, ctxt),
+				   THIS_MODULE);
 	return rc;
 }
-- 
2.14.2
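For readers coming to this from the hardened-usercopy side: the whitelist
being requested here is, at the slab level, a single contiguous usercopy
region inside the kvm_vcpu object. A rough sketch of what that amounts to
is below, assuming kvm_init_usercopy() forwards its useroffset/usersize
pair to kmem_cache_create_usercopy() (the slab interface added by the
usercopy whitelisting series) when creating the vcpu cache; since
kvm_init_usercopy() is from Paolo's not-yet-merged patch, the plumbing
shown here is illustrative, not the final API.

/*
 * Sketch only: create the vcpu cache with a usercopy whitelist that
 * covers nothing but the kvm_cpu_context embedded in vcpu->arch.
 * The offsets mirror the arm_init() call in the patch above, adjusted
 * to be relative to struct kvm_vcpu rather than struct kvm_vcpu_arch.
 */
static struct kmem_cache *kvm_vcpu_cache;

static int kvm_vcpu_cache_init(unsigned int vcpu_size, unsigned int vcpu_align)
{
	kvm_vcpu_cache = kmem_cache_create_usercopy("kvm_vcpu",
			vcpu_size, vcpu_align, 0,
			offsetof(struct kvm_vcpu, arch) +
				offsetof(struct kvm_vcpu_arch, ctxt),
			sizeof_field(struct kvm_vcpu_arch, ctxt),
			NULL);
	return kvm_vcpu_cache ? 0 : -ENOMEM;
}

With such a region in place, copy_to_user()/copy_from_user() on addresses
inside vcpu->arch.ctxt pass the hardened usercopy check, while a
non-constant-sized copy that touches any other part of struct kvm_vcpu is
rejected, which is exactly what currently breaks KVM/ARM in linux-next.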
* [PATCH] KVM: arm/arm64: Allow usercopy to vcpu->arch.ctxt and arm64 debug
From: Kees Cook @ 2017-10-22  3:06 UTC
To: linux-arm-kernel

On Sat, Oct 21, 2017 at 11:45 AM, Christoffer Dall
<christoffer.dall@linaro.org> wrote:
> We do direct user-access copying to the kvm_cpu_context structure
> embedded in the kvm_vcpu_arch structure, and to the vcpu debug register
> state.  Everything else (timer, PMU, vgic) goes through a temporary
> indirection.

Are these copies done with a dynamic size? The normal way these get
whitelisted is via builtin_const sizes on the copy. Looking at
KVM_REG_SIZE(), though, it seems that would be a dynamic calculation.

> Fixing all accesses to kvm_cpu_context is massively invasive, and we'd
> like to avoid that, so we tell kvm_init_usercopy to whitelist accesses
> to our context structure.
>
> The debug system register accesses on arm64 are modified to work through
> an indirection instead.
>
> Cc: kernel-hardening@lists.openwall.com
> Cc: Kees Cook <keescook@chromium.org>
> Cc: Paolo Bonzini <pbonzini@redhat.com>
> Cc: Radim Krčmář <rkrcmar@redhat.com>
> Cc: Marc Zyngier <marc.zyngier@arm.com>
> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
> ---
> This fixes KVM/ARM on today's linux-next with CONFIG_HARDENED_USERCOPY.
>
> The patch is based on linux-next plus Paolo's x86 patch which introduces
> kvm_init_usercopy.  Not sure how this needs to get merged, but it would
> potentially make sense for Paolo to put together a set of the patches
> needed for this.

I was planning to carry Paolo's patches, and I can take this one too.
If this poses a problem, then I could just do a two-phase commit of
the whitelisting code, leaving the very last commit (which enables the
defense for anything not yet whitelisted), until the KVM trees land.

What's preferred?

Thanks for looking at this!

-Kees

> [...]

-- 
Kees Cook
Pixel Security
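For reference, the distinction Kees is drawing comes from how the
hardened usercopy hook is wired into the uaccess helpers. As of the
kernels this thread is about, the check in include/linux/thread_info.h
looks roughly like the following (quoted from memory, so treat it as a
sketch rather than the exact upstream text):

#ifdef CONFIG_HARDENED_USERCOPY
extern void __check_object_size(const void *ptr, unsigned long n,
				bool to_user);

static __always_inline void check_object_size(const void *ptr, unsigned long n,
					      bool to_user)
{
	/* Compile-time-constant sizes are validated statically instead. */
	if (!__builtin_constant_p(n))
		__check_object_size(ptr, n, to_user);
}
#else
static inline void check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{ }
#endif

So a copy whose length the compiler can prove constant never reaches the
runtime object/whitelist check, whereas KVM_REG_SIZE(reg->id) is computed
from reg->id at run time and therefore always goes through
__check_object_size(), which is why these call sites need either a
whitelist or the local-variable indirection used in the patch.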
* [PATCH] KVM: arm/arm64: Allow usercopy to vcpu->arch.ctxt and arm64 debug
From: Christoffer Dall @ 2017-10-22  7:44 UTC
To: linux-arm-kernel

On Sat, Oct 21, 2017 at 08:06:10PM -0700, Kees Cook wrote:
> On Sat, Oct 21, 2017 at 11:45 AM, Christoffer Dall
> <christoffer.dall@linaro.org> wrote:
> > We do direct user-access copying to the kvm_cpu_context structure
> > embedded in the kvm_vcpu_arch structure, and to the vcpu debug register
> > state.  Everything else (timer, PMU, vgic) goes through a temporary
> > indirection.
>
> Are these copies done with a dynamic size? The normal way these get
> whitelisted is via builtin_const sizes on the copy. Looking at
> KVM_REG_SIZE(), though, it seems that would be a dynamic calculation.
>

It's super confusing, but it's actually static.  We can only get to
these functions via kvm_arm_sys_reg_get_reg() and
kvm_arm_sys_reg_set_reg(), and they both do

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

So this is always a u64 copy.  However, I think it's much clearer if I
rewrite these to use get_user() and put_user().  v2 incoming.

> > Fixing all accesses to kvm_cpu_context is massively invasive, and we'd
> > like to avoid that, so we tell kvm_init_usercopy to whitelist accesses
> > to our context structure.
> >
> > The debug system register accesses on arm64 are modified to work through
> > an indirection instead.
> >
> > Cc: kernel-hardening@lists.openwall.com
> > Cc: Kees Cook <keescook@chromium.org>
> > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > Cc: Radim Krčmář <rkrcmar@redhat.com>
> > Cc: Marc Zyngier <marc.zyngier@arm.com>
> > Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
> > ---
> > This fixes KVM/ARM on today's linux-next with CONFIG_HARDENED_USERCOPY.
> >
> > The patch is based on linux-next plus Paolo's x86 patch which introduces
> > kvm_init_usercopy.  Not sure how this needs to get merged, but it would
> > potentially make sense for Paolo to put together a set of the patches
> > needed for this.
>
> I was planning to carry Paolo's patches, and I can take this one too.

Sounds good to me.

> If this poses a problem, then I could just do a two-phase commit of
> the whitelisting code, leaving the very last commit (which enables the
> defense for anything not yet whitelisted), until the KVM trees land.
>
> What's preferred?

Assuming there's an ack from Marc Zyngier on v2 of this patch, I prefer
you just take them as part of your series.

> Thanks for looking at this!

No problem,
-Christoffer
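For completeness, the "static but not builtin-constant" situation comes
from the way the size is encoded in the register id. KVM_REG_SIZE() is
defined in the UAPI headers roughly as follows (reproduced from memory
from include/uapi/linux/kvm.h of that era, so double-check against the
tree):

#define KVM_REG_SIZE_SHIFT	52
#define KVM_REG_SIZE_MASK	0x00f0000000000000ULL
#define KVM_REG_SIZE(id)						\
	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

The guard in kvm_arm_sys_reg_{get,set}_reg() quoted above guarantees the
result is always 8 bytes by the time set_bvr() and friends run, but
because reg->id is runtime data the compiler cannot fold
KVM_REG_SIZE(reg->id) into a constant, so the copy still looks
dynamically sized to the hardened usercopy machinery; hence the
get_user()/put_user() rewrite in v2.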
* [PATCH] KVM: arm/arm64: Allow usercopy to vcpu->arch.ctxt and arm64 debug
From: Paolo Bonzini @ 2017-10-23 14:14 UTC
To: linux-arm-kernel

On 22/10/2017 09:44, Christoffer Dall wrote:
> However, I think it's much clearer if I
> rewrite these to use get_user() and put_user(). v2 incoming.

I'd actually prefer if you all do a trivial conversion to
kvm_init_usercopy to begin with.  In fact, we could just change the
default from "0, 0" to "0, sizeof (kvm_arch_vcpu)" in kvm_init.  Any
other change can be applied after the patches are merged to Linus's
tree, especially with KVM Forum and the merge window both coming soon.

I'll send a v2 myself later this week.

Thanks all,

Paolo
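A sketch of the default Paolo is suggesting is below. It assumes the
useroffset/usersize pair taken by kvm_init_usercopy() is relative to
vcpu->arch, as the arm_init() call in the patch above implies;
kvm_init_usercopy() itself is from Paolo's not-yet-merged x86 patch, so
both the name and the exact signature are assumptions here.

/*
 * Sketch: keep kvm_init() as the arch-facing entry point, but have it
 * whitelist the whole arch-specific area of the vcpu by default, so
 * architectures that are happy with that need no changes at all.
 */
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module)
{
	return kvm_init_usercopy(opaque, vcpu_size, vcpu_align,
				 0, sizeof(struct kvm_vcpu_arch),
				 module);
}

Architectures that want a tighter region (such as only vcpu->arch.ctxt,
as in this patch) could still call kvm_init_usercopy() directly with a
narrower offset/size pair.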
* [PATCH] KVM: arm/arm64: Allow usercopy to vcpu->arch.ctxt and arm64 debug
From: Christoffer Dall @ 2017-10-23 14:49 UTC
To: linux-arm-kernel

On Mon, Oct 23, 2017 at 4:14 PM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> On 22/10/2017 09:44, Christoffer Dall wrote:
>> However, I think it's much clearer if I
>> rewrite these to use get_user() and put_user(). v2 incoming.
>
> I'd actually prefer if you all do a trivial conversion to
> kvm_init_usercopy to begin with.  In fact, we could just change the
> default from "0, 0" to "0, sizeof (kvm_arch_vcpu)" in kvm_init.  Any
> other change can be applied after the patches are merged to Linus's
> tree, especially with KVM Forum and the merge window both coming soon.

In that case, expect no further action from me on this one until the
patches have landed and I can resend my patch, unless you specifically
tell me otherwise.

Thanks,
-Christoffer
* [PATCH] KVM: arm/arm64: Allow usercopy to vcpu->arch.ctxt and arm64 debug
From: Kees Cook @ 2017-10-23 19:40 UTC
To: linux-arm-kernel

On Mon, Oct 23, 2017 at 7:14 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> On 22/10/2017 09:44, Christoffer Dall wrote:
>> However, I think it's much clearer if I
>> rewrite these to use get_user() and put_user(). v2 incoming.
>
> I'd actually prefer if you all do a trivial conversion to
> kvm_init_usercopy to begin with.  In fact, we could just change the
> default from "0, 0" to "0, sizeof (kvm_arch_vcpu)" in kvm_init.  Any
> other change can be applied after the patches are merged to Linus's
> tree, especially with KVM Forum and the merge window both coming soon.
>
> I'll send a v2 myself later this week.

Okay, which patches would you like me to carry in the usercopy
whitelisting tree for the coming merge window?

-Kees

-- 
Kees Cook
Pixel Security
* R: Re: [PATCH] KVM: arm/arm64: Allow usercopy to vcpu->arch.ctxt and arm64 debug
From: Paolo Bonzini @ 2017-10-23 21:06 UTC
To: linux-arm-kernel

----- Kees Cook <keescook@chromium.org> wrote:
> On Mon, Oct 23, 2017 at 7:14 AM, Paolo Bonzini <pbonzini@redhat.com> wrote:
> > On 22/10/2017 09:44, Christoffer Dall wrote:
> >> However, I think it's much clearer if I
> >> rewrite these to use get_user() and put_user(). v2 incoming.
> >
> > I'd actually prefer if you all do a trivial conversion to
> > kvm_init_usercopy to begin with.  In fact, we could just change the
> > default from "0, 0" to "0, sizeof (kvm_arch_vcpu)" in kvm_init.  Any
> > other change can be applied after the patches are merged to Linus's
> > tree, especially with KVM Forum and the merge window both coming soon.
> >
> > I'll send a v2 myself later this week.
>
> Okay, which patches would you like me to carry in the usercopy
> whitelisting tree for the coming merge window?

v2 of mine, which shall come in the next couple of days.

Paolo

> -Kees
>
> --
> Kees Cook
> Pixel Security
* [PATCH v2] KVM: arm/arm64: Allow usercopy to vcpu->arch.ctxt and arm64 debug
  [not found] <20171020232525.7387-1-pbonzini@redhat.com>
From: Christoffer Dall @ 2017-10-22  7:48 UTC
To: linux-arm-kernel

We do direct user-access copying to the kvm_cpu_context structure
embedded in the kvm_vcpu_arch structure, and to the vcpu debug register
state.  Everything else (timer, PMU, vgic) goes through a temporary
indirection.

Fixing all accesses to kvm_cpu_context is massively invasive, and we'd
like to avoid that, so we tell kvm_init_usercopy to whitelist accesses
to our context structure.

The debug system register accesses on arm64 are modified to work through
an indirection instead.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
---
Changes since v1:
 - Use get_user() and put_user() instead of the implicit understanding
   that these will always be 64-bit values.

 arch/arm64/kvm/sys_regs.c | 44 ++++++++++++++++++++++++++++----------------
 virt/kvm/arm/arm.c        |  5 ++++-
 2 files changed, 32 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e070d3baf9f..34b9e1734a3f 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -293,19 +293,22 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
 static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 __user *uval = uaddr;
+	__u64 r;
 
-	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+	if (get_user(r, uval))
 		return -EFAULT;
+	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = r;
 	return 0;
 }
 
 static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 r = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+	__u64 __user *uval = uaddr;
 
-	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (put_user(r, uval))
 		return -EFAULT;
 	return 0;
 }
@@ -335,10 +338,12 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
 static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 __user *uval = uaddr;
+	__u64 r;
 
-	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+	if (get_user(r, uval))
 		return -EFAULT;
+	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = r;
 
 	return 0;
 }
@@ -346,9 +351,10 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 r = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+	__u64 __user *uval = uaddr;
 
-	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (put_user(r, uval))
 		return -EFAULT;
 	return 0;
 }
@@ -379,19 +385,22 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
 static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	__u64 __user *uval = uaddr;
+	__u64 r;
 
-	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+	if (get_user(r, uval))
 		return -EFAULT;
+	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = r;
 	return 0;
 }
 
 static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	__u64 r = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+	__u64 __user *uval = uaddr;
 
-	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (put_user(r, uval))
 		return -EFAULT;
 	return 0;
 }
@@ -421,19 +430,22 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
 static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+	__u64 __user *uval = uaddr;
+	__u64 r;
 
-	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+	if (get_user(r, uval))
 		return -EFAULT;
+	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = r;
 	return 0;
 }
 
 static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 		const struct kvm_one_reg *reg, void __user *uaddr)
 {
-	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+	__u64 r = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+	__u64 __user *uval = uaddr;
 
-	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+	if (put_user(r, uval))
 		return -EFAULT;
 	return 0;
 }
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index b9f68e4add71..639e388678ff 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1502,7 +1502,10 @@ void kvm_arch_exit(void)
 
 static int arm_init(void)
 {
-	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+	int rc = kvm_init_usercopy(NULL, sizeof(struct kvm_vcpu), 0,
+				   offsetof(struct kvm_vcpu_arch, ctxt),
+				   sizeof_field(struct kvm_vcpu_arch, ctxt),
+				   THIS_MODULE);
 	return rc;
 }
-- 
2.14.2
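For context on the path these accessors serve: userspace reaches
set_bvr()/get_bvr() and friends through the ONE_REG ioctl interface,
which passes the kernel a pointer to a single 64-bit value, the buffer
that v2 now reads and writes with get_user()/put_user(). A minimal
userspace sketch follows; error handling is trimmed, and vcpu_fd and
reg_id are assumed to be a valid vcpu file descriptor and a valid arm64
sysreg id (e.g. one built with the ARM64_SYS_REG() helper from the UAPI
headers).

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_one_reg(int vcpu_fd, __u64 reg_id, __u64 value)
{
	struct kvm_one_reg one_reg = {
		.id   = reg_id,
		/* 8-byte userspace buffer the kernel copies from */
		.addr = (__u64)(uintptr_t)&value,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &one_reg);
}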