From: David Woodhouse <dwmw2@infradead.org>
To: qemu-devel@nongnu.org
Cc: "Paul Durrant" <paul@xen.org>,
"Paolo Bonzini" <pbonzini@redhat.com>,
"Richard Henderson" <richard.henderson@linaro.org>,
"Eduardo Habkost" <eduardo@habkost.net>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Marcel Apfelbaum" <marcel.apfelbaum@gmail.com>,
"Marcelo Tosatti" <mtosatti@redhat.com>,
kvm@vger.kernel.org, "Peter Maydell" <peter.maydell@linaro.org>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"Anthony PERARD" <anthony.perard@citrix.com>,
"David Woodhouse" <dwmw@amazon.co.uk>
Subject: [PATCH for-8.1 2/3] i386/xen: consistent locking around Xen singleshot timers
Date: Tue, 1 Aug 2023 18:57:46 +0100
Message-ID: <20230801175747.145906-3-dwmw2@infradead.org>
In-Reply-To: <20230801175747.145906-1-dwmw2@infradead.org>

From: David Woodhouse <dwmw@amazon.co.uk>

Coverity points out (CID 1507534, 1507968) that we sometimes access
env->xen_singleshot_timer_ns under the protection of
env->xen_timers_lock and sometimes not.

This isn't always an issue. There are two modes for the timers: if the
kernel supports the EVTCHN_SEND capability then it handles all the timer
hypercalls and delivery internally, and all we use the field for is to
get/set the timer as part of the vCPU state via an ioctl(). If the
kernel doesn't have that support, then we do all the emulation within
QEMU, and *those* are the code paths where we actually care about the
locking.
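
For illustration, the split between the two modes looks roughly like
this. This is a sketch of the decision, not compilable code lifted from
the file; KVM_XEN_VCPU_ATTR_TYPE_TIMER is the attribute used by the
KVM_XEN_VCPU_GET_ATTR/KVM_XEN_VCPU_SET_ATTR ioctls:

    if (kvm_xen_has_cap(EVTCHN_SEND)) {
        /*
         * Kernel-accelerated mode: KVM handles the timer hypercalls
         * and delivery itself. env->xen_singleshot_timer_ns merely
         * mirrors the kernel's state, read and written through the
         * KVM_XEN_VCPU_GET_ATTR / KVM_XEN_VCPU_SET_ATTR ioctls with
         * KVM_XEN_VCPU_ATTR_TYPE_TIMER.
         */
    } else {
        /*
         * Userspace emulation for older kernels: QEMU arms a QEMUTimer
         * itself, and these are the paths where taking xen_timers_lock
         * around the field really matters.
         */
    }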

But it doesn't hurt to be a little bit more consistent and avoid having
to explain *why* it's OK.
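
The patch standardises on QEMU_LOCK_GUARD(), which takes the mutex at
the point of declaration and drops it automatically when the enclosing
scope ends, even on early return; that is why the explicit
qemu_mutex_lock()/qemu_mutex_unlock() pair disappears from
do_set_singleshot_timer() below. A minimal sketch of the idiom, using
illustrative stand-in names rather than anything from the real file:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"
    #include "qemu/lockable.h"

    static QemuMutex timers_lock;          /* stand-in for xen_timers_lock;
                                            * qemu_mutex_init() assumed done
                                            * at startup */
    static uint64_t singleshot_timer_ns;   /* the field being protected */

    static void set_timer(uint64_t ns)
    {
        /* Lock held from here until the closing brace. */
        QEMU_LOCK_GUARD(&timers_lock);
        singleshot_timer_ns = ns;
    }
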
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
---
 target/i386/kvm/xen-emu.c | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)

diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index d7c7eb8d9c..9946ff0905 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -43,6 +43,7 @@

 static void xen_vcpu_singleshot_timer_event(void *opaque);
 static void xen_vcpu_periodic_timer_event(void *opaque);
+static int vcpuop_stop_singleshot_timer(CPUState *cs);

 #ifdef TARGET_X86_64
 #define hypercall_compat32(longmode) (!(longmode))
@@ -466,6 +467,7 @@ void kvm_xen_inject_vcpu_callback_vector(uint32_t vcpu_id, int type)
     }
 }

+/* Must always be called with xen_timers_lock held */
 static int kvm_xen_set_vcpu_timer(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -483,6 +485,7 @@ static int kvm_xen_set_vcpu_timer(CPUState *cs)

 static void do_set_vcpu_timer_virq(CPUState *cs, run_on_cpu_data data)
 {
+    QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock);
     kvm_xen_set_vcpu_timer(cs);
 }
@@ -545,7 +548,6 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
     env->xen_vcpu_time_info_gpa = INVALID_GPA;
     env->xen_vcpu_runstate_gpa = INVALID_GPA;
     env->xen_vcpu_callback_vector = 0;
-    env->xen_singleshot_timer_ns = 0;
     memset(env->xen_virq, 0, sizeof(env->xen_virq));

     set_vcpu_info(cs, INVALID_GPA);
@@ -555,8 +557,13 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
                    INVALID_GPA);
     if (kvm_xen_has_cap(EVTCHN_SEND)) {
         kvm_xen_set_vcpu_callback_vector(cs);
+
+        QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock);
+        env->xen_singleshot_timer_ns = 0;
         kvm_xen_set_vcpu_timer(cs);
-    }
+    } else {
+        vcpuop_stop_singleshot_timer(cs);
+    }
 }
@@ -1059,6 +1066,10 @@ static int vcpuop_stop_periodic_timer(CPUState *target)
     return 0;
 }

+/*
+ * Userspace handling of timer, for older kernels.
+ * Must always be called with xen_timers_lock held.
+ */
 static int do_set_singleshot_timer(CPUState *cs, uint64_t timeout_abs,
                                    bool future, bool linux_wa)
 {
@@ -1086,12 +1097,8 @@ static int do_set_singleshot_timer(CPUState *cs, uint64_t timeout_abs,
         timeout_abs = now + delta;
     }

-    qemu_mutex_lock(&env->xen_timers_lock);
-
     timer_mod_ns(env->xen_singleshot_timer, qemu_now + delta);
     env->xen_singleshot_timer_ns = now + delta;
-
-    qemu_mutex_unlock(&env->xen_timers_lock);
     return 0;
 }

@@ -1115,6 +1122,7 @@ static int vcpuop_set_singleshot_timer(CPUState *cs, uint64_t arg)
         return -EFAULT;
     }

+    QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock);
     return do_set_singleshot_timer(cs, sst.timeout_abs_ns,
                                    !!(sst.flags & VCPU_SSHOTTMR_future),
                                    false);
@@ -1141,6 +1149,7 @@ static bool kvm_xen_hcall_set_timer_op(struct kvm_xen_exit *exit, X86CPU *cpu,
     if (unlikely(timeout == 0)) {
         err = vcpuop_stop_singleshot_timer(CPU(cpu));
     } else {
+        QEMU_LOCK_GUARD(&X86_CPU(cpu)->env.xen_timers_lock);
         err = do_set_singleshot_timer(CPU(cpu), timeout, false, true);
     }
     exit->u.hcall.result = err;
@@ -1826,6 +1835,7 @@ int kvm_put_xen_state(CPUState *cs)
          * If the kernel has EVTCHN_SEND support then it handles timers too,
          * so the timer will be restored by kvm_xen_set_vcpu_timer() below.
          */
+        QEMU_LOCK_GUARD(&env->xen_timers_lock);
         if (env->xen_singleshot_timer_ns) {
             ret = do_set_singleshot_timer(cs, env->xen_singleshot_timer_ns,
                                           false, false);
@@ -1844,10 +1854,7 @@ int kvm_put_xen_state(CPUState *cs)
     }

     if (env->xen_virq[VIRQ_TIMER]) {
-        ret = kvm_xen_set_vcpu_timer(cs);
-        if (ret < 0) {
-            return ret;
-        }
+        do_set_vcpu_timer_virq(cs, RUN_ON_CPU_HOST_INT(env->xen_virq[VIRQ_TIMER]));
     }
     return 0;
 }
@@ -1896,6 +1903,15 @@ int kvm_get_xen_state(CPUState *cs)
         if (ret < 0) {
             return ret;
         }
+
+        /*
+         * This locking is fairly pointless, and is here to appease Coverity.
+         * There is an unavoidable race condition if a different vCPU sets a
+         * timer for this vCPU after the value has been read out. But that's
+         * OK in practice because *all* the vCPUs need to be stopped before
+         * we set about migrating their state.
+         */
+        QEMU_LOCK_GUARD(&X86_CPU(cs)->env.xen_timers_lock);
         env->xen_singleshot_timer_ns = va.u.timer.expires_ns;
     }

--
2.40.1

Thread overview:
2023-08-01 17:57 [PATCH for-8.1] Misc Xen-on-KVM fixes David Woodhouse
2023-08-01 17:57 ` [PATCH for-8.1 1/3] hw/xen: fix off-by-one in xen_evtchn_set_gsi() David Woodhouse
2023-08-01 17:57 ` David Woodhouse [this message]
2023-08-01 17:57 ` [PATCH for-8.1 3/3] hw/xen: prevent guest from binding loopback event channel to itself David Woodhouse
2023-08-01 21:46 ` [PATCH for-8.1] Misc Xen-on-KVM fixes Philippe Mathieu-Daudé