From: Dmitry Osipenko <dmitry.osipenko@collabora.com>
To: "Akihiko Odaki" <akihiko.odaki@daynix.com>,
	"Huang Rui" <ray.huang@amd.com>,
	"Marc-André Lureau" <marcandre.lureau@redhat.com>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>,
	"Gerd Hoffmann" <kraxel@redhat.com>,
	"Alex Bennée" <alex.bennee@linaro.org>,
	"Michael S . Tsirkin" <mst@redhat.com>,
	"Paolo Bonzini" <pbonzini@redhat.com>
Cc: "Gert Wollny" <gert.wollny@collabora.com>,
	qemu-devel@nongnu.org,
	"Gurchetan Singh" <gurchetansingh@chromium.org>,
	"Alyssa Ross" <hi@alyssa.is>,
	"Roger Pau Monné" <roger.pau@citrix.com>,
	"Alex Deucher" <alexander.deucher@amd.com>,
	"Stefano Stabellini" <stefano.stabellini@amd.com>,
	"Christian König" <christian.koenig@amd.com>,
	"Xenia Ragiadakou" <xenia.ragiadakou@amd.com>,
	"Pierre-Eric Pelloux-Prayer" <pierre-eric.pelloux-prayer@amd.com>,
	"Honglei Huang" <honglei1.huang@amd.com>,
	"Julia Zhang" <julia.zhang@amd.com>,
	"Chen Jiqian" <Jiqian.Chen@amd.com>,
	"Rob Clark" <robdclark@gmail.com>,
	"Yiwei Zhang" <zzyiwei@chromium.org>,
	"Sergio Lopez Pascual" <slp@redhat.com>
Subject: [PATCH v5 4/8] virtio-gpu: Support asynchronous fencing
Date: Mon, 20 Jan 2025 01:00:46 +0300
Message-ID: <20250119220050.15167-5-dmitry.osipenko@collabora.com>
In-Reply-To: <20250119220050.15167-1-dmitry.osipenko@collabora.com>

Support the asynchronous fencing feature of virglrenderer. It allows
QEMU to handle a fence as soon as it is signalled instead of
periodically polling the fence status. This feature is required for
enabling DRM context support in QEMU because the legacy fencing mode
isn't supported for DRM contexts in virglrenderer.

Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
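For reviewers, a minimal standalone sketch of the hand-off pattern this
patch implements: the renderer thread pushes each signalled fence onto
an atomic singly-linked list and wakes the main loop, which later drains
the list and completes the queued commands. The sketch uses plain
pthreads/stdatomic in place of QEMU's QSLIST_*_ATOMIC helpers and bottom
halves; all names and types below are illustrative only, not QEMU or
virglrenderer API.

/*
 * Illustrative only: a lock-free (Treiber-stack) push from a "renderer"
 * thread, drained later on the "main loop" side, mirroring the roles of
 * async_fenceq and the async-fence bottom half in the patch below.
 * Build with: cc -pthread sketch.c
 */
#include <inttypes.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fence {
    uint64_t fence_id;
    struct fence *next;
};

/* Lock-free LIFO shared between the two sides. */
static _Atomic(struct fence *) fenceq;

/* Renderer-thread side: the analogue of a write_fence callback. */
static void fence_signalled(uint64_t fence_id)
{
    struct fence *f = malloc(sizeof(*f));

    f->fence_id = fence_id;
    f->next = atomic_load(&fenceq);
    /* Retry the push until the new head is swapped in atomically. */
    while (!atomic_compare_exchange_weak(&fenceq, &f->next, f)) {
    }
    /* QEMU would call qemu_bh_schedule() here to wake the main loop. */
}

/* Main-loop side: the analogue of the async-fence bottom half. */
static void drain_fences(void)
{
    /* Atomically take the whole list, newest entry first. */
    struct fence *f = atomic_exchange(&fenceq, NULL);

    while (f) {
        struct fence *next = f->next;
        printf("fence %" PRIu64 " completed\n", f->fence_id);
        free(f);
        f = next;
    }
}

static void *renderer_thread(void *arg)
{
    (void)arg;
    for (uint64_t id = 1; id <= 3; id++) {
        fence_signalled(id);
    }
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, renderer_thread, NULL);
    pthread_join(t, NULL);  /* all fences pushed; now drain them */
    drain_fences();
    return 0;
}
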
 hw/display/virtio-gpu-gl.c     |   3 +
 hw/display/virtio-gpu-virgl.c  | 142 ++++++++++++++++++++++++++++++---
 include/hw/virtio/virtio-gpu.h |  13 +++
 3 files changed, 149 insertions(+), 9 deletions(-)

diff --git a/hw/display/virtio-gpu-gl.c b/hw/display/virtio-gpu-gl.c
index 683fad3bf8a8..d9bb50ac1d4a 100644
--- a/hw/display/virtio-gpu-gl.c
+++ b/hw/display/virtio-gpu-gl.c
@@ -169,6 +169,9 @@ static void virtio_gpu_gl_device_unrealize(DeviceState *qdev)
     if (gl->renderer_state >= RS_INITED) {
 #if VIRGL_VERSION_MAJOR >= 1
         qemu_bh_delete(gl->cmdq_resume_bh);
+
+        virtio_gpu_virgl_reset_async_fences(g);
+        qemu_bh_delete(gl->async_fence_bh);
 #endif
         if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
             timer_free(gl->print_stats);
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c
index 2eb6aaab4e84..15a465acf43b 100644
--- a/hw/display/virtio-gpu-virgl.c
+++ b/hw/display/virtio-gpu-virgl.c
@@ -871,6 +871,7 @@ static void virgl_cmd_set_scanout_blob(VirtIOGPU *g,
 void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
                                       struct virtio_gpu_ctrl_command *cmd)
 {
+    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
     bool cmd_suspended = false;
     int ret;
 
@@ -972,15 +973,29 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
 
     trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
 
-    /*
-     * Unlike other virglrenderer functions, this one returns a positive
-     * error code.
-     */
-    ret = virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, 0);
-    if (ret) {
-        qemu_log_mask(LOG_GUEST_ERROR,
-                      "%s: virgl_renderer_create_fence error: %s",
-                      __func__, strerror(ret));
+    if (gl->context_fence_enabled &&
+        (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX)) {
+        uint32_t flags = 0;
+
+        ret = virgl_renderer_context_create_fence(cmd->cmd_hdr.ctx_id, flags,
+                                                  cmd->cmd_hdr.ring_idx,
+                                                  cmd->cmd_hdr.fence_id);
+        if (ret) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: virgl_renderer_context_create_fence error: %s",
+                          __func__, strerror(-ret));
+        }
+    } else {
+        /*
+         * Unlike other virglrenderer functions, this one returns a positive
+         * error code.
+         */
+        ret = virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, 0);
+        if (ret) {
+            qemu_log_mask(LOG_GUEST_ERROR,
+                          "%s: virgl_renderer_create_fence error: %s",
+                          __func__, strerror(ret));
+        }
     }
 }
 
@@ -1008,6 +1023,102 @@ static void virgl_write_fence(void *opaque, uint32_t fence)
     }
 }
 
+void virtio_gpu_virgl_reset_async_fences(VirtIOGPU *g)
+{
+    struct virtio_gpu_virgl_context_fence *f;
+    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
+
+    while (!QSLIST_EMPTY(&gl->async_fenceq)) {
+        f = QSLIST_FIRST(&gl->async_fenceq);
+
+        QSLIST_REMOVE_HEAD(&gl->async_fenceq, next);
+
+        g_free(f);
+    }
+}
+
+#if VIRGL_VERSION_MAJOR >= 1
+static void virtio_gpu_virgl_async_fence_bh(void *opaque)
+{
+    QSLIST_HEAD(, virtio_gpu_virgl_context_fence) async_fenceq;
+    struct virtio_gpu_ctrl_command *cmd, *tmp;
+    struct virtio_gpu_virgl_context_fence *f;
+    VirtIOGPU *g = opaque;
+    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
+
+    QSLIST_MOVE_ATOMIC(&async_fenceq, &gl->async_fenceq);
+
+    while (!QSLIST_EMPTY(&async_fenceq)) {
+        f = QSLIST_FIRST(&async_fenceq);
+
+        QSLIST_REMOVE_HEAD(&async_fenceq, next);
+
+        QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
+            /*
+             * the guest can end up emitting fences out of order
+             * so we should check all fenced cmds not just the first one.
+             */
+            if (cmd->cmd_hdr.fence_id > f->fence_id) {
+                continue;
+            }
+            if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX) {
+                if (cmd->cmd_hdr.ring_idx != f->ring_idx) {
+                    continue;
+                }
+                if (cmd->cmd_hdr.ctx_id != f->ctx_id) {
+                    continue;
+                }
+            } else if (f->ring_idx >= 0) {
+                /* ctx0 GL-query fences don't have ring info */
+                continue;
+            }
+            virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
+            QTAILQ_REMOVE(&g->fenceq, cmd, next);
+            g_free(cmd);
+        }
+
+        trace_virtio_gpu_fence_resp(f->fence_id);
+        g_free(f);
+        g->inflight--;
+        if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
+            trace_virtio_gpu_dec_inflight_fences(g->inflight);
+        }
+    }
+}
+
+static void
+virtio_gpu_virgl_push_async_fence(VirtIOGPU *g, uint32_t ctx_id,
+                                  int64_t ring_idx, uint64_t fence_id)
+{
+    struct virtio_gpu_virgl_context_fence *f;
+    VirtIOGPUGL *gl = VIRTIO_GPU_GL(g);
+
+    f = g_new(struct virtio_gpu_virgl_context_fence, 1);
+    f->ctx_id = ctx_id;
+    f->ring_idx = ring_idx;
+    f->fence_id = fence_id;
+
+    QSLIST_INSERT_HEAD_ATOMIC(&gl->async_fenceq, f, next);
+
+    qemu_bh_schedule(gl->async_fence_bh);
+}
+
+static void virgl_write_async_fence(void *opaque, uint32_t fence)
+{
+    VirtIOGPU *g = opaque;
+
+    virtio_gpu_virgl_push_async_fence(g, 0, -1, fence);
+}
+
+static void virgl_write_async_context_fence(void *opaque, uint32_t ctx_id,
+                                            uint32_t ring_idx, uint64_t fence)
+{
+    VirtIOGPU *g = opaque;
+
+    virtio_gpu_virgl_push_async_fence(g, ctx_id, ring_idx, fence);
+}
+#endif
+
 static virgl_renderer_gl_context
 virgl_create_context(void *opaque, int scanout_idx,
                      struct virgl_renderer_gl_ctx_param *params)
@@ -1095,6 +1206,8 @@ void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g)
         dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
         dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
     }
+
+    virtio_gpu_virgl_reset_async_fences(g);
 }
 
 void virtio_gpu_virgl_reset(VirtIOGPU *g)
@@ -1112,6 +1225,13 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
     if (qemu_egl_display) {
         virtio_gpu_3d_cbs.version = 4;
         virtio_gpu_3d_cbs.get_egl_display = virgl_get_egl_display;
+#if VIRGL_VERSION_MAJOR >= 1
+        virtio_gpu_3d_cbs.write_fence         = virgl_write_async_fence;
+        virtio_gpu_3d_cbs.write_context_fence = virgl_write_async_context_fence;
+        flags |= VIRGL_RENDERER_ASYNC_FENCE_CB;
+        flags |= VIRGL_RENDERER_THREAD_SYNC;
+        gl->context_fence_enabled = true;
+#endif
     }
 #endif
 #ifdef VIRGL_RENDERER_D3D11_SHARE_TEXTURE
@@ -1145,6 +1265,10 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
     gl->cmdq_resume_bh = aio_bh_new(qemu_get_aio_context(),
                                     virtio_gpu_virgl_resume_cmdq_bh,
                                     g);
+
+    gl->async_fence_bh = aio_bh_new(qemu_get_aio_context(),
+                                    virtio_gpu_virgl_async_fence_bh,
+                                    g);
 #endif
 
     return 0;
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index a42957c4e2cc..bd2cccdc60d7 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -230,6 +230,13 @@ struct VirtIOGPUClass {
                              Error **errp);
 };
 
+struct virtio_gpu_virgl_context_fence {
+    uint32_t ctx_id;
+    int64_t ring_idx;
+    uint64_t fence_id;
+    QSLIST_ENTRY(virtio_gpu_virgl_context_fence) next;
+};
+
 /* VirtIOGPUGL renderer states */
 typedef enum {
     RS_START,       /* starting state */
@@ -247,6 +254,11 @@ struct VirtIOGPUGL {
     QEMUTimer *print_stats;
 
     QEMUBH *cmdq_resume_bh;
+
+    QEMUBH *async_fence_bh;
+    QSLIST_HEAD(, virtio_gpu_virgl_context_fence) async_fenceq;
+
+    bool context_fence_enabled;
 };
 
 struct VhostUserGPU {
@@ -376,5 +388,6 @@ void virtio_gpu_virgl_reset_scanout(VirtIOGPU *g);
 void virtio_gpu_virgl_reset(VirtIOGPU *g);
 int virtio_gpu_virgl_init(VirtIOGPU *g);
 GArray *virtio_gpu_virgl_get_capsets(VirtIOGPU *g);
+void virtio_gpu_virgl_reset_async_fences(VirtIOGPU *g);
 
 #endif
-- 
2.47.1



