* [PATCH 1/2] virtio-gpu: refactor generate_edid function to virtio_gpu_base
From: Erico Nunes @ 2023-05-11 12:58 UTC
To: qemu-devel; +Cc: marcandre.lureau, mst, kraxel, Erico Nunes
This function can be shared with the upcoming use in vhost-user-gpu, so
move it to the shared virtio-gpu-base file to avoid duplicating it.
Signed-off-by: Erico Nunes <ernunes@redhat.com>
---
hw/display/virtio-gpu-base.c | 17 +++++++++++++++++
hw/display/virtio-gpu.c | 20 +-------------------
include/hw/virtio/virtio-gpu.h | 2 ++
3 files changed, 20 insertions(+), 19 deletions(-)
diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c
index a29f191aa8..7ab7d08d0a 100644
--- a/hw/display/virtio-gpu-base.c
+++ b/hw/display/virtio-gpu-base.c
@@ -17,6 +17,7 @@
#include "migration/blocker.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
+#include "hw/display/edid.h"
#include "trace.h"
void
@@ -51,6 +52,22 @@ virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
}
}
+void
+virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
+ struct virtio_gpu_resp_edid *edid)
+{
+ qemu_edid_info info = {
+ .width_mm = g->req_state[scanout].width_mm,
+ .height_mm = g->req_state[scanout].height_mm,
+ .prefx = g->req_state[scanout].width,
+ .prefy = g->req_state[scanout].height,
+ .refresh_rate = g->req_state[scanout].refresh_rate,
+ };
+
+ edid->size = cpu_to_le32(sizeof(edid->edid));
+ qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
+}
+
static void virtio_gpu_invalidate_display(void *opaque)
{
}
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 5e15c79b94..5649269d4c 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -24,7 +24,6 @@
#include "hw/virtio/virtio-gpu-bswap.h"
#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/display/edid.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
@@ -207,23 +206,6 @@ void virtio_gpu_get_display_info(VirtIOGPU *g,
sizeof(display_info));
}
-static void
-virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
- struct virtio_gpu_resp_edid *edid)
-{
- VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
- qemu_edid_info info = {
- .width_mm = b->req_state[scanout].width_mm,
- .height_mm = b->req_state[scanout].height_mm,
- .prefx = b->req_state[scanout].width,
- .prefy = b->req_state[scanout].height,
- .refresh_rate = b->req_state[scanout].refresh_rate,
- };
-
- edid->size = cpu_to_le32(sizeof(edid->edid));
- qemu_edid_generate(edid->edid, sizeof(edid->edid), &info);
-}
-
void virtio_gpu_get_edid(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
@@ -242,7 +224,7 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
trace_virtio_gpu_cmd_get_edid(get_edid.scanout);
memset(&edid, 0, sizeof(edid));
edid.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
- virtio_gpu_generate_edid(g, get_edid.scanout, &edid);
+ virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), get_edid.scanout, &edid);
virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 2e28507efe..39c1a6a6ea 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -239,6 +239,8 @@ void virtio_gpu_base_reset(VirtIOGPUBase *g);
void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
struct virtio_gpu_resp_display_info *dpy_info);
+void virtio_gpu_base_generate_edid(VirtIOGPUBase *g, int scanout,
+ struct virtio_gpu_resp_edid *edid);
/* virtio-gpu.c */
void virtio_gpu_ctrl_response(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd,
--
2.39.2
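For reference, a minimal sketch (not part of the patch) of how a device deriving
from VirtIOGPUBase can use the newly exported helper; it mirrors the call sites in
virtio-gpu.c above and in patch 2/2, and "g" and "scanout_id" are placeholders here:

    struct virtio_gpu_resp_edid resp;

    /* Build an EDID reply for one scanout with the shared helper. */
    memset(&resp, 0, sizeof(resp));
    resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
    virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), scanout_id, &resp);
    /* resp.size and resp.edid[] are now filled via qemu_edid_generate(). */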
* [PATCH 2/2] vhost-user-gpu: implement get_edid feature
From: Erico Nunes @ 2023-05-11 12:58 UTC
To: qemu-devel; +Cc: marcandre.lureau, mst, kraxel, Erico Nunes
Implement the virtio-gpu get_edid feature in vhost-user-gpu, which was
unsupported until now.
In this implementation, the feature is enabled unconditionally to avoid
creating another optional config option.
Similarly to get_display_info, vhost-user-gpu sends a message back to
the qemu process, which has access to all the display information. In
the case of get_edid, it also needs to pass which scanout to retrieve
the EDID for.
Signed-off-by: Erico Nunes <ernunes@redhat.com>
---
contrib/vhost-user-gpu/vhost-user-gpu.c | 53 ++++++++++++++++++++++++-
contrib/vhost-user-gpu/virgl.c | 3 ++
contrib/vhost-user-gpu/vugpu.h | 8 ++++
docs/interop/vhost-user-gpu.rst | 9 +++++
hw/display/vhost-user-gpu.c | 31 +++++++++++++++
5 files changed, 102 insertions(+), 2 deletions(-)
diff --git a/contrib/vhost-user-gpu/vhost-user-gpu.c b/contrib/vhost-user-gpu/vhost-user-gpu.c
index bfb8d93cf8..26820be954 100644
--- a/contrib/vhost-user-gpu/vhost-user-gpu.c
+++ b/contrib/vhost-user-gpu/vhost-user-gpu.c
@@ -303,6 +303,53 @@ vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
cmd->state = VG_CMD_STATE_PENDING;
}
+static gboolean
+get_edid_cb(gint fd, GIOCondition condition, gpointer user_data)
+{
+ struct virtio_gpu_resp_edid resp_edid;
+ VuGpu *vg = user_data;
+ struct virtio_gpu_ctrl_command *cmd = QTAILQ_LAST(&vg->fenceq);
+
+ g_debug("get edid cb");
+ assert(cmd->cmd_hdr.type == VIRTIO_GPU_CMD_GET_EDID);
+ if (!vg_recv_msg(vg, VHOST_USER_GPU_GET_EDID,
+ sizeof(resp_edid), &resp_edid)) {
+ return G_SOURCE_CONTINUE;
+ }
+
+ QTAILQ_REMOVE(&vg->fenceq, cmd, next);
+ vg_ctrl_response(vg, cmd, &resp_edid.hdr, sizeof(resp_edid));
+
+ vg->wait_in = 0;
+ vg_handle_ctrl(&vg->dev.parent, 0);
+
+ return G_SOURCE_REMOVE;
+}
+
+void
+vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_cmd_get_edid get_edid;
+
+ VUGPU_FILL_CMD(get_edid);
+ virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
+
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_GET_EDID,
+ .size = sizeof(VhostUserGpuEdidRequest),
+ .payload.edid_req = {
+ .scanout_id = get_edid.scanout,
+ },
+ };
+
+ assert(vg->wait_in == 0);
+
+ vg_send_msg(vg, &msg, -1);
+ vg->wait_in = g_unix_fd_add(vg->sock_fd, G_IO_IN | G_IO_HUP,
+ get_edid_cb, vg);
+ cmd->state = VG_CMD_STATE_PENDING;
+}
+
static void
vg_resource_create_2d(VuGpu *g,
struct virtio_gpu_ctrl_command *cmd)
@@ -837,8 +884,9 @@ vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
vg_resource_detach_backing(vg, cmd);
break;
- /* case VIRTIO_GPU_CMD_GET_EDID: */
- /* break */
+ case VIRTIO_GPU_CMD_GET_EDID:
+ vg_get_edid(vg, cmd);
+ break;
default:
g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
@@ -1086,6 +1134,7 @@ vg_get_features(VuDev *dev)
if (opt_virgl) {
features |= 1 << VIRTIO_GPU_F_VIRGL;
}
+ features |= 1 << VIRTIO_GPU_F_EDID;
return features;
}
diff --git a/contrib/vhost-user-gpu/virgl.c b/contrib/vhost-user-gpu/virgl.c
index 3e45e1bd33..211aa110a9 100644
--- a/contrib/vhost-user-gpu/virgl.c
+++ b/contrib/vhost-user-gpu/virgl.c
@@ -495,6 +495,9 @@ void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
vg_get_display_info(g, cmd);
break;
+ case VIRTIO_GPU_CMD_GET_EDID:
+ vg_get_edid(g, cmd);
+ break;
default:
g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
diff --git a/contrib/vhost-user-gpu/vugpu.h b/contrib/vhost-user-gpu/vugpu.h
index e2864bba68..db8b72b866 100644
--- a/contrib/vhost-user-gpu/vugpu.h
+++ b/contrib/vhost-user-gpu/vugpu.h
@@ -36,6 +36,7 @@ typedef enum VhostUserGpuRequest {
VHOST_USER_GPU_UPDATE,
VHOST_USER_GPU_DMABUF_SCANOUT,
VHOST_USER_GPU_DMABUF_UPDATE,
+ VHOST_USER_GPU_GET_EDID,
} VhostUserGpuRequest;
typedef struct VhostUserGpuDisplayInfoReply {
@@ -83,6 +84,10 @@ typedef struct VhostUserGpuDMABUFScanout {
int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;
+typedef struct VhostUserGpuEdidRequest {
+ uint32_t scanout_id;
+} QEMU_PACKED VhostUserGpuEdidRequest;
+
typedef struct VhostUserGpuMsg {
uint32_t request; /* VhostUserGpuRequest */
uint32_t flags;
@@ -93,6 +98,8 @@ typedef struct VhostUserGpuMsg {
VhostUserGpuScanout scanout;
VhostUserGpuUpdate update;
VhostUserGpuDMABUFScanout dmabuf_scanout;
+ VhostUserGpuEdidRequest edid_req;
+ struct virtio_gpu_resp_edid resp_edid;
struct virtio_gpu_resp_display_info display_info;
uint64_t u64;
} payload;
@@ -171,6 +178,7 @@ int vg_create_mapping_iov(VuGpu *g,
struct iovec **iov);
void vg_cleanup_mapping_iov(VuGpu *g, struct iovec *iov, uint32_t count);
void vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
+void vg_get_edid(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
void vg_wait_ok(VuGpu *g);
diff --git a/docs/interop/vhost-user-gpu.rst b/docs/interop/vhost-user-gpu.rst
index 1640553729..c6fe2d9c2a 100644
--- a/docs/interop/vhost-user-gpu.rst
+++ b/docs/interop/vhost-user-gpu.rst
@@ -141,6 +141,8 @@ In QEMU the vhost-user-gpu message is implemented with the following struct:
VhostUserGpuScanout scanout;
VhostUserGpuUpdate update;
VhostUserGpuDMABUFScanout dmabuf_scanout;
+ VhostUserGpuEdidRequest edid_req;
+ struct virtio_gpu_resp_edid resp_edid;
struct virtio_gpu_resp_display_info display_info;
uint64_t u64;
} payload;
@@ -241,3 +243,10 @@ Message types
Note: there is no data payload, since the scanout is shared thanks
to DMABUF, that must have been set previously with
``VHOST_USER_GPU_DMABUF_SCANOUT``.
+
+``VHOST_USER_GPU_GET_EDID``
+ :id: 11
+ :request payload: ``struct VhostUserGpuEdidRequest``
+ :reply payload: ``struct virtio_gpu_resp_edid`` (from virtio specification)
+
+ Retrieve the EDID data for a given scanout.
diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
index 71dfd956b8..a124e3847b 100644
--- a/hw/display/vhost-user-gpu.c
+++ b/hw/display/vhost-user-gpu.c
@@ -31,6 +31,7 @@ typedef enum VhostUserGpuRequest {
VHOST_USER_GPU_UPDATE,
VHOST_USER_GPU_DMABUF_SCANOUT,
VHOST_USER_GPU_DMABUF_UPDATE,
+ VHOST_USER_GPU_GET_EDID,
} VhostUserGpuRequest;
typedef struct VhostUserGpuDisplayInfoReply {
@@ -78,6 +79,10 @@ typedef struct VhostUserGpuDMABUFScanout {
int fd_drm_fourcc;
} QEMU_PACKED VhostUserGpuDMABUFScanout;
+typedef struct VhostUserGpuEdidRequest {
+ uint32_t scanout_id;
+} QEMU_PACKED VhostUserGpuEdidRequest;
+
typedef struct VhostUserGpuMsg {
uint32_t request; /* VhostUserGpuRequest */
uint32_t flags;
@@ -88,6 +93,8 @@ typedef struct VhostUserGpuMsg {
VhostUserGpuScanout scanout;
VhostUserGpuUpdate update;
VhostUserGpuDMABUFScanout dmabuf_scanout;
+ VhostUserGpuEdidRequest edid_req;
+ struct virtio_gpu_resp_edid resp_edid;
struct virtio_gpu_resp_display_info display_info;
uint64_t u64;
} payload;
@@ -184,6 +191,30 @@ vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
vhost_user_gpu_send_msg(g, &reply);
break;
}
+ case VHOST_USER_GPU_GET_EDID: {
+ VhostUserGpuEdidRequest *m = &msg->payload.edid_req;
+ struct virtio_gpu_resp_edid resp = { {} };
+ int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
+ VhostUserGpuMsg reply = {
+ .request = msg->request,
+ .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+ .size = sizeof(reply.payload.resp_edid),
+ };
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+ error_report("invalid scanout: %d", m->scanout_id);
+ if (fd >= 0) {
+ close(fd);
+ }
+ break;
+ }
+
+ resp.hdr.type = VIRTIO_GPU_RESP_OK_EDID;
+ virtio_gpu_base_generate_edid(VIRTIO_GPU_BASE(g), m->scanout_id, &resp);
+ memcpy(&reply.payload.resp_edid, &resp, sizeof(resp));
+ vhost_user_gpu_send_msg(g, &reply);
+ break;
+ }
case VHOST_USER_GPU_SCANOUT: {
VhostUserGpuScanout *m = &msg->payload.scanout;
--
2.39.2
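For reference, a minimal sketch (not part of the patch) of the backend side of the
exchange added above, condensed from vg_get_edid() and get_edid_cb(); the scanout
index normally comes from the guest's VIRTIO_GPU_CMD_GET_EDID command, and the
value 0 below is only illustrative:

    /* Backend -> front-end: ask QEMU for the EDID of one scanout. */
    VhostUserGpuMsg msg = {
        .request = VHOST_USER_GPU_GET_EDID,
        .size = sizeof(VhostUserGpuEdidRequest),
        .payload.edid_req = { .scanout_id = 0 },
    };
    vg_send_msg(vg, &msg, -1);
    /* Front-end -> backend: QEMU answers with VHOST_USER_GPU_MSG_FLAG_REPLY set
     * and a struct virtio_gpu_resp_edid payload, generated by
     * virtio_gpu_base_generate_edid(); get_edid_cb() then forwards it to the
     * guest as the VIRTIO_GPU_CMD_GET_EDID response. */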
* Re: [PATCH 0/2] vhost-user-gpu get_edid feature
From: Marc-André Lureau @ 2023-05-15 11:38 UTC
To: Erico Nunes; +Cc: qemu-devel, mst, kraxel
Hi
On Thu, May 11, 2023 at 5:02 PM Erico Nunes <ernunes@redhat.com> wrote:
> This adds support to the virtio-gpu get_edid command when using the
> vhost-user-gpu implementation in contrib/.
> So far, qemu has been outputting the following message:
>   EDID requested but the backend doesn't support it.
> when using that implementation.
>
> This is tested with vhost-user-gpu, the dbus ui backend and the
> monitor-edid application, which now shows complete "QEMU Monitor" edid
> data.
>
> In this v1, I would appreciate some feedback especially regarding:
> - Can we enable it by default or do we need to create another config option
> flag for it?
>
Enabling it by default is ok, I think.
> - Can we now also remove the "EDID requested but the backend doesn't
> support it." warning and logic from hw/display or do we still want to
> keep that around for other potential implementations of
> vhost-user-gpu?
>
Imho, that should remain. (vhost-user-gpu could have set edid=false by
default to avoid this error, but then it would need to be explicitly turned
on to match the backend implementation)
> - The structs used as payloads of the vhost-user-gpu messages. Looks
> like there was no command so far requiring bidirectional messages with
> different payloads so I just based it on similar available ones.
>
>
That looks fine to me
> Thanks
>
>
> Erico Nunes (2):
> virtio-gpu: refactor generate_edid function to virtio_gpu_base
> vhost-user-gpu: implement get_edid feature
>
> contrib/vhost-user-gpu/vhost-user-gpu.c | 53 ++++++++++++++++++++++++-
> contrib/vhost-user-gpu/virgl.c | 3 ++
> contrib/vhost-user-gpu/vugpu.h | 8 ++++
> docs/interop/vhost-user-gpu.rst | 9 +++++
> hw/display/vhost-user-gpu.c | 31 +++++++++++++++
> hw/display/virtio-gpu-base.c | 17 ++++++++
> hw/display/virtio-gpu.c | 20 +---------
> include/hw/virtio/virtio-gpu.h | 2 +
> 8 files changed, 122 insertions(+), 21 deletions(-)
>
> --
> 2.39.2
>
>
>
I wonder if the backend couldn't have avoided the need for calling the
front-end (the VHOST_USER_GPU_GET_EDID call). But after all, it can still
implement it on its own, so it's optional anyway.
However, I worry about using the new backend (calling GET_EDID) with an
older front-end/QEMU. It may just hang, since
vhost_user_gpu_handle_display() won't reply to unknown messages. That's
what PROTOCOL_FEATURES were meant for, iirc. Can you check? thanks
--
Marc-André Lureau
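(For illustration only: one possible shape of the guard being asked about, assuming
the backend learns a negotiated protocol feature; both the bit name
VHOST_USER_GPU_PROTOCOL_F_EDID and the vg->protocol_features field below are
assumptions, not part of this series.)

    /* Hypothetical sketch: only advertise the virtio EDID feature when the
     * front-end negotiated a matching vhost-user-gpu protocol feature, so a
     * backend talking to an older QEMU never sends VHOST_USER_GPU_GET_EDID
     * and then waits forever for a reply that will not come. */
    if (vg->protocol_features & (1ull << VHOST_USER_GPU_PROTOCOL_F_EDID)) {
        features |= 1 << VIRTIO_GPU_F_EDID;
    }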