From: Cornelia Huck <cohuck@redhat.com>
To: peter.maydell@linaro.org
Cc: qemu-devel@nongnu.org, rth@twiddle.net, agraf@suse.de,
thuth@redhat.com, borntraeger@de.ibm.com, david@redhat.com,
Farhan Ali <alifm@linux.vnet.ibm.com>,
Cornelia Huck <cohuck@redhat.com>
Subject: [Qemu-devel] [PULL 36/38] virtio-gpu: Handle endian conversion
Date: Tue, 19 Sep 2017 16:56:34 +0200 [thread overview]
Message-ID: <20170919145636.9389-37-cohuck@redhat.com> (raw)
In-Reply-To: <20170919145636.9389-1-cohuck@redhat.com>
From: Farhan Ali <alifm@linux.vnet.ibm.com>
Virtio GPU code currently only supports little endian format,
and so using the Virtio GPU device on a big endian machine
does not work.
Let's fix it by supporting the correct host cpu byte order.
Signed-off-by: Farhan Ali <alifm@linux.vnet.ibm.com>
Message-Id: <dc748e15f36db808f90b4f2393bc29ba7556a9f6.1505485574.git.alifm@linux.vnet.ibm.com>
Reviewed-by: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
---
hw/display/virtio-gpu.c | 70 +++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 62 insertions(+), 8 deletions(-)
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 622ee300f9..3a8f1e1a2d 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -30,6 +30,48 @@ virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res);
+static void
+virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
+{
+ le32_to_cpus(&hdr->type);
+ le32_to_cpus(&hdr->flags);
+ le64_to_cpus(&hdr->fence_id);
+ le32_to_cpus(&hdr->ctx_id);
+ le32_to_cpus(&hdr->padding);
+}
+
+static void virtio_gpu_bswap_32(void *ptr,
+ size_t size)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+
+ size_t i;
+ struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;
+
+ virtio_gpu_ctrl_hdr_bswap(hdr);
+
+ i = sizeof(struct virtio_gpu_ctrl_hdr);
+ while (i < size) {
+ le32_to_cpus((uint32_t *)(ptr + i));
+ i = i + sizeof(uint32_t);
+ }
+
+#endif
+}
+
+static void
+virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
+{
+ virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
+ le32_to_cpus(&t2d->r.x);
+ le32_to_cpus(&t2d->r.y);
+ le32_to_cpus(&t2d->r.width);
+ le32_to_cpus(&t2d->r.height);
+ le64_to_cpus(&t2d->offset);
+ le32_to_cpus(&t2d->resource_id);
+ le32_to_cpus(&t2d->padding);
+}
+
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...) \
@@ -205,6 +247,7 @@ void virtio_gpu_ctrl_response(VirtIOGPU *g,
resp->fence_id = cmd->cmd_hdr.fence_id;
resp->ctx_id = cmd->cmd_hdr.ctx_id;
}
+ virtio_gpu_ctrl_hdr_bswap(resp);
s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
if (s != resp_len) {
qemu_log_mask(LOG_GUEST_ERROR,
@@ -236,8 +279,8 @@ virtio_gpu_fill_display_info(VirtIOGPU *g,
for (i = 0; i < g->conf.max_outputs; i++) {
if (g->enabled_output_bitmask & (1 << i)) {
dpy_info->pmodes[i].enabled = 1;
- dpy_info->pmodes[i].r.width = g->req_state[i].width;
- dpy_info->pmodes[i].r.height = g->req_state[i].height;
+ dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
+ dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
}
}
}
@@ -287,6 +330,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
struct virtio_gpu_resource_create_2d c2d;
VIRTIO_GPU_FILL_CMD(c2d);
+ virtio_gpu_bswap_32(&c2d, sizeof(c2d));
trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format,
c2d.width, c2d.height);
@@ -360,6 +404,7 @@ static void virtio_gpu_resource_unref(VirtIOGPU *g,
struct virtio_gpu_resource_unref unref;
VIRTIO_GPU_FILL_CMD(unref);
+ virtio_gpu_bswap_32(&unref, sizeof(unref));
trace_virtio_gpu_cmd_res_unref(unref.resource_id);
res = virtio_gpu_find_resource(g, unref.resource_id);
@@ -383,6 +428,7 @@ static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g,
struct virtio_gpu_transfer_to_host_2d t2d;
VIRTIO_GPU_FILL_CMD(t2d);
+ virtio_gpu_t2d_bswap(&t2d);
trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id);
res = virtio_gpu_find_resource(g, t2d.resource_id);
@@ -439,6 +485,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
int i;
VIRTIO_GPU_FILL_CMD(rf);
+ virtio_gpu_bswap_32(&rf, sizeof(rf));
trace_virtio_gpu_cmd_res_flush(rf.resource_id,
rf.r.width, rf.r.height, rf.r.x, rf.r.y);
@@ -511,6 +558,7 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
struct virtio_gpu_set_scanout ss;
VIRTIO_GPU_FILL_CMD(ss);
+ virtio_gpu_bswap_32(&ss, sizeof(ss));
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
@@ -633,13 +681,15 @@ int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
*addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
}
for (i = 0; i < ab->nr_entries; i++) {
- hwaddr len = ents[i].length;
- (*iov)[i].iov_len = ents[i].length;
- (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
+ uint64_t a = le64_to_cpu(ents[i].addr);
+ uint32_t l = le32_to_cpu(ents[i].length);
+ hwaddr len = l;
+ (*iov)[i].iov_len = l;
+ (*iov)[i].iov_base = cpu_physical_memory_map(a, &len, 1);
if (addr) {
- (*addr)[i] = ents[i].addr;
+ (*addr)[i] = a;
}
- if (!(*iov)[i].iov_base || len != ents[i].length) {
+ if (!(*iov)[i].iov_base || len != l) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
" resource %d element %d\n",
__func__, ab->resource_id, i);
@@ -686,6 +736,7 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
int ret;
VIRTIO_GPU_FILL_CMD(ab);
+ virtio_gpu_bswap_32(&ab, sizeof(ab));
trace_virtio_gpu_cmd_res_back_attach(ab.resource_id);
res = virtio_gpu_find_resource(g, ab.resource_id);
@@ -718,6 +769,7 @@ virtio_gpu_resource_detach_backing(VirtIOGPU *g,
struct virtio_gpu_resource_detach_backing detach;
VIRTIO_GPU_FILL_CMD(detach);
+ virtio_gpu_bswap_32(&detach, sizeof(detach));
trace_virtio_gpu_cmd_res_back_detach(detach.resource_id);
res = virtio_gpu_find_resource(g, detach.resource_id);
@@ -734,6 +786,7 @@ static void virtio_gpu_simple_process_cmd(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr);
+ virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
switch (cmd->cmd_hdr.type) {
case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
@@ -879,6 +932,7 @@ static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
"%s: cursor size incorrect %zu vs %zu\n",
__func__, s, sizeof(cursor_info));
} else {
+ virtio_gpu_bswap_32(&cursor_info, sizeof(cursor_info));
update_cursor(g, &cursor_info);
}
virtqueue_push(vq, elem, 0);
@@ -1135,7 +1189,7 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
}
g->config_size = sizeof(struct virtio_gpu_config);
- g->virtio_config.num_scanouts = g->conf.max_outputs;
+ g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
g->config_size);
--
2.13.5
next prev parent reply other threads:[~2017-09-19 15:01 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-09-19 14:55 [Qemu-devel] [PULL 00/38] various s390x patches (+some fixes) Cornelia Huck
2017-09-19 14:55 ` [Qemu-devel] [PULL 01/38] s390x/css: fix cc handling for XSCH Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 02/38] tests: Enable the drive_del test also on s390x Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 03/38] s390x/pci: remove idx from msix msg data Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 04/38] s390x/pci: fixup ind_offset of msix routing entry Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 05/38] s390x/pci: add iommu replay callback Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 06/38] hw/misc/ivshmem: Fix ivshmem_recv_msg() to also work on big endian systems Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 07/38] s390x/css: drop data-check in interpretation Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 08/38] s390x/css: fix NULL handling for CCW addresses Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 09/38] s390x/css: remove unused error handling branch Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 10/38] s390: set DHCP client architecure id for netboot Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 11/38] exec, dump, i386, ppc, s390x: don't include exec/cpu-all.h explicitly Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 12/38] cpu: drop old comments describing members Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 13/38] s390x: get rid of s390-virtio.c Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 14/38] s390x: rename s390-virtio.h to s390-virtio-hcall.h Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 15/38] s390x: move s390_virtio_hypercall() " Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 16/38] s390x: move subsystem_reset() to s390-virtio-ccw.h Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 17/38] target/s390x: move some s390x typedefs to cpu-qom.h Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 18/38] s390x: move sclp_service_call() to sclp.h Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 19/38] target/s390x: use trigger_pgm_exception() in s390_cpu_handle_mmu_fault() Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 20/38] target/s390x: use program_interrupt() in per_check_exception() Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 21/38] s390x: allow only 1 CPU with TCG Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 22/38] target/s390x: set cpu->id for linux user when realizing Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 23/38] target/s390x: use "core-id" for cpu number/address/id handling Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 24/38] target/s390x: rename next_cpu_id to next_core_id Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 25/38] s390x: print CPU definitions in sorted order Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 26/38] s390x: allow cpu hotplug via device_add Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 27/38] s390x: CPU hot unplug via device_del cannot work for now Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 28/38] s390x: implement query-hotpluggable-cpus Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 29/38] s390x: get rid of cpu_states and use possible_cpus instead Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 30/38] s390x: get rid of cpu_s390x_create() Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 31/38] s390x: generate sclp cpu information from possible_cpus Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 32/38] s390x: allow CPU hotplug in random core-id order Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 33/38] virtio-ccw: remove stale comments on endianness Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 34/38] configure: Allow --enable-seccomp on s390x, too Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 35/38] s390x/ccw: create s390 phb for compat reasons as well Cornelia Huck
2017-09-19 14:56 ` Cornelia Huck [this message]
2017-09-19 14:56 ` [Qemu-devel] [PULL 37/38] virtio-ccw: Create a virtio gpu device for the ccw bus Cornelia Huck
2017-09-19 14:56 ` [Qemu-devel] [PULL 38/38] MAINTAINERS/s390x: add terminal3270.c Cornelia Huck
2017-09-19 16:13 ` [Qemu-devel] [PULL 00/38] various s390x patches (+some fixes) Peter Maydell
2017-09-19 16:27 ` Cornelia Huck
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20170919145636.9389-37-cohuck@redhat.com \
--to=cohuck@redhat.com \
--cc=agraf@suse.de \
--cc=alifm@linux.vnet.ibm.com \
--cc=borntraeger@de.ibm.com \
--cc=david@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=qemu-devel@nongnu.org \
--cc=rth@twiddle.net \
--cc=thuth@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).