From: Cornelia Huck <cohuck@redhat.com>
To: Peter Maydell <peter.maydell@linaro.org>
Cc: qemu-s390x@nongnu.org, qemu-devel@nongnu.org,
	Cornelia Huck <cohuck@redhat.com>
Subject: [Qemu-devel] [PULL 6/6] hw/s390x/virtio-ccw.c: Don't take address of fields in packed structs
Date: Wed, 12 Dec 2018 10:55:19 +0100	[thread overview]
Message-ID: <20181212095519.6390-7-cohuck@redhat.com> (raw)
In-Reply-To: <20181212095519.6390-1-cohuck@redhat.com>

From: Peter Maydell <peter.maydell@linaro.org>

Taking the address of a field in a packed struct is a bad idea, because
the field might not actually be aligned enough for that pointer type
(so dereferencing the pointer can crash on some host architectures).
Newer versions of clang warn about this. Avoid the bug by not using the
"modify in place" byte-swapping functions.

Patch produced with scripts/coccinelle/inplace-byteswaps.cocci
(with a couple of long lines manually wrapped).
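
To illustrate the hazard outside QEMU, here is a minimal standalone C
sketch; the struct layout and the demo_be64_to_cpu() helper are
hypothetical stand-ins, not QEMU code (QEMU's real be64_to_cpu() and
friends come from its bswap header):

/*
 * Minimal standalone sketch, not QEMU code: the struct and the
 * demo_be64_to_cpu() helper are hypothetical stand-ins used only to
 * show the pattern the patch converts to.
 */
#include <inttypes.h>
#include <stdio.h>

struct demo_info {
    uint8_t  pad;        /* pushes 'queue' to an odd, unaligned offset */
    uint64_t queue;      /* big-endian value as read from the guest */
} __attribute__((packed));

/* Stand-in for be64_to_cpu(); unconditional swap, little-endian hosts only. */
static inline uint64_t demo_be64_to_cpu(uint64_t v)
{
    return __builtin_bswap64(v);
}

int main(void)
{
    struct demo_info info = { .pad = 0, .queue = 0x0100000000000000ULL };

    /*
     * The old in-place style, be64_to_cpus(&info.queue), hands out a
     * uint64_t * to an unaligned address inside the packed struct;
     * clang's -Waddress-of-packed-member warns about this, and
     * dereferencing such a pointer can fault on strict-alignment hosts.
     *
     * The value-returning style below only accesses the field through
     * the struct itself, which the compiler knows may be unaligned:
     */
    info.queue = demo_be64_to_cpu(info.queue);

    printf("queue = %" PRIu64 "\n", info.queue);
    return 0;
}

The hunks below apply the same transformation, generated by the
Coccinelle script named above, to linfo, info, features, indicators,
vq_config, thinint and revinfo.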

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20181210120436.30522-1-peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Halil Pasic <pasic@linux.ibm.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
---
 hw/s390x/virtio-ccw.c | 42 ++++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 212b3d3dea..c2b78c8e9b 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -287,18 +287,18 @@ static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
     }
     if (is_legacy) {
         ccw_dstream_read(&sch->cds, linfo);
-        be64_to_cpus(&linfo.queue);
-        be32_to_cpus(&linfo.align);
-        be16_to_cpus(&linfo.index);
-        be16_to_cpus(&linfo.num);
+        linfo.queue = be64_to_cpu(linfo.queue);
+        linfo.align = be32_to_cpu(linfo.align);
+        linfo.index = be16_to_cpu(linfo.index);
+        linfo.num = be16_to_cpu(linfo.num);
         ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
     } else {
         ccw_dstream_read(&sch->cds, info);
-        be64_to_cpus(&info.desc);
-        be16_to_cpus(&info.index);
-        be16_to_cpus(&info.num);
-        be64_to_cpus(&info.avail);
-        be64_to_cpus(&info.used);
+        info.desc = be64_to_cpu(info.desc);
+        info.index = be16_to_cpu(info.index);
+        info.num = be16_to_cpu(info.num);
+        info.avail = be64_to_cpu(info.avail);
+        info.used = be64_to_cpu(info.used);
         ret = virtio_ccw_set_vqs(sch, &info, NULL);
     }
     sch->curr_status.scsw.count = 0;
@@ -382,7 +382,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
                 features.features = 0;
             }
             ccw_dstream_rewind(&sch->cds);
-            cpu_to_le32s(&features.features);
+            features.features = cpu_to_le32(features.features);
             ccw_dstream_write(&sch->cds, features.features);
             sch->curr_status.scsw.count = ccw.count - sizeof(features);
             ret = 0;
@@ -403,7 +403,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             ccw_dstream_read(&sch->cds, features);
-            le32_to_cpus(&features.features);
+            features.features = le32_to_cpu(features.features);
             if (features.index == 0) {
                 virtio_set_features(vdev,
                                     (vdev->guest_features & 0xffffffff00000000ULL) |
@@ -546,7 +546,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             ccw_dstream_read(&sch->cds, indicators);
-            be64_to_cpus(&indicators);
+            indicators = be64_to_cpu(indicators);
             dev->indicators = get_indicator(indicators, sizeof(uint64_t));
             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
             ret = 0;
@@ -567,7 +567,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             ccw_dstream_read(&sch->cds, indicators);
-            be64_to_cpus(&indicators);
+            indicators = be64_to_cpu(indicators);
             dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
             ret = 0;
@@ -588,14 +588,14 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             ccw_dstream_read(&sch->cds, vq_config.index);
-            be16_to_cpus(&vq_config.index);
+            vq_config.index = be16_to_cpu(vq_config.index);
             if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                 ret = -EINVAL;
                 break;
             }
             vq_config.num_max = virtio_queue_get_num(vdev,
                                                      vq_config.index);
-            cpu_to_be16s(&vq_config.num_max);
+            vq_config.num_max = cpu_to_be16(vq_config.num_max);
             ccw_dstream_write(&sch->cds, vq_config.num_max);
             sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
             ret = 0;
@@ -621,9 +621,11 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             if (ccw_dstream_read(&sch->cds, thinint)) {
                 ret = -EFAULT;
             } else {
-                be64_to_cpus(&thinint.ind_bit);
-                be64_to_cpus(&thinint.summary_indicator);
-                be64_to_cpus(&thinint.device_indicator);
+                thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
+                thinint.summary_indicator =
+                    be64_to_cpu(thinint.summary_indicator);
+                thinint.device_indicator =
+                    be64_to_cpu(thinint.device_indicator);
 
                 dev->summary_indicator =
                     get_indicator(thinint.summary_indicator, sizeof(uint8_t));
@@ -654,8 +656,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             break;
         }
         ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
-        be16_to_cpus(&revinfo.revision);
-        be16_to_cpus(&revinfo.length);
+        revinfo.revision = be16_to_cpu(revinfo.revision);
+        revinfo.length = be16_to_cpu(revinfo.length);
         if (ccw.count < len + revinfo.length ||
             (check_len && ccw.count > len + revinfo.length)) {
             ret = -EINVAL;
-- 
2.17.2

Thread overview: 8+ messages
2018-12-12  9:55 [Qemu-devel] [PULL 0/6] s390x: first batch of 4.0 changes Cornelia Huck
2018-12-12  9:55 ` [Qemu-devel] [PULL 1/6] s390x/zpci: drop msix.available Cornelia Huck
2018-12-12  9:55 ` [Qemu-devel] [PULL 2/6] s390x: introduce 4.0 compat machine Cornelia Huck
2018-12-12  9:55 ` [Qemu-devel] [PULL 3/6] s390/MAINTAINERS: Add Halil as kvm and machine maintainer Cornelia Huck
2018-12-12  9:55 ` [Qemu-devel] [PULL 4/6] s390x/tod: Properly stop the KVM TOD while the guest is not running Cornelia Huck
2018-12-12  9:55 ` [Qemu-devel] [PULL 5/6] vfio-ap: flag as compatible with balloon Cornelia Huck
2018-12-12  9:55 ` Cornelia Huck [this message]
2018-12-13 13:41 ` [Qemu-devel] [PULL 0/6] s390x: first batch of 4.0 changes Peter Maydell
