From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Vincenzo Maffione <v.maffione@gmail.com>
Subject: [Qemu-devel] [PATCH 1/3] net: remove implicit peer from offload API
Date: Thu, 20 Feb 2014 12:14:07 +0100	[thread overview]
Message-ID: <1392894849-7907-2-git-send-email-stefanha@redhat.com> (raw)
In-Reply-To: <1392894849-7907-1-git-send-email-stefanha@redhat.com>

The virtio_net offload API operates on the NIC's peer (i.e. the tap
device).  The API was defined to dereference nc->peer implicitly, saving
the caller the trouble.

This wasn't ideal because:
1. Some callers have the peer but not the NIC.  Currently they are
   forced to bypass the API and access peer->info->... directly.
2. The rest of the net.h API takes nc, not nc->peer, so the offload
   functions are inconsistent with it.

This patch pushes nc->peer back up to callers.
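
For illustration, here is the change at a typical call site, taken
from the hw/net/virtio-net.c hunk below (nc is one of the NIC's own
queues):

    /* before: the helper dereferenced nc->peer internally */
    n->has_vnet_hdr = qemu_peer_has_vnet_hdr(nc);

    /* after: the caller passes the peer (the tap device) explicitly */
    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);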

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
 hw/net/virtio-net.c | 12 ++++++------
 hw/net/vmxnet3.c    | 18 +++++++++---------
 include/net/net.h   | 14 +++++++-------
 net/net.c           | 36 ++++++++++++++++++------------------
 4 files changed, 40 insertions(+), 40 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index cda8c75..9218a09 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -325,7 +325,7 @@ static void peer_test_vnet_hdr(VirtIONet *n)
         return;
     }
 
-    n->has_vnet_hdr = qemu_peer_has_vnet_hdr(nc);
+    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
 }
 
 static int peer_has_vnet_hdr(VirtIONet *n)
@@ -338,7 +338,7 @@ static int peer_has_ufo(VirtIONet *n)
     if (!peer_has_vnet_hdr(n))
         return 0;
 
-    n->has_ufo = qemu_peer_has_ufo(qemu_get_queue(n->nic));
+    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
 
     return n->has_ufo;
 }
@@ -357,8 +357,8 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
         nc = qemu_get_subqueue(n->nic, i);
 
         if (peer_has_vnet_hdr(n) &&
-            qemu_peer_has_vnet_hdr_len(nc, n->guest_hdr_len)) {
-            qemu_peer_set_vnet_hdr_len(nc, n->guest_hdr_len);
+            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
+            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
             n->host_hdr_len = n->guest_hdr_len;
         }
     }
@@ -459,7 +459,7 @@ static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
 
 static void virtio_net_apply_guest_offloads(VirtIONet *n)
 {
-    qemu_peer_set_offload(qemu_get_subqueue(n->nic, 0),
+    qemu_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
             !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
@@ -1540,7 +1540,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     peer_test_vnet_hdr(n);
     if (peer_has_vnet_hdr(n)) {
         for (i = 0; i < n->max_queues; i++) {
-            qemu_peer_using_vnet_hdr(qemu_get_subqueue(n->nic, i), true);
+            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
         }
         n->host_hdr_len = sizeof(struct virtio_net_hdr);
     } else {
diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c
index 0524684..5be807c 100644
--- a/hw/net/vmxnet3.c
+++ b/hw/net/vmxnet3.c
@@ -1290,12 +1290,12 @@ static void vmxnet3_update_features(VMXNET3State *s)
               s->lro_supported, rxcso_supported,
               s->rx_vlan_stripping);
     if (s->peer_has_vhdr) {
-        qemu_peer_set_offload(qemu_get_queue(s->nic),
-                        rxcso_supported,
-                        s->lro_supported,
-                        s->lro_supported,
-                        0,
-                        0);
+        qemu_set_offload(qemu_get_queue(s->nic)->peer,
+                         rxcso_supported,
+                         s->lro_supported,
+                         s->lro_supported,
+                         0,
+                         0);
     }
 }
 
@@ -1885,7 +1885,7 @@ static bool vmxnet3_peer_has_vnet_hdr(VMXNET3State *s)
 {
     NetClientState *nc = qemu_get_queue(s->nic);
 
-    if (qemu_peer_has_vnet_hdr(nc)) {
+    if (qemu_has_vnet_hdr(nc->peer)) {
         return true;
     }
 
@@ -1933,10 +1933,10 @@ static void vmxnet3_net_init(VMXNET3State *s)
     s->lro_supported = false;
 
     if (s->peer_has_vhdr) {
-        qemu_peer_set_vnet_hdr_len(qemu_get_queue(s->nic),
+        qemu_set_vnet_hdr_len(qemu_get_queue(s->nic)->peer,
             sizeof(struct virtio_net_hdr));
 
-        qemu_peer_using_vnet_hdr(qemu_get_queue(s->nic), 1);
+        qemu_using_vnet_hdr(qemu_get_queue(s->nic)->peer, 1);
     }
 
     qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
diff --git a/include/net/net.h b/include/net/net.h
index 7b25394..8166345 100644
--- a/include/net/net.h
+++ b/include/net/net.h
@@ -132,13 +132,13 @@ ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
 void qemu_purge_queued_packets(NetClientState *nc);
 void qemu_flush_queued_packets(NetClientState *nc);
 void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]);
-bool qemu_peer_has_ufo(NetClientState *nc);
-bool qemu_peer_has_vnet_hdr(NetClientState *nc);
-bool qemu_peer_has_vnet_hdr_len(NetClientState *nc, int len);
-void qemu_peer_using_vnet_hdr(NetClientState *nc, bool enable);
-void qemu_peer_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
-                           int ecn, int ufo);
-void qemu_peer_set_vnet_hdr_len(NetClientState *nc, int len);
+bool qemu_has_ufo(NetClientState *nc);
+bool qemu_has_vnet_hdr(NetClientState *nc);
+bool qemu_has_vnet_hdr_len(NetClientState *nc, int len);
+void qemu_using_vnet_hdr(NetClientState *nc, bool enable);
+void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
+                      int ecn, int ufo);
+void qemu_set_vnet_hdr_len(NetClientState *nc, int len);
 void qemu_macaddr_default_if_unset(MACAddr *macaddr);
 int qemu_show_nic_models(const char *arg, const char *const *models);
 void qemu_check_nic_model(NICInfo *nd, const char *model);
diff --git a/net/net.c b/net/net.c
index 173673c..912991b 100644
--- a/net/net.c
+++ b/net/net.c
@@ -378,59 +378,59 @@ void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
     }
 }
 
-bool qemu_peer_has_ufo(NetClientState *nc)
+bool qemu_has_ufo(NetClientState *nc)
 {
-    if (!nc->peer || !nc->peer->info->has_ufo) {
+    if (!nc || !nc->info->has_ufo) {
         return false;
     }
 
-    return nc->peer->info->has_ufo(nc->peer);
+    return nc->info->has_ufo(nc);
 }
 
-bool qemu_peer_has_vnet_hdr(NetClientState *nc)
+bool qemu_has_vnet_hdr(NetClientState *nc)
 {
-    if (!nc->peer || !nc->peer->info->has_vnet_hdr) {
+    if (!nc || !nc->info->has_vnet_hdr) {
         return false;
     }
 
-    return nc->peer->info->has_vnet_hdr(nc->peer);
+    return nc->info->has_vnet_hdr(nc);
 }
 
-bool qemu_peer_has_vnet_hdr_len(NetClientState *nc, int len)
+bool qemu_has_vnet_hdr_len(NetClientState *nc, int len)
 {
-    if (!nc->peer || !nc->peer->info->has_vnet_hdr_len) {
+    if (!nc || !nc->info->has_vnet_hdr_len) {
         return false;
     }
 
-    return nc->peer->info->has_vnet_hdr_len(nc->peer, len);
+    return nc->info->has_vnet_hdr_len(nc, len);
 }
 
-void qemu_peer_using_vnet_hdr(NetClientState *nc, bool enable)
+void qemu_using_vnet_hdr(NetClientState *nc, bool enable)
 {
-    if (!nc->peer || !nc->peer->info->using_vnet_hdr) {
+    if (!nc || !nc->info->using_vnet_hdr) {
         return;
     }
 
-    nc->peer->info->using_vnet_hdr(nc->peer, enable);
+    nc->info->using_vnet_hdr(nc, enable);
 }
 
-void qemu_peer_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
+void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
                           int ecn, int ufo)
 {
-    if (!nc->peer || !nc->peer->info->set_offload) {
+    if (!nc || !nc->info->set_offload) {
         return;
     }
 
-    nc->peer->info->set_offload(nc->peer, csum, tso4, tso6, ecn, ufo);
+    nc->info->set_offload(nc, csum, tso4, tso6, ecn, ufo);
 }
 
-void qemu_peer_set_vnet_hdr_len(NetClientState *nc, int len)
+void qemu_set_vnet_hdr_len(NetClientState *nc, int len)
 {
-    if (!nc->peer || !nc->peer->info->set_vnet_hdr_len) {
+    if (!nc || !nc->info->set_vnet_hdr_len) {
         return;
     }
 
-    nc->peer->info->set_vnet_hdr_len(nc->peer, len);
+    nc->info->set_vnet_hdr_len(nc, len);
 }
 
 int qemu_can_send_packet(NetClientState *sender)
-- 
1.8.5.3
