Date: Mon, 22 Apr 2013 20:00:36 +0300
From: "Michael S. Tsirkin"
Message-ID: <20130422170035.GA10601@redhat.com>
References: <1365316448-4604-1-git-send-email-dmitry@daynix.com>
 <1365316448-4604-2-git-send-email-dmitry@daynix.com>
In-Reply-To: <1365316448-4604-2-git-send-email-dmitry@daynix.com>
Subject: Re: [Qemu-devel] [PATCH 1/1 V4] virtio-net: dynamic network offloads configuration
To: Dmitry Fleytman
Cc: Rusty Russell <rusty@rustcorp.com.au>, qemu-devel@nongnu.org, Ronen Hod,
 Anthony Liguori, Yan Vugenfirer, Dmitry Fleytman

On Sun, Apr 07, 2013 at 09:34:08AM +0300, Dmitry Fleytman wrote:
> From: Dmitry Fleytman
> 
> The virtio-net driver currently negotiates network offloads
> at startup via the feature mechanism and has no way to
> disable and re-enable offloads later.
> This patch introduces a new control command that allows the
> device's network offload state to be reconfigured dynamically.
> The patch also introduces a new feature flag,
> VIRTIO_NET_F_CTRL_GUEST_OFFLOADS.
> 
> Signed-off-by: Dmitry Fleytman

Acked-by: Michael S. Tsirkin

Please pick this up for 1.5.
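For anyone who hasn't followed the control-VQ details, here is roughly what
the new command looks like from the guest side. This is only an illustrative
sketch (the variable names are made up, not taken from the patch); the layout
follows the usual virtio-net control-queue convention of a header, the
command-specific data, and a status byte written back by the device, using
the constants this patch adds:

    /* out: command header (struct virtio_net_ctrl_hdr from hw/virtio-net.h) */
    struct virtio_net_ctrl_hdr hdr = {
        .class = VIRTIO_NET_CTRL_GUEST_OFFLOADS,     /* 5 */
        .cmd   = VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, /* 0 */
    };

    /* out: 64-bit offload bitmap; bit positions reuse the
     * VIRTIO_NET_F_GUEST_* feature bits.  Example: keep guest checksum
     * offload, drop TSO4/TSO6/ECN/UFO. */
    uint64_t offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;

    /* in: one status byte filled in by the device
     * (VIRTIO_NET_OK / VIRTIO_NET_ERR) */
    uint8_t status;

On the device side, virtio_net_handle_offloads() below reads exactly
sizeof(uint64_t) bytes, refuses the command if the backend has no vnet header
support, rejects any bit that was not negotiated at feature time, stores the
result in curr_guest_offloads, and reapplies the tap offload settings.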
> ---
>  hw/pc.h         |  4 +++
>  hw/virtio-net.c | 97 ++++++++++++++++++++++++++++++++++++++++++++++++---------
>  hw/virtio-net.h | 13 ++++++++
>  3 files changed, 99 insertions(+), 15 deletions(-)
>
> diff --git a/hw/pc.h b/hw/pc.h
> index 8e1dd4c..7ca4698 100644
> --- a/hw/pc.h
> +++ b/hw/pc.h
> @@ -221,6 +221,10 @@ int e820_add_entry(uint64_t, uint64_t, uint32_t);
>              .property = "vectors",\
>              /* DEV_NVECTORS_UNSPECIFIED as a uint32_t string */\
>              .value = stringify(0xFFFFFFFF),\
> +        },{ \
> +            .driver = "virtio-net-pci", \
> +            .property = "ctrl_guest_offloads", \
> +            .value = "off", \
>          },{\
>              .driver = "e1000",\
>              .property = "romfile",\
> diff --git a/hw/virtio-net.c b/hw/virtio-net.c
> index 5917740..a92d061 100644
> --- a/hw/virtio-net.c
> +++ b/hw/virtio-net.c
> @@ -360,6 +360,33 @@ static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
>      return features;
>  }
>
> +static void virtio_net_apply_guest_offloads(VirtIONet *n)
> +{
> +    tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
> +            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
> +            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
> +            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
> +            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
> +            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
> +}
> +
> +static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
> +{
> +    static const uint64_t guest_offloads_mask =
> +        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
> +        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
> +        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
> +        (1ULL << VIRTIO_NET_F_GUEST_ECN) |
> +        (1ULL << VIRTIO_NET_F_GUEST_UFO);
> +
> +    return guest_offloads_mask & features;
> +}
> +
> +static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
> +{
> +    return virtio_net_guest_offloads_by_features(n->vdev.guest_features);
> +}
> +
>  static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
>  {
>      VirtIONet *n = to_virtio_net(vdev);
> @@ -371,12 +398,9 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
>      virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));
>
>      if (n->has_vnet_hdr) {
> -        tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
> -                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
> -                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
> -                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
> -                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
> -                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
> +        n->curr_guest_offloads =
> +            virtio_net_guest_offloads_by_features(features);
> +        virtio_net_apply_guest_offloads(n);
>      }
>
>      for (i = 0; i < n->max_queues; i++) {
> @@ -422,6 +446,42 @@ static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
>      return VIRTIO_NET_OK;
>  }
>
> +static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
> +                                      struct iovec *iov, unsigned int iov_cnt)
> +{
> +    uint64_t offloads;
> +    size_t s;
> +
> +    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & n->vdev.guest_features)) {
> +        return VIRTIO_NET_ERR;
> +    }
> +
> +    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
> +    if (s != sizeof(offloads)) {
> +        return VIRTIO_NET_ERR;
> +    }
> +
> +    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
> +        uint64_t supported_offloads;
> +
> +        if (!n->has_vnet_hdr) {
> +            return VIRTIO_NET_ERR;
> +        }
> +
> +        supported_offloads = virtio_net_supported_guest_offloads(n);
> +        if (offloads & ~supported_offloads) {
> +            return VIRTIO_NET_ERR;
> +        }
> +
> +        n->curr_guest_offloads = offloads;
> +        virtio_net_apply_guest_offloads(n);
> +
> +        return VIRTIO_NET_OK;
> +    } else {
> +        return VIRTIO_NET_ERR;
> +    }
> +}
> +
>  static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
>                                   struct iovec *iov, unsigned int iov_cnt)
>  {
> @@ -591,6 +651,8 @@ static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
>              status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
>          } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
>              status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
> +        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
> +            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
>          }
>
>          s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
> @@ -1100,6 +1162,10 @@ static void virtio_net_save(QEMUFile *f, void *opaque)
>              qemu_put_be32(f, n->vqs[i].tx_waiting);
>          }
>      }
> +
> +    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & n->vdev.guest_features) {
> +        qemu_put_be64(f, n->curr_guest_offloads);
> +    }
>  }
>
>  static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
> @@ -1156,15 +1222,6 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
>              error_report("virtio-net: saved image requires vnet_hdr=on");
>              return -1;
>          }
> -
> -        if (n->has_vnet_hdr) {
> -            tap_set_offload(qemu_get_queue(n->nic)->peer,
> -                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
> -                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
> -                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
> -                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_ECN) & 1,
> -                    (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_UFO) & 1);
> -        }
>      }
>
>      if (version_id >= 9) {
> @@ -1198,6 +1255,16 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
>          }
>      }
>
> +    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & n->vdev.guest_features) {
> +        n->curr_guest_offloads = qemu_get_be64(f);
> +    } else {
> +        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
> +    }
> +
> +    if (peer_has_vnet_hdr(n)) {
> +        virtio_net_apply_guest_offloads(n);
> +    }
> +
>      virtio_net_set_queues(n);
>
>      /* Find the first multicast entry in the saved MAC filter */
> diff --git a/hw/virtio-net.h b/hw/virtio-net.h
> index 4d1a8cd..70e362d 100644
> --- a/hw/virtio-net.h
> +++ b/hw/virtio-net.h
> @@ -27,6 +27,8 @@
>  /* The feature bitmap for virtio net */
>  #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
>  #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
> +#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Control channel offload
> +                                             * configuration support */
>  #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
>  #define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */
>  #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
> @@ -182,6 +184,7 @@ typedef struct VirtIONet {
>      uint16_t max_queues;
>      uint16_t curr_queues;
>      size_t config_size;
> +    uint64_t curr_guest_offloads;
>  } VirtIONet;
>
>  #define VIRTIO_NET_CTRL_MAC 1
> @@ -221,6 +224,15 @@ struct virtio_net_ctrl_mq {
>  #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
>  #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
>
> +/*
> + * Control network offloads
> + *
> + * Dynamic offloads are available with the
> + * VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
> + */
> +#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
> + #define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
> +
>  #define DEFINE_VIRTIO_NET_FEATURES(_state, _field) \
>          DEFINE_VIRTIO_COMMON_FEATURES(_state, _field), \
>          DEFINE_PROP_BIT("csum", _state, _field, VIRTIO_NET_F_CSUM, true), \
> @@ -241,6 +253,7 @@ struct virtio_net_ctrl_mq {
>          DEFINE_PROP_BIT("ctrl_vlan", _state, _field, VIRTIO_NET_F_CTRL_VLAN, true), \
>          DEFINE_PROP_BIT("ctrl_rx_extra", _state, _field, VIRTIO_NET_F_CTRL_RX_EXTRA, true), \
>          DEFINE_PROP_BIT("ctrl_mac_addr", _state, _field, VIRTIO_NET_F_CTRL_MAC_ADDR, true), \
> +        DEFINE_PROP_BIT("ctrl_guest_offloads", _state, _field, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true), \
>          DEFINE_PROP_BIT("mq", _state, _field, VIRTIO_NET_F_MQ, false)
>
>  #endif
> --
> 1.8.1.4
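One practical note for testing: since the new flag is exposed through
DEFINE_PROP_BIT, it can be toggled per device on the command line, along the
lines of (the netdev id and the rest of the options here are illustrative
only):

    -device virtio-net-pci,netdev=net0,ctrl_guest_offloads=off

and the hw/pc.h hunk adds the same "off" value to the compat property list,
so older machine types do not advertise the new feature bit to guests.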