From: Jakub Kicinski
To: alexei.starovoitov@gmail.com, daniel@iogearbox.net, brouer@redhat.com,
	Björn Töpel, Magnus Karlsson
Cc: oss-drivers@netronome.com, netdev@vger.kernel.org, Jakub Kicinski
Subject: [RFC bpf-next 3/6] xsk: don't allow umem replace at stack level
Date: Thu, 26 Jul 2018 14:41:45 -0700
Message-ID: <20180726214148.2087-4-jakub.kicinski@netronome.com>
In-Reply-To: <20180726214148.2087-1-jakub.kicinski@netronome.com>
References: <20180726214148.2087-1-jakub.kicinski@netronome.com>

Currently drivers have to check whether they already have a umem
installed for a given queue and return an error if so.  Make better
use of XDP_QUERY_XSK_UMEM and move this check to the core.

We need to keep the rtnl lock held across the two calls now.

Signed-off-by: Jakub Kicinski
Reviewed-by: Quentin Monnet
---
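Note (not part of the patch): a rough sketch of what the driver side
can look like once the core does the busy check.  An ndo_bpf
implementation only has to report its currently installed umem for
XDP_QUERY_XSK_UMEM and can drop its own "umem already installed"
check from the XDP_SETUP_XSK_UMEM path; xdp_umem_query() turns any
non-NULL answer into -EBUSY before the setup call is made.  All my_*
names below are made up for illustration and do not come from any
real driver; a real file would include <linux/netdevice.h> and
<net/xdp_sock.h>.

	struct my_ring {
		struct xdp_umem *umem;	/* NULL while no umem is installed */
	};

	struct my_netdev {
		struct my_ring *rings;
		unsigned int num_rings;
	};

	/* stand-in for the driver's real umem setup/teardown */
	static int my_setup_umem(struct my_netdev *priv, struct xdp_umem *umem,
				 u16 queue_id)
	{
		if (queue_id >= priv->num_rings)
			return -EINVAL;
		priv->rings[queue_id].umem = umem;
		return 0;
	}

	static int my_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
	{
		struct my_netdev *priv = netdev_priv(dev);

		switch (bpf->command) {
		case XDP_QUERY_XSK_UMEM:
			if (bpf->xsk.queue_id >= priv->num_rings)
				return -EINVAL;
			/* the core treats a non-NULL answer as busy */
			bpf->xsk.umem = priv->rings[bpf->xsk.queue_id].umem;
			return 0;
		case XDP_SETUP_XSK_UMEM:
			/* no driver-local "already in use" check needed */
			return my_setup_umem(priv, bpf->xsk.umem,
					     bpf->xsk.queue_id);
		default:
			return -EINVAL;
		}
	}
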
 include/linux/netdevice.h |  7 ++++---
 net/xdp/xdp_umem.c        | 37 ++++++++++++++++++++++++++++---------
 2 files changed, 32 insertions(+), 12 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6717dc7e8fbf..a5a34f0fb485 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -872,10 +872,10 @@ struct netdev_bpf {
 		struct {
 			struct bpf_offloaded_map *offmap;
 		};
-		/* XDP_SETUP_XSK_UMEM */
+		/* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
 		struct {
-			struct xdp_umem *umem;
-			u16 queue_id;
+			struct xdp_umem *umem; /* out for query*/
+			u16 queue_id; /* in for query */
 		} xsk;
 	};
 };
@@ -3566,6 +3566,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
 		      int fd, u32 flags);
 u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
 		    enum bpf_netdev_command cmd);
+int xdp_umem_query(struct net_device *dev, u16 queue_id);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index c199d66b5f3f..911ca6d3cb5a 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -11,6 +11,8 @@
 #include <linux/slab.h>
 #include <linux/bpf.h>
 #include <linux/mm.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
 
 #include "xdp_umem.h"
 #include "xsk_queue.h"
@@ -40,6 +42,21 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 	}
 }
 
+int xdp_umem_query(struct net_device *dev, u16 queue_id)
+{
+	struct netdev_bpf bpf;
+
+	ASSERT_RTNL();
+
+	memset(&bpf, 0, sizeof(bpf));
+	bpf.command = XDP_QUERY_XSK_UMEM;
+	bpf.xsk.queue_id = queue_id;
+
+	if (!dev->netdev_ops->ndo_bpf)
+		return 0;
+	return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
+}
+
 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 			u32 queue_id, u16 flags)
 {
@@ -62,28 +79,30 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 	bpf.command = XDP_QUERY_XSK_UMEM;
 
 	rtnl_lock();
-	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-	rtnl_unlock();
-
-	if (err)
-		return force_zc ? -ENOTSUPP : 0;
+	err = xdp_umem_query(dev, queue_id);
+	if (err) {
+		err = err < 0 ? -ENOTSUPP : -EBUSY;
+		goto err_rtnl_unlock;
+	}
 
 	bpf.command = XDP_SETUP_XSK_UMEM;
 	bpf.xsk.umem = umem;
 	bpf.xsk.queue_id = queue_id;
 
-	rtnl_lock();
 	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
-	rtnl_unlock();
-
 	if (err)
-		return force_zc ? err : 0; /* fail or fallback */
+		goto err_rtnl_unlock;
+	rtnl_unlock();
 
 	dev_hold(dev);
 	umem->dev = dev;
 	umem->queue_id = queue_id;
 	umem->zc = true;
 	return 0;
+
+err_rtnl_unlock:
+	rtnl_unlock();
+	return force_zc ? err : 0; /* fail or fallback */
 }
 
 static void xdp_umem_clear_dev(struct xdp_umem *umem)
-- 
2.17.1