From: wenxu@ucloud.cn
To: pablo@netfilter.org, fw@strlen.de, jakub.kicinski@netronome.com
Cc: netfilter-devel@vger.kernel.org, netdev@vger.kernel.org
Subject: [PATCH net-next v4 3/3] netfilter: nf_tables_offload: support indr block call
Date: Sun, 28 Jul 2019 14:52:49 +0800
Message-Id: <1564296769-32294-4-git-send-email-wenxu@ucloud.cn>
In-Reply-To: <1564296769-32294-1-git-send-email-wenxu@ucloud.cn>
References: <1564296769-32294-1-git-send-email-wenxu@ucloud.cn>

From: wenxu <wenxu@ucloud.cn>

Add indirect block call support to nf_tables offload, so that base
chains can be offloaded on vlan and tunnel devices, which provide no
ndo_setup_tc of their own.

nft add table netdev firewall

nft add chain netdev firewall aclout { type filter hook ingress offload device mlx_pf0vf0 priority - 300 \; }
nft add rule netdev firewall aclout ip daddr 10.0.0.1 fwd to vlan0

nft add chain netdev firewall aclin { type filter hook ingress device vlan0 priority - 300 \; }
nft add rule netdev firewall aclin ip daddr 10.0.0.7 fwd to mlx_pf0vf0

Signed-off-by: wenxu <wenxu@ucloud.cn>
---
v3: use subsys_initcall() for init_flow_indr_rhashtable
v4: guarantee that only one offload base chain is used per indirect
    device. If the indr_block_cmd bind fails, return unsupported.
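Note: the indirect path added below only takes effect once a hardware
driver has registered an indirect block callback for the vlan/tunnel
device through the flow_offload infrastructure moved out of tc earlier
in this series. A rough driver-side sketch follows; it is not part of
this patch, the foo_* names are placeholders, and the
__flow_indr_block_cb_register() helper name is assumed to match the tc
helper it replaces.

#include <linux/netdevice.h>
#include <net/flow_offload.h>

/* Hypothetical driver state; "foo" names are placeholders. */
struct foo_priv {
	void *hw;	/* hardware/driver context */
};

static int foo_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
				enum tc_setup_type type, void *type_data)
{
	struct flow_block_offload *bo = type_data;
	struct foo_priv *priv = cb_priv;

	if (type != TC_SETUP_BLOCK)
		return -EOPNOTSUPP;

	switch (bo->command) {
	case FLOW_BLOCK_BIND:
		/* Allocate a flow_block_cb, add it to bo->cb_list and
		 * start programming rules for 'netdev' into priv->hw.
		 */
		return 0;
	case FLOW_BLOCK_UNBIND:
		/* Tear down the hardware state for this block. */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_indr_register(struct foo_priv *priv, struct net_device *netdev)
{
	/* After this, flow_indr_block_dev_lookup(netdev) in
	 * nft_indr_block_offload_cmd() finds the device and relays the
	 * bind/unbind to foo_indr_setup_tc_cb() instead of returning
	 * -EOPNOTSUPP.
	 */
	return __flow_indr_block_cb_register(netdev, priv,
					     foo_indr_setup_tc_cb, priv);
}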
 net/netfilter/nf_tables_offload.c | 131 +++++++++++++++++++++++++++++++-------
 1 file changed, 107 insertions(+), 24 deletions(-)

diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 64f5fd5..19214ad 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -171,24 +171,123 @@ static int nft_flow_offload_unbind(struct flow_block_offload *bo,
 	return 0;
 }
 
+static int nft_block_setup(struct nft_base_chain *basechain,
+			   struct flow_block_offload *bo,
+			   enum flow_block_command cmd)
+{
+	int err;
+
+	switch (cmd) {
+	case FLOW_BLOCK_BIND:
+		err = nft_flow_offload_bind(bo, basechain);
+		break;
+	case FLOW_BLOCK_UNBIND:
+		err = nft_flow_offload_unbind(bo, basechain);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		err = -EOPNOTSUPP;
+	}
+
+	return err;
+}
+
+static int nft_block_offload_cmd(struct nft_base_chain *chain,
+				 struct net_device *dev,
+				 enum flow_block_command cmd)
+{
+	struct netlink_ext_ack extack = {};
+	struct flow_block_offload bo = {};
+	int err;
+
+	bo.net = dev_net(dev);
+	bo.block = &chain->flow_block;
+	bo.command = cmd;
+	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+	bo.extack = &extack;
+	INIT_LIST_HEAD(&bo.cb_list);
+
+	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo);
+	if (err < 0)
+		return err;
+
+	return nft_block_setup(chain, &bo, cmd);
+}
+
+static void nft_indr_block_ing_cmd(struct net_device *dev,
+				   struct flow_block *flow_block,
+				   struct flow_indr_block_cb *indr_block_cb,
+				   enum flow_block_command cmd)
+{
+	struct netlink_ext_ack extack = {};
+	struct flow_block_offload bo = {};
+	struct nft_base_chain *chain;
+
+	if (!flow_block)
+		return;
+
+	chain = container_of(flow_block, struct nft_base_chain, flow_block);
+
+	bo.net = dev_net(dev);
+	bo.block = flow_block;
+	bo.command = cmd;
+	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+	bo.extack = &extack;
+	INIT_LIST_HEAD(&bo.cb_list);
+
+	indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK, &bo);
+
+	nft_block_setup(chain, &bo, cmd);
+}
+
+static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
+				      struct net_device *dev,
+				      enum flow_block_command cmd)
+{
+	struct flow_indr_block_cb *indr_block_cb;
+	struct flow_indr_block_dev *indr_dev;
+	struct flow_block_offload bo = {};
+	struct netlink_ext_ack extack = {};
+
+	bo.net = dev_net(dev);
+	bo.block = &chain->flow_block;
+	bo.command = cmd;
+	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
+	bo.extack = &extack;
+	INIT_LIST_HEAD(&bo.cb_list);
+
+	indr_dev = flow_indr_block_dev_lookup(dev);
+	if (!indr_dev)
+		return -EOPNOTSUPP;
+
+	indr_dev->flow_block = cmd == FLOW_BLOCK_BIND ? &chain->flow_block : NULL;
+	indr_dev->ing_cmd_cb = cmd == FLOW_BLOCK_BIND ?
+			       nft_indr_block_ing_cmd : NULL;
+
+	list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
+		indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
+				  &bo);
+
+	if (list_empty(&bo.cb_list))
+		return -EOPNOTSUPP;
+
+	return nft_block_setup(chain, &bo, cmd);
+}
+
 #define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
 
 static int nft_flow_offload_chain(struct nft_trans *trans,
 				  enum flow_block_command cmd)
 {
 	struct nft_chain *chain = trans->ctx.chain;
-	struct netlink_ext_ack extack = {};
-	struct flow_block_offload bo = {};
 	struct nft_base_chain *basechain;
 	struct net_device *dev;
-	int err;
 
 	if (!nft_is_base_chain(chain))
 		return -EOPNOTSUPP;
 
 	basechain = nft_base_chain(chain);
 	dev = basechain->ops.dev;
-	if (!dev || !dev->netdev_ops->ndo_setup_tc)
+	if (!dev)
 		return -EOPNOTSUPP;
 
 	/* Only default policy to accept is supported for now. */
@@ -197,26 +296,10 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
 	    nft_trans_chain_policy(trans) != NF_ACCEPT)
 		return -EOPNOTSUPP;
 
-	bo.command = cmd;
-	bo.block = &basechain->flow_block;
-	bo.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
-	bo.extack = &extack;
-	INIT_LIST_HEAD(&bo.cb_list);
-
-	err = dev->netdev_ops->ndo_setup_tc(dev, FLOW_SETUP_BLOCK, &bo);
-	if (err < 0)
-		return err;
-
-	switch (cmd) {
-	case FLOW_BLOCK_BIND:
-		err = nft_flow_offload_bind(&bo, basechain);
-		break;
-	case FLOW_BLOCK_UNBIND:
-		err = nft_flow_offload_unbind(&bo, basechain);
-		break;
-	}
-
-	return err;
+	if (dev->netdev_ops->ndo_setup_tc)
+		return nft_block_offload_cmd(basechain, dev, cmd);
+	else
+		return nft_indr_block_offload_cmd(basechain, dev, cmd);
 }
 
 int nft_flow_rule_offload_commit(struct net *net)
-- 
1.8.3.1