* [Patch net-next] net_sched: use idr to allocate bpf filter handles
@ 2017-09-21 23:43 Cong Wang
  2017-09-24  0:15 ` David Miller
  0 siblings, 1 reply; 4+ messages in thread
From: Cong Wang @ 2017-09-21 23:43 UTC (permalink / raw)
  To: netdev; +Cc: Cong Wang, Daniel Borkmann, Chris Mi, Jamal Hadi Salim

Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Chris Mi <chrism@mellanox.com>
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
---
 net/sched/cls_bpf.c | 57 ++++++++++++++++++++++++++---------------------------
 1 file changed, 28 insertions(+), 29 deletions(-)

diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 520c5027646a..4fd352a1778e 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -17,6 +17,7 @@
 #include <linux/skbuff.h>
 #include <linux/filter.h>
 #include <linux/bpf.h>
+#include <linux/idr.h>
 
 #include <net/rtnetlink.h>
 #include <net/pkt_cls.h>
@@ -32,7 +33,7 @@ MODULE_DESCRIPTION("TC BPF based classifier");
 
 struct cls_bpf_head {
 	struct list_head plist;
-	u32 hgen;
+	struct idr handle_idr;
 	struct rcu_head rcu;
 };
 
@@ -238,6 +239,7 @@ static int cls_bpf_init(struct tcf_proto *tp)
 		return -ENOBUFS;
 
 	INIT_LIST_HEAD_RCU(&head->plist);
+	idr_init(&head->handle_idr);
 	rcu_assign_pointer(tp->root, head);
 
 	return 0;
@@ -264,6 +266,9 @@ static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
 
 static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
 {
+	struct cls_bpf_head *head = rtnl_dereference(tp->root);
+
+	idr_remove_ext(&head->handle_idr, prog->handle);
 	cls_bpf_stop_offload(tp, prog);
 	list_del_rcu(&prog->link);
 	tcf_unbind_filter(tp, &prog->res);
@@ -287,6 +292,7 @@ static void cls_bpf_destroy(struct tcf_proto *tp)
 	list_for_each_entry_safe(prog, tmp, &head->plist, link)
 		__cls_bpf_delete(tp, prog);
 
+	idr_destroy(&head->handle_idr);
 	kfree_rcu(head, rcu);
 }
 
@@ -421,27 +427,6 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
 	return 0;
 }
 
-static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
-				   struct cls_bpf_head *head)
-{
-	unsigned int i = 0x80000000;
-	u32 handle;
-
-	do {
-		if (++head->hgen == 0x7FFFFFFF)
-			head->hgen = 1;
-	} while (--i > 0 && cls_bpf_get(tp, head->hgen));
-
-	if (unlikely(i == 0)) {
-		pr_err("Insufficient number of handles\n");
-		handle = 0;
-	} else {
-		handle = head->hgen;
-	}
-
-	return handle;
-}
-
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 			  struct tcf_proto *tp, unsigned long base,
 			  u32 handle, struct nlattr **tca,
@@ -451,6 +436,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 	struct cls_bpf_prog *oldprog = *arg;
 	struct nlattr *tb[TCA_BPF_MAX + 1];
 	struct cls_bpf_prog *prog;
+	unsigned long idr_index;
 	int ret;
 
 	if (tca[TCA_OPTIONS] == NULL)
@@ -476,21 +462,30 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		}
 	}
 
-	if (handle == 0)
-		prog->handle = cls_bpf_grab_new_handle(tp, head);
-	else
+	if (handle == 0) {
+		ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
+				    1, 0x80000000, GFP_KERNEL);
+		if (ret)
+			goto errout;
+		prog->handle = idr_index;
+	} else {
+		if (!oldprog) {
+			ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
+					    handle, handle + 1, GFP_KERNEL);
+			if (ret)
+				goto errout;
+		}
 		prog->handle = handle;
-	if (prog->handle == 0) {
-		ret = -EINVAL;
-		goto errout;
 	}
 
 	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
 	if (ret < 0)
-		goto errout;
+		goto errout_idr;
 
 	ret = cls_bpf_offload(tp, prog, oldprog);
 	if (ret) {
+		if (!oldprog)
+			idr_remove_ext(&head->handle_idr, prog->handle);
 		__cls_bpf_delete_prog(prog);
 		return ret;
 	}
@@ -499,6 +494,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
 	if (oldprog) {
+		idr_replace_ext(&head->handle_idr, prog, handle);
 		list_replace_rcu(&oldprog->link, &prog->link);
 		tcf_unbind_filter(tp, &oldprog->res);
 		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
@@ -509,6 +505,9 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 	*arg = prog;
 	return 0;
 
+errout_idr:
+	if (!oldprog)
+		idr_remove_ext(&head->handle_idr, prog->handle);
 errout:
 	tcf_exts_destroy(&prog->exts);
 	kfree(prog);
-- 
2.13.0
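
For reference, the allocation and teardown pattern the patch switches to can be
sketched in isolation as follows. This is a simplified illustration of the IDR
calls used above (idr_alloc_ext() draws from the half-open range [start, end)),
with hypothetical example_* names rather than the kernel code itself; it assumes
idr_init() has already run, as cls_bpf_init() does in the patch.

#include <linux/idr.h>

/* Simplified sketch of the handle bookkeeping the patch introduces. */
struct example_head {
	struct idr handle_idr;		/* initialized with idr_init() */
};

static int example_new_handle(struct example_head *head, void *prog,
			      u32 *handle)
{
	unsigned long idr_index;
	int ret;

	/* Pick an unused ID from [1, 0x80000000) and bind it to prog. */
	ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
			    1, 0x80000000, GFP_KERNEL);
	if (ret)
		return ret;

	*handle = idr_index;
	return 0;
}

static void example_free_handle(struct example_head *head, u32 handle)
{
	/* Release the ID so later filters can reuse it. */
	idr_remove_ext(&head->handle_idr, handle);
}

When the user supplies an explicit handle, the patch instead reserves exactly
that ID with idr_alloc_ext(..., handle, handle + 1, GFP_KERNEL), and on the
replace path idr_replace_ext() repoints the existing handle at the new filter.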


* Re: [Patch net-next] net_sched: use idr to allocate bpf filter handles
  2017-09-21 23:43 [Patch net-next] net_sched: use idr to allocate bpf filter handles Cong Wang
@ 2017-09-24  0:15 ` David Miller
  2017-09-25 16:46   ` Cong Wang
  0 siblings, 1 reply; 4+ messages in thread
From: David Miller @ 2017-09-24  0:15 UTC (permalink / raw)
  To: xiyou.wangcong; +Cc: netdev, daniel, chrism, jhs

From: Cong Wang <xiyou.wangcong@gmail.com>
Date: Thu, 21 Sep 2017 16:43:00 -0700

> @@ -421,27 +427,6 @@ static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
>  	return 0;
>  }
>  
> -static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
> -				   struct cls_bpf_head *head)
> -{
> -	unsigned int i = 0x80000000;
> -	u32 handle;
> -
> -	do {
> -		if (++head->hgen == 0x7FFFFFFF)
> -			head->hgen = 1;
> -	} while (--i > 0 && cls_bpf_get(tp, head->hgen));
> -
> -	if (unlikely(i == 0)) {
> -		pr_err("Insufficient number of handles\n");
> -		handle = 0;
> -	} else {
> -		handle = head->hgen;
> -	}
> -
> -	return handle;
> -}
> -

If I read the code properly, the existing code allows IDs from '1' to
and including '0x7ffffffe', whereas the new code allows up to and
including '0x7fffffff'.
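
For concreteness, the two ranges can be compared with a small standalone
sketch (plain user-space C that just restates the arithmetic above; the
kernel code is not involved):

#include <stdio.h>

int main(void)
{
	/* Old scheme: "if (++head->hgen == 0x7FFFFFFF) head->hgen = 1;"
	 * means 0x7FFFFFFF itself is never handed out, so the usable
	 * handles are 1..0x7FFFFFFE. */
	unsigned int old_min = 1, old_max = 0x7FFFFFFF - 1;

	/* New scheme: idr_alloc_ext(..., 1, 0x80000000, ...) allocates
	 * from the half-open range [1, 0x80000000), so the usable
	 * handles are 1..0x7FFFFFFF. */
	unsigned int new_min = 1, new_max = 0x80000000u - 1;

	printf("old: %#x..%#x\n", old_min, old_max);
	printf("new: %#x..%#x\n", new_min, new_max);
	return 0;
}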

Whether intentional or not this is a change in behavior.  If it's OK,
it should be at least documented either in a comment or in the
commit log itself.

Similarly for your other scheduler IDR changes.

Thanks.


* Re: [Patch net-next] net_sched: use idr to allocate bpf filter handles
  2017-09-24  0:15 ` David Miller
@ 2017-09-25 16:46   ` Cong Wang
  2017-09-25 16:49     ` Cong Wang
  0 siblings, 1 reply; 4+ messages in thread
From: Cong Wang @ 2017-09-25 16:46 UTC (permalink / raw)
  To: David Miller
  Cc: Linux Kernel Network Developers, Daniel Borkmann, Chris Mi,
	Jamal Hadi Salim

On Sat, Sep 23, 2017 at 5:15 PM, David Miller <davem@davemloft.net> wrote:
> If I read the code properly, the existing code allows IDs from '1' to
> and including '0x7ffffffe', whereas the new code allows up to and
> including '0x7fffffff'.
>
> Whether intentional or not this is a change in behavior.  If it's OK,
> it should be at least documented either in a comment or in the
> commit log itself.
>
> Similarly for your other scheduler IDR changes.

Good catch! Obviously off-by-one. I agree we should keep
the old behavior no matter the reason.

I will update the patches.
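
One way to restore the old upper bound is to cap the range at 0x7FFFFFFF
(exclusive end), so the IDR hands out 1..0x7FFFFFFE exactly as the removed
cls_bpf_grab_new_handle() loop did. A sketch of what the updated call might
look like (an illustration, not the actual v2):

	ret = idr_alloc_ext(&head->handle_idr, prog, &idr_index,
			    1, 0x7FFFFFFF, GFP_KERNEL);
	if (ret)
		goto errout;
	prog->handle = idr_index;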

Thanks.


* Re: [Patch net-next] net_sched: use idr to allocate bpf filter handles
  2017-09-25 16:46   ` Cong Wang
@ 2017-09-25 16:49     ` Cong Wang
  0 siblings, 0 replies; 4+ messages in thread
From: Cong Wang @ 2017-09-25 16:49 UTC (permalink / raw)
  To: David Miller
  Cc: Linux Kernel Network Developers, Daniel Borkmann, Chris Mi,
	Jamal Hadi Salim

On Mon, Sep 25, 2017 at 9:46 AM, Cong Wang <xiyou.wangcong@gmail.com> wrote:
> On Sat, Sep 23, 2017 at 5:15 PM, David Miller <davem@davemloft.net> wrote:
>> If I read the code properly, the existing code allows IDs from '1' to
>> and including '0x7ffffffe', whereas the new code allows up to and
>> including '0x7fffffff'.
>>
>> Whether intentional or not this is a change in behavior.  If it's OK,
>> it should be at least documented either in a comment or in the
>> commit log itself.
>>
>> Similarly for your other scheduler IDR changes.
>
> Good catch! Obviously off-by-one. I agree we should keep
> the old behavior no matter the reason.
>
> I will update the patches.
>

By the way, same problem for commit fe2502e49b58606580c77.
:)
