From: Florian Westphal <fw@strlen.de>
To: <netfilter-devel@vger.kernel.org>
Cc: Florian Westphal <fw@strlen.de>
Subject: [PATCH nf-next 7/7] netfilter: conntrack: remove the percpu dying list
Date: Wed, 9 Feb 2022 17:10:57 +0100 [thread overview]
Message-ID: <20220209161057.30688-8-fw@strlen.de> (raw)
In-Reply-To: <20220209161057.30688-1-fw@strlen.de>
It's no longer needed. Entries that need event redelivery are placed
on the new pernet dying list.
The advantage is that there is no need to take an additional spinlock on
conntrack removal unless event redelivery failed or the conntrack entry
was never added to the table in the first place (confirmed bit not set).
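To make the locking change concrete, a minimal userspace sketch of the
teardown paths after this patch follows (this is not kernel code; struct
ct_entry, destroy_entry and the pthread spinlocks are illustrative
stand-ins for the conntrack entry, nf_ct_destroy path and the per-cpu /
pernet locks). The common case, a confirmed entry whose destroy event was
delivered, takes no list lock at all; an unconfirmed entry takes only the
per-cpu lock; only failed event redelivery takes the pernet
ecache.dying_lock.

/* Userspace model only: names below are hypothetical, not the kernel API. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ct_entry {
	bool confirmed;        /* set once inserted into the hash table  */
	bool needs_redelivery; /* destroy event could not be delivered   */
};

static pthread_spinlock_t pcpu_unconfirmed_lock; /* per-cpu list lock    */
static pthread_spinlock_t pernet_dying_lock;     /* ecache dying list    */

static void destroy_entry(struct ct_entry *ct)
{
	if (!ct->confirmed) {
		/* never made it into the table: unlink from the
		 * per-cpu unconfirmed list, per-cpu lock only
		 */
		pthread_spin_lock(&pcpu_unconfirmed_lock);
		pthread_spin_unlock(&pcpu_unconfirmed_lock);
	} else if (ct->needs_redelivery) {
		/* event redelivery pending: queue on the pernet
		 * dying list under its spinlock
		 */
		pthread_spin_lock(&pernet_dying_lock);
		pthread_spin_unlock(&pernet_dying_lock);
	}
	/* confirmed entry with delivered event: no list lock taken */
}

int main(void)
{
	struct ct_entry fast = { .confirmed = true,  .needs_redelivery = false };
	struct ct_entry slow = { .confirmed = false, .needs_redelivery = false };

	pthread_spin_init(&pcpu_unconfirmed_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&pernet_dying_lock, PTHREAD_PROCESS_PRIVATE);

	destroy_entry(&fast); /* common case: no list lock           */
	destroy_entry(&slow); /* unconfirmed: per-cpu lock only      */
	puts("teardown model done");
	return 0;
}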
Signed-off-by: Florian Westphal <fw@strlen.de>
---
include/net/netns/conntrack.h | 1 -
net/netfilter/nf_conntrack_core.c | 35 ++++------------------------
net/netfilter/nf_conntrack_ecache.c | 1 -
net/netfilter/nf_conntrack_netlink.c | 21 ++++-------------
4 files changed, 9 insertions(+), 49 deletions(-)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 3bb62e938fa9..dd1d096b2ada 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -96,7 +96,6 @@ struct nf_ip_net {
struct ct_pcpu {
spinlock_t lock;
struct hlist_nulls_head unconfirmed;
- struct hlist_nulls_head dying;
};
struct netns_ct {
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 7eefcfa55fc2..9ca862bc5d7d 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -512,21 +512,6 @@ clean_from_lists(struct nf_conn *ct)
nf_ct_remove_expectations(ct);
}
-/* must be called with local_bh_disable */
-static void nf_ct_add_to_dying_list(struct nf_conn *ct)
-{
- struct ct_pcpu *pcpu;
-
- /* add this conntrack to the (per cpu) dying list */
- ct->cpu = smp_processor_id();
- pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
- spin_lock(&pcpu->lock);
- hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
- &pcpu->dying);
- spin_unlock(&pcpu->lock);
-}
-
/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
@@ -543,11 +528,11 @@ static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
}
/* must be called with local_bh_disable */
-static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
+static void nf_ct_del_from_unconfirmed_list(struct nf_conn *ct)
{
struct ct_pcpu *pcpu;
- /* We overload first tuple to link into unconfirmed or dying list.*/
+ /* We overload first tuple to link into unconfirmed list.*/
pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
spin_lock(&pcpu->lock);
@@ -635,7 +620,8 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
*/
nf_ct_remove_expectations(ct);
- nf_ct_del_from_dying_or_unconfirmed_list(ct);
+ if (unlikely(!nf_ct_is_confirmed(ct)))
+ nf_ct_del_from_unconfirmed_list(ct);
local_bh_enable();
@@ -673,23 +659,18 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
local_bh_disable();
__nf_ct_delete_from_lists(ct);
- nf_ct_add_to_dying_list(ct);
local_bh_enable();
}
static void nf_ct_add_to_ecache_list(struct nf_conn *ct)
{
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct nf_conntrack_net *cnet = nf_ct_pernet(nf_ct_net(ct));
spin_lock(&cnet->ecache.dying_lock);
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&cnet->ecache.dying_list);
spin_unlock(&cnet->ecache.dying_lock);
-#else
- nf_ct_add_to_dying_list(ct);
-#endif
}
bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
@@ -1011,7 +992,6 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
nf_conntrack_get(&ct->ct_general);
nf_ct_acct_merge(ct, ctinfo, loser_ct);
- nf_ct_add_to_dying_list(loser_ct);
nf_ct_put(loser_ct);
nf_ct_set(skb, ct, ctinfo);
@@ -1144,7 +1124,6 @@ nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
return ret;
drop:
- nf_ct_add_to_dying_list(loser_ct);
NF_CT_STAT_INC(net, drop);
NF_CT_STAT_INC(net, insert_failed);
return NF_DROP;
@@ -1211,10 +1190,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
* user context, else we insert an already 'dead' hash, blocking
* further use of that particular connection -JM.
*/
- nf_ct_del_from_dying_or_unconfirmed_list(ct);
+ nf_ct_del_from_unconfirmed_list(ct);
if (unlikely(nf_ct_is_dying(ct))) {
- nf_ct_add_to_dying_list(ct);
NF_CT_STAT_INC(net, insert_failed);
goto dying;
}
@@ -1238,7 +1216,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
goto out;
if (chainlen++ > max_chainlen) {
chaintoolong:
- nf_ct_add_to_dying_list(ct);
NF_CT_STAT_INC(net, chaintoolong);
NF_CT_STAT_INC(net, insert_failed);
ret = NF_DROP;
@@ -2752,7 +2729,6 @@ void nf_conntrack_init_end(void)
* We need to use special "null" values, not used in hash table
*/
#define UNCONFIRMED_NULLS_VAL ((1<<30)+0)
-#define DYING_NULLS_VAL ((1<<30)+1)
int nf_conntrack_init_net(struct net *net)
{
@@ -2773,7 +2749,6 @@ int nf_conntrack_init_net(struct net *net)
spin_lock_init(&pcpu->lock);
INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
- INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
}
net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5c8918857dfc..7a948d23c44f 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -95,7 +95,6 @@ static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) {
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
- hlist_nulls_add_fake(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
nf_ct_put(ct);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 831e717c5847..1ff37e8f7838 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -62,7 +62,6 @@ struct ctnetlink_list_dump_ctx {
struct nf_conn *last;
unsigned int cpu;
bool done;
- bool retrans_done;
};
static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
@@ -1751,13 +1750,12 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb,
}
static int
-ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
+ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
{
struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
struct nf_conn *ct, *last;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- struct hlist_nulls_head *list;
struct net *net = sock_net(skb->sk);
int res, cpu;
@@ -1774,12 +1772,11 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
spin_lock_bh(&pcpu->lock);
- list = dying ? &pcpu->dying : &pcpu->unconfirmed;
restart:
- hlist_nulls_for_each_entry(h, n, list, hnnode) {
+ hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
- res = ctnetlink_dump_one_entry(skb, cb, ct, dying);
+ res = ctnetlink_dump_one_entry(skb, cb, ct, false);
if (res < 0) {
ctx->cpu = cpu;
spin_unlock_bh(&pcpu->lock);
@@ -1810,9 +1807,6 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
- if (ctx->retrans_done)
- return ctnetlink_dump_list(skb, cb, true);
-
ctx->last = NULL;
ecache_net = nf_conn_pernet_ecache(net);
spin_lock_bh(&ecache_net->dying_lock);
@@ -1836,11 +1830,10 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
last = NULL;
}
- ctx->retrans_done = true;
spin_unlock_bh(&ecache_net->dying_lock);
nf_ct_put(last);
- return ctnetlink_dump_list(skb, cb, true);
+ return skb->len;
}
static int ctnetlink_get_ct_dying(struct sk_buff *skb,
@@ -1858,12 +1851,6 @@ static int ctnetlink_get_ct_dying(struct sk_buff *skb,
return -EOPNOTSUPP;
}
-static int
-ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
-{
- return ctnetlink_dump_list(skb, cb, false);
-}
-
static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
const struct nfnl_info *info,
const struct nlattr * const cda[])
--
2.34.1