* [PATCH nf-next 2/4] netfilter: nft_set_rbtree: check for partial overlaps in anonymous sets
2026-01-28 1:42 [PATCH nf-next 0/4] nf_tables: complete interval overlap detection Pablo Neira Ayuso
2026-01-28 1:42 ` [PATCH nf-next 1/4] netfilter: nft_set_rbtree: fix bogus EEXIST with NLM_F_CREATE with null interval Pablo Neira Ayuso
@ 2026-01-28 1:42 ` Pablo Neira Ayuso
2026-01-28 1:42 ` [PATCH nf-next 3/4] netfilter: nft_set_rbtree: validate element belonging to interval Pablo Neira Ayuso
` (2 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Pablo Neira Ayuso @ 2026-01-28 1:42 UTC (permalink / raw)
To: netfilter-devel
Userspace provides an optimized representation in case intervals are
adjacent, where the end element is omitted.
The existing partial overlap detection logic skips anonymous set checks
on start elements for this reason.
However, it is possible to add intervals that overlap in this anonymous
set, where two start elements share the same key, eg. A-B, A-C where C < B.
start end
A B
start end
A C
Restore the check on overlapping start elements to report an overlap.
Fixes: 7c84d41416d8 ("netfilter: nft_set_rbtree: Detect partial overlaps on insertion")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
net/netfilter/nft_set_rbtree.c | 29 ++++++++++++++++++++++++-----
1 file changed, 24 insertions(+), 5 deletions(-)
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 345d51dc4a89..0581184cacf9 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -267,11 +267,22 @@ static bool nft_rbtree_update_first(const struct nft_set *set,
return false;
}
+static struct nft_rbtree_elem *nft_rbtree_prev_active(struct nft_rbtree_elem *rbe)
+{
+ struct rb_node *node;
+
+ node = rb_prev(&rbe->node);
+ if (!node)
+ return NULL;
+
+ return rb_entry(node, struct nft_rbtree_elem, node);
+}
+
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_elem_priv **elem_priv)
{
- struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+ struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL, *rbe_prev;
struct rb_node *node, *next, *parent, **p, *first = NULL;
struct nft_rbtree *priv = nft_set_priv(set);
u8 cur_genmask = nft_genmask_cur(net);
@@ -409,11 +420,19 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
/* - new start element with existing closest, less or equal key value
* being a start element: partial overlap, reported as -ENOTEMPTY.
* Anonymous sets allow for two consecutive start element since they
- * are constant, skip them to avoid bogus overlap reports.
+ * are constant, but validate that this new start element does not
+ * sit in between existing start and end elements: partial overlap,
+ * reported as -ENOTEMPTY.
*/
- if (!nft_set_is_anonymous(set) && rbe_le &&
- nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
- return -ENOTEMPTY;
+ if (rbe_le &&
+ nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new)) {
+ if (!nft_set_is_anonymous(set))
+ return -ENOTEMPTY;
+
+ rbe_prev = nft_rbtree_prev_active(rbe_le);
+ if (rbe_prev && nft_rbtree_interval_end(rbe_prev))
+ return -ENOTEMPTY;
+ }
/* - new end element with existing closest, less or equal key value
* being a end element: partial overlap, reported as -ENOTEMPTY.
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH nf-next 3/4] netfilter: nft_set_rbtree: validate element belonging to interval
2026-01-28 1:42 [PATCH nf-next 0/4] nf_tables: complete interval overlap detection Pablo Neira Ayuso
2026-01-28 1:42 ` [PATCH nf-next 1/4] netfilter: nft_set_rbtree: fix bogus EEXIST with NLM_F_CREATE with null interval Pablo Neira Ayuso
2026-01-28 1:42 ` [PATCH nf-next 2/4] netfilter: nft_set_rbtree: check for partial overlaps in anonymous sets Pablo Neira Ayuso
@ 2026-01-28 1:42 ` Pablo Neira Ayuso
2026-01-28 1:42 ` [PATCH nf-next 4/4] netfilter: nft_set_rbtree: validate open interval overlap Pablo Neira Ayuso
2026-01-28 15:45 ` [PATCH nf-next 0/4] nf_tables: complete interval overlap detection Florian Westphal
4 siblings, 0 replies; 7+ messages in thread
From: Pablo Neira Ayuso @ 2026-01-28 1:42 UTC (permalink / raw)
To: netfilter-devel
The existing partial overlap detection does not check if the elements
belong to the interval, eg.
add element inet x y { 1.1.1.1-2.2.2.2, 4.4.4.4-5.5.5.5 }
add element inet x y { 1.1.1.1-5.5.5.5 } => this should fail: ENOTEMPTY
Similar situation occurs with deletions:
add element inet x y { 1.1.1.1-2.2.2.2, 4.4.4.4-5.5.5.5}
delete element inet x y { 1.1.1.1-5.5.5.5 } => this should fail: ENOENT
This currently works via mitigation by nft in userspace, which is
performing the overlap detection before sending the elements to the
kernel. This requires a previous netlink dump of the set content which
slows down incremental updates on interval sets, because a netlink set
content dump is needed.
This patch extends the existing overlap detection to track the most
recent start element that already exists. The pointer to the existing
start element is stored as a cookie (no pointer dereference is ever
possible). If the end element is added and it already exists, then
check that the existing end element is adjacent to the already existing
start element. Similar logic applies to element deactivation.
There are still a few more corner cases of overlap detection related to
the open interval that are addressed in follow up patches.
Fixes: 7c84d41416d8 ("netfilter: nft_set_rbtree: Detect partial overlaps on insertion")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
net/netfilter/nft_set_rbtree.c | 127 ++++++++++++++++++++++++++++++++-
1 file changed, 126 insertions(+), 1 deletion(-)
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 0581184cacf9..6580b8e2ec25 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -33,6 +33,7 @@ struct nft_rbtree {
rwlock_t lock;
struct nft_array __rcu *array;
struct nft_array *array_next;
+ unsigned long start_rbe_cookie;
unsigned long last_gc;
};
@@ -278,6 +279,67 @@ static struct nft_rbtree_elem *nft_rbtree_prev_active(struct nft_rbtree_elem *rb
return rb_entry(node, struct nft_rbtree_elem, node);
}
+static struct nft_rbtree_elem *
+__nft_rbtree_next_active(struct rb_node *node, u8 genmask)
+{
+ struct nft_rbtree_elem *next_rbe;
+
+ while (node) {
+ next_rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ if (!nft_set_elem_active(&next_rbe->ext, genmask)) {
+ node = rb_next(node);
+ continue;
+ }
+
+ return next_rbe;
+ }
+
+ return NULL;
+}
+
+static struct nft_rbtree_elem *
+nft_rbtree_next_active(struct nft_rbtree_elem *rbe, u8 genmask)
+{
+ return __nft_rbtree_next_active(rb_next(&rbe->node), genmask);
+}
+
+static void nft_rbtree_set_start_cookie(struct nft_rbtree *priv,
+ const struct nft_rbtree_elem *rbe)
+{
+ priv->start_rbe_cookie = (unsigned long)rbe;
+}
+
+static bool nft_rbtree_cmp_start_cookie(struct nft_rbtree *priv,
+ const struct nft_rbtree_elem *rbe)
+{
+ return priv->start_rbe_cookie == (unsigned long)rbe;
+}
+
+static bool nft_rbtree_insert_same_interval(const struct net *net,
+ struct nft_rbtree *priv,
+ struct nft_rbtree_elem *rbe)
+{
+ u8 genmask = nft_genmask_next(net);
+ struct nft_rbtree_elem *next_rbe;
+
+ if (!priv->start_rbe_cookie)
+ return true;
+
+ next_rbe = nft_rbtree_next_active(rbe, genmask);
+ if (next_rbe) {
+ /* Closest start element differs from last element added. */
+ if (nft_rbtree_interval_start(next_rbe) &&
+ nft_rbtree_cmp_start_cookie(priv, next_rbe)) {
+ priv->start_rbe_cookie = 0;
+ return true;
+ }
+ }
+
+ priv->start_rbe_cookie = 0;
+
+ return false;
+}
+
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_elem_priv **elem_priv)
@@ -393,12 +455,18 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
}
}
+ if (nft_rbtree_interval_null(set, new))
+ priv->start_rbe_cookie = 0;
+ else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie)
+ priv->start_rbe_cookie = 0;
+
/* - new start element matching existing start element: full overlap
* reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
*/
if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
*elem_priv = &rbe_ge->priv;
+ nft_rbtree_set_start_cookie(priv, rbe_ge);
return -EEXIST;
}
@@ -414,6 +482,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
return -ECANCELED;
*elem_priv = &rbe_le->priv;
+
+ /* - start and end element belong to the same interval. */
+ if (!nft_rbtree_insert_same_interval(net, priv, rbe_le))
+ return -ENOTEMPTY;
+
return -EEXIST;
}
@@ -603,6 +676,48 @@ static void nft_rbtree_activate(const struct net *net,
nft_clear(net, &rbe->ext);
}
+static struct nft_rbtree_elem *
+nft_rbtree_next_inactive(struct nft_rbtree_elem *rbe, u8 genmask)
+{
+ struct nft_rbtree_elem *next_rbe;
+ struct rb_node *node;
+
+ node = rb_next(&rbe->node);
+ if (node) {
+ next_rbe = rb_entry(node, struct nft_rbtree_elem, node);
+ if (nft_rbtree_interval_start(next_rbe) &&
+ !nft_set_elem_active(&next_rbe->ext, genmask))
+ return next_rbe;
+ }
+
+ return NULL;
+}
+
+static bool nft_rbtree_deactivate_same_interval(const struct net *net,
+ struct nft_rbtree *priv,
+ struct nft_rbtree_elem *rbe)
+{
+ u8 genmask = nft_genmask_next(net);
+ struct nft_rbtree_elem *next_rbe;
+
+ if (!priv->start_rbe_cookie)
+ return true;
+
+ next_rbe = nft_rbtree_next_inactive(rbe, genmask);
+ if (next_rbe) {
+ /* Closest start element differs from last element added. */
+ if (nft_rbtree_interval_start(next_rbe) &&
+ nft_rbtree_cmp_start_cookie(priv, next_rbe)) {
+ priv->start_rbe_cookie = 0;
+ return true;
+ }
+ }
+
+ priv->start_rbe_cookie = 0;
+
+ return false;
+}
+
static void nft_rbtree_flush(const struct net *net,
const struct nft_set *set,
struct nft_elem_priv *elem_priv)
@@ -617,12 +732,16 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_rbtree_elem *rbe, *this = nft_elem_priv_cast(elem->priv);
- const struct nft_rbtree *priv = nft_set_priv(set);
+ struct nft_rbtree *priv = nft_set_priv(set);
const struct rb_node *parent = priv->root.rb_node;
u8 genmask = nft_genmask_next(net);
u64 tstamp = nft_net_tstamp(net);
int d;
+ if (nft_rbtree_interval_start(this) ||
+ nft_rbtree_interval_null(set, this))
+ priv->start_rbe_cookie = 0;
+
if (nft_array_may_resize(set) < 0)
return NULL;
@@ -650,6 +769,12 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
parent = parent->rb_left;
continue;
}
+
+ if (nft_rbtree_interval_start(rbe))
+ nft_rbtree_set_start_cookie(priv, rbe);
+ else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe))
+ return NULL;
+
nft_rbtree_flush(net, set, &rbe->priv);
return &rbe->priv;
}
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH nf-next 4/4] netfilter: nft_set_rbtree: validate open interval overlap
2026-01-28 1:42 [PATCH nf-next 0/4] nf_tables: complete interval overlap detection Pablo Neira Ayuso
` (2 preceding siblings ...)
2026-01-28 1:42 ` [PATCH nf-next 3/4] netfilter: nft_set_rbtree: validate element belonging to interval Pablo Neira Ayuso
@ 2026-01-28 1:42 ` Pablo Neira Ayuso
2026-01-30 12:34 ` Florian Westphal
2026-01-28 15:45 ` [PATCH nf-next 0/4] nf_tables: complete interval overlap detection Florian Westphal
4 siblings, 1 reply; 7+ messages in thread
From: Pablo Neira Ayuso @ 2026-01-28 1:42 UTC (permalink / raw)
To: netfilter-devel
Open intervals do not have an end element, in particular an open
interval at the end of the set is hard to validate because it is
lacking the end element, and interval validation relies on such end
element to perform the checks.
This patch adds a new flag field to struct nft_set_elem, this is not an
issue because this is a temporary object that is allocated in the stack
from the insert/deactivate path. This flag field is used to specify that
this is the last element in this add/delete command.
The last flag is used, in combination with the start element cookie, to
check if there is a partial overlap, eg.
Already exists: 255.255.255.0-255.255.255.254
Add interval: 255.255.255.0-255.255.255.255
~~~~~~~~~~~~~
start element overlap
Basically, the idea is to check for an existing end element in the set
if there is an overlap with an existing start element.
However, the last open interval can come in any position in the add
command, the corner case can get a bit more complicated:
Already exists: 255.255.255.0-255.255.255.254
Add intervals: 255.255.255.0-255.255.255.255,255.255.255.0-255.255.255.254
~~~~~~~~~~~~~
start element overlap
To catch this overlap, annotate that the new start element is a possible
overlap, then report the overlap if the next element is another start
element that confirms that previous element in an open interval at the
end of the set.
For deletions, do not update the start cookie when deleting an open interval,
otherwise this can trigger spurious EEXIST when adding new elements.
Fixes: 7c84d41416d8 ("netfilter: nft_set_rbtree: Detect partial overlaps on insertion")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
include/net/netfilter/nf_tables.h | 4 ++
net/netfilter/nf_tables_api.c | 21 ++++++++--
net/netfilter/nft_set_rbtree.c | 70 +++++++++++++++++++++++++++----
3 files changed, 82 insertions(+), 13 deletions(-)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index f1b67b40dd4d..05f57ba62244 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -278,6 +278,8 @@ struct nft_userdata {
unsigned char data[];
};
+#define NFT_SET_ELEM_INTERNAL_LAST 0x1
+
/* placeholder structure for opaque set element backend representation. */
struct nft_elem_priv { };
@@ -287,6 +289,7 @@ struct nft_elem_priv { };
* @key: element key
* @key_end: closing element key
* @data: element data
+ * @flags: flags
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -302,6 +305,7 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} data;
+ u32 flags;
struct nft_elem_priv *priv;
};
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index a3649d88ac64..4cdb3691f9f8 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -7269,7 +7269,8 @@ static u32 nft_set_maxsize(const struct nft_set *set)
}
static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
- const struct nlattr *attr, u32 nlmsg_flags)
+ const struct nlattr *attr, u32 nlmsg_flags,
+ bool last)
{
struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {};
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
@@ -7555,6 +7556,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
if (flags)
*nft_set_ext_flags(ext) = flags;
+ if (last)
+ elem.flags = NFT_SET_ELEM_INTERNAL_LAST;
+ else
+ elem.flags = 0;
+
if (obj)
*nft_set_ext_obj(ext) = obj;
@@ -7718,7 +7724,8 @@ static int nf_tables_newsetelem(struct sk_buff *skb,
nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla);
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
- err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags);
+ err = nft_add_set_elem(&ctx, set, attr, info->nlh->nlmsg_flags,
+ nla_is_last(attr, rem));
if (err < 0) {
NL_SET_BAD_ATTR(extack, attr);
return err;
@@ -7842,7 +7849,7 @@ static void nft_trans_elems_destroy_abort(const struct nft_ctx *ctx,
}
static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
- const struct nlattr *attr)
+ const struct nlattr *attr, bool last)
{
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
struct nft_set_ext_tmpl tmpl;
@@ -7910,6 +7917,11 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
if (flags)
*nft_set_ext_flags(ext) = flags;
+ if (last)
+ elem.flags = NFT_SET_ELEM_INTERNAL_LAST;
+ else
+ elem.flags = 0;
+
trans = nft_trans_elem_alloc(ctx, NFT_MSG_DELSETELEM, set);
if (trans == NULL)
goto fail_trans;
@@ -8057,7 +8069,8 @@ static int nf_tables_delsetelem(struct sk_buff *skb,
return nft_set_flush(&ctx, set, genmask);
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
- err = nft_del_setelem(&ctx, set, attr);
+ err = nft_del_setelem(&ctx, set, attr,
+ nla_is_last(attr, rem));
if (err == -ENOENT &&
NFNL_MSG_TYPE(info->nlh->nlmsg_type) == NFT_MSG_DESTROYSETELEM)
continue;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 6580b8e2ec25..fc9f1e12a43d 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -309,9 +309,20 @@ static void nft_rbtree_set_start_cookie(struct nft_rbtree *priv,
priv->start_rbe_cookie = (unsigned long)rbe;
}
+static void nft_rbtree_set_start_cookie_open(struct nft_rbtree *priv,
+ const struct nft_rbtree_elem *rbe,
+ unsigned long open_interval)
+{
+ priv->start_rbe_cookie = (unsigned long)rbe | open_interval;
+}
+
+#define NFT_RBTREE_OPEN_INTERVAL 1UL
+
static bool nft_rbtree_cmp_start_cookie(struct nft_rbtree *priv,
const struct nft_rbtree_elem *rbe)
{
- return priv->start_rbe_cookie == (unsigned long)rbe;
+ return (priv->start_rbe_cookie & ~NFT_RBTREE_OPEN_INTERVAL) == (unsigned long)rbe;
}
@@ -342,7 +353,7 @@ static bool nft_rbtree_insert_same_interval(const struct net *net,
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
- struct nft_elem_priv **elem_priv)
+ struct nft_elem_priv **elem_priv, bool last)
{
struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL, *rbe_prev;
struct rb_node *node, *next, *parent, **p, *first = NULL;
@@ -350,6 +361,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
u8 cur_genmask = nft_genmask_cur(net);
u8 genmask = nft_genmask_next(net);
u64 tstamp = nft_net_tstamp(net);
+ unsigned long open_interval = 0;
int d;
/* Descend the tree to search for an existing element greater than the
@@ -455,10 +467,18 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
}
}
- if (nft_rbtree_interval_null(set, new))
- priv->start_rbe_cookie = 0;
- else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie)
+ if (nft_rbtree_interval_null(set, new)) {
priv->start_rbe_cookie = 0;
+ } else if (nft_rbtree_interval_start(new) && priv->start_rbe_cookie) {
+ if (nft_set_is_anonymous(set)) {
+ priv->start_rbe_cookie = 0;
+ } else if (priv->start_rbe_cookie & NFT_RBTREE_OPEN_INTERVAL) {
+ /* Previous element is an open interval that partially
+ * overlaps with an existing non-open interval.
+ */
+ return -ENOTEMPTY;
+ }
+ }
/* - new start element matching existing start element: full overlap
* reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
@@ -466,7 +486,26 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
*elem_priv = &rbe_ge->priv;
- nft_rbtree_set_start_cookie(priv, rbe_ge);
+
+ /* - Corner case: new start element of open interval (which
+ * comes as last element in the batch) overlaps the start of
+ * an existing interval with an end element: partial overlap.
+ */
+ node = rb_first(&priv->root);
+ rbe = __nft_rbtree_next_active(node, genmask);
+ if (nft_rbtree_interval_end(rbe)) {
+ rbe = nft_rbtree_next_active(rbe, genmask);
+ if (nft_rbtree_interval_start(rbe) &&
+ !nft_rbtree_cmp(set, new, rbe)) {
+ if (last)
+ return -ENOTEMPTY;
+
+ /* Maybe open interval? */
+ open_interval = 1UL;
+ }
+ }
+ nft_rbtree_set_start_cookie_open(priv, rbe_ge, open_interval);
+
return -EEXIST;
}
@@ -521,6 +560,12 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
return -ENOTEMPTY;
+ /* - start element overlaps an open interval but end element is new:
+ * partial overlap, reported as -ENOTEMPTY.
+ */
+ if (!rbe_ge && priv->start_rbe_cookie && nft_rbtree_interval_end(new))
+ return -ENOTEMPTY;
+
/* Accepted element: pick insertion point depending on key value */
parent = NULL;
p = &priv->root.rb_node;
@@ -630,6 +675,7 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_elem_priv **elem_priv)
{
struct nft_rbtree_elem *rbe = nft_elem_priv_cast(elem->priv);
+ bool last = !!(elem->flags & NFT_SET_ELEM_INTERNAL_LAST);
struct nft_rbtree *priv = nft_set_priv(set);
int err;
@@ -643,8 +689,12 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
cond_resched();
write_lock_bh(&priv->lock);
- err = __nft_rbtree_insert(net, set, rbe, elem_priv);
+ err = __nft_rbtree_insert(net, set, rbe, elem_priv, last);
write_unlock_bh(&priv->lock);
+
+ if (nft_rbtree_interval_end(rbe))
+ priv->start_rbe_cookie = 0;
+
} while (err == -EAGAIN);
return err;
@@ -732,6 +782,7 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem)
{
struct nft_rbtree_elem *rbe, *this = nft_elem_priv_cast(elem->priv);
+ bool last = !!(elem->flags & NFT_SET_ELEM_INTERNAL_LAST);
struct nft_rbtree *priv = nft_set_priv(set);
const struct rb_node *parent = priv->root.rb_node;
u8 genmask = nft_genmask_next(net);
@@ -770,9 +821,10 @@ nft_rbtree_deactivate(const struct net *net, const struct nft_set *set,
continue;
}
- if (nft_rbtree_interval_start(rbe))
- nft_rbtree_set_start_cookie(priv, rbe);
- else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe))
+ if (nft_rbtree_interval_start(rbe)) {
+ if (!last)
+ nft_rbtree_set_start_cookie(priv, rbe);
+ } else if (!nft_rbtree_deactivate_same_interval(net, priv, rbe))
return NULL;
nft_rbtree_flush(net, set, &rbe->priv);
--
2.47.3
^ permalink raw reply related [flat|nested] 7+ messages in thread