* [PATCH nft 1/5] parser_json: move some code around
2024-03-07 12:26 [PATCH nft 0/5] parser_json: fix up transaction ordering Florian Westphal
@ 2024-03-07 12:26 ` Florian Westphal
2024-03-07 12:26 ` [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd Florian Westphal
` (3 subsequent siblings)
4 siblings, 0 replies; 11+ messages in thread
From: Florian Westphal @ 2024-03-07 12:26 UTC (permalink / raw)
To: netfilter-devel; +Cc: phil, Florian Westphal
Place the helper earlier; the next patch will call it from json_parse_cmd.
Signed-off-by: Florian Westphal <fw@strlen.de>
---
src/parser_json.c | 44 ++++++++++++++++++++++----------------------
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/src/parser_json.c b/src/parser_json.c
index ff52423af4d7..d4cc2c1e4e9c 100644
--- a/src/parser_json.c
+++ b/src/parser_json.c
@@ -63,6 +63,17 @@ struct json_ctx {
#define is_DTYPE(ctx) (ctx->flags & CTX_F_DTYPE)
#define is_SET_RHS(ctx) (ctx->flags & CTX_F_SET_RHS)
+struct json_cmd_assoc {
+ struct json_cmd_assoc *next;
+ struct hlist_node hnode;
+ const struct cmd *cmd;
+ json_t *json;
+};
+
+#define CMD_ASSOC_HSIZE 512
+static struct hlist_head json_cmd_assoc_hash[CMD_ASSOC_HSIZE];
+static struct json_cmd_assoc *json_cmd_assoc_list;
+
static char *ctx_flags_to_string(struct json_ctx *ctx)
{
static char buf[1024];
@@ -4089,6 +4100,17 @@ static struct cmd *json_parse_cmd_rename(struct json_ctx *ctx,
return cmd;
}
+static void json_cmd_assoc_add(json_t *json, const struct cmd *cmd)
+{
+ struct json_cmd_assoc *new = xzalloc(sizeof *new);
+
+ new->json = json;
+ new->cmd = cmd;
+ new->next = json_cmd_assoc_list;
+
+ json_cmd_assoc_list = new;
+}
+
static struct cmd *json_parse_cmd(struct json_ctx *ctx, json_t *root)
{
struct {
@@ -4141,17 +4163,6 @@ static int json_verify_metainfo(struct json_ctx *ctx, json_t *root)
return 0;
}
-struct json_cmd_assoc {
- struct json_cmd_assoc *next;
- struct hlist_node hnode;
- const struct cmd *cmd;
- json_t *json;
-};
-
-#define CMD_ASSOC_HSIZE 512
-static struct hlist_head json_cmd_assoc_hash[CMD_ASSOC_HSIZE];
-static struct json_cmd_assoc *json_cmd_assoc_list;
-
static void json_cmd_assoc_free(void)
{
struct json_cmd_assoc *cur;
@@ -4173,17 +4184,6 @@ static void json_cmd_assoc_free(void)
}
}
-static void json_cmd_assoc_add(json_t *json, const struct cmd *cmd)
-{
- struct json_cmd_assoc *new = xzalloc(sizeof *new);
-
- new->json = json;
- new->cmd = cmd;
- new->next = json_cmd_assoc_list;
-
- json_cmd_assoc_list = new;
-}
-
static json_t *seqnum_to_json(const uint32_t seqnum)
{
struct json_cmd_assoc *cur;
--
2.43.0
* [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd
2024-03-07 12:26 [PATCH nft 0/5] parser_json: fix up transaction ordering Florian Westphal
2024-03-07 12:26 ` [PATCH nft 1/5] parser_json: move some code around Florian Westphal
@ 2024-03-07 12:26 ` Florian Westphal
2024-03-07 14:31 ` Phil Sutter
2024-03-07 12:26 ` [PATCH nft 3/5] parser_json: add and use CMD_ERR helpers Florian Westphal
` (2 subsequent siblings)
4 siblings, 1 reply; 11+ messages in thread
From: Florian Westphal @ 2024-03-07 12:26 UTC (permalink / raw)
To: netfilter-devel; +Cc: phil, Florian Westphal
The existing parser cannot handle certain inputs. Example:
"map": {
"family": "ip",
"name": "testmap",
"table": "test",
"type": "ipv4_addr",
"handle": 2,
"map": "verdict",
"elem": [ [ "*", {
"jump": {
"target": "testchain"
[..]
},
{
"chain": {
"family": "ip",
"table": "test",
"name": "testchain",
...
The problem is that the json input parser does cmd_add at the earliest opportunity.
For a simple input file defining a table, set, set element and chain, we get the
following transaction:
* add table
* add set
* add setelem
* add chain
This is rejected by the kernel, because the set element references a chain
that does not (yet) exist.
The normal input parser only allocates a CMD_ADD request for the table.
The rest of the transactional commands are created much later, via nft_cmd_expand(),
which walks "struct table" and then creates the needed CMD_ADD commands for the
objects owned by that table.
This transaction will be:
* add table
* add chain
* add set
* add setelem
This is not fixable with the current json parser. To make this work, we
will need to let nft_cmd_expand() take care of building the transaction
commands in the right order.
For this, we must suppress the cmd_alloc() and add the object to struct
table (->sets, ->chains, etc).
This preparation patch moves the list_add into json_parse_cmd so that
follow-up patches can avoid command allocation completely and add objects
to struct table/chain instead.
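To illustrate the intended ordering, a minimal, self-contained sketch; the
toy_table/expand_toy_table names are made up for illustration and are not the
actual nft types or the real nft_cmd_expand() code:

#include <stdio.h>

/* Toy model of a parsed table: the real code walks struct table and its
 * lists of chains, sets, elements and rules. */
struct toy_table {
	const char *name;
	const char *chains[4];
	const char *sets[4];
	const char *elements[4];	/* may carry "jump <chain>" verdicts */
	const char *rules[4];
};

/* Emit "add" commands from the table itself, so chains always precede
 * the set elements and rules that may reference them. */
static void expand_toy_table(const struct toy_table *t)
{
	int i;

	printf("add table %s\n", t->name);
	for (i = 0; t->chains[i]; i++)
		printf("add chain %s %s\n", t->name, t->chains[i]);
	for (i = 0; t->sets[i]; i++)
		printf("add set %s %s\n", t->name, t->sets[i]);
	for (i = 0; t->elements[i]; i++)
		printf("add element %s %s\n", t->name, t->elements[i]);
	for (i = 0; t->rules[i]; i++)
		printf("add rule %s %s\n", t->name, t->rules[i]);
}

int main(void)
{
	const struct toy_table t = {
		.name     = "test",
		.chains   = { "testchain" },
		.sets     = { "testmap" },
		.elements = { "testmap { * : jump testchain }" },
	};

	expand_toy_table(&t);	/* chain is emitted before the element that jumps to it */
	return 0;
}

With input like the verdict map example above, the chain therefore always ends
up in the batch before the element that jumps to it.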
Signed-off-by: Florian Westphal <fw@strlen.de>
---
src/parser_json.c | 34 ++++++++++++++++++----------------
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/src/parser_json.c b/src/parser_json.c
index d4cc2c1e4e9c..7540df59dc8f 100644
--- a/src/parser_json.c
+++ b/src/parser_json.c
@@ -4111,7 +4111,7 @@ static void json_cmd_assoc_add(json_t *json, const struct cmd *cmd)
json_cmd_assoc_list = new;
}
-static struct cmd *json_parse_cmd(struct json_ctx *ctx, json_t *root)
+static int json_parse_cmd(struct json_ctx *ctx, json_t *root)
{
struct {
const char *key;
@@ -4132,6 +4132,7 @@ static struct cmd *json_parse_cmd(struct json_ctx *ctx, json_t *root)
//{ "monitor", CMD_MONITOR, json_parse_cmd_monitor },
//{ "describe", CMD_DESCRIBE, json_parse_cmd_describe }
};
+ struct cmd *cmd;
unsigned int i;
json_t *tmp;
@@ -4140,10 +4141,21 @@ static struct cmd *json_parse_cmd(struct json_ctx *ctx, json_t *root)
if (!tmp)
continue;
- return parse_cb_table[i].cb(ctx, tmp, parse_cb_table[i].op);
+ cmd = parse_cb_table[i].cb(ctx, tmp, parse_cb_table[i].op);
+ goto out;
}
+
/* to accept 'list ruleset' output 1:1, try add command */
- return json_parse_cmd_add(ctx, root, CMD_ADD);
+ cmd = json_parse_cmd_add(ctx, root, CMD_ADD);
+out:
+ if (cmd) {
+ list_add_tail(&cmd->list, ctx->cmds);
+
+ if (nft_output_echo(&ctx->nft->output))
+ json_cmd_assoc_add(root, cmd);
+ }
+
+ return 0;
}
static int json_verify_metainfo(struct json_ctx *ctx, json_t *root)
@@ -4222,10 +4234,8 @@ static int __json_parse(struct json_ctx *ctx)
}
json_array_foreach(tmp, index, value) {
- /* this is more or less from parser_bison.y:716 */
- LIST_HEAD(list);
- struct cmd *cmd;
json_t *tmp2;
+ int err;
if (!json_is_object(value)) {
json_error(ctx, "Unexpected command array element of type %s, expected object.", json_typename(value));
@@ -4241,19 +4251,11 @@ static int __json_parse(struct json_ctx *ctx)
continue;
}
- cmd = json_parse_cmd(ctx, value);
-
- if (!cmd) {
+ err = json_parse_cmd(ctx, value);
+ if (err < 0) {
json_error(ctx, "Parsing command array at index %zd failed.", index);
return -1;
}
-
- list_add_tail(&cmd->list, &list);
-
- list_splice_tail(&list, ctx->cmds);
-
- if (nft_output_echo(&ctx->nft->output))
- json_cmd_assoc_add(value, cmd);
}
return 0;
--
2.43.0
* Re: [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd
2024-03-07 12:26 ` [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd Florian Westphal
@ 2024-03-07 14:31 ` Phil Sutter
2024-03-07 15:10 ` Florian Westphal
0 siblings, 1 reply; 11+ messages in thread
From: Phil Sutter @ 2024-03-07 14:31 UTC (permalink / raw)
To: Florian Westphal; +Cc: netfilter-devel
Hi Florian,
On Thu, Mar 07, 2024 at 01:26:32PM +0100, Florian Westphal wrote:
> The existing parser cannot handle certain inputs. Example:
>
> "map": {
> "family": "ip",
> "name": "testmap",
> "table": "test",
> "type": "ipv4_addr",
> "handle": 2,
> "map": "verdict",
> "elem": [ [ "*", {
> "jump": {
> "target": "testchain"
> [..]
> },
> {
> "chain": {
> "family": "ip",
> "table": "test",
> "name": "testchain",
> ...
>
> Problem is that the json input parser does cmd_add at the earliest opportunity.
>
> For a simple input file defining a table, set, set element and chain, we get
> following transaction:
> * add table
> * add set
> * add setelem
> * add chain
>
> This is rejected by the kernel, because the set element references a chain
> that does (not yet) exist.
>
> Normal input parser only allocates a CMD_ADD request for the table.
>
> Rest of the transactional commands are created much later, via nft_cmd_expand(),
> which walks "struct table" and then creates the needed CMD_ADD for the objects
> owned by that table.
JSON parser simply does not support nested syntax, like, for instance:
| table test {
| map testmap {
| type ipv4_addr : verdict
| elements = {
| "*" : jump testchain
| }
| }
| chain testchain {
| }
| }
Your example above is equivalent to the following in standard syntax:
| add table t
| add map t m { type ipv4_addr : verdict; elements = { 10.0.0.1 : jump mychain }; }
| add chain t mychain
It is rejected by nft as well:
| /tmp/input.nft:2:54-61: Error: Could not process rule: No such file or directory
| add map t m { type ipv4_addr : verdict; elements = { 10.0.0.1 : jump mychain }; }
| ^^^^^^^^
(Note the wrong marker position, an unrelated bug it seems.)
If I swap the 'add map' and 'add chain' commands in input, it is
accepted.
Cheers, Phil
* Re: [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd
2024-03-07 14:31 ` Phil Sutter
@ 2024-03-07 15:10 ` Florian Westphal
2024-03-07 15:52 ` Phil Sutter
0 siblings, 1 reply; 11+ messages in thread
From: Florian Westphal @ 2024-03-07 15:10 UTC (permalink / raw)
To: Phil Sutter, Florian Westphal, netfilter-devel
Phil Sutter <phil@nwl.cc> wrote:
> > Problem is that the json input parser does cmd_add at the earliest opportunity.
> >
> > For a simple input file defining a table, set, set element and chain, we get
> > following transaction:
> > * add table
> > * add set
> > * add setelem
> > * add chain
> >
> > This is rejected by the kernel, because the set element references a chain
> > that does (not yet) exist.
> >
> > Normal input parser only allocates a CMD_ADD request for the table.
> >
> > Rest of the transactional commands are created much later, via nft_cmd_expand(),
> > which walks "struct table" and then creates the needed CMD_ADD for the objects
> > owned by that table.
>
> JSON parser simply does not support nested syntax, like, for instance:
You mean, WONTFIX? Fine with me.
* Re: [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd
2024-03-07 15:10 ` Florian Westphal
@ 2024-03-07 15:52 ` Phil Sutter
2024-03-07 16:44 ` Florian Westphal
0 siblings, 1 reply; 11+ messages in thread
From: Phil Sutter @ 2024-03-07 15:52 UTC (permalink / raw)
To: Florian Westphal; +Cc: netfilter-devel
On Thu, Mar 07, 2024 at 04:10:05PM +0100, Florian Westphal wrote:
> Phil Sutter <phil@nwl.cc> wrote:
> > > Problem is that the json input parser does cmd_add at the earliest opportunity.
> > >
> > > For a simple input file defining a table, set, set element and chain, we get
> > > following transaction:
> > > * add table
> > > * add set
> > > * add setelem
> > > * add chain
> > >
> > > This is rejected by the kernel, because the set element references a chain
> > > that does (not yet) exist.
> > >
> > > Normal input parser only allocates a CMD_ADD request for the table.
> > >
> > > Rest of the transactional commands are created much later, via nft_cmd_expand(),
> > > which walks "struct table" and then creates the needed CMD_ADD for the objects
> > > owned by that table.
> >
> > JSON parser simply does not support nested syntax, like, for instance:
>
> You mean, WONTFIX? Fine with me.
Not quite, I see the problem you're trying to solve in patch 5. Sorting
elements in the JSON "nftables" array properly for later insertion may
become a non-trivial task given how maps and rules may refer to chains.
So IIUC, JSON parser will now collapse all new ruleset items into a tree
and use the existing nft_cmd_expand() to split things up again. This may
impose significant overhead depending on input data (bogus/OpenShift use
cases involving many chains maybe) on one hand, on the other might allow
for overhead elimination in other cases (e.g. long lists of 'add
element' commands for different sets in alternating fashion).
We may want to do this for standard syntax as well if the benefits
outweigh the downsides. Thus generalize the JSON-specific helpers you
wrote for use within bison parser, too?
An alternative might be to reorder code in table_print_json_full(),
copying what nft_cmd_expand() does for CMD_OBJ_TABLE. AIUI, it should
solve the current issue of failing 'nft -j list ruleset | nft -j -f -'
for special cases.
Cheers, Phil
* Re: [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd
2024-03-07 15:52 ` Phil Sutter
@ 2024-03-07 16:44 ` Florian Westphal
2024-03-07 17:58 ` Phil Sutter
0 siblings, 1 reply; 11+ messages in thread
From: Florian Westphal @ 2024-03-07 16:44 UTC (permalink / raw)
To: Phil Sutter, Florian Westphal, netfilter-devel
Phil Sutter <phil@nwl.cc> wrote:
> So IIUC, JSON parser will now collapse all new ruleset items into a tree
> and use the existing nft_cmd_expand() to split things up again. This may
> impose significant overhead depending on input data (bogus/OpenShift use
> cases involving many chains maybe) on one hand, on the other might allow
> for overhead elimination in other cases (e.g. long lists of 'add
> element' commands for different sets in alternating fashion).
>
> We may want to do this for standard syntax as well if the benefits
> outweigh the downsides. Thus generalize the JSON-specific helpers you
> wrote for use within bison parser, too?
It tries to do the same as the bison parser when using nft -f with a standard
'list ruleset' input.
A 'batch file' with sequential 'add table x', 'add chain x c' etc.
does separate 'add' requests. The json parser is supposed to follow
this, i.e. 'ctx->in_ruleset' is only supposed to be set when this
is a json listing, not when some input daemon is feeding independent
add requests.
> An alternative might be to reorder code in table_print_json_full(),
> copying what nft_cmd_expand() does for CMD_OBJ_TABLE. AIUI, it should
> solve the current issue of failing 'nft -j list ruleset | nft -j -f -'
> for special cases.
It's indeed possible to reorder things but I was not sure if there is
a simple way to do this.
One case is 'verdict map', where the elements need to be created
after the chains.
The other one is rules, which need to come after the chains.
So what could work is:
1. tables
2. chains (but not rules)
3. flowtables
4. objects
5. maps and sets
6. map/set elements
7. rules (they could reference maps and sets or objects)
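As a rough illustration of that ordering (hypothetical sketch, not existing
nft code; the emit_rank names are invented):

/* Hypothetical emission ranks matching the list above: anything that can
 * be referenced gets a lower rank than the things referencing it. */
enum emit_rank {
	RANK_TABLE,
	RANK_CHAIN,	/* bare chains, without their rules */
	RANK_FLOWTABLE,
	RANK_OBJECT,	/* counters, quotas, ct helpers, ... */
	RANK_SET,	/* sets and maps */
	RANK_ELEMENT,	/* set/map elements, may jump to chains */
	RANK_RULE,	/* rules last: may reference sets, maps and objects */
};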
If you prefer to resolve it by sorting the output (input) as needed
please let me know.
* Re: [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd
2024-03-07 16:44 ` Florian Westphal
@ 2024-03-07 17:58 ` Phil Sutter
0 siblings, 0 replies; 11+ messages in thread
From: Phil Sutter @ 2024-03-07 17:58 UTC (permalink / raw)
To: Florian Westphal; +Cc: netfilter-devel
On Thu, Mar 07, 2024 at 05:44:22PM +0100, Florian Westphal wrote:
> Phil Sutter <phil@nwl.cc> wrote:
> > So IIUC, JSON parser will now collapse all new ruleset items into a tree
> > and use the existing nft_cmd_expand() to split things up again. This may
> > impose significant overhead depending on input data (bogus/OpenShift use
> > cases involving many chains maybe) on one hand, on the other might allow
> > for overhead elimination in other cases (e.g. long lists of 'add
> > element' commands for different sets in alternating fashion).
> >
> > We may want to do this for standard syntax as well if the benefits
> > outweigh the downsides. Thus generalize the JSON-specific helpers you
> > wrote for use within bison parser, too?
>
> It tries to do same as bison parser when using nft -f with a standard
> 'list ruleset' input.
>
> A 'batch file' with sequential 'add table x', 'add chain x c' etc.
> does separate 'add' requests. The json parser is supposed to follow
> this, i.e. 'ctx->in_ruleset' is only supposed to be set when this
> is a json listing, not when some input daemon is feeding independent
> add requests.
>
> > An alternative might be to reorder code in table_print_json_full(),
> > copying what nft_cmd_expand() does for CMD_OBJ_TABLE. AIUI, it should
> > solve the current issue of failing 'nft -j list ruleset | nft -j -f -'
> > for special cases.
>
> It's indeed possible to reorder things but I was not sure if there is
> a simple way to do this.
It seems there is! Taking nft_cmd_expand() as an example, all that's
missing for table_print_json_full() is to move the (bare) chain listing
first and later list rules only instead of chain + rules. I have a patch
at hand and am currently tickling the testsuite to get things tested. It
should work though, because what nft_cmd_expand() does is proven to
work.
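For illustration, a sketch of the shape of that change (not the actual
json.c/table_print_json_full() code; the toy_chain helpers are invented):

#include <stdio.h>

struct toy_chain {
	const char *name;
	const char *rules[4];
};

/* Old shape: each chain is printed together with its rules, so a rule
 * may appear before another chain it jumps to has been declared. */
static void print_chains_with_rules(const struct toy_chain *c, int n)
{
	int i, j;

	for (i = 0; i < n; i++) {
		printf("chain %s\n", c[i].name);
		for (j = 0; c[i].rules[j]; j++)
			printf("  rule %s\n", c[i].rules[j]);
	}
}

/* Reordered shape: bare chain declarations first, rules in a second
 * pass, mirroring the order nft_cmd_expand() produces. */
static void print_chains_then_rules(const struct toy_chain *c, int n)
{
	int i, j;

	for (i = 0; i < n; i++)
		printf("chain %s\n", c[i].name);

	for (i = 0; i < n; i++)
		for (j = 0; c[i].rules[j]; j++)
			printf("  rule in %s: %s\n", c[i].name, c[i].rules[j]);
}

int main(void)
{
	const struct toy_chain chains[] = {
		{ .name = "c1", .rules = { "jump c2" } },
		{ .name = "c2" },
	};

	print_chains_with_rules(chains, 2);	/* "jump c2" listed before chain c2 */
	print_chains_then_rules(chains, 2);	/* chain c2 declared before the jump */
	return 0;
}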
[...]
> If you prefer to resolve it by sorting the output (input) as needed
> please let me know.
I'm more confident with the reordering as it must work. Your approach is
interesting, but it may fail if e.g. the input does not contain the table
(the user knows it exists already). Though it may still be of value for
other purposes. Also, my "reorder output" approach does not cover
user-compiled input (although one may call PEBKAC there).
Thanks, Phil
* [PATCH nft 3/5] parser_json: add and use CMD_ERR helpers
2024-03-07 12:26 [PATCH nft 0/5] parser_json: fix up transaction ordering Florian Westphal
2024-03-07 12:26 ` [PATCH nft 1/5] parser_json: move some code around Florian Westphal
2024-03-07 12:26 ` [PATCH nft 2/5] parser_json: move list_add into json_parse_cmd Florian Westphal
@ 2024-03-07 12:26 ` Florian Westphal
2024-03-07 12:26 ` [PATCH nft 4/5] parser_json: defer command allocation to nft_cmd_expand Florian Westphal
2024-03-07 12:26 ` [PATCH nft 5/5] tests: shell: add more json-nft dumps Florian Westphal
4 siblings, 0 replies; 11+ messages in thread
From: Florian Westphal @ 2024-03-07 12:26 UTC (permalink / raw)
To: netfilter-devel; +Cc: phil, Florian Westphal
An upcoming patch will make NULL a valid return value,
indicating that the data was appended to an existing object,
e.g. table->chains, table->objs, chain->rules, etc.
Switch all return locations queueing errors to return CMD_ERR_PTR(-1).
Signed-off-by: Florian Westphal <fw@strlen.de>
---
src/parser_json.c | 128 ++++++++++++++++++++++++++--------------------
1 file changed, 73 insertions(+), 55 deletions(-)
diff --git a/src/parser_json.c b/src/parser_json.c
index 7540df59dc8f..91c1e01cee52 100644
--- a/src/parser_json.c
+++ b/src/parser_json.c
@@ -63,6 +63,18 @@ struct json_ctx {
#define is_DTYPE(ctx) (ctx->flags & CTX_F_DTYPE)
#define is_SET_RHS(ctx) (ctx->flags & CTX_F_SET_RHS)
+static inline bool CMD_IS_ERR(struct cmd *cmd)
+{
+ long x = (long)cmd;
+
+ return x < 0;
+}
+
+static inline struct cmd *CMD_ERR_PTR(long err)
+{
+ return (struct cmd *)err;
+}
+
struct json_cmd_assoc {
struct json_cmd_assoc *next;
struct hlist_node hnode;
@@ -2976,22 +2988,22 @@ static struct cmd *json_parse_cmd_add_table(struct json_ctx *ctx, json_t *root,
if (json_unpack_err(ctx, root, "{s:s}",
"family", &family))
- return NULL;
+ return CMD_ERR_PTR(-1);
if (op != CMD_DELETE) {
if (json_unpack_err(ctx, root, "{s:s}", "name", &h.table.name))
- return NULL;
+ return CMD_ERR_PTR(-1);
json_unpack(root, "{s:s}", "comment", &comment);
} else if (op == CMD_DELETE &&
json_unpack(root, "{s:s}", "name", &h.table.name) &&
json_unpack(root, "{s:I}", "handle", &h.handle.id)) {
json_error(ctx, "Either name or handle required to delete a table.");
- return NULL;
+ return CMD_ERR_PTR(-1);
}
if (parse_family(family, &h.family)) {
json_error(ctx, "Unknown family '%s'.", family);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
if (h.table.name)
h.table.name = xstrdup(h.table.name);
@@ -3072,21 +3084,21 @@ static struct cmd *json_parse_cmd_add_chain(struct json_ctx *ctx, json_t *root,
if (json_unpack_err(ctx, root, "{s:s, s:s}",
"family", &family,
"table", &h.table.name))
- return NULL;
+ return CMD_ERR_PTR(-1);
if (op != CMD_DELETE) {
if (json_unpack_err(ctx, root, "{s:s}", "name", &h.chain.name))
- return NULL;
+ return CMD_ERR_PTR(-1);
json_unpack(root, "{s:s}", "comment", &comment);
} else if (op == CMD_DELETE &&
json_unpack(root, "{s:s}", "name", &h.chain.name) &&
json_unpack(root, "{s:I}", "handle", &h.handle.id)) {
json_error(ctx, "Either name or handle required to delete a chain.");
- return NULL;
+ return CMD_ERR_PTR(-1);
}
if (parse_family(family, &h.family)) {
json_error(ctx, "Unknown family '%s'.", family);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
h.table.name = xstrdup(h.table.name);
if (h.chain.name)
@@ -3118,7 +3130,7 @@ static struct cmd *json_parse_cmd_add_chain(struct json_ctx *ctx, json_t *root,
if (!chain->hook.name) {
json_error(ctx, "Invalid chain hook '%s'.", hookstr);
chain_free(chain);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
json_unpack(root, "{s:o}", "dev", &devs);
@@ -3128,7 +3140,7 @@ static struct cmd *json_parse_cmd_add_chain(struct json_ctx *ctx, json_t *root,
if (!chain->dev_expr) {
json_error(ctx, "Invalid chain dev.");
chain_free(chain);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
@@ -3137,7 +3149,7 @@ static struct cmd *json_parse_cmd_add_chain(struct json_ctx *ctx, json_t *root,
if (!chain->policy) {
json_error(ctx, "Unknown policy '%s'.", policy);
chain_free(chain);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
@@ -3165,17 +3177,17 @@ static struct cmd *json_parse_cmd_add_rule(struct json_ctx *ctx, json_t *root,
"family", &family,
"table", &h.table.name,
"chain", &h.chain.name))
- return NULL;
+ return CMD_ERR_PTR(-1);
if (op != CMD_DELETE &&
json_unpack_err(ctx, root, "{s:o}", "expr", &tmp))
- return NULL;
+ return CMD_ERR_PTR(-1);
else if ((op == CMD_DELETE || op == CMD_DESTROY) &&
json_unpack_err(ctx, root, "{s:I}", "handle", &h.handle.id))
- return NULL;
+ return CMD_ERR_PTR(-1);
if (parse_family(family, &h.family)) {
json_error(ctx, "Unknown family '%s'.", family);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
h.table.name = xstrdup(h.table.name);
h.chain.name = xstrdup(h.chain.name);
@@ -3185,7 +3197,7 @@ static struct cmd *json_parse_cmd_add_rule(struct json_ctx *ctx, json_t *root,
if (!json_is_array(tmp)) {
json_error(ctx, "Value of property \"expr\" must be an array.");
- return NULL;
+ return CMD_ERR_PTR(-1);
}
if (!json_unpack(root, "{s:I}", "index", &h.index.id)) {
@@ -3205,7 +3217,7 @@ static struct cmd *json_parse_cmd_add_rule(struct json_ctx *ctx, json_t *root,
json_error(ctx, "Unexpected expr array element of type %s, expected object.",
json_typename(value));
rule_free(rule);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
stmt = json_parse_stmt(ctx, value);
@@ -3213,7 +3225,7 @@ static struct cmd *json_parse_cmd_add_rule(struct json_ctx *ctx, json_t *root,
if (!stmt) {
json_error(ctx, "Parsing expr array at index %zd failed.", index);
rule_free(rule);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
rule_stmt_append(rule, stmt);
@@ -3273,15 +3285,15 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root,
if (json_unpack_err(ctx, root, "{s:s, s:s}",
"family", &family,
"table", &h.table.name))
- return NULL;
+ return CMD_ERR_PTR(-1);
if (op != CMD_DELETE &&
json_unpack_err(ctx, root, "{s:s}", "name", &h.set.name)) {
- return NULL;
+ return CMD_ERR_PTR(-1);
} else if ((op == CMD_DELETE || op == CMD_DESTROY) &&
json_unpack(root, "{s:s}", "name", &h.set.name) &&
json_unpack(root, "{s:I}", "handle", &h.handle.id)) {
json_error(ctx, "Either name or handle required to delete a set.");
- return NULL;
+ return CMD_ERR_PTR(-1);
}
if (parse_family(family, &h.family)) {
@@ -3309,14 +3321,14 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root,
json_error(ctx, "Invalid set type.");
set_free(set);
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
set->key = json_parse_dtype_expr(ctx, tmp);
if (!set->key) {
json_error(ctx, "Invalid set type.");
set_free(set);
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
if (!json_unpack(root, "{s:s}", "map", &dtype_ext)) {
@@ -3334,7 +3346,7 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root,
json_error(ctx, "Invalid map type '%s'.", dtype_ext);
set_free(set);
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
if (!json_unpack(root, "{s:s}", "policy", &policy)) {
@@ -3346,7 +3358,7 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root,
json_error(ctx, "Unknown set policy '%s'.", policy);
set_free(set);
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
if (!json_unpack(root, "{s:o}", "flags", &tmp)) {
@@ -3361,7 +3373,7 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root,
json_error(ctx, "Invalid set flag at index %zu.", index);
set_free(set);
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
set->flags |= flag;
}
@@ -3372,7 +3384,7 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root,
json_error(ctx, "Invalid set elem expression.");
set_free(set);
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
if (!json_unpack(root, "{s:I}", "timeout", &set->timeout))
@@ -3407,11 +3419,11 @@ static struct cmd *json_parse_cmd_add_element(struct json_ctx *ctx,
"table", &h.table.name,
"name", &h.set.name,
"elem", &tmp))
- return NULL;
+ return CMD_ERR_PTR(-1);
if (parse_family(family, &h.family)) {
json_error(ctx, "Unknown family '%s'.", family);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
h.table.name = xstrdup(h.table.name);
h.set.name = xstrdup(h.set.name);
@@ -3420,7 +3432,7 @@ static struct cmd *json_parse_cmd_add_element(struct json_ctx *ctx,
if (!expr) {
json_error(ctx, "Invalid set.");
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
return cmd_alloc(op, cmd_obj, &h, int_loc, expr);
}
@@ -3438,21 +3450,21 @@ static struct cmd *json_parse_cmd_add_flowtable(struct json_ctx *ctx,
if (json_unpack_err(ctx, root, "{s:s, s:s}",
"family", &family,
"table", &h.table.name))
- return NULL;
+ return CMD_ERR_PTR(-1);
if (op != CMD_DELETE &&
json_unpack_err(ctx, root, "{s:s}", "name", &h.flowtable.name)) {
- return NULL;
+ return CMD_ERR_PTR(-1);
} else if ((op == CMD_DELETE || op == CMD_DESTROY) &&
json_unpack(root, "{s:s}", "name", &h.flowtable.name) &&
json_unpack(root, "{s:I}", "handle", &h.handle.id)) {
json_error(ctx, "Either name or handle required to delete a flowtable.");
- return NULL;
+ return CMD_ERR_PTR(-1);
}
if (parse_family(family, &h.family)) {
json_error(ctx, "Unknown family '%s'.", family);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
h.table.name = xstrdup(h.table.name);
if (h.flowtable.name)
@@ -3465,7 +3477,7 @@ static struct cmd *json_parse_cmd_add_flowtable(struct json_ctx *ctx,
"hook", &hook,
"prio", &prio)) {
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
json_unpack(root, "{s:o}", "dev", &devs);
@@ -3474,7 +3486,7 @@ static struct cmd *json_parse_cmd_add_flowtable(struct json_ctx *ctx,
if (!hookstr) {
json_error(ctx, "Invalid flowtable hook '%s'.", hook);
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
flowtable = flowtable_alloc(int_loc);
@@ -3490,7 +3502,7 @@ static struct cmd *json_parse_cmd_add_flowtable(struct json_ctx *ctx,
json_error(ctx, "Invalid flowtable dev.");
flowtable_free(flowtable);
handle_free(&h);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
return cmd_alloc(op, cmd_obj, &h, int_loc, flowtable);
@@ -3542,22 +3554,22 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
if (json_unpack_err(ctx, root, "{s:s, s:s}",
"family", &family,
"table", &h.table.name))
- return NULL;
+ return CMD_ERR_PTR(-1);
if ((op != CMD_DELETE ||
cmd_obj == NFT_OBJECT_CT_HELPER) &&
json_unpack_err(ctx, root, "{s:s}", "name", &h.obj.name)) {
- return NULL;
+ return CMD_ERR_PTR(-1);
} else if ((op == CMD_DELETE || op == CMD_DESTROY) &&
cmd_obj != NFT_OBJECT_CT_HELPER &&
json_unpack(root, "{s:s}", "name", &h.obj.name) &&
json_unpack(root, "{s:I}", "handle", &h.handle.id)) {
json_error(ctx, "Either name or handle required to delete an object.");
- return NULL;
+ return CMD_ERR_PTR(-1);
}
if (parse_family(family, &h.family)) {
json_error(ctx, "Unknown family '%s'.", family);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
h.table.name = xstrdup(h.table.name);
if (h.obj.name)
@@ -3598,7 +3610,7 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
json_error(ctx, "Invalid secmark context '%s', max length is %zu.",
tmp, sizeof(obj->secmark.ctx));
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
break;
@@ -3615,7 +3627,7 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
json_error(ctx, "Invalid CT helper type '%s', max length is %zu.",
tmp, sizeof(obj->ct_helper.name));
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
if (!json_unpack(root, "{s:s}", "protocol", &tmp)) {
@@ -3626,14 +3638,14 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
} else {
json_error(ctx, "Invalid ct helper protocol '%s'.", tmp);
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
if (!json_unpack(root, "{s:s}", "l3proto", &tmp) &&
parse_family(tmp, &l3proto)) {
json_error(ctx, "Invalid ct helper l3proto '%s'.", tmp);
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
obj->ct_helper.l3proto = l3proto;
break;
@@ -3648,21 +3660,21 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
} else {
json_error(ctx, "Invalid ct timeout protocol '%s'.", tmp);
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
if (!json_unpack(root, "{s:s}", "l3proto", &tmp) &&
parse_family(tmp, &l3proto)) {
json_error(ctx, "Invalid ct timeout l3proto '%s'.", tmp);
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
obj->ct_timeout.l3proto = l3proto;
init_list_head(&obj->ct_timeout.timeout_list);
if (json_parse_ct_timeout_policy(ctx, root, obj)) {
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
break;
case NFT_OBJECT_CT_EXPECT:
@@ -3672,7 +3684,7 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
parse_family(tmp, &l3proto)) {
json_error(ctx, "Invalid ct expectation l3proto '%s'.", tmp);
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
obj->ct_expect.l3proto = l3proto;
if (!json_unpack(root, "{s:s}", "protocol", &tmp)) {
@@ -3683,7 +3695,7 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
} else {
json_error(ctx, "Invalid ct expectation protocol '%s'.", tmp);
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
}
if (!json_unpack(root, "{s:i}", "dport", &i))
@@ -3699,7 +3711,7 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
"rate", &obj->limit.rate,
"per", &tmp)) {
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
json_unpack(root, "{s:s}", "rate_unit", &rate_unit);
json_unpack(root, "{s:b}", "inv", &inv);
@@ -3723,7 +3735,7 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
if (json_unpack_err(ctx, root, "{s:i, s:i}",
"mss", &i, "wscale", &j)) {
obj_free(obj);
- return NULL;
+ return CMD_ERR_PTR(-1);
}
obj->synproxy.mss = i;
obj->synproxy.wscale = j;
@@ -3778,7 +3790,7 @@ static struct cmd *json_parse_cmd_add(struct json_ctx *ctx,
if (!json_is_object(root)) {
json_error(ctx, "Value of add command must be object (got %s instead).",
json_typename(root));
- return NULL;
+ return CMD_ERR_PTR(-1);
}
for (i = 0; i < array_size(cmd_obj_table); i++) {
@@ -3794,7 +3806,7 @@ static struct cmd *json_parse_cmd_add(struct json_ctx *ctx,
return cmd_obj_table[i].cb(ctx, tmp, op, cmd_obj_table[i].obj);
}
json_error(ctx, "Unknown object passed to add command.");
- return NULL;
+ return CMD_ERR_PTR(-1);
}
static struct cmd *json_parse_cmd_replace(struct json_ctx *ctx,
@@ -4142,11 +4154,17 @@ static int json_parse_cmd(struct json_ctx *ctx, json_t *root)
continue;
cmd = parse_cb_table[i].cb(ctx, tmp, parse_cb_table[i].op);
- goto out;
+ if (!CMD_IS_ERR(cmd))
+ goto out;
+
+ return -1;
}
/* to accept 'list ruleset' output 1:1, try add command */
cmd = json_parse_cmd_add(ctx, root, CMD_ADD);
+ if (CMD_IS_ERR(cmd))
+ return -1;
+
out:
if (cmd) {
list_add_tail(&cmd->list, ctx->cmds);
--
2.43.0
* [PATCH nft 4/5] parser_json: defer command allocation to nft_cmd_expand
2024-03-07 12:26 [PATCH nft 0/5] parser_json: fix up transaction ordering Florian Westphal
` (2 preceding siblings ...)
2024-03-07 12:26 ` [PATCH nft 3/5] parser_json: add and use CMD_ERR helpers Florian Westphal
@ 2024-03-07 12:26 ` Florian Westphal
2024-03-07 12:26 ` [PATCH nft 5/5] tests: shell: add more json-nft dumps Florian Westphal
4 siblings, 0 replies; 11+ messages in thread
From: Florian Westphal @ 2024-03-07 12:26 UTC (permalink / raw)
To: netfilter-devel; +Cc: phil, Florian Westphal
Place new chains, flowtables, sets, and objects in struct table
and let nft_cmd_expand() allocate the commands instead.
Likewise, new rules get appended to chain->rules.
This makes sure chains are created before the set elements that reference
them, and rules are created after the sets they reference.
Instead of allocating a new command, search for the table/chain in the
existing transaction queue and append the object there.
Signed-off-by: Florian Westphal <fw@strlen.de>
---
src/parser_json.c | 129 +++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 123 insertions(+), 6 deletions(-)
diff --git a/src/parser_json.c b/src/parser_json.c
index 91c1e01cee52..a557e3ee81a3 100644
--- a/src/parser_json.c
+++ b/src/parser_json.c
@@ -55,6 +55,7 @@ struct json_ctx {
struct list_head *msgs;
struct list_head *cmds;
uint32_t flags;
+ bool in_ruleset;
};
#define is_RHS(ctx) (ctx->flags & CTX_F_RHS)
@@ -2977,6 +2978,55 @@ static struct stmt *json_parse_stmt(struct json_ctx *ctx, json_t *root)
return NULL;
}
+static bool table_eq(const struct handle *th, const struct handle *handle)
+{
+ return th->family == handle->family &&
+ strcmp(th->table.name, handle->table.name) == 0;
+}
+
+static struct table *json_cmd_get_table(struct json_ctx *ctx,
+ const struct handle *handle)
+{
+ struct cmd *cmd;
+
+ if (!ctx->in_ruleset)
+ return NULL;
+
+ list_for_each_entry(cmd, ctx->cmds, list) {
+ if (cmd->op != CMD_ADD)
+ continue;
+ if (cmd->obj != CMD_OBJ_TABLE)
+ continue;
+ if (table_eq(&cmd->handle, handle)) {
+ if (cmd->table)
+ return cmd->table;
+
+ cmd->table = table_alloc();
+ handle_merge(&cmd->table->handle, &cmd->handle);
+ return cmd->table;
+ }
+ }
+
+ return NULL;
+}
+
+static struct chain *json_cmd_get_chain(struct json_ctx *ctx,
+ const struct handle *handle)
+{
+ struct table *table = json_cmd_get_table(ctx, handle);
+ struct chain *chain;
+
+ if (!table)
+ return NULL;
+
+ list_for_each_entry(chain, &table->chains, list) {
+ if (strcmp(chain->handle.chain.name, handle->chain.name) == 0)
+ return chain;
+ }
+
+ return NULL;
+}
+
static struct cmd *json_parse_cmd_add_table(struct json_ctx *ctx, json_t *root,
enum cmd_ops op, enum cmd_obj obj)
{
@@ -3114,8 +3164,23 @@ static struct cmd *json_parse_cmd_add_chain(struct json_ctx *ctx, json_t *root,
op == CMD_LIST ||
op == CMD_FLUSH ||
json_unpack(root, "{s:s, s:s, s:i}",
- "type", &type, "hook", &hookstr, "prio", &prio))
+ "type", &type, "hook", &hookstr, "prio", &prio)) {
+ struct table *table = json_cmd_get_table(ctx, &h);
+
+ if (table) {
+ assert(op == CMD_ADD);
+
+ if (!chain)
+ chain = chain_alloc();
+
+ handle_merge(&chain->handle, &h);
+ list_add_tail(&chain->list, &table->chains);
+ handle_free(&h);
+ return NULL;
+ }
+
return cmd_alloc(op, obj, &h, int_loc, chain);
+ }
if (!chain)
chain = chain_alloc();
@@ -3153,10 +3218,20 @@ static struct cmd *json_parse_cmd_add_chain(struct json_ctx *ctx, json_t *root,
}
}
- if (op == CMD_ADD)
+ handle_merge(&chain->handle, &h);
+
+ if (op == CMD_ADD) {
+ struct table *table = json_cmd_get_table(ctx, &h);
+
json_object_del(root, "handle");
- handle_merge(&chain->handle, &h);
+ if (table) {
+ list_add_tail(&chain->list, &table->chains);
+ handle_free(&h);
+ return NULL;
+ }
+ }
+
return cmd_alloc(op, obj, &h, int_loc, chain);
}
@@ -3231,9 +3306,17 @@ static struct cmd *json_parse_cmd_add_rule(struct json_ctx *ctx, json_t *root,
rule_stmt_append(rule, stmt);
}
- if (op == CMD_ADD)
+ if (op == CMD_ADD) {
+ struct chain *chain = json_cmd_get_chain(ctx, &h);
+
json_object_del(root, "handle");
+ if (chain) {
+ list_add_tail(&rule->list, &chain->rules);
+ handle_free(&h);
+ return NULL;
+ }
+ }
return cmd_alloc(op, obj, &h, int_loc, rule);
}
@@ -3399,9 +3482,18 @@ static struct cmd *json_parse_cmd_add_set(struct json_ctx *ctx, json_t *root,
handle_merge(&set->handle, &h);
- if (op == CMD_ADD)
+ if (op == CMD_ADD) {
+ struct table *table = json_cmd_get_table(ctx, &h);
+
json_object_del(root, "handle");
+ if (table) {
+ list_add_tail(&set->list, &table->sets);
+ handle_free(&h);
+ return NULL;
+ }
+ }
+
return cmd_alloc(op, obj, &h, int_loc, set);
}
@@ -3505,6 +3597,18 @@ static struct cmd *json_parse_cmd_add_flowtable(struct json_ctx *ctx,
return CMD_ERR_PTR(-1);
}
}
+
+ if (op == CMD_ADD) {
+ struct table *table = json_cmd_get_table(ctx, &h);
+
+ if (table) {
+ handle_merge(&flowtable->handle, &h);
+ list_add_tail(&flowtable->list, &table->flowtables);
+ handle_free(&h);
+ return NULL;
+ }
+ }
+
return cmd_alloc(op, cmd_obj, &h, int_loc, flowtable);
}
@@ -3754,9 +3858,19 @@ static struct cmd *json_parse_cmd_add_object(struct json_ctx *ctx,
BUG("Invalid CMD '%d'", cmd_obj);
}
- if (op == CMD_ADD)
+ if (op == CMD_ADD) {
+ struct table *table = json_cmd_get_table(ctx, &h);
+
json_object_del(root, "handle");
+ if (table) {
+ handle_merge(&obj->handle, &h);
+ list_add_tail(&obj->list, &table->objs);
+ handle_free(&h);
+ return NULL;
+ }
+ }
+
return cmd_alloc(op, cmd_obj, &h, int_loc, obj);
}
@@ -4160,10 +4274,13 @@ static int json_parse_cmd(struct json_ctx *ctx, json_t *root)
return -1;
}
+ assert(!ctx->in_ruleset);
+ ctx->in_ruleset = true;
/* to accept 'list ruleset' output 1:1, try add command */
cmd = json_parse_cmd_add(ctx, root, CMD_ADD);
if (CMD_IS_ERR(cmd))
return -1;
+ ctx->in_ruleset = false;
out:
if (cmd) {
--
2.43.0
* [PATCH nft 5/5] tests: shell: add more json-nft dumps
2024-03-07 12:26 [PATCH nft 0/5] parser_json: fix up transaction ordering Florian Westphal
` (3 preceding siblings ...)
2024-03-07 12:26 ` [PATCH nft 4/5] parser_json: defer command allocation to nft_cmd_expand Florian Westphal
@ 2024-03-07 12:26 ` Florian Westphal
4 siblings, 0 replies; 11+ messages in thread
From: Florian Westphal @ 2024-03-07 12:26 UTC (permalink / raw)
To: netfilter-devel; +Cc: phil, Florian Westphal
The previous patch makes json input build transactions in
the correct order so these dumps now work as expected.
Signed-off-by: Florian Westphal <fw@strlen.de>
---
.../dumps/0011endless_jump_loop_1.json-nft | 75 +++
.../testcases/maps/dumps/0011vmap_0.json-nft | 145 +++++
.../dumps/map_catchall_double_free_2.json-nft | 46 ++
.../maps/dumps/vmap_mark_bitwise_0.json-nft | 158 +++++
.../maps/dumps/vmap_timeout.json-nft | 229 ++++++++
.../dumps/0008create_verdict_map_0.json-nft | 78 +++
.../sets/dumps/sets_with_ifnames.json-nft | 551 ++++++++++++++++++
7 files changed, 1282 insertions(+)
create mode 100644 tests/shell/testcases/chains/dumps/0011endless_jump_loop_1.json-nft
create mode 100644 tests/shell/testcases/maps/dumps/0011vmap_0.json-nft
create mode 100644 tests/shell/testcases/maps/dumps/map_catchall_double_free_2.json-nft
create mode 100644 tests/shell/testcases/maps/dumps/vmap_mark_bitwise_0.json-nft
create mode 100644 tests/shell/testcases/maps/dumps/vmap_timeout.json-nft
create mode 100644 tests/shell/testcases/sets/dumps/0008create_verdict_map_0.json-nft
create mode 100644 tests/shell/testcases/sets/dumps/sets_with_ifnames.json-nft
diff --git a/tests/shell/testcases/chains/dumps/0011endless_jump_loop_1.json-nft b/tests/shell/testcases/chains/dumps/0011endless_jump_loop_1.json-nft
new file mode 100644
index 000000000000..2521e1094c3c
--- /dev/null
+++ b/tests/shell/testcases/chains/dumps/0011endless_jump_loop_1.json-nft
@@ -0,0 +1,75 @@
+{
+ "nftables": [
+ {
+ "metainfo": {
+ "version": "VERSION",
+ "release_name": "RELEASE_NAME",
+ "json_schema_version": 1
+ }
+ },
+ {
+ "table": {
+ "family": "ip",
+ "name": "t",
+ "handle": 0
+ }
+ },
+ {
+ "map": {
+ "family": "ip",
+ "name": "m",
+ "table": "t",
+ "type": "inet_service",
+ "handle": 0,
+ "map": "verdict",
+ "elem": [
+ [
+ 2,
+ {
+ "jump": {
+ "target": "c2"
+ }
+ }
+ ]
+ ]
+ }
+ },
+ {
+ "chain": {
+ "family": "ip",
+ "table": "t",
+ "name": "c1",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "ip",
+ "table": "t",
+ "name": "c2",
+ "handle": 0
+ }
+ },
+ {
+ "rule": {
+ "family": "ip",
+ "table": "t",
+ "chain": "c1",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "payload": {
+ "protocol": "tcp",
+ "field": "dport"
+ }
+ },
+ "data": "@m"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/tests/shell/testcases/maps/dumps/0011vmap_0.json-nft b/tests/shell/testcases/maps/dumps/0011vmap_0.json-nft
new file mode 100644
index 000000000000..76d9aeb11b22
--- /dev/null
+++ b/tests/shell/testcases/maps/dumps/0011vmap_0.json-nft
@@ -0,0 +1,145 @@
+{
+ "nftables": [
+ {
+ "metainfo": {
+ "version": "VERSION",
+ "release_name": "RELEASE_NAME",
+ "json_schema_version": 1
+ }
+ },
+ {
+ "table": {
+ "family": "inet",
+ "name": "filter",
+ "handle": 0
+ }
+ },
+ {
+ "map": {
+ "family": "inet",
+ "name": "portmap",
+ "table": "filter",
+ "type": "inet_service",
+ "handle": 0,
+ "map": "verdict",
+ "elem": [
+ [
+ {
+ "elem": {
+ "val": 22,
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ },
+ {
+ "jump": {
+ "target": "ssh_input"
+ }
+ }
+ ],
+ [
+ {
+ "elem": {
+ "val": "*",
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ },
+ {
+ "drop": null
+ }
+ ]
+ ],
+ "stmt": [
+ {
+ "counter": null
+ }
+ ]
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "filter",
+ "name": "ssh_input",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "filter",
+ "name": "wan_input",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "filter",
+ "name": "prerouting",
+ "handle": 0,
+ "type": "filter",
+ "hook": "prerouting",
+ "prio": -300,
+ "policy": "accept"
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "filter",
+ "chain": "wan_input",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "payload": {
+ "protocol": "tcp",
+ "field": "dport"
+ }
+ },
+ "data": "@portmap"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "filter",
+ "chain": "prerouting",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "meta": {
+ "key": "iif"
+ }
+ },
+ "data": {
+ "set": [
+ [
+ "lo",
+ {
+ "jump": {
+ "target": "wan_input"
+ }
+ }
+ ]
+ ]
+ }
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/tests/shell/testcases/maps/dumps/map_catchall_double_free_2.json-nft b/tests/shell/testcases/maps/dumps/map_catchall_double_free_2.json-nft
new file mode 100644
index 000000000000..0a123b700dd6
--- /dev/null
+++ b/tests/shell/testcases/maps/dumps/map_catchall_double_free_2.json-nft
@@ -0,0 +1,46 @@
+{
+ "nftables": [
+ {
+ "metainfo": {
+ "version": "VERSION",
+ "release_name": "RELEASE_NAME",
+ "json_schema_version": 1
+ }
+ },
+ {
+ "table": {
+ "family": "ip",
+ "name": "test",
+ "handle": 0
+ }
+ },
+ {
+ "map": {
+ "family": "ip",
+ "name": "testmap",
+ "table": "test",
+ "type": "ipv4_addr",
+ "handle": 0,
+ "map": "verdict",
+ "elem": [
+ [
+ "*",
+ {
+ "jump": {
+ "target": "testchain"
+ }
+ }
+ ]
+ ]
+ }
+ },
+ {
+ "chain": {
+ "family": "ip",
+ "table": "test",
+ "name": "testchain",
+ "handle": 0
+ }
+ }
+ ]
+}
diff --git a/tests/shell/testcases/maps/dumps/vmap_mark_bitwise_0.json-nft b/tests/shell/testcases/maps/dumps/vmap_mark_bitwise_0.json-nft
new file mode 100644
index 000000000000..df9e597b20b6
--- /dev/null
+++ b/tests/shell/testcases/maps/dumps/vmap_mark_bitwise_0.json-nft
@@ -0,0 +1,158 @@
+{
+ "nftables": [
+ {
+ "metainfo": {
+ "version": "VERSION",
+ "release_name": "RELEASE_NAME",
+ "json_schema_version": 1
+ }
+ },
+ {
+ "table": {
+ "family": "ip",
+ "name": "x",
+ "handle": 0
+ }
+ },
+ {
+ "counter": {
+ "family": "ip",
+ "name": "c_o0_0",
+ "table": "x",
+ "handle": 0,
+ "packets": 0,
+ "bytes": 0
+ }
+ },
+ {
+ "map": {
+ "family": "ip",
+ "name": "sctm_o0",
+ "table": "x",
+ "type": "mark",
+ "handle": 0,
+ "map": "verdict",
+ "elem": [
+ [
+ 0,
+ {
+ "jump": {
+ "target": "sctm_o0_0"
+ }
+ }
+ ],
+ [
+ 1,
+ {
+ "jump": {
+ "target": "sctm_o0_1"
+ }
+ }
+ ]
+ ]
+ }
+ },
+ {
+ "map": {
+ "family": "ip",
+ "name": "sctm_o1",
+ "table": "x",
+ "type": "mark",
+ "handle": 0,
+ "map": "counter",
+ "elem": [
+ [
+ 0,
+ "c_o0_0"
+ ]
+ ]
+ }
+ },
+ {
+ "chain": {
+ "family": "ip",
+ "table": "x",
+ "name": "sctm_o0_0",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "ip",
+ "table": "x",
+ "name": "sctm_o0_1",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "ip",
+ "table": "x",
+ "name": "SET_ctmark_RPLYroute",
+ "handle": 0
+ }
+ },
+ {
+ "rule": {
+ "family": "ip",
+ "table": "x",
+ "chain": "SET_ctmark_RPLYroute",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "&": [
+ {
+ ">>": [
+ {
+ "meta": {
+ "key": "mark"
+ }
+ },
+ 8
+ ]
+ },
+ 15
+ ]
+ },
+ "data": "@sctm_o0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "ip",
+ "table": "x",
+ "chain": "SET_ctmark_RPLYroute",
+ "handle": 0,
+ "expr": [
+ {
+ "counter": {
+ "map": {
+ "key": {
+ "&": [
+ {
+ ">>": [
+ {
+ "meta": {
+ "key": "mark"
+ }
+ },
+ 8
+ ]
+ },
+ 15
+ ]
+ },
+ "data": "@sctm_o1"
+ }
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/tests/shell/testcases/maps/dumps/vmap_timeout.json-nft b/tests/shell/testcases/maps/dumps/vmap_timeout.json-nft
new file mode 100644
index 000000000000..ec5dce577d6c
--- /dev/null
+++ b/tests/shell/testcases/maps/dumps/vmap_timeout.json-nft
@@ -0,0 +1,229 @@
+{
+ "nftables": [
+ {
+ "metainfo": {
+ "version": "VERSION",
+ "release_name": "RELEASE_NAME",
+ "json_schema_version": 1
+ }
+ },
+ {
+ "table": {
+ "family": "inet",
+ "name": "filter",
+ "handle": 0
+ }
+ },
+ {
+ "map": {
+ "family": "inet",
+ "name": "portmap",
+ "table": "filter",
+ "type": "inet_service",
+ "handle": 0,
+ "map": "verdict",
+ "flags": [
+ "timeout"
+ ],
+ "gc-interval": 10,
+ "elem": [
+ [
+ 22,
+ {
+ "jump": {
+ "target": "ssh_input"
+ }
+ }
+ ]
+ ]
+ }
+ },
+ {
+ "map": {
+ "family": "inet",
+ "name": "portaddrmap",
+ "table": "filter",
+ "type": [
+ "ipv4_addr",
+ "inet_service"
+ ],
+ "handle": 0,
+ "map": "verdict",
+ "flags": [
+ "timeout"
+ ],
+ "gc-interval": 10,
+ "elem": [
+ [
+ {
+ "concat": [
+ "1.2.3.4",
+ 22
+ ]
+ },
+ {
+ "jump": {
+ "target": "ssh_input"
+ }
+ }
+ ]
+ ]
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "filter",
+ "name": "ssh_input",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "filter",
+ "name": "log_and_drop",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "filter",
+ "name": "other_input",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "filter",
+ "name": "wan_input",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "filter",
+ "name": "prerouting",
+ "handle": 0,
+ "type": "filter",
+ "hook": "prerouting",
+ "prio": -300,
+ "policy": "accept"
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "filter",
+ "chain": "log_and_drop",
+ "handle": 0,
+ "expr": [
+ {
+ "drop": null
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "filter",
+ "chain": "other_input",
+ "handle": 0,
+ "expr": [
+ {
+ "goto": {
+ "target": "log_and_drop"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "filter",
+ "chain": "wan_input",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "concat": [
+ {
+ "payload": {
+ "protocol": "ip",
+ "field": "daddr"
+ }
+ },
+ {
+ "payload": {
+ "protocol": "tcp",
+ "field": "dport"
+ }
+ }
+ ]
+ },
+ "data": "@portaddrmap"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "filter",
+ "chain": "wan_input",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "payload": {
+ "protocol": "tcp",
+ "field": "dport"
+ }
+ },
+ "data": "@portmap"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "filter",
+ "chain": "prerouting",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "meta": {
+ "key": "iif"
+ }
+ },
+ "data": {
+ "set": [
+ [
+ "lo",
+ {
+ "jump": {
+ "target": "wan_input"
+ }
+ }
+ ]
+ ]
+ }
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/tests/shell/testcases/sets/dumps/0008create_verdict_map_0.json-nft b/tests/shell/testcases/sets/dumps/0008create_verdict_map_0.json-nft
new file mode 100644
index 000000000000..69c7e2df5a54
--- /dev/null
+++ b/tests/shell/testcases/sets/dumps/0008create_verdict_map_0.json-nft
@@ -0,0 +1,78 @@
+{
+ "nftables": [
+ {
+ "metainfo": {
+ "version": "VERSION",
+ "release_name": "RELEASE_NAME",
+ "json_schema_version": 1
+ }
+ },
+ {
+ "table": {
+ "family": "ip",
+ "name": "t",
+ "handle": 0
+ }
+ },
+ {
+ "map": {
+ "family": "ip",
+ "name": "sourcemap",
+ "table": "t",
+ "type": "ipv4_addr",
+ "handle": 0,
+ "map": "verdict",
+ "elem": [
+ [
+ "100.123.10.2",
+ {
+ "jump": {
+ "target": "c"
+ }
+ }
+ ]
+ ]
+ }
+ },
+ {
+ "chain": {
+ "family": "ip",
+ "table": "t",
+ "name": "postrouting",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "ip",
+ "table": "t",
+ "name": "c",
+ "handle": 0
+ }
+ },
+ {
+ "rule": {
+ "family": "ip",
+ "table": "t",
+ "chain": "postrouting",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "payload": {
+ "protocol": "ip",
+ "field": "saddr"
+ }
+ },
+ "data": "@sourcemap"
+ }
+ },
+ {
+ "accept": null
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/tests/shell/testcases/sets/dumps/sets_with_ifnames.json-nft b/tests/shell/testcases/sets/dumps/sets_with_ifnames.json-nft
new file mode 100644
index 000000000000..10e69dcac199
--- /dev/null
+++ b/tests/shell/testcases/sets/dumps/sets_with_ifnames.json-nft
@@ -0,0 +1,551 @@
+{
+ "nftables": [
+ {
+ "metainfo": {
+ "version": "VERSION",
+ "release_name": "RELEASE_NAME",
+ "json_schema_version": 1
+ }
+ },
+ {
+ "table": {
+ "family": "inet",
+ "name": "testifsets",
+ "handle": 0
+ }
+ },
+ {
+ "set": {
+ "family": "inet",
+ "name": "simple",
+ "table": "testifsets",
+ "type": "ifname",
+ "handle": 0,
+ "elem": [
+ "abcdef0",
+ "abcdef1",
+ "othername"
+ ]
+ }
+ },
+ {
+ "set": {
+ "family": "inet",
+ "name": "simple_wild",
+ "table": "testifsets",
+ "type": "ifname",
+ "handle": 0,
+ "flags": [
+ "interval"
+ ],
+ "elem": [
+ "abcdef*",
+ "othername",
+ "ppp0"
+ ]
+ }
+ },
+ {
+ "set": {
+ "family": "inet",
+ "name": "concat",
+ "table": "testifsets",
+ "type": [
+ "ipv4_addr",
+ "ifname"
+ ],
+ "handle": 0,
+ "elem": [
+ {
+ "concat": [
+ "10.1.2.2",
+ "abcdef0"
+ ]
+ },
+ {
+ "concat": [
+ "10.1.2.2",
+ "abcdef1"
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "set": {
+ "family": "inet",
+ "name": "concat_wild",
+ "table": "testifsets",
+ "type": [
+ "ipv4_addr",
+ "ifname"
+ ],
+ "handle": 0,
+ "flags": [
+ "interval"
+ ],
+ "elem": [
+ {
+ "concat": [
+ "10.1.2.2",
+ "abcdef*"
+ ]
+ },
+ {
+ "concat": [
+ "10.1.2.1",
+ "bar"
+ ]
+ },
+ {
+ "concat": [
+ {
+ "prefix": {
+ "addr": "1.1.2.0",
+ "len": 24
+ }
+ },
+ "abcdef0"
+ ]
+ },
+ {
+ "concat": [
+ {
+ "prefix": {
+ "addr": "12.2.2.0",
+ "len": 24
+ }
+ },
+ "abcdef*"
+ ]
+ }
+ ]
+ }
+ },
+ {
+ "map": {
+ "family": "inet",
+ "name": "map_wild",
+ "table": "testifsets",
+ "type": "ifname",
+ "handle": 0,
+ "map": "verdict",
+ "flags": [
+ "interval"
+ ],
+ "elem": [
+ [
+ "abcdef*",
+ {
+ "jump": {
+ "target": "do_nothing"
+ }
+ }
+ ],
+ [
+ "eth0",
+ {
+ "jump": {
+ "target": "do_nothing"
+ }
+ }
+ ]
+ ]
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "testifsets",
+ "name": "v4icmp",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "testifsets",
+ "name": "v4icmpc",
+ "handle": 0
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "testifsets",
+ "name": "input",
+ "handle": 0,
+ "type": "filter",
+ "hook": "input",
+ "prio": 0,
+ "policy": "accept"
+ }
+ },
+ {
+ "chain": {
+ "family": "inet",
+ "table": "testifsets",
+ "name": "do_nothing",
+ "handle": 0
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmp",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "meta": {
+ "key": "iifname"
+ }
+ },
+ "right": "@simple"
+ }
+ },
+ {
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmp",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "meta": {
+ "key": "iifname"
+ }
+ },
+ "right": "@simple_wild"
+ }
+ },
+ {
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmp",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "meta": {
+ "key": "iifname"
+ }
+ },
+ "right": {
+ "set": [
+ "eth0",
+ "abcdef0"
+ ]
+ }
+ }
+ },
+ {
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmp",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "meta": {
+ "key": "iifname"
+ }
+ },
+ "right": {
+ "set": [
+ "abcdef*",
+ "eth0"
+ ]
+ }
+ }
+ },
+ {
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmp",
+ "handle": 0,
+ "expr": [
+ {
+ "vmap": {
+ "key": {
+ "meta": {
+ "key": "iifname"
+ }
+ },
+ "data": "@map_wild"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmpc",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "concat": [
+ {
+ "payload": {
+ "protocol": "ip",
+ "field": "saddr"
+ }
+ },
+ {
+ "meta": {
+ "key": "iifname"
+ }
+ }
+ ]
+ },
+ "right": "@concat"
+ }
+ },
+ {
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmpc",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "concat": [
+ {
+ "payload": {
+ "protocol": "ip",
+ "field": "saddr"
+ }
+ },
+ {
+ "meta": {
+ "key": "iifname"
+ }
+ }
+ ]
+ },
+ "right": "@concat_wild"
+ }
+ },
+ {
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmpc",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "concat": [
+ {
+ "payload": {
+ "protocol": "ip",
+ "field": "saddr"
+ }
+ },
+ {
+ "meta": {
+ "key": "iifname"
+ }
+ }
+ ]
+ },
+ "right": {
+ "set": [
+ {
+ "concat": [
+ "10.1.2.2",
+ "abcdef0"
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "v4icmpc",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "concat": [
+ {
+ "payload": {
+ "protocol": "ip",
+ "field": "saddr"
+ }
+ },
+ {
+ "meta": {
+ "key": "iifname"
+ }
+ }
+ ]
+ },
+ "right": {
+ "set": [
+ {
+ "concat": [
+ "10.1.2.2",
+ "abcdef*"
+ ]
+ }
+ ]
+ }
+ }
+ },
+ {
+ "counter": {
+ "packets": 0,
+ "bytes": 0
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "input",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "payload": {
+ "protocol": "ip",
+ "field": "protocol"
+ }
+ },
+ "right": "icmp"
+ }
+ },
+ {
+ "jump": {
+ "target": "v4icmp"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "rule": {
+ "family": "inet",
+ "table": "testifsets",
+ "chain": "input",
+ "handle": 0,
+ "expr": [
+ {
+ "match": {
+ "op": "==",
+ "left": {
+ "payload": {
+ "protocol": "ip",
+ "field": "protocol"
+ }
+ },
+ "right": "icmp"
+ }
+ },
+ {
+ "goto": {
+ "target": "v4icmpc"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
--
2.43.0