From: John Fastabend <john.fastabend@gmail.com>
To: tgraf@suug.ch, simon.horman@netronome.com, sfeldma@gmail.com
Cc: netdev@vger.kernel.org, jhs@mojatatu.com, davem@davemloft.net,
gerlitz.or@gmail.com, andy@greyhouse.net, ast@plumgrid.com
Subject: [net-next PATCH v3 08/12] net: rocker: add group_id slices and drop explicit goto
Date: Tue, 20 Jan 2015 12:29:37 -0800 [thread overview]
Message-ID: <20150120202935.1741.85478.stgit@nitbit.x32> (raw)
In-Reply-To: <20150120202404.1741.8658.stgit@nitbit.x32>
This adds the group tables for l3_unicast, l2_rewrite and l2. In
addition to adding the tables we extend the metadata fields to
support three different group id lookups, one for each table, and
drop the more generic one previously being used.
Finally we can also drop the goto action as it is not used anymore.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
---
drivers/net/ethernet/rocker/rocker.c | 174 ++++++++++++++++++++++++
drivers/net/ethernet/rocker/rocker_pipeline.h | 180 ++++++++++++++++++++++---
2 files changed, 328 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 51290882..2be8f61 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4088,8 +4088,8 @@ static int rocker_flow_set_bridge(struct net_device *dev,
case ACTION_COPY_TO_CPU:
copy_to_cpu = true;
break;
- case ROCKER_ACTION_SET_GROUP_ID:
- group_id = arg->value_u32;
+ case ROCKER_ACTION_SET_L3_UNICAST_GID:
+ group_id = ROCKER_GROUP_L3_UNICAST(arg->value_u32);
break;
default:
return -EINVAL;
@@ -4188,9 +4188,11 @@ static int rocker_flow_set_acl(struct net_device *dev,
group_id = ROCKER_GROUP_NONE;
for (i = 0; rule->actions && rule->actions[i].uid; i++) {
+ struct net_flow_action_arg *arg = &rule->actions[i].args[0];
+
switch (rule->actions[i].uid) {
- case ROCKER_ACTION_SET_GROUP_ID:
- group_id = rule->actions[i].args[0].value_u32;
+ case ROCKER_ACTION_SET_L3_UNICAST_GID:
+ group_id = ROCKER_GROUP_L3_UNICAST(arg->value_u32);
break;
default:
return -EINVAL;
@@ -4207,6 +4209,161 @@ static int rocker_flow_set_acl(struct net_device *dev,
group_id);
}
+static int rocker_flow_set_group_slice_l3_unicast(struct net_device *dev,
+ struct net_flow_rule *rule)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker_group_tbl_entry *entry;
+ int i, flags = 0;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ for (i = 0; rule->matches && rule->matches[i].instance; i++) {
+ struct net_flow_field_ref *r = &rule->matches[i];
+
+ switch (r->instance) {
+ case ROCKER_HEADER_INSTANCE_L3_UNICAST_GID:
+ entry->group_id = ROCKER_GROUP_L3_UNICAST(r->value_u32);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; rule->actions && rule->actions[i].uid; i++) {
+ struct net_flow_action_arg *arg = &rule->actions[i].args[0];
+
+ switch (rule->actions[i].uid) {
+ case ACTION_SET_ETH_SRC:
+ ether_addr_copy(entry->l3_unicast.eth_src,
+ (u8 *)&arg->value_u64);
+ break;
+ case ACTION_SET_ETH_DST:
+ ether_addr_copy(entry->l3_unicast.eth_dst,
+ (u8 *)&arg->value_u64);
+ break;
+ case ACTION_SET_VLAN_ID:
+ entry->l3_unicast.vlan_id = htons(arg->value_u16);
+ break;
+ case ACTION_CHECK_TTL_DROP:
+ entry->l3_unicast.ttl_check = true;
+ break;
+ case ROCKER_ACTION_SET_L2_REWRITE_GID:
+ entry->l3_unicast.group_id =
+ ROCKER_GROUP_L2_REWRITE(arg->value_u32);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_set_group_slice_l2_rewrite(struct net_device *dev,
+ struct net_flow_rule *rule)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker_group_tbl_entry *entry;
+ int i, flags = 0;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ for (i = 0; rule->matches && rule->matches[i].instance; i++) {
+ struct net_flow_field_ref *r = &rule->matches[i];
+
+ switch (r->instance) {
+ case ROCKER_HEADER_INSTANCE_L2_REWRITE_GID:
+ entry->group_id = ROCKER_GROUP_L2_REWRITE(r->value_u32);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; rule->actions && rule->actions[i].uid; i++) {
+ struct net_flow_action_arg *arg = &rule->actions[i].args[0];
+
+ switch (rule->actions[i].uid) {
+ case ACTION_SET_ETH_SRC:
+ ether_addr_copy(entry->l2_rewrite.eth_src,
+ (u8 *)&arg->value_u64);
+ break;
+ case ACTION_SET_ETH_DST:
+ ether_addr_copy(entry->l2_rewrite.eth_dst,
+ (u8 *)&arg->value_u64);
+ break;
+ case ACTION_SET_VLAN_ID:
+ entry->l2_rewrite.vlan_id = htons(arg->value_u16);
+ break;
+ case ROCKER_ACTION_SET_L2_GID:
+ entry->l2_rewrite.group_id =
+ ROCKER_GROUP_L2_INTERFACE(arg->value_u32,
+ rocker_port->lport);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
+static int rocker_flow_set_group_slice_l2(struct net_device *dev,
+ struct net_flow_rule *rule)
+{
+ struct rocker_port *rocker_port = netdev_priv(dev);
+ struct rocker_group_tbl_entry *entry;
+ int i, flags = 0;
+ u32 lport;
+
+ entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
+ if (!entry)
+ return -ENOMEM;
+
+ lport = rocker_port->lport;
+
+ /* Use the dev lport if the user did not specify an lport instance
+ * in the match list. We need to walk the match list once beforehand
+ * to extract any lport attribute.
+ */
+ for (i = 0; rule->matches && rule->matches[i].instance; i++) {
+ switch (rule->matches[i].instance) {
+ case ROCKER_HEADER_METADATA_IN_LPORT:
+ lport = rule->matches[i].value_u32;
+ }
+ }
+
+ for (i = 0; rule->matches && rule->matches[i].instance; i++) {
+ struct net_flow_field_ref *r = &rule->matches[i];
+
+ switch (r->instance) {
+ case ROCKER_HEADER_INSTANCE_L2_GID:
+ entry->group_id =
+ ROCKER_GROUP_L2_INTERFACE(r->value_u32, lport);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; rule->actions && rule->actions[i].uid; i++) {
+ switch (rule->actions[i].uid) {
+ case ACTION_POP_VLAN:
+ entry->l2_interface.pop_vlan = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return rocker_group_tbl_do(rocker_port, flags, entry);
+}
+
static int rocker_set_rules(struct net_device *dev,
struct net_flow_rule *rule)
{
@@ -4234,6 +4391,15 @@ static int rocker_set_rules(struct net_device *dev,
case ROCKER_FLOW_TABLE_ID_ACL_POLICY:
err = rocker_flow_set_acl(dev, rule);
break;
+ case ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L3_UNICAST:
+ err = rocker_flow_set_group_slice_l3_unicast(dev, rule);
+ break;
+ case ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2_REWRITE:
+ err = rocker_flow_set_group_slice_l2_rewrite(dev, rule);
+ break;
+ case ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2:
+ err = rocker_flow_set_group_slice_l2(dev, rule);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/rocker/rocker_pipeline.h b/drivers/net/ethernet/rocker/rocker_pipeline.h
index 7136380..6d1e2ee 100644
--- a/drivers/net/ethernet/rocker/rocker_pipeline.h
+++ b/drivers/net/ethernet/rocker/rocker_pipeline.h
@@ -22,19 +22,23 @@ enum rocker_header_ids {
enum rocker_header_metadata_fields {
ROCKER_HEADER_METADATA_UNSPEC,
ROCKER_HEADER_METADATA_IN_LPORT,
- ROCKER_HEADER_METADATA_GOTO_TBL,
- ROCKER_HEADER_METADATA_GROUP_ID,
+ ROCKER_HEADER_METADATA_L3_UNICAST_GID,
+ ROCKER_HEADER_METADATA_L2_REWRITE_GID,
+ ROCKER_HEADER_METADATA_L2_GID,
};
struct net_flow_field rocker_metadata_fields[] = {
{ .name = "in_lport",
.uid = ROCKER_HEADER_METADATA_IN_LPORT,
.bitwidth = 32,},
- { .name = "goto_tbl",
- .uid = ROCKER_HEADER_METADATA_GOTO_TBL,
- .bitwidth = 16,},
- { .name = "group_id",
- .uid = ROCKER_HEADER_METADATA_GROUP_ID,
+ { .name = "l3_unicast_group_id",
+ .uid = ROCKER_HEADER_METADATA_L3_UNICAST_GID,
+ .bitwidth = 32,},
+ { .name = "l2_rewrite_group_id",
+ .uid = ROCKER_HEADER_METADATA_L2_REWRITE_GID,
+ .bitwidth = 32,},
+ { .name = "l2_group_id",
+ .uid = ROCKER_HEADER_METADATA_L2_GID,
.bitwidth = 32,},
};
@@ -68,22 +72,39 @@ struct net_flow_action_arg rocker_set_group_id_args[] = {
enum rocker_action_ids {
ROCKER_ACTION_UNSPEC = ACTION_MAX_UID,
- ROCKER_ACTION_SET_GROUP_ID,
+ ROCKER_ACTION_SET_L3_UNICAST_GID,
+ ROCKER_ACTION_SET_L2_REWRITE_GID,
+ ROCKER_ACTION_SET_L2_GID,
+};
+
+struct net_flow_action rocker_set_l3_unicast_group_id = {
+ .name = "set_l3_unicast_group_id",
+ .uid = ROCKER_ACTION_SET_L3_UNICAST_GID,
+ .args = rocker_set_group_id_args,
+};
+
+struct net_flow_action rocker_set_l2_rewrite_group_id = {
+ .name = "set_l2_rewrite_group_id",
+ .uid = ROCKER_ACTION_SET_L2_REWRITE_GID,
+ .args = rocker_set_group_id_args,
};
-struct net_flow_action rocker_set_group_id = {
- .name = "set_group_id",
- .uid = ROCKER_ACTION_SET_GROUP_ID,
+struct net_flow_action rocker_set_l2_group_id = {
+ .name = "set_l2_group_id",
+ .uid = ROCKER_ACTION_SET_L2_GID,
.args = rocker_set_group_id_args,
};
struct net_flow_action *rocker_action_list[] = {
&net_flow_set_vlan_id,
&net_flow_copy_to_cpu,
- &rocker_set_group_id,
+ &rocker_set_l3_unicast_group_id,
+ &rocker_set_l2_rewrite_group_id,
+ &rocker_set_l2_group_id,
&net_flow_pop_vlan,
&net_flow_set_eth_src,
&net_flow_set_eth_dst,
+ &net_flow_check_ttl_drop,
NULL,
};
@@ -94,8 +115,9 @@ enum rocker_header_instance_ids {
ROCKER_HEADER_INSTANCE_VLAN_OUTER,
ROCKER_HEADER_INSTANCE_IPV4,
ROCKER_HEADER_INSTANCE_IN_LPORT,
- ROCKER_HEADER_INSTANCE_GOTO_TABLE,
- ROCKER_HEADER_INSTANCE_GROUP_ID,
+ ROCKER_HEADER_INSTANCE_L3_UNICAST_GID,
+ ROCKER_HEADER_INSTANCE_L2_REWRITE_GID,
+ ROCKER_HEADER_INSTANCE_L2_GID,
};
struct net_flow_jump_table rocker_parse_ethernet[] = {
@@ -183,9 +205,23 @@ struct net_flow_hdr_node rocker_in_lport_header_node = {
.jump = rocker_terminal_headers,
};
-struct net_flow_hdr_node rocker_group_id_header_node = {
- .name = "group_id",
- .uid = ROCKER_HEADER_INSTANCE_GROUP_ID,
+struct net_flow_hdr_node rocker_l2_group_id_header_node = {
+ .name = "l2_group_id",
+ .uid = ROCKER_HEADER_INSTANCE_L2_GID,
+ .hdrs = rocker_metadata_headers,
+ .jump = rocker_terminal_headers,
+};
+
+struct net_flow_hdr_node rocker_l2_rewrite_group_id_header_node = {
+ .name = "l2_rewrite_group_id",
+ .uid = ROCKER_HEADER_INSTANCE_L2_REWRITE_GID,
+ .hdrs = rocker_metadata_headers,
+ .jump = rocker_terminal_headers,
+};
+
+struct net_flow_hdr_node rocker_l3_unicast_group_id_header_node = {
+ .name = "l3_unicast_group_id",
+ .uid = ROCKER_HEADER_INSTANCE_L3_UNICAST_GID,
.hdrs = rocker_metadata_headers,
.jump = rocker_terminal_headers,
};
@@ -195,7 +231,9 @@ struct net_flow_hdr_node *rocker_header_nodes[] = {
&rocker_vlan_header_node,
&rocker_ipv4_header_node,
&rocker_in_lport_header_node,
- &rocker_group_id_header_node,
+ &rocker_l3_unicast_group_id_header_node,
+ &rocker_l2_rewrite_group_id_header_node,
+ &rocker_l2_group_id_header_node,
NULL,
};
@@ -296,13 +334,48 @@ struct net_flow_field_ref rocker_matches_acl[] = {
{ .instance = 0, .field = 0},
};
+struct net_flow_field_ref rocker_matches_l3_unicast_group_slice[2] = {
+ { .instance = ROCKER_HEADER_INSTANCE_L3_UNICAST_GID,
+ .header = ROCKER_HEADER_METADATA,
+ .field = ROCKER_HEADER_METADATA_L3_UNICAST_GID,
+ .mask_type = NFL_MASK_TYPE_EXACT},
+ { .instance = 0, .field = 0},
+};
+
+struct net_flow_field_ref rocker_matches_l2_rewrite_group_slice[2] = {
+ { .instance = ROCKER_HEADER_INSTANCE_L2_REWRITE_GID,
+ .header = ROCKER_HEADER_METADATA,
+ .field = ROCKER_HEADER_METADATA_L2_REWRITE_GID,
+ .mask_type = NFL_MASK_TYPE_EXACT},
+ { .instance = 0, .field = 0},
+};
+
+struct net_flow_field_ref rocker_matches_l2_group_slice[2] = {
+ { .instance = ROCKER_HEADER_INSTANCE_L2_GID,
+ .header = ROCKER_HEADER_METADATA,
+ .field = ROCKER_HEADER_METADATA_L2_GID,
+ .mask_type = NFL_MASK_TYPE_EXACT},
+ { .instance = 0, .field = 0},
+};
+
int rocker_actions_ig_port[] = {0};
int rocker_actions_vlan[] = {ACTION_SET_VLAN_ID, 0};
int rocker_actions_term_mac[] = {ACTION_COPY_TO_CPU, 0};
-int rocker_actions_ucast_routing[] = {ROCKER_ACTION_SET_GROUP_ID, 0};
-int rocker_actions_bridge[] = {ROCKER_ACTION_SET_GROUP_ID,
+int rocker_actions_ucast_routing[] = {ROCKER_ACTION_SET_L3_UNICAST_GID, 0};
+int rocker_actions_bridge[] = {ROCKER_ACTION_SET_L3_UNICAST_GID,
ACTION_COPY_TO_CPU, 0};
-int rocker_actions_acl[] = {ROCKER_ACTION_SET_GROUP_ID, 0};
+int rocker_actions_acl[] = {ROCKER_ACTION_SET_L3_UNICAST_GID, 0};
+int rocker_actions_group_slice_l3_unicast[] = {ACTION_SET_ETH_SRC,
+ ACTION_SET_ETH_DST,
+ ACTION_SET_VLAN_ID,
+ ROCKER_ACTION_SET_L2_REWRITE_GID,
+ ACTION_CHECK_TTL_DROP, 0};
+int rocker_actions_group_slice_l2_rewrite[] = {ACTION_SET_ETH_SRC,
+ ACTION_SET_ETH_DST,
+ ACTION_SET_VLAN_ID,
+ ROCKER_ACTION_SET_L2_GID,
+ 0};
+int rocker_actions_group_slice_l2[] = {ACTION_POP_VLAN, 0};
enum rocker_flow_table_id_space {
ROCKER_FLOW_TABLE_NULL,
@@ -313,6 +386,9 @@ enum rocker_flow_table_id_space {
ROCKER_FLOW_TABLE_ID_MULTICAST_ROUTING,
ROCKER_FLOW_TABLE_ID_BRIDGING,
ROCKER_FLOW_TABLE_ID_ACL_POLICY,
+ ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L3_UNICAST,
+ ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2_REWRITE,
+ ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2,
};
struct net_flow_tbl rocker_ingress_port_table = {
@@ -375,6 +451,33 @@ struct net_flow_tbl rocker_acl_table = {
.cache = {0},
};
+struct net_flow_tbl rocker_group_slice_l3_unicast_table = {
+ .name = "group_slice_l3_unicast",
+ .uid = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L3_UNICAST,
+ .source = 1,
+ .size = -1,
+ .matches = rocker_matches_l3_unicast_group_slice,
+ .actions = rocker_actions_group_slice_l3_unicast,
+};
+
+struct net_flow_tbl rocker_group_slice_l2_rewrite_table = {
+ .name = "group_slice_l2_rewrite",
+ .uid = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2_REWRITE,
+ .source = 1,
+ .size = -1,
+ .matches = rocker_matches_l2_rewrite_group_slice,
+ .actions = rocker_actions_group_slice_l2_rewrite,
+};
+
+struct net_flow_tbl rocker_group_slice_l2_table = {
+ .name = "group_slice_l2",
+ .uid = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2,
+ .source = 1,
+ .size = -1,
+ .matches = rocker_matches_l2_group_slice,
+ .actions = rocker_actions_group_slice_l2,
+};
+
struct net_flow_tbl *rocker_table_list[] = {
&rocker_ingress_port_table,
&rocker_vlan_table,
@@ -382,6 +485,9 @@ struct net_flow_tbl *rocker_table_list[] = {
&rocker_ucast_routing_table,
&rocker_bridge_table,
&rocker_acl_table,
+ &rocker_group_slice_l3_unicast_table,
+ &rocker_group_slice_l2_rewrite_table,
+ &rocker_group_slice_l2_table,
NULL,
};
@@ -432,6 +538,7 @@ struct net_flow_tbl_node rocker_table_node_ucast_routing = {
.jump = rocker_table_node_ucast_routing_next};
struct net_flow_jump_table rocker_table_node_acl_next[] = {
+ { .field = {0}, .node = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L3_UNICAST},
{ .field = {0}, .node = 0},
};
@@ -439,6 +546,32 @@ struct net_flow_tbl_node rocker_table_node_acl = {
.uid = ROCKER_FLOW_TABLE_ID_ACL_POLICY,
.jump = rocker_table_node_acl_next};
+struct net_flow_jump_table rocker_table_node_group_l3_unicast_next[1] = {
+ { .field = {0}, .node = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2_REWRITE},
+};
+
+struct net_flow_tbl_node rocker_table_node_group_l3_unicast = {
+ .uid = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L3_UNICAST,
+ .jump = rocker_table_node_group_l3_unicast_next};
+
+struct net_flow_jump_table rocker_table_node_group_l2_rewrite_next[1] = {
+ { .field = {0}, .node = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2},
+};
+
+struct net_flow_tbl_node rocker_table_node_group_l2_rewrite = {
+ .uid = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2_REWRITE,
+ .jump = rocker_table_node_group_l2_rewrite_next};
+
+struct net_flow_jump_table rocker_table_node_group_l2_next[1] = {
+ { .field = {0}, .node = 0},
+};
+
+struct net_flow_tbl_node rocker_table_node_group_l2 = {
+ .uid = ROCKER_FLOW_TABLE_ID_GROUP_SLICE_L2,
+ .jump = rocker_table_node_group_l2_next};
+
+struct net_flow_tbl_node rocker_table_node_nil = {.uid = 0, .jump = NULL};
+
struct net_flow_tbl_node *rocker_table_nodes[] = {
&rocker_table_node_ingress_port,
&rocker_table_node_vlan,
@@ -446,6 +579,9 @@ struct net_flow_tbl_node *rocker_table_nodes[] = {
&rocker_table_node_ucast_routing,
&rocker_table_node_bridge,
&rocker_table_node_acl,
- NULL,
+ &rocker_table_node_group_l3_unicast,
+ &rocker_table_node_group_l2_rewrite,
+ &rocker_table_node_group_l2,
+ NULL
};
#endif /*_ROCKER_PIPELINE_H_*/
next prev parent reply other threads:[~2015-01-20 20:30 UTC|newest]
Thread overview: 66+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-01-20 20:26 [net-next PATCH v3 00/12] Flow API John Fastabend
2015-01-20 20:26 ` [net-next PATCH v3 01/12] net: flow_table: create interface for hw match/action tables John Fastabend
2015-01-22 4:37 ` Simon Horman
2015-01-20 20:27 ` [net-next PATCH v3 02/12] net: flow_table: add rule, delete rule John Fastabend
2015-01-20 20:27 ` [net-next PATCH v3 03/12] net: flow: implement flow cache for get routines John Fastabend
2015-01-20 20:27 ` [net-next PATCH v3 04/12] net: flow_table: create a set of common headers and actions John Fastabend
2015-01-20 20:59 ` John W. Linville
2015-01-20 22:10 ` John Fastabend
2015-01-20 20:28 ` [net-next PATCH v3 05/12] net: flow_table: add validation functions for rules John Fastabend
2015-01-20 20:28 ` [net-next PATCH v3 06/12] net: rocker: add pipeline model for rocker switch John Fastabend
2015-01-20 20:29 ` [net-next PATCH v3 07/12] net: rocker: add set rule ops John Fastabend
2015-01-20 20:29 ` John Fastabend [this message]
2015-01-20 20:30 ` [net-next PATCH v3 09/12] net: rocker: add multicast path to bridging John Fastabend
2015-01-20 20:30 ` [net-next PATCH v3 10/12] net: rocker: add cookie to group acls and use flow_id to set cookie John Fastabend
2015-01-20 20:31 ` [net-next PATCH v3 11/12] net: rocker: have flow api calls set cookie value John Fastabend
2015-01-20 20:31 ` [net-next PATCH v3 12/12] net: rocker: implement delete flow routine John Fastabend
2015-01-22 12:52 ` [net-next PATCH v3 00/12] Flow API Pablo Neira Ayuso
2015-01-22 13:37 ` Thomas Graf
2015-01-22 14:00 ` Pablo Neira Ayuso
2015-01-22 15:00 ` Jamal Hadi Salim
2015-01-22 15:13 ` Thomas Graf
2015-01-22 15:28 ` Jamal Hadi Salim
2015-01-22 15:37 ` Thomas Graf
2015-01-22 15:44 ` Jamal Hadi Salim
2015-01-23 10:10 ` Thomas Graf
2015-01-23 10:24 ` Jiri Pirko
2015-01-23 11:08 ` Thomas Graf
2015-01-23 11:39 ` Jiri Pirko
2015-01-23 12:28 ` Thomas Graf
2015-01-23 13:43 ` Jiri Pirko
2015-01-23 14:07 ` Thomas Graf
2015-01-23 15:25 ` Jiri Pirko
2015-01-23 15:43 ` John Fastabend
2015-01-23 15:56 ` Jiri Pirko
2015-01-23 15:49 ` Thomas Graf
2015-01-23 16:00 ` Jiri Pirko
2015-01-23 15:34 ` John Fastabend
2015-01-23 15:53 ` Jiri Pirko
2015-01-23 16:00 ` Thomas Graf
2015-01-23 16:08 ` John Fastabend
2015-01-23 16:16 ` Jiri Pirko
2015-01-24 13:04 ` Jamal Hadi Salim
2015-01-23 17:46 ` Thomas Graf
2015-01-23 19:59 ` John Fastabend
2015-01-23 23:16 ` Thomas Graf
2015-01-24 13:22 ` Jamal Hadi Salim
2015-01-24 13:34 ` Thomas Graf
2015-01-24 13:01 ` Jamal Hadi Salim
2015-01-26 8:26 ` Simon Horman
2015-01-26 12:26 ` Jamal Hadi Salim
2015-01-27 4:28 ` David Ahern
2015-01-27 4:58 ` Andy Gospodarek
2015-01-27 15:54 ` Jamal Hadi Salim
2015-01-24 12:36 ` Jamal Hadi Salim
2015-01-22 15:48 ` Jiri Pirko
2015-01-22 17:58 ` Thomas Graf
2015-01-22 16:49 ` Pablo Neira Ayuso
2015-01-22 17:10 ` John Fastabend
2015-01-22 17:44 ` Thomas Graf
2015-01-24 12:34 ` Jamal Hadi Salim
2015-01-24 13:48 ` Thomas Graf
2015-01-23 9:00 ` David Miller
2015-01-22 16:58 ` John Fastabend
2015-01-23 10:49 ` Thomas Graf
2015-01-23 16:42 ` John Fastabend
2015-01-24 12:29 ` Jamal Hadi Salim
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20150120202935.1741.85478.stgit@nitbit.x32 \
--to=john.fastabend@gmail.com \
--cc=andy@greyhouse.net \
--cc=ast@plumgrid.com \
--cc=davem@davemloft.net \
--cc=gerlitz.or@gmail.com \
--cc=jhs@mojatatu.com \
--cc=netdev@vger.kernel.org \
--cc=sfeldma@gmail.com \
--cc=simon.horman@netronome.com \
--cc=tgraf@suug.ch \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).