From: Bruce Richardson <bruce.richardson@intel.com>
To: Sushmita Hoskeri <sushmita.hoskeri@intel.com>
Cc: <dev@dpdk.org>, <aman.deep.singh@intel.com>,
Atul Patel <atul.patel@intel.com>
Subject: Re: [PATCH] net/cpfl: add LEM block support in CPFL PMD
Date: Wed, 8 Apr 2026 16:20:42 +0100 [thread overview]
Message-ID: <adZySiu2t-9IopUt@bricha3-mobl1.ger.corp.intel.com> (raw)
In-Reply-To: <20260217012153.36545-1-sushmita.hoskeri@intel.com>
On Tue, Feb 17, 2026 at 06:51:53AM +0530, Sushmita Hoskeri wrote:
> Added APIs to enable support for LEM block in CPFL PMD
>
It would be good to explain the LEM acronym in the commit message if you
are using it in the explanation.
> Signed-off-by: Sushmita Hoskeri <sushmita.hoskeri@intel.com>
> Signed-off-by: Atul Patel <atul.patel@intel.com>
> ---
Other comments inline below.
Thanks,
/Bruce
> drivers/net/intel/cpfl/cpfl_flow_engine_fxp.c | 11 +++
> drivers/net/intel/cpfl/cpfl_flow_parser.c | 87 +++++++++++++++++++
> drivers/net/intel/cpfl/cpfl_flow_parser.h | 18 ++++
> drivers/net/intel/cpfl/cpfl_fxp_rule.c | 11 ++-
> drivers/net/intel/cpfl/cpfl_fxp_rule.h | 10 +++
> drivers/net/intel/cpfl/cpfl_rules.c | 36 ++++++++
> drivers/net/intel/cpfl/cpfl_rules.h | 46 +++++++++-
> 7 files changed, 216 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/intel/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/intel/cpfl/cpfl_flow_engine_fxp.c
> index 361827cb10..20a047f57f 100644
> --- a/drivers/net/intel/cpfl/cpfl_flow_engine_fxp.c
> +++ b/drivers/net/intel/cpfl/cpfl_flow_engine_fxp.c
> @@ -41,6 +41,7 @@
> #define CPFL_PORT_NUM_DEF 0
> #define CPFL_RESP_REQ_DEF 2
> #define CPFL_PIN_TO_CACHE_DEF 0
> +#define CPFL_PIN_TO_CACHE_CUST 1
> #define CPFL_CLEAR_MIRROR_1ST_STATE_DEF 0
> #define CPFL_FIXED_FETCH_DEF 0
> #define CPFL_PTI_DEF 0
> @@ -180,6 +181,15 @@ cpfl_fxp_parse_pattern(const struct cpfl_flow_pr_action *pr_action,
> memcpy(rinfo->sem.key, pr_action->sem.cpfl_flow_pr_fv, rinfo->sem.key_byte_len);
> rinfo->sem.pin_to_cache = CPFL_PIN_TO_CACHE_DEF;
> rinfo->sem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
> + } else if (pr_action->type == CPFL_JS_PR_ACTION_TYPE_LEM) {
> + struct cpfl_rule_info *rinfo = &rim->rules[i];
> +
> + rinfo->type = CPFL_RULE_TYPE_LEM;
> + rinfo->lem.prof_id = pr_action->lem.prof;
> + rinfo->lem.key_byte_len = pr_action->lem.keysize;
> + memcpy(rinfo->lem.key, pr_action->lem.cpfl_flow_pr_fv, rinfo->lem.key_byte_len);
> + rinfo->lem.pin_to_cache = CPFL_PIN_TO_CACHE_CUST;
> + rinfo->lem.fixed_fetch = CPFL_FIXED_FETCH_DEF;
> } else {
> PMD_DRV_LOG(ERR, "Invalid pattern item.");
> return false;
> @@ -427,6 +437,7 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
> switch (action_type) {
> case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
> case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
> + case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
This change doesn't look relevant to the LEM block, but rather a globally
applicable one across all blocks, right? Consider adding it in a separate
patch.
> case RTE_FLOW_ACTION_TYPE_PROG:
> return true;
> default:
> diff --git a/drivers/net/intel/cpfl/cpfl_flow_parser.c b/drivers/net/intel/cpfl/cpfl_flow_parser.c
> index e7deb619ee..612f5083ac 100644
> --- a/drivers/net/intel/cpfl/cpfl_flow_parser.c
> +++ b/drivers/net/intel/cpfl/cpfl_flow_parser.c
> @@ -421,6 +421,56 @@ cpfl_flow_js_pattern_act_fv(json_t *ob_fvs, struct cpfl_flow_js_pr_action *js_ac
> return -EINVAL;
> }
>
> +static int
> +cpfl_flow_js_pattern_act_fv_lem(json_t *cjson_fv, struct cpfl_flow_js_pr_action *js_act)
> +{
> + int len, i, ret;
> +
> + len = json_array_size(cjson_fv);
> + js_act->lem.fv = rte_malloc(NULL, sizeof(struct cpfl_flow_js_fv) * len, 0);
> + if (!js_act->lem.fv) {
> + PMD_DRV_LOG(ERR, "Failed to alloc memory.");
> + return -ENOMEM;
> + }
> + js_act->lem.fv_size = len;
> + for (i = 0; i < len; i++) {
> + struct cpfl_flow_js_fv *js_fv;
> + json_t *object, *cjson_value;
> + const char *type;
> +
> + object = json_array_get(cjson_fv, i);
> + js_fv = &js_act->lem.fv[i];
> + ret = cpfl_json_t_to_uint16(object, "offset", &js_fv->offset);
> + if (ret < 0) {
> + PMD_DRV_LOG(ERR, "Can not parse 'offset'.");
> + return -EINVAL;
> + }
> + type = cpfl_json_t_to_string(object, "type");
> + if (!type) {
> + PMD_DRV_LOG(ERR, "Can not parse 'type'.");
> + return -EINVAL;
> + }
> + cjson_value = json_object_get(object, "value");
> + if (strcmp(type, "immediate") == 0) {
> + js_fv->type = CPFL_FV_TYPE_IMMEDIATE;
> + js_fv->immediate = json_integer_value(cjson_value);
> + } else if (strcmp(type, "metadata") == 0) {
> + js_fv->type = CPFL_FV_TYPE_METADATA;
> + cpfl_flow_js_pattern_act_fv_metadata(cjson_value, js_fv);
> + } else if (strcmp(type, "protocol") == 0) {
> + js_fv->type = CPFL_FV_TYPE_PROTOCOL;
> + cpfl_flow_js_pattern_act_fv_proto(cjson_value, js_fv);
> + } else {
> + PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
> + goto err;
> + }
> + }
> + return 0;
> +err:
> + rte_free(js_act->lem.fv);
> + return -EINVAL;
> +}
> +
> static int
> cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *js_act)
> {
> @@ -458,6 +508,25 @@ cpfl_flow_js_pattern_per_act(json_t *ob_per_act, struct cpfl_flow_js_pr_action *
> ret = cpfl_flow_js_pattern_act_fv(ob_fvs, js_act);
> if (ret < 0)
> return ret;
> + } else if (strcmp(type, "lem") == 0) {
> + js_act->type = CPFL_JS_PR_ACTION_TYPE_LEM;
> + json_t *cjson_fv, *ob_lem;
> +
> + ob_lem = json_object_get(ob_per_act, "data");
> + ret = cpfl_json_t_to_uint16(ob_lem, "profile", &js_act->lem.prof);
> + if (ret < 0) {
> + PMD_DRV_LOG(ERR, "Can not parse 'profile'.");
> + return -EINVAL;
> + }
> + ret = cpfl_json_t_to_uint16(ob_lem, "keysize", &js_act->lem.keysize);
> + if (ret < 0) {
> + PMD_DRV_LOG(ERR, "Can not parse 'keysize'.");
> + return -EINVAL;
> + }
> + cjson_fv = json_object_get(ob_lem, "fieldvectors");
> + ret = cpfl_flow_js_pattern_act_fv_lem(cjson_fv, js_act);
> + if (ret < 0)
> + return ret;
> } else {
> PMD_DRV_LOG(ERR, "Not support this type: %s.", type);
> return -EINVAL;
> @@ -984,6 +1053,8 @@ cpfl_parser_free_pr_action(struct cpfl_flow_js_pr_action *pr_act)
> {
> if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_SEM)
> rte_free(pr_act->sem.fv);
> + else if (pr_act->type == CPFL_JS_PR_ACTION_TYPE_LEM)
> + rte_free(pr_act->lem.fv);
> }
>
> int
> @@ -1149,6 +1220,19 @@ cpfl_parse_pr_actions(struct cpfl_itf *itf,
> ret = cpfl_parse_fieldvectors(itf, sem->fv, sem->fv_size,
> pr_action->sem.cpfl_flow_pr_fv, items);
> return ret;
> + } else if (attr->group % 10 == 4 && type == CPFL_JS_PR_ACTION_TYPE_LEM) {
> + struct cpfl_flow_js_pr_action_lem *lem = &pr_act->lem;
> +
> + pr_action->type = CPFL_JS_PR_ACTION_TYPE_LEM;
> + pr_action->lem.prof = lem->prof;
> + pr_action->lem.keysize = lem->keysize;
> + memset(pr_action->lem.cpfl_flow_pr_fv, 0,
> + sizeof(pr_action->lem.cpfl_flow_pr_fv));
> + ret = cpfl_parse_fieldvectors(itf, lem->fv, lem->fv_size,
> + pr_action->lem.cpfl_flow_pr_fv, items);
> + if (ret < 0)
> + return ret;
> + continue;
> } else if (attr->group > 4 || attr->group == 0) {
> return -EPERM;
> }
> @@ -1585,6 +1669,9 @@ cpfl_check_pattern_key_proto(struct cpfl_flow_js_pr_key_proto *protocols,
> return -EINVAL;
> }
> break;
> + case RTE_FLOW_ITEM_TYPE_VLAN:
> + j++;
> + break;
This change also doesn't seem directly related to the LEM block.
> default:
> PMD_DRV_LOG(ERR, "Not support this type: %d.", type);
> return -EPERM;
> diff --git a/drivers/net/intel/cpfl/cpfl_flow_parser.h b/drivers/net/intel/cpfl/cpfl_flow_parser.h
> index 23904e39f1..f865ae8282 100644
> --- a/drivers/net/intel/cpfl/cpfl_flow_parser.h
> +++ b/drivers/net/intel/cpfl/cpfl_flow_parser.h
> @@ -16,10 +16,12 @@
> #define CPFL_JS_PROG_CONTENT_FIELD_NUM_MAX 64
> #define CPFL_JS_PROG_CONSTANT_VALUE_NUM_MAX 8
> #define CPFL_JS_PROG_PARAM_NUM_MAX 10
> +#define CPFL_JS_LEM_FV_KEY_NUM_MAX 32
>
> /* Pattern Rules Storage */
> enum cpfl_flow_pr_action_type {
> CPFL_JS_PR_ACTION_TYPE_SEM,
> + CPFL_JS_PR_ACTION_TYPE_LEM,
> CPFL_JS_PR_ACTION_TYPE_UNKNOWN = -1,
> };
>
> @@ -85,11 +87,19 @@ struct cpfl_flow_js_pr_action_sem {
> int fv_size;
> };
>
> +struct cpfl_flow_js_pr_action_lem {
> + uint16_t prof;
> + uint16_t keysize;
> + struct cpfl_flow_js_fv *fv;
> + int fv_size;
> +};
> +
> /* define how to map current key to low level pipeline configuration */
> struct cpfl_flow_js_pr_action {
> enum cpfl_flow_pr_action_type type;
> union {
> struct cpfl_flow_js_pr_action_sem sem;
> + struct cpfl_flow_js_pr_action_lem lem;
> };
> };
>
> @@ -230,10 +240,18 @@ struct cpfl_flow_pr_action_sem {
> uint8_t cpfl_flow_pr_fv[CPFL_JS_SEM_FV_KEY_NUM_MAX];
> };
>
> +struct cpfl_flow_pr_action_lem {
> + uint16_t prof;
> + uint16_t keysize;
> + uint8_t cpfl_flow_pr_fv[CPFL_JS_LEM_FV_KEY_NUM_MAX];
> +};
> +
> +
> struct cpfl_flow_pr_action {
> enum cpfl_flow_pr_action_type type;
> union {
> struct cpfl_flow_pr_action_sem sem;
> + struct cpfl_flow_pr_action_lem lem;
> };
> };
>
> diff --git a/drivers/net/intel/cpfl/cpfl_fxp_rule.c b/drivers/net/intel/cpfl/cpfl_fxp_rule.c
> index 42553c9641..b4efbaa4ff 100644
> --- a/drivers/net/intel/cpfl/cpfl_fxp_rule.c
> +++ b/drivers/net/intel/cpfl/cpfl_fxp_rule.c
> @@ -25,7 +25,6 @@ cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, u16 num_q_msg,
> ret = -ENOMEM;
> goto err;
> }
> -
You probably should keep this blank line unless there is a good reason to
omit it. Restoring it drops this whole hunk from the diff, so I suspect this
is an accidental change.
> ret = cpfl_vport_ctlq_send(hw, cq, num_q_msg, q_msg);
> if (ret) {
> PMD_INIT_LOG(ERR, "cpfl_vport_ctlq_send() failed with error: 0x%4x", ret);
> @@ -192,6 +191,14 @@ cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
> rinfo->act_bytes, rinfo->act_byte_len,
> cfg_ctrl, blob);
> opc = add ? cpfl_ctlq_sem_add_rule : cpfl_ctlq_sem_del_rule;
> + } else if (rinfo->type == CPFL_RULE_TYPE_LEM) {
> + cfg_ctrl = CPFL_GET_MEV_LEM_RULE_CFG_CTRL(rinfo->lem.prof_id,
> + rinfo->lem.pin_to_cache,
> + rinfo->clear_mirror_1st_state);
> + cpfl_prep_lem_rule_blob(rinfo->lem.key, rinfo->lem.key_byte_len,
> + rinfo->act_bytes, rinfo->act_byte_len,
> + cfg_ctrl, blob);
> + opc = add ? cpfl_ctlq_lem_add_update_rule : cpfl_ctlq_lem_del_rule;
> } else {
> PMD_INIT_LOG(ERR, "not support %d rule.", rinfo->type);
> return -1;
> @@ -219,7 +226,7 @@ cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
> {
> int ret = 0;
>
> - if (rinfo->type == CPFL_RULE_TYPE_SEM) {
> + if (rinfo->type == CPFL_RULE_TYPE_SEM || rinfo->type == CPFL_RULE_TYPE_LEM) {
> if (cpfl_default_rule_pack(rinfo, dma, msg, add) < 0)
> ret = -1;
> } else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
> diff --git a/drivers/net/intel/cpfl/cpfl_fxp_rule.h b/drivers/net/intel/cpfl/cpfl_fxp_rule.h
> index 94eab6808c..544f441025 100644
> --- a/drivers/net/intel/cpfl/cpfl_fxp_rule.h
> +++ b/drivers/net/intel/cpfl/cpfl_fxp_rule.h
> @@ -28,9 +28,18 @@ struct cpfl_mod_rule_info {
> uint8_t mod_obj_size;
> };
>
> +struct cpfl_lem_rule_info {
> + uint16_t prof_id;
> + uint8_t key[CPFL_MAX_KEY_LEN];
> + uint8_t key_byte_len;
> + uint8_t pin_to_cache;
> + uint8_t fixed_fetch;
> +};
> +
> enum cpfl_rule_type {
> CPFL_RULE_TYPE_NONE,
> CPFL_RULE_TYPE_SEM,
> + CPFL_RULE_TYPE_LEM,
> CPFL_RULE_TYPE_MOD
> };
>
> @@ -50,6 +59,7 @@ struct cpfl_rule_info {
> union {
> struct cpfl_mod_rule_info mod;
> struct cpfl_sem_rule_info sem;
> + struct cpfl_lem_rule_info lem;
> };
> };
>
> diff --git a/drivers/net/intel/cpfl/cpfl_rules.c b/drivers/net/intel/cpfl/cpfl_rules.c
> index 6c0e435b1d..ec636fdf4b 100644
> --- a/drivers/net/intel/cpfl/cpfl_rules.c
> +++ b/drivers/net/intel/cpfl/cpfl_rules.c
> @@ -18,6 +18,14 @@ cpfl_prep_rule_desc_common_ctx(struct cpfl_rule_cfg_data_common *cmn_cfg)
> case cpfl_ctlq_mod_query_rule:
> case cpfl_ctlq_mod_add_update_rule:
> /* fallthrough */
> + case cpfl_ctlq_lem_del_rule:
> + case cpfl_ctlq_lem_query_rule:
> + case cpfl_ctlq_lem_add_update_rule:
> + case cpfl_ctlq_lem_query_rule_hash_addr:
> + case cpfl_ctlq_lem_query_del_rule_hash_addr:
> + context |= SHIFT_VAL64(cmn_cfg->vsi_id,
> + MEV_RULE_VSI_ID);
> + /* fallthrough */
> case cpfl_ctlq_sem_query_rule_hash_addr:
> case cpfl_ctlq_sem_query_del_rule_hash_addr:
> case cpfl_ctlq_sem_add_rule:
> @@ -66,6 +74,8 @@ cpfl_prep_rule_desc_ctx(struct cpfl_rule_cfg_data *cfg_data)
> break;
> case cpfl_ctlq_sem_query_rule_hash_addr:
> case cpfl_ctlq_sem_query_del_rule_hash_addr:
> + case cpfl_ctlq_lem_query_rule_hash_addr:
> + case cpfl_ctlq_lem_query_del_rule_hash_addr:
> context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_id,
> MEV_RULE_OBJ_ID);
> context |= SHIFT_VAL64(cfg_data->ext.query_del_addr.obj_addr,
> @@ -124,3 +134,29 @@ cpfl_prep_sem_rule_blob(const uint8_t *key,
> rule_blob->sem_rule.cfg_ctrl[0] = cfg_ctrl & 0xFF;
> rule_blob->sem_rule.cfg_ctrl[1] = (cfg_ctrl >> 8) & 0xFF;
> }
> +
> +/**
> + * cpfl_prep_lem_rule_blob - build LEM rule blob data from rule entry info
> + * note: call this function before sending rule to HW via fast path
> + */
> +void
> +cpfl_prep_lem_rule_blob(uint8_t *key,
> + uint8_t key_byte_len,
> + uint8_t *act_bytes,
> + uint8_t act_byte_len,
> + uint16_t cfg_ctrl,
> + union cpfl_rule_cfg_pkt_record *rule_blob)
> +{
> + uint32_t *act_dst = (uint32_t *)&rule_blob->lem_rule.actions;
> + uint32_t *act_src = (uint32_t *)act_bytes;
> + uint32_t i;
> +
> + idpf_memset(rule_blob, 0, sizeof(*rule_blob), IDPF_DMA_MEM);
> + memcpy(rule_blob->lem_rule.key, key, key_byte_len);
> +
> + for (i = 0; i < act_byte_len / sizeof(uint32_t); i++)
> + *act_dst++ = CPU_TO_LE32(*act_src++);
> +
> + rule_blob->lem_rule.cfg_ctrl[0] = cfg_ctrl & 0xFF;
> + rule_blob->lem_rule.cfg_ctrl[1] = (cfg_ctrl >> 8) & 0xFF;
> +}
> diff --git a/drivers/net/intel/cpfl/cpfl_rules.h b/drivers/net/intel/cpfl/cpfl_rules.h
> index 10569b1fdc..2b65c7ecc8 100644
> --- a/drivers/net/intel/cpfl/cpfl_rules.h
> +++ b/drivers/net/intel/cpfl/cpfl_rules.h
> @@ -46,6 +46,12 @@ enum cpfl_ctlq_rule_cfg_opc {
> cpfl_ctlq_sem_query_rule_hash_addr = 0x1307,
> cpfl_ctlq_sem_query_del_rule_hash_addr = 0x1308,
>
> + cpfl_ctlq_lem_add_update_rule = 0x1343,
> + cpfl_ctlq_lem_del_rule = 0x1345,
> + cpfl_ctlq_lem_query_rule = 0x1346,
> + cpfl_ctlq_lem_query_rule_hash_addr = 0x1347,
> + cpfl_ctlq_lem_query_del_rule_hash_addr = 0x1348,
> +
> cpfl_ctlq_mod_add_update_rule = 0x1360,
> cpfl_ctlq_mod_query_rule = 0x1361,
> };
> @@ -188,11 +194,43 @@ struct cpfl_sem_rule_cfg_pkt {
> uint8_t padding[46];
> };
>
> +/**
> + * struct cpfl_lem_rule_cfg_pkt - Describes rule information for LEM
> + * note: The key may be in mixed big/little endian format, the rest of members
> + * are in little endian
> + */
> +struct cpfl_lem_rule_cfg_pkt {
> +#define MEV_LEM_RULE_KEY_SIZE 128
> + uint8_t key[MEV_LEM_RULE_KEY_SIZE];
> +
> +#define MEV_LEM_RULE_ACT_SIZE 48
> + uint8_t actions[MEV_LEM_RULE_ACT_SIZE];
> + /* Bit(s):
> + * 10:0 : PROFILE_ID
> + * 12:11: Reserved
> + * 13 : pin the LEM key content into the cache
> + * 14 : if set, clear mirror first state for first index in actions
> + * 15 : Reserved.
> + */
> + uint8_t cfg_ctrl[2];
> + /* Bit(s):
> + * 0: valid
> + * 15:1: Hints
> + * 26:16: PROFILE_ID, the profile associated with the entry
> + * 31:27: PF
> + * 55:32: FLOW ID (assigned by HW)
> + * 63:56: EPOCH
> + */
> + uint8_t ctrl_word[8];
> + uint8_t padding[70];
> +};
> +
> /**
> * union cpfl_rule_cfg_pkt_record - Describes rule data blob
> */
> union cpfl_rule_cfg_pkt_record {
> struct cpfl_sem_rule_cfg_pkt sem_rule;
> + struct cpfl_lem_rule_cfg_pkt lem_rule;
> uint8_t pkt_data[256];
> uint8_t mod_blob[256];
> };
> @@ -313,5 +351,11 @@ cpfl_prep_sem_rule_blob(const uint8_t *key,
> uint8_t act_byte_len,
> uint16_t cfg_ctrl,
> union cpfl_rule_cfg_pkt_record *rule_blob);
> -
> +void
> +cpfl_prep_lem_rule_blob(uint8_t *key,
> + uint8_t key_byte_len,
> + uint8_t *act_bytes,
> + uint8_t act_byte_len,
> + uint16_t cfg_ctrl,
> + union cpfl_rule_cfg_pkt_record *rule_blob);
> #endif /* _CPFL_RULES_API_H_ */
> --
> 2.34.1
>
next prev parent reply other threads:[~2026-04-08 15:20 UTC|newest]
Thread overview: 4+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-17 1:21 [PATCH] net/cpfl: add LEM block support in CPFL PMD Sushmita Hoskeri
2026-04-08 15:20 ` Bruce Richardson [this message]
2026-04-09 1:51 ` [PATCH v2] " Sushmita Hoskeri
2026-04-09 9:48 ` Bruce Richardson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=adZySiu2t-9IopUt@bricha3-mobl1.ger.corp.intel.com \
--to=bruce.richardson@intel.com \
--cc=aman.deep.singh@intel.com \
--cc=atul.patel@intel.com \
--cc=dev@dpdk.org \
--cc=sushmita.hoskeri@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox