From: Boris Pismenny <borisp@nvidia.com>
To: <dsahern@gmail.com>, <kuba@kernel.org>, <davem@davemloft.net>,
<saeedm@nvidia.com>, <hch@lst.de>, <sagi@grimberg.me>,
<axboe@fb.com>, <kbusch@kernel.org>, <viro@zeniv.linux.org.uk>,
<edumazet@google.com>, <smalin@marvell.com>
Cc: <boris.pismenny@gmail.com>, <linux-nvme@lists.infradead.org>,
<netdev@vger.kernel.org>, <benishay@nvidia.com>,
<ogerlitz@nvidia.com>, <yorayz@nvidia.com>
Subject: [PATCH v5 net-next 32/36] net/mlx5e: NVMEoTCP DDGST TX BSF and PSV
Date: Thu, 22 Jul 2021 14:03:21 +0300
Message-ID: <20210722110325.371-33-borisp@nvidia.com>
In-Reply-To: <20210722110325.371-1-borisp@nvidia.com>
From: Yoray Zack <yorayz@nvidia.com>
Change the functions that build the NVMEoTCP static params and progress
params WQEs so they serve both the Tx and Rx paths: the caller now passes
the direction (is_rx), the producer counter (pc) and the SQ number (sqn),
and the builders pick the TIS/TIR number and opcode modifiers accordingly.
Signed-off-by: Yoray Zack <yorayz@nvidia.com>
---
.../mellanox/mlx5/core/en_accel/nvmeotcp.c | 130 ++++++++++++++----
.../mlx5/core/en_accel/nvmeotcp_utils.h | 4 +-
2 files changed, 108 insertions(+), 26 deletions(-)
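For reviewers, the call-pattern change can be summarized with the abbreviated
sketch below (drawn from the hunks in this patch; the surrounding queue/SQ
setup is elided, so treat it as illustration rather than compilable code):

    /* Rx path (icosq): is_rx = true, so the builders program the TIR,
     * use the Rx opcode modifiers and keep the queue's PDA/zero-copy setup.
     */
    build_nvmeotcp_static_params(queue, wqe, resync_seq, true, sq->pc,
                                 sq->sqn, queue->zerocopy, queue->crc_rx);
    build_nvmeotcp_progress_params(queue, wqe, seq, true, false, sq->pc, sq->sqn);

    /* Tx path (txqsq): is_rx = false, so the builders program the TIS,
     * use the Tx opcode modifiers, zero the PDA and enable DDGST offload
     * without zero-copy.
     */
    build_nvmeotcp_static_params(queue, wqe, 0, false, sq->pc, sq->sqn,
                                 false, true);
    build_nvmeotcp_progress_params(queue, wqe, seq, false, resync, sq->pc, sq->sqn);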
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
index 6023e1ae7be4..624d8a28dc21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
@@ -155,8 +155,11 @@ static int mlx5e_nvmeotcp_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
}
#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_TIR_PARAMS 0x2
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_TIS_PARAMS 0x1
#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_STATIC_PARAMS 0x2
#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_UMR 0x0
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIS_STATIC_PARAMS 0x1
+#define MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIS_PROGRESS_PARAMS 0x3
#define STATIC_PARAMS_DS_CNT \
DIV_ROUND_UP(MLX5E_NVMEOTCP_STATIC_PARAMS_WQE_SZ, MLX5_SEND_WQE_DS)
@@ -250,56 +253,75 @@ build_nvmeotcp_klm_umr(struct mlx5e_nvmeotcp_queue *queue,
static void
fill_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5_seg_nvmeotcp_progress_params *params,
- u32 seq)
+ u32 seq, bool is_tx)
{
void *ctx = params->ctx;
- params->tir_num = cpu_to_be32(queue->tirn);
+ params->tir_num = is_tx ? cpu_to_be32(queue->tisn) : cpu_to_be32(queue->tirn);
MLX5_SET(nvmeotcp_progress_params, ctx,
next_pdu_tcp_sn, seq);
MLX5_SET(nvmeotcp_progress_params, ctx, pdu_tracker_state,
MLX5E_NVMEOTCP_PROGRESS_PARAMS_PDU_TRACKER_STATE_START);
+ if (is_tx)
+ MLX5_SET(nvmeotcp_progress_params, ctx, offloading_state, 0);
+}
+
+static void nvme_tx_fill_wi(struct mlx5e_txqsq *sq,
+ u16 pi, u8 num_wqebbs, u32 num_bytes,
+ struct page *page, enum mlx5e_dump_wqe_type type)
+{
+ struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
+
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .num_wqebbs = num_wqebbs,
+ .num_bytes = num_bytes,
+ };
}
void
build_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_progress_params_wqe *wqe,
- u32 seq)
+ u32 seq, bool is_rx, bool resync, u16 pc, u32 sqn)
{
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- u32 sqn = queue->sq->icosq.sqn;
- u16 pc = queue->sq->icosq.pc;
- u8 opc_mod;
+ u8 opc_mod = is_rx ?
+ MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_PROGRESS_PARAMS :
+ MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIS_PROGRESS_PARAMS;
memset(wqe, 0, MLX5E_NVMEOTCP_PROGRESS_PARAMS_WQE_SZ);
- opc_mod = MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_PROGRESS_PARAMS;
+
cseg->opmod_idx_opcode = cpu_to_be32((pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_SET_PSV | (opc_mod << 24));
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
PROGRESS_PARAMS_DS_CNT);
- fill_nvmeotcp_progress_params(queue, &wqe->params, seq);
+ fill_nvmeotcp_progress_params(queue, &wqe->params, seq, !is_rx);
}
static void
fill_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5_seg_nvmeotcp_static_params *params,
- u32 resync_seq, bool zero_copy_en,
+ u32 resync_seq, bool is_rx, bool zero_copy_en,
bool ddgst_offload_en)
{
void *ctx = params->ctx;
+ int pda = queue->pda;
+ bool hddgst_en = queue->dgst & NVME_TCP_HDR_DIGEST_ENABLE;
+ bool ddgst_en = queue->dgst & NVME_TCP_DATA_DIGEST_ENABLE;
+
+ if (!is_rx) {
+ pda = 0;
+ }
MLX5_SET(transport_static_params, ctx, const_1, 1);
MLX5_SET(transport_static_params, ctx, const_2, 2);
MLX5_SET(transport_static_params, ctx, acc_type,
MLX5_TRANSPORT_STATIC_PARAMS_ACC_TYPE_NVMETCP);
MLX5_SET(transport_static_params, ctx, nvme_resync_tcp_sn, resync_seq);
- MLX5_SET(transport_static_params, ctx, pda, queue->pda);
- MLX5_SET(transport_static_params, ctx, ddgst_en,
- queue->dgst & NVME_TCP_DATA_DIGEST_ENABLE);
+ MLX5_SET(transport_static_params, ctx, pda, pda);
+ MLX5_SET(transport_static_params, ctx, ddgst_en, ddgst_en);
MLX5_SET(transport_static_params, ctx, ddgst_offload_en, ddgst_offload_en);
- MLX5_SET(transport_static_params, ctx, hddgst_en,
- queue->dgst & NVME_TCP_HDR_DIGEST_ENABLE);
+ MLX5_SET(transport_static_params, ctx, hddgst_en, hddgst_en);
MLX5_SET(transport_static_params, ctx, hdgst_offload_en, 0);
MLX5_SET(transport_static_params, ctx, ti,
MLX5_TRANSPORT_STATIC_PARAMS_TI_INITIATOR);
@@ -310,26 +332,31 @@ fill_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
void
build_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_static_params_wqe *wqe,
- u32 resync_seq, bool zerocopy, bool crc_rx)
+ u32 resync_seq, bool is_rx, u16 pc, u32 sqn,
+ bool zerocopy, bool crc_rx)
{
- u8 opc_mod = MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_STATIC_PARAMS;
struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
- u32 sqn = queue->sq->icosq.sqn;
- u16 pc = queue->sq->icosq.pc;
+ int tirn_tisn = is_rx ? queue->tirn : queue->tisn;
+ u8 opc_mod = is_rx ?
+ MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIR_STATIC_PARAMS :
+ MLX5_CTRL_SEGMENT_OPC_MOD_UMR_NVMEOTCP_TIS_STATIC_PARAMS;
+
memset(wqe, 0, MLX5E_NVMEOTCP_STATIC_PARAMS_WQE_SZ);
- cseg->opmod_idx_opcode = cpu_to_be32((pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
- MLX5_OPCODE_UMR | (opc_mod) << 24);
+ cseg->opmod_idx_opcode = cpu_to_be32((pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR | (opc_mod) << 24);
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
STATIC_PARAMS_DS_CNT);
- cseg->imm = cpu_to_be32(queue->tirn << MLX5_WQE_CTRL_TIR_TIS_INDEX_SHIFT);
+ cseg->imm = cpu_to_be32(tirn_tisn <<
+ MLX5_WQE_CTRL_TIR_TIS_INDEX_SHIFT);
ucseg->flags = MLX5_UMR_INLINE;
ucseg->bsf_octowords =
cpu_to_be16(MLX5E_NVMEOTCP_STATIC_PARAMS_OCTWORD_SIZE);
- fill_nvmeotcp_static_params(queue, &wqe->params, resync_seq, zerocopy, crc_rx);
+ fill_nvmeotcp_static_params(queue, &wqe->params, resync_seq,
+ is_rx, zerocopy, crc_rx);
}
static void
@@ -371,7 +398,8 @@ mlx5e_nvmeotcp_rx_post_static_params_wqe(struct mlx5e_nvmeotcp_queue *queue,
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
wqe = MLX5E_NVMEOTCP_FETCH_STATIC_PARAMS_WQE(sq, pi);
mlx5e_nvmeotcp_fill_wi(NULL, sq, wqe_bbs, pi, 0, BSF_UMR);
- build_nvmeotcp_static_params(queue, wqe, resync_seq, queue->zerocopy, queue->crc_rx);
+ build_nvmeotcp_static_params(queue, wqe, resync_seq, true, sq->pc,
+ sq->sqn, queue->zerocopy, queue->crc_rx);
sq->pc += wqe_bbs;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
spin_unlock(&queue->nvmeotcp_icosq_lock);
@@ -389,7 +417,7 @@ mlx5e_nvmeotcp_rx_post_progress_params_wqe(struct mlx5e_nvmeotcp_queue *queue,
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
wqe = MLX5E_NVMEOTCP_FETCH_PROGRESS_PARAMS_WQE(sq, pi);
mlx5e_nvmeotcp_fill_wi(queue, sq, wqe_bbs, pi, 0, SET_PSV_UMR);
- build_nvmeotcp_progress_params(queue, wqe, seq);
+ build_nvmeotcp_progress_params(queue, wqe, seq, true, false, sq->pc, sq->sqn);
sq->pc += wqe_bbs;
mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
}
@@ -1078,6 +1106,60 @@ int mlx5e_nvmeotcp_init(struct mlx5e_priv *priv)
return ret;
}
+static
+void mlx5e_nvmeotcp_tx_post_static_params(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_set_nvmeotcp_static_params_wqe *wqe;
+ enum mlx5e_dump_wqe_type type = MLX5E_DUMP_WQE_NVMEOTCP;
+ u16 pi, wqe_bbs;
+
+ wqe_bbs = MLX5E_NVMEOTCP_STATIC_PARAMS_WQEBBS;
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_bbs);
+ wqe = MLX5E_NVMEOTCP_FETCH_STATIC_PARAMS_WQE(sq, pi);
+ nvme_tx_fill_wi(sq, pi, wqe_bbs, 0, NULL, type);
+ build_nvmeotcp_static_params(queue, wqe, 0, false,
+ sq->pc, sq->sqn, false, true);
+ sq->pc += wqe_bbs;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
+static
+void mlx5e_nvmeotcp_tx_post_progress_params(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq, u32 seq,
+ bool resync)
+{
+ struct mlx5e_set_nvmeotcp_progress_params_wqe *wqe;
+ enum mlx5e_dump_wqe_type type = MLX5E_DUMP_WQE_NVMEOTCP;
+ u16 pi, wqe_bbs;
+
+ wqe_bbs = MLX5E_NVMEOTCP_PROGRESS_PARAMS_WQEBBS;
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_bbs);
+ wqe = MLX5E_NVMEOTCP_FETCH_PROGRESS_PARAMS_WQE(sq, pi);
+ nvme_tx_fill_wi(sq, pi, wqe_bbs, 0, NULL, type);
+ build_nvmeotcp_progress_params(queue, wqe, seq, false, resync, sq->pc, sq->sqn);
+ sq->pc += wqe_bbs;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+}
+
+static
+bool mlx5e_nvmeotcp_test_and_clear_pending(struct mlx5e_nvmeotcp_queue *ctx)
+{
+ bool ret = ctx->pending;
+
+ ctx->pending = false;
+
+ return ret;
+}
+
+static
+void mlx5e_nvmeotcp_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct sock *sk,
+ struct mlx5e_nvmeotcp_queue *ctx)
+{
+ mlx5e_nvmeotcp_tx_post_static_params(ctx, sq);
+ mlx5e_nvmeotcp_tx_post_progress_params(ctx, sq, tcp_sk(sk)->copied_seq, false);
+}
+
void mlx5e_nvmeotcp_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_nvmeotcp *nvmeotcp = priv->nvmeotcp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_utils.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_utils.h
index 44671e28a9ea..e7436aa01ad4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_utils.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp_utils.h
@@ -69,12 +69,12 @@ struct mlx5e_get_psv_wqe {
void
build_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_progress_params_wqe *wqe,
- u32 seq);
+ u32 seq, bool is_rx, bool is_resync, u16 pc, u32 sqn);
void
build_nvmeotcp_static_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_static_params_wqe *wqe,
- u32 resync_seq,
+ u32 resync_seq, bool is_rx, u16 pc, u32 sqn,
bool zerocopy, bool crc_rx);
#endif /* __MLX5E_NVMEOTCP_UTILS_H__ */
--
2.24.1
Thread overview: 60+ messages
2021-07-22 11:02 [PATCH v5 net-next 00/36] nvme-tcp receive and transmit offloads Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 01/36] net: Introduce direct data placement tcp offload Boris Pismenny
2021-07-22 11:26 ` Eric Dumazet
2021-07-22 12:18 ` Boris Pismenny
2021-07-22 13:10 ` Eric Dumazet
2021-07-22 13:33 ` Boris Pismenny
2021-07-22 13:39 ` Eric Dumazet
2021-07-22 14:02 ` Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 02/36] iov_iter: DDP copy to iter/pages Boris Pismenny
2021-07-22 13:31 ` Christoph Hellwig
2021-07-22 20:23 ` Boris Pismenny
2021-07-23 5:03 ` Christoph Hellwig
2021-07-23 5:21 ` Al Viro
2021-08-04 14:13 ` Or Gerlitz
2021-08-10 13:29 ` Or Gerlitz
2021-07-22 20:55 ` Al Viro
2021-07-22 11:02 ` [PATCH v5 net-next 03/36] net: skb copy(+hash) iterators for DDP offloads Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 04/36] net/tls: expose get_netdev_for_sock Boris Pismenny
2021-07-23 6:06 ` Christoph Hellwig
2021-08-04 13:26 ` Or Gerlitz
2021-07-22 11:02 ` [PATCH v5 net-next 05/36] nvme-tcp: Add DDP offload control path Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 06/36] nvme-tcp: Add DDP data-path Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 07/36] nvme-tcp: RX DDGST offload Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 08/36] nvme-tcp: Deal with netdevice DOWN events Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 09/36] net/mlx5: Header file changes for nvme-tcp offload Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 10/36] net/mlx5: Add 128B CQE for NVMEoTCP offload Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 11/36] net/mlx5e: TCP flow steering for nvme-tcp Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 12/36] net/mlx5e: NVMEoTCP offload initialization Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 13/36] net/mlx5e: KLM UMR helper macros Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 14/36] net/mlx5e: NVMEoTCP use KLM UMRs Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 15/36] net/mlx5e: NVMEoTCP queue init/teardown Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 16/36] net/mlx5e: NVMEoTCP async ddp invalidation Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 17/36] net/mlx5e: NVMEoTCP ddp setup and resync Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 18/36] net/mlx5e: NVMEoTCP, data-path for DDP+DDGST offload Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 19/36] net/mlx5e: NVMEoTCP statistics Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 20/36] Documentation: add ULP DDP offload documentation Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 21/36] net: drop ULP DDP HW offload feature if no CSUM offload feature Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 22/36] net: Add ulp_ddp_pdu_info struct Boris Pismenny
2021-07-23 19:42 ` Sagi Grimberg
2021-07-22 11:03 ` [PATCH v5 net-next 23/36] net: Add to ulp_ddp support for fallback flow Boris Pismenny
2021-07-23 6:09 ` Christoph Hellwig
2021-07-22 11:03 ` [PATCH v5 net-next 24/36] net: Add MSG_DDP_CRC flag Boris Pismenny
2021-07-22 14:23 ` Eric Dumazet
2021-07-22 11:03 ` [PATCH v5 net-next 25/36] nvme-tcp: TX DDGST offload Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 26/36] nvme-tcp: Mapping between Tx NVMEoTCP pdu and TCP sequence Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 27/36] mlx5e: make preparation in TLS code for NVMEoTCP CRC Tx offload Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 28/36] mlx5: Add sq state test bit for nvmeotcp Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 29/36] mlx5: Add support to NETIF_F_HW_TCP_DDP_CRC_TX feature Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 30/36] net/mlx5e: NVMEoTCP DDGST TX offload TIS Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 31/36] net/mlx5e: NVMEoTCP DDGST Tx offload queue init/teardown Boris Pismenny
2021-07-22 11:03 ` Boris Pismenny [this message]
2021-07-22 11:03 ` [PATCH v5 net-next 33/36] net/mlx5e: NVMEoTCP DDGST TX Data path Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 34/36] net/mlx5e: NVMEoTCP DDGST TX handle OOO packets Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 35/36] net/mlx5e: NVMEoTCP DDGST TX offload optimization Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 36/36] net/mlx5e: NVMEoTCP DDGST TX statistics Boris Pismenny
2021-07-23 5:56 ` [PATCH v5 net-next 00/36] nvme-tcp receive and transmit offloads Christoph Hellwig
2021-07-23 19:58 ` Sagi Grimberg
2021-08-04 13:51 ` Or Gerlitz
2021-08-06 19:46 ` Sagi Grimberg
2021-08-10 13:37 ` Or Gerlitz