From: Boris Pismenny <borisp@nvidia.com>
To: <dsahern@gmail.com>, <kuba@kernel.org>, <davem@davemloft.net>,
<saeedm@nvidia.com>, <hch@lst.de>, <sagi@grimberg.me>,
<axboe@fb.com>, <kbusch@kernel.org>, <viro@zeniv.linux.org.uk>,
<edumazet@google.com>, <smalin@marvell.com>
Cc: <boris.pismenny@gmail.com>, <linux-nvme@lists.infradead.org>,
<netdev@vger.kernel.org>, <benishay@nvidia.com>,
<ogerlitz@nvidia.com>, <yorayz@nvidia.com>
Subject: [PATCH v5 net-next 34/36] net/mlx5e: NVMEoTCP DDGST TX handle OOO packets
Date: Thu, 22 Jul 2021 14:03:23 +0300
Message-ID: <20210722110325.371-35-borisp@nvidia.com>
In-Reply-To: <20210722110325.371-1-borisp@nvidia.com>
From: Yoray Zack <yorayz@nvidia.com>
When the driver detects an out-of-order (OOO) NVMEoTCP Tx packet, it
starts the OOO flow (sketched below):
1. Get the pdu_info for the packet from nvme-tcp.
2. Send a resync indication to the NIC (SET_PSV) - the NIC rebuilds
   its parse machine.
3. Send the data the NIC needs for computing the DDGST using DUMP WQEs.
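
For reference, here is a condensed sketch of that flow. The entry point
name ooo_flow_sketch is illustrative only; the real function added below
is mlx5e_nvmeotcp_handle_ooo_skb, and error handling is abbreviated:

    static enum mlx5e_nvmeotcp_resync_retval
    ooo_flow_sketch(struct mlx5e_nvmeotcp_queue *queue, struct mlx5e_txqsq *sq,
                    struct sk_buff *skb, u32 seq)
    {
            /* 1. Ask nvme-tcp for the pdu_info covering this TCP sequence. */
            struct ulp_ddp_pdu_info *pdu_info = ulp_ddp_get_pdu_info(skb->sk, seq);

            if (!pdu_info)
                    return MLX5E_NVMEOTCP_RESYNC_SKIP;

            /* 2. SET_PSV with a small fence: the NIC rebuilds its parse machine. */
            mlx5e_nvmeotcp_tx_post_progress_params(queue, sq,
                                                   pdu_info->start_seq, true);

            if (seq == pdu_info->start_seq || seq == pdu_info->end_seq) {
                    /* On a PDU boundary only a fence NOP is needed. */
                    mlx5e_nvmeotcp_tx_post_fence_nop(sq);
                    return MLX5E_NVMEOTCP_RESYNC_DONE;
            }

            /* 3. Mid-PDU: replay the header, data and CRC bytes up to seq
             * from the pdu_info using DUMP WQEs.
             */
            if (!mlx5e_nvmeotcp_resync_cap(queue, sq, skb, pdu_info, seq))
                    return MLX5E_NVMEOTCP_RESYNC_FAIL;

            return MLX5E_NVMEOTCP_RESYNC_DONE;
    }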
Signed-off-by: Yoray Zack <yorayz@nvidia.com>
---
.../net/ethernet/mellanox/mlx5/core/en/txrx.h | 2 +-
.../mellanox/mlx5/core/en_accel/nvmeotcp.c | 281 +++++++++++++++++-
2 files changed, 280 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index c7f979dfdd69..1f4beaac488a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -142,7 +142,7 @@ struct mlx5e_tx_wqe_info {
u8 num_wqebbs;
u8 num_dma;
u8 num_fifo_pkts;
-#ifdef CONFIG_MLX5_EN_TLS
+#if defined(CONFIG_MLX5_EN_TLS) || defined(CONFIG_MLX5_EN_NVMEOTCP)
struct page *resync_dump_frag_page;
enum mlx5e_dump_wqe_type type;
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
index d9f6125f5dbc..f8cba90679ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
@@ -3,6 +3,7 @@
#include <linux/netdevice.h>
#include <linux/idr.h>
+#include <linux/blk-mq.h>
#include <linux/nvme-tcp.h>
#include "en_accel/nvmeotcp.h"
#include "en_accel/nvmeotcp_utils.h"
@@ -267,6 +268,18 @@ fill_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
MLX5_SET(nvmeotcp_progress_params, ctx, offloading_state, 0);
}
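+
+/* A DUMP WQE carries one control segment and one data segment that points
+ * at the payload the NIC replays to rebuild the DDGST computation.
+ */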
+struct mlx5e_dump_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_data_seg data;
+};
+
+#define MLX5E_NVME_DUMP_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_NVME_FETCH_DUMP_WQE(sq, pi) \
+ ((struct mlx5e_dump_wqe *)\
+ mlx5e_fetch_wqe(&(sq)->wq, pi, sizeof(struct mlx5e_dump_wqe)))
+
static void nvme_tx_fill_wi(struct mlx5e_txqsq *sq,
u16 pi, u8 num_wqebbs, u32 num_bytes,
struct page *page, enum mlx5e_dump_wqe_type type)
@@ -276,9 +289,65 @@ static void nvme_tx_fill_wi(struct mlx5e_txqsq *sq,
*wi = (struct mlx5e_tx_wqe_info) {
.num_wqebbs = num_wqebbs,
.num_bytes = num_bytes,
+ .resync_dump_frag_page = page,
+ .type = type,
};
}
+static void mlx5e_nvmeotcp_tx_post_fence_nop(struct mlx5e_txqsq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ nvme_tx_fill_wi(sq, pi, 1, 0, NULL, MLX5E_DUMP_WQE_NVMEOTCP);
+
+ mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
+}
+
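+/* Post a single DUMP WQE for one frag; "first" requests a small fence so
+ * the dump is not executed before the preceding SET_PSV takes effect.
+ */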
+static int
+nvmeotcp_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag,
+ u32 tisn, bool first, enum mlx5e_dump_wqe_type type)
+{
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5_wqe_data_seg *dseg;
+ struct mlx5e_dump_wqe *wqe;
+ dma_addr_t dma_addr;
+ u16 ds_cnt;
+ int fsz;
+ u16 pi;
+
+ BUILD_BUG_ON(MLX5E_NVME_DUMP_WQEBBS != 1);
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wqe = MLX5E_NVME_FETCH_DUMP_WQE(sq, pi);
+
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+
+ cseg = &wqe->ctrl;
+ dseg = &wqe->data;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ cseg->tis_tir_num = cpu_to_be32(tisn << 8);
+ cseg->fm_ce_se = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
+
+ fsz = skb_frag_size(frag);
+ dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ return -ENOMEM;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(fsz);
+ mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+
+ nvme_tx_fill_wi(sq, pi, MLX5E_NVME_DUMP_WQEBBS,
+ fsz, skb_frag_page(frag), type);
+ sq->pc += MLX5E_NVME_DUMP_WQEBBS;
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl);
+ return 0;
+}
+
void
build_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
struct mlx5e_set_nvmeotcp_progress_params_wqe *wqe,
@@ -295,6 +364,7 @@ build_nvmeotcp_progress_params(struct mlx5e_nvmeotcp_queue *queue,
MLX5_OPCODE_SET_PSV | (opc_mod << 24));
cseg->qpn_ds = cpu_to_be32((sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
PROGRESS_PARAMS_DS_CNT);
+ cseg->fm_ce_se = resync ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;
fill_nvmeotcp_progress_params(queue, &wqe->params, seq, !is_rx);
}
@@ -1160,6 +1230,202 @@ void mlx5e_nvmeotcp_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct sock *sk,
mlx5e_nvmeotcp_tx_post_progress_params(ctx, sq, tcp_sk(sk)->copied_seq, false);
}
+enum mlx5e_nvmeotcp_resync_retval {
+ MLX5E_NVMEOTCP_RESYNC_DONE,
+ MLX5E_NVMEOTCP_RESYNC_FAIL,
+ MLX5E_NVMEOTCP_RESYNC_SKIP,
+};
+
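+/* Split a frag into hw_mtu-sized chunks, post a DUMP WQE per chunk, and
+ * take a page reference for every chunk posted.
+ */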
+static
+int mlx5e_nvmeotcp_resync_frag(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ int i, skb_frag_t *frag, u32 seq)
+{
+ unsigned int orig_fsz, frag_offset = 0, n = 0;
+ enum mlx5e_dump_wqe_type type = MLX5E_DUMP_WQE_NVMEOTCP;
+
+ orig_fsz = skb_frag_size(frag);
+
+ do {
+ bool fence = !(i || frag_offset);
+ unsigned int fsz;
+
+ n++;
+ fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
+ skb_frag_size_set(frag, fsz);
+ if (nvmeotcp_post_resync_dump(sq, frag, queue->tisn, fence, type)) {
+ page_ref_add(compound_head(skb_frag_page(frag)), n - 1);
+ return -1;
+ }
+
+ skb_frag_off_add(frag, fsz);
+ frag_offset += fsz;
+ } while (frag_offset < orig_fsz);
+
+ page_ref_add(compound_head(skb_frag_page(frag)), n);
+
+ return 0;
+}
+
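+/* Replay the PDU header (up to "remaining" bytes) as a synthetic frag. */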
+static int mlx5e_nvmeotcp_resync_hdr(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq, u32 seq,
+ struct sk_buff *skb, int remaining,
+ struct ulp_ddp_pdu_info *pdu_info)
+{
+ skb_frag_t pdu_frag;
+ int size = min_t(int, remaining, pdu_info->hdr_len);
+
+ __skb_frag_set_page(&pdu_frag, virt_to_page(pdu_info->hdr));
+ skb_frag_off_set(&pdu_frag, offset_in_page(pdu_info->hdr));
+ skb_frag_size_set(&pdu_frag, size);
+
+ return mlx5e_nvmeotcp_resync_frag(queue, sq, skb, 0, &pdu_frag, seq);
+}
+
+static void mlx5e_nvmeotcp_init_iter(struct iov_iter *iter, struct bio *bio)
+{
+ unsigned int bio_size;
+ struct bio_vec *vec;
+ int nsegs;
+
+ vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ nsegs = bio_segments(bio);
+ bio_size = bio->bi_iter.bi_size;
+ iov_iter_bvec(iter, WRITE, vec, nsegs, bio_size);
+ iter->iov_offset = 0;
+}
+
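+/* Walk the request's bio(s) with a bvec iterator and replay up to
+ * "remaining" bytes of PDU data via DUMP WQEs.
+ */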
+static int mlx5e_nvmeotcp_resync_data(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq, u32 seq,
+ struct sk_buff *skb, int remaining,
+ struct ulp_ddp_pdu_info *pdu_info)
+{
+ struct request *req = pdu_info->req;
+ struct bio *bio = req->bio;
+ struct iov_iter iter;
+ int data_remaining;
+ size_t data_sent = 0;
+
+ mlx5e_nvmeotcp_init_iter(&iter, bio);
+
+ data_remaining = min_t(int, remaining, pdu_info->data_len);
+
+ while (data_remaining > 0) {
+ skb_frag_t frag;
+ size_t size = min_t(size_t, iter.bvec->bv_len - iter.iov_offset,
+ data_remaining);
+
+ __skb_frag_set_page(&frag, iter.bvec->bv_page);
+ skb_frag_off_set(&frag, iter.bvec->bv_offset + iter.iov_offset);
+ skb_frag_size_set(&frag, size);
+ data_remaining -= size;
+
+ if (mlx5e_nvmeotcp_resync_frag(queue, sq, skb, 1, &frag, seq))
+ goto err_out;
+
+ if (!data_remaining)
+ break;
+
+ data_sent += size;
+ iov_iter_advance(&iter, size);
+ if (!iov_iter_count(&iter) && data_sent < pdu_info->data_len) {
+ bio = bio->bi_next;
+ mlx5e_nvmeotcp_init_iter(&iter, bio);
+ }
+ }
+
+ return 0;
+err_out:
+ return -1;
+}
+
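+/* The HW recomputes the DDGST, so only placeholder bytes are dumped for
+ * the digest region.
+ */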
+static int mlx5e_nvmeotcp_resync_crc(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq, u32 seq,
+ struct sk_buff *skb, int remaining,
+ struct ulp_ddp_pdu_info *pdu_info)
+{
+ skb_frag_t crc_frag;
+ u32 dummy_ddigest = 0;
+
+ __skb_frag_set_page(&crc_frag, virt_to_page(&dummy_ddigest));
+ skb_frag_off_set(&crc_frag, offset_in_page(&dummy_ddigest));
+ skb_frag_size_set(&crc_frag, remaining);
+ return mlx5e_nvmeotcp_resync_frag(queue, sq, skb, 1, &crc_frag, seq);
+}
+
+/* For a pdu_info mapping [start_seq ---- seq ---- end_seq] of a capsule,
+ * replay the bytes in [start_seq, seq) to the HW via DUMP WQEs.
+ */
+static
+bool mlx5e_nvmeotcp_resync_cap(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct ulp_ddp_pdu_info *pdu_info,
+ u32 seq)
+{
+ int remaining = seq - pdu_info->start_seq;
+ int ret;
+
+ ret = mlx5e_nvmeotcp_resync_hdr(queue, sq, seq, skb, remaining,
+ pdu_info);
+ if (unlikely(ret))
+ goto err_out;
+
+ remaining -= pdu_info->hdr_len;
+ if (remaining <= 0)
+ goto out;
+
+ ret = mlx5e_nvmeotcp_resync_data(queue, sq, seq, skb, remaining,
+ pdu_info);
+ if (unlikely(ret))
+ goto err_out;
+
+ remaining -= pdu_info->data_len;
+ if (remaining <= 0)
+ goto out;
+
+ ret = mlx5e_nvmeotcp_resync_crc(queue, sq, seq, skb, remaining,
+ pdu_info);
+ if (unlikely(ret))
+ goto err_out;
+out:
+ return true;
+err_out:
+ return false;
+}
+
+static enum mlx5e_nvmeotcp_resync_retval
+mlx5e_nvmeotcp_handle_ooo_skb(struct mlx5e_nvmeotcp_queue *queue,
+ struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ u32 seq, int datalen)
+{
+ struct ulp_ddp_pdu_info *pdu_info = NULL;
+
+ /* ask for pdu_info that includes the tcp_seq */
+ pdu_info = ulp_ddp_get_pdu_info(skb->sk, seq);
+
+ if (!pdu_info)
+ return MLX5E_NVMEOTCP_RESYNC_SKIP;
+
+ /* Update the NIC about the resync - it will rebuild its parse machine.
+ * Send the PSV with a small fence.
+ */
+ mlx5e_nvmeotcp_tx_post_progress_params(queue, sq, pdu_info->start_seq, true);
+
+ if (seq == pdu_info->start_seq || seq == pdu_info->end_seq) {
+ mlx5e_nvmeotcp_tx_post_fence_nop(sq);
+ return MLX5E_NVMEOTCP_RESYNC_DONE;
+ }
+
+ /* Post DUMP WQEs - transfer the bytes in [start_seq, seq), as saved
+ * in the pdu_info, to the NIC HW.
+ */
+ if (unlikely(!mlx5e_nvmeotcp_resync_cap(queue, sq, skb, pdu_info, seq)))
+ return MLX5E_NVMEOTCP_RESYNC_FAIL;
+
+ return MLX5E_NVMEOTCP_RESYNC_DONE;
+}
+
static inline bool mlx5e_is_sk_tx_device_offloaded(struct sock *sk)
{
/* Return true after the smp_store_release assignment in
@@ -1199,8 +1465,19 @@ bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
mlx5e_nvmeotcp_tx_post_param_wqes(sq, skb->sk, ctx);
seq = ntohl(tcp_hdr(skb)->seq);
- if (unlikely(ctx->ulp_ddp_ctx.expected_seq != seq))
- goto err_out;
+ if (unlikely(ctx->ulp_ddp_ctx.expected_seq != seq)) {
+ enum mlx5e_nvmeotcp_resync_retval ret =
+ mlx5e_nvmeotcp_handle_ooo_skb(ctx, sq, skb,
+ seq, datalen);
+ switch (ret) {
+ case MLX5E_NVMEOTCP_RESYNC_DONE:
+ break;
+ case MLX5E_NVMEOTCP_RESYNC_SKIP:
+ goto out;
+ case MLX5E_NVMEOTCP_RESYNC_FAIL:
+ goto err_out;
+ }
+ }
*nvmeotcp_tisn = ctx->tisn;
ctx->ulp_ddp_ctx.expected_seq = seq + datalen;
--
2.24.1