From: nifan.cxl@gmail.com
To: qemu-devel@nongnu.org
Cc: jonathan.cameron@huawei.com, linux-cxl@vger.kernel.org,
gregory.price@memverge.com, ira.weiny@intel.com,
dan.j.williams@intel.com, a.manzanares@samsung.com,
dave@stgolabs.net, nmtadam.samsung@gmail.com,
nifan.cxl@gmail.com, jim.harris@samsung.com,
Jorgen.Hansen@wdc.com, wj28.lee@gmail.com, armbru@redhat.com,
mst@redhat.com, Fan Ni <fan.ni@samsung.com>
Subject: [PATCH v8 08/14] hw/mem/cxl_type3: Add host backend and address space handling for DC regions
Date: Thu, 23 May 2024 10:44:48 -0700 [thread overview]
Message-ID: <20240523174651.1089554-9-nifan.cxl@gmail.com> (raw)
In-Reply-To: <20240523174651.1089554-1-nifan.cxl@gmail.com>

From: Fan Ni &lt;fan.ni@samsung.com&gt;

Add a (file or memory backed) host backend for DCD. All dynamic capacity
regions share a single host backend, which must be large enough to cover
them. Set up an address space for the DC regions so that reads and writes
to dynamic capacity are routed correctly.

With this change, the following support is added:
1. A new type3 device property "volatile-dc-memdev" that points to the
   host memory backend for dynamic capacity (an illustrative invocation
   follows the "---" marker below). Currently, all DC regions share one
   host backend;
2. An address space for dynamic capacity, enabling read/write support;
3. CDAT entries for each dynamic capacity region.

Reviewed-by: Gregory Price <gregory.price@memverge.com>
Signed-off-by: Fan Ni <fan.ni@samsung.com>
---
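For illustration only, a DCD instance with two DC regions backed by one
2 GiB backend could be created along these lines (a minimal sketch: the
backend id "dc-mem", the sizes and the root port id "root_port13" are
placeholders, not taken from this patch, and assume the usual CXL fixed
memory window and root port setup is already in place):

    -object memory-backend-ram,id=dc-mem,size=2G \
    -device cxl-type3,bus=root_port13,id=cxl-dcd0,num-dc-regions=2,volatile-dc-memdev=dc-mem

With two regions sharing a 2 GiB backend, each region gets 1 GiB, which
passes the cxl_create_dc_regions() checks below: the backend size must be
a multiple of num_regions * CXL_CAPACITY_MULTIPLIER, and each region
length must itself be a multiple of CXL_CAPACITY_MULTIPLIER (256 MiB).
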
hw/cxl/cxl-mailbox-utils.c | 16 ++--
hw/mem/cxl_type3.c | 174 +++++++++++++++++++++++++++++-------
include/hw/cxl/cxl_device.h | 8 ++
3 files changed, 162 insertions(+), 36 deletions(-)
diff --git a/hw/cxl/cxl-mailbox-utils.c b/hw/cxl/cxl-mailbox-utils.c
index 0f2ad58a14..831cef0567 100644
--- a/hw/cxl/cxl-mailbox-utils.c
+++ b/hw/cxl/cxl-mailbox-utils.c
@@ -622,7 +622,8 @@ static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
size_t *len_out,
CXLCCI *cci)
{
- CXLDeviceState *cxl_dstate = &CXL_TYPE3(cci->d)->cxl_dstate;
+ CXLType3Dev *ct3d = CXL_TYPE3(cci->d);
+ CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
struct {
uint8_t slots_supported;
uint8_t slot_info;
@@ -636,7 +637,8 @@ static CXLRetCode cmd_firmware_update_get_info(const struct cxl_cmd *cmd,
QEMU_BUILD_BUG_ON(sizeof(*fw_info) != 0x50);
if ((cxl_dstate->vmem_size < CXL_CAPACITY_MULTIPLIER) ||
- (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER)) {
+ (cxl_dstate->pmem_size < CXL_CAPACITY_MULTIPLIER) ||
+ (ct3d->dc.total_capacity < CXL_CAPACITY_MULTIPLIER)) {
return CXL_MBOX_INTERNAL_ERROR;
}
@@ -793,7 +795,8 @@ static CXLRetCode cmd_identify_memory_device(const struct cxl_cmd *cmd,
CXLDeviceState *cxl_dstate = &ct3d->cxl_dstate;
if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
- (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
+ (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
+ (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
return CXL_MBOX_INTERNAL_ERROR;
}
@@ -835,9 +838,11 @@ static CXLRetCode cmd_ccls_get_partition_info(const struct cxl_cmd *cmd,
uint64_t next_pmem;
} QEMU_PACKED *part_info = (void *)payload_out;
QEMU_BUILD_BUG_ON(sizeof(*part_info) != 0x20);
+ CXLType3Dev *ct3d = container_of(cxl_dstate, CXLType3Dev, cxl_dstate);
if ((!QEMU_IS_ALIGNED(cxl_dstate->vmem_size, CXL_CAPACITY_MULTIPLIER)) ||
- (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER))) {
+ (!QEMU_IS_ALIGNED(cxl_dstate->pmem_size, CXL_CAPACITY_MULTIPLIER)) ||
+ (!QEMU_IS_ALIGNED(ct3d->dc.total_capacity, CXL_CAPACITY_MULTIPLIER))) {
return CXL_MBOX_INTERNAL_ERROR;
}
@@ -1179,7 +1184,8 @@ static CXLRetCode cmd_media_clear_poison(const struct cxl_cmd *cmd,
struct clear_poison_pl *in = (void *)payload_in;
dpa = ldq_le_p(&in->dpa);
- if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size) {
+ if (dpa + CXL_CACHE_LINE_SIZE > cxl_dstate->static_mem_size +
+ ct3d->dc.total_capacity) {
return CXL_MBOX_INVALID_PA;
}
diff --git a/hw/mem/cxl_type3.c b/hw/mem/cxl_type3.c
index 51be50ce87..f645a3f2e9 100644
--- a/hw/mem/cxl_type3.c
+++ b/hw/mem/cxl_type3.c
@@ -45,7 +45,8 @@ enum {
static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
int dsmad_handle, uint64_t size,
- bool is_pmem, uint64_t dpa_base)
+ bool is_pmem, bool is_dynamic,
+ uint64_t dpa_base)
{
CDATDsmas *dsmas;
CDATDslbis *dslbis0;
@@ -61,7 +62,8 @@ static void ct3_build_cdat_entries_for_mr(CDATSubHeader **cdat_table,
.length = sizeof(*dsmas),
},
.DSMADhandle = dsmad_handle,
- .flags = is_pmem ? CDAT_DSMAS_FLAG_NV : 0,
+ .flags = (is_pmem ? CDAT_DSMAS_FLAG_NV : 0) |
+ (is_dynamic ? CDAT_DSMAS_FLAG_DYNAMIC_CAP : 0),
.DPA_base = dpa_base,
.DPA_length = size,
};
@@ -149,12 +151,13 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
g_autofree CDATSubHeader **table = NULL;
CXLType3Dev *ct3d = priv;
MemoryRegion *volatile_mr = NULL, *nonvolatile_mr = NULL;
+ MemoryRegion *dc_mr = NULL;
uint64_t vmr_size = 0, pmr_size = 0;
int dsmad_handle = 0;
int cur_ent = 0;
int len = 0;
- if (!ct3d->hostpmem && !ct3d->hostvmem) {
+ if (!ct3d->hostpmem && !ct3d->hostvmem && !ct3d->dc.num_regions) {
return 0;
}
@@ -176,21 +179,54 @@ static int ct3_build_cdat_table(CDATSubHeader ***cdat_table, void *priv)
pmr_size = memory_region_size(nonvolatile_mr);
}
+ if (ct3d->dc.num_regions) {
+ if (!ct3d->dc.host_dc) {
+ return -EINVAL;
+ }
+ dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ if (!dc_mr) {
+ return -EINVAL;
+ }
+ len += CT3_CDAT_NUM_ENTRIES * ct3d->dc.num_regions;
+ }
+
table = g_malloc0(len * sizeof(*table));
/* Now fill them in */
if (volatile_mr) {
ct3_build_cdat_entries_for_mr(table, dsmad_handle++, vmr_size,
- false, 0);
+ false, false, 0);
cur_ent = CT3_CDAT_NUM_ENTRIES;
}
if (nonvolatile_mr) {
uint64_t base = vmr_size;
ct3_build_cdat_entries_for_mr(&(table[cur_ent]), dsmad_handle++,
- pmr_size, true, base);
+ pmr_size, true, false, base);
cur_ent += CT3_CDAT_NUM_ENTRIES;
}
+
+ if (dc_mr) {
+ int i;
+ uint64_t region_base = vmr_size + pmr_size;
+
+ /*
+ * We assume the dynamic capacity to be volatile for now.
+ * Non-volatile dynamic capacity will be added if needed in the
+ * future.
+ */
+ for (i = 0; i < ct3d->dc.num_regions; i++) {
+ ct3_build_cdat_entries_for_mr(&(table[cur_ent]),
+ dsmad_handle++,
+ ct3d->dc.regions[i].len,
+ false, true, region_base);
+ ct3d->dc.regions[i].dsmadhandle = dsmad_handle - 1;
+
+ cur_ent += CT3_CDAT_NUM_ENTRIES;
+ region_base += ct3d->dc.regions[i].len;
+ }
+ }
+
assert(len == cur_ent);
*cdat_table = g_steal_pointer(&table);
@@ -301,10 +337,17 @@ static void build_dvsecs(CXLType3Dev *ct3d)
range2_size_lo = (2 << 5) | (2 << 2) | 0x3 |
(ct3d->hostpmem->size & 0xF0000000);
}
- } else {
+ } else if (ct3d->hostpmem) {
range1_size_hi = ct3d->hostpmem->size >> 32;
range1_size_lo = (2 << 5) | (2 << 2) | 0x3 |
(ct3d->hostpmem->size & 0xF0000000);
+ } else {
+ /*
+ * For DCD with no static memory, set memory active, memory class bits.
+ * No range is set.
+ */
+ range1_size_hi = 0;
+ range1_size_lo = (2 << 5) | (2 << 2) | 0x3;
}
dvsec = (uint8_t *)&(CXLDVSECDevice){
@@ -579,11 +622,28 @@ static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp)
{
int i;
uint64_t region_base = 0;
- uint64_t region_len = 2 * GiB;
- uint64_t decode_len = 2 * GiB;
+ uint64_t region_len;
+ uint64_t decode_len;
uint64_t blk_size = 2 * MiB;
CXLDCRegion *region;
MemoryRegion *mr;
+ uint64_t dc_size;
+
+ mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ dc_size = memory_region_size(mr);
+ region_len = DIV_ROUND_UP(dc_size, ct3d->dc.num_regions);
+
+ if (dc_size % (ct3d->dc.num_regions * CXL_CAPACITY_MULTIPLIER) != 0) {
+ error_setg(errp, "backend size is not multiple of region len: 0x%lx",
+ region_len);
+ return false;
+ }
+ if (region_len % CXL_CAPACITY_MULTIPLIER != 0) {
+ error_setg(errp, "DC region size is unaligned to 0x%lx",
+ CXL_CAPACITY_MULTIPLIER);
+ return false;
+ }
+ decode_len = region_len;
if (ct3d->hostvmem) {
mr = host_memory_backend_get_memory(ct3d->hostvmem);
@@ -610,6 +670,7 @@ static bool cxl_create_dc_regions(CXLType3Dev *ct3d, Error **errp)
/* dsmad_handle set when creating CDAT table entries */
.flags = 0,
};
+ ct3d->dc.total_capacity += region->len;
}
return true;
@@ -619,7 +680,8 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
{
DeviceState *ds = DEVICE(ct3d);
- if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem) {
+ if (!ct3d->hostmem && !ct3d->hostvmem && !ct3d->hostpmem
+ && !ct3d->dc.num_regions) {
error_setg(errp, "at least one memdev property must be set");
return false;
} else if (ct3d->hostmem && ct3d->hostpmem) {
@@ -683,7 +745,37 @@ static bool cxl_setup_memory(CXLType3Dev *ct3d, Error **errp)
g_free(p_name);
}
+ ct3d->dc.total_capacity = 0;
if (ct3d->dc.num_regions > 0) {
+ MemoryRegion *dc_mr;
+ char *dc_name;
+
+ if (!ct3d->dc.host_dc) {
+ error_setg(errp, "dynamic capacity must have a backing device");
+ return false;
+ }
+
+ dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ if (!dc_mr) {
+ error_setg(errp, "dynamic capacity must have a backing device");
+ return false;
+ }
+
+ /*
+ * Set DC regions as volatile for now, non-volatile support can
+ * be added in the future if needed.
+ */
+ memory_region_set_nonvolatile(dc_mr, false);
+ memory_region_set_enabled(dc_mr, true);
+ host_memory_backend_set_mapped(ct3d->dc.host_dc, true);
+ if (ds->id) {
+ dc_name = g_strdup_printf("cxl-dcd-dpa-dc-space:%s", ds->id);
+ } else {
+ dc_name = g_strdup("cxl-dcd-dpa-dc-space");
+ }
+ address_space_init(&ct3d->dc.host_dc_as, dc_mr, dc_name);
+ g_free(dc_name);
+
if (!cxl_create_dc_regions(ct3d, errp)) {
error_append_hint(errp, "setup DC regions failed");
return false;
@@ -779,6 +871,9 @@ err_release_cdat:
err_free_special_ops:
g_free(regs->special_ops);
err_address_space_free:
+ if (ct3d->dc.host_dc) {
+ address_space_destroy(&ct3d->dc.host_dc_as);
+ }
if (ct3d->hostpmem) {
address_space_destroy(&ct3d->hostpmem_as);
}
@@ -797,6 +892,9 @@ static void ct3_exit(PCIDevice *pci_dev)
pcie_aer_exit(pci_dev);
cxl_doe_cdat_release(cxl_cstate);
g_free(regs->special_ops);
+ if (ct3d->dc.host_dc) {
+ address_space_destroy(&ct3d->dc.host_dc_as);
+ }
if (ct3d->hostpmem) {
address_space_destroy(&ct3d->hostpmem_as);
}
@@ -875,16 +973,23 @@ static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
AddressSpace **as,
uint64_t *dpa_offset)
{
- MemoryRegion *vmr = NULL, *pmr = NULL;
+ MemoryRegion *vmr = NULL, *pmr = NULL, *dc_mr = NULL;
+ uint64_t vmr_size = 0, pmr_size = 0, dc_size = 0;
if (ct3d->hostvmem) {
vmr = host_memory_backend_get_memory(ct3d->hostvmem);
+ vmr_size = memory_region_size(vmr);
}
if (ct3d->hostpmem) {
pmr = host_memory_backend_get_memory(ct3d->hostpmem);
+ pmr_size = memory_region_size(pmr);
+ }
+ if (ct3d->dc.host_dc) {
+ dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ dc_size = memory_region_size(dc_mr);
}
- if (!vmr && !pmr) {
+ if (!vmr && !pmr && !dc_mr) {
return -ENODEV;
}
@@ -892,19 +997,18 @@ static int cxl_type3_hpa_to_as_and_dpa(CXLType3Dev *ct3d,
return -EINVAL;
}
- if (*dpa_offset > ct3d->cxl_dstate.static_mem_size) {
+ if (*dpa_offset >= vmr_size + pmr_size + dc_size) {
return -EINVAL;
}
- if (vmr) {
- if (*dpa_offset < memory_region_size(vmr)) {
- *as = &ct3d->hostvmem_as;
- } else {
- *as = &ct3d->hostpmem_as;
- *dpa_offset -= memory_region_size(vmr);
- }
- } else {
+ if (*dpa_offset < vmr_size) {
+ *as = &ct3d->hostvmem_as;
+ } else if (*dpa_offset < vmr_size + pmr_size) {
*as = &ct3d->hostpmem_as;
+ *dpa_offset -= vmr_size;
+ } else {
+ *as = &ct3d->dc.host_dc_as;
+ *dpa_offset -= (vmr_size + pmr_size);
}
return 0;
@@ -986,6 +1090,8 @@ static Property ct3_props[] = {
DEFINE_PROP_UINT64("sn", CXLType3Dev, sn, UI64_NULL),
DEFINE_PROP_STRING("cdat", CXLType3Dev, cxl_cstate.cdat.filename),
DEFINE_PROP_UINT8("num-dc-regions", CXLType3Dev, dc.num_regions, 0),
+ DEFINE_PROP_LINK("volatile-dc-memdev", CXLType3Dev, dc.host_dc,
+ TYPE_MEMORY_BACKEND, HostMemoryBackend *),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1052,33 +1158,39 @@ static void set_lsa(CXLType3Dev *ct3d, const void *buf, uint64_t size,
static bool set_cacheline(CXLType3Dev *ct3d, uint64_t dpa_offset, uint8_t *data)
{
- MemoryRegion *vmr = NULL, *pmr = NULL;
+ MemoryRegion *vmr = NULL, *pmr = NULL, *dc_mr = NULL;
AddressSpace *as;
+ uint64_t vmr_size = 0, pmr_size = 0, dc_size = 0;
if (ct3d->hostvmem) {
vmr = host_memory_backend_get_memory(ct3d->hostvmem);
+ vmr_size = memory_region_size(vmr);
}
if (ct3d->hostpmem) {
pmr = host_memory_backend_get_memory(ct3d->hostpmem);
+ pmr_size = memory_region_size(pmr);
}
+ if (ct3d->dc.host_dc) {
+ dc_mr = host_memory_backend_get_memory(ct3d->dc.host_dc);
+ dc_size = memory_region_size(dc_mr);
+ }
- if (!vmr && !pmr) {
+ if (!vmr && !pmr && !dc_mr) {
return false;
}
- if (dpa_offset + CXL_CACHE_LINE_SIZE > ct3d->cxl_dstate.static_mem_size) {
+ if (dpa_offset + CXL_CACHE_LINE_SIZE > vmr_size + pmr_size + dc_size) {
return false;
}
- if (vmr) {
- if (dpa_offset < memory_region_size(vmr)) {
- as = &ct3d->hostvmem_as;
- } else {
- as = &ct3d->hostpmem_as;
- dpa_offset -= memory_region_size(vmr);
- }
- } else {
+ if (dpa_offset < vmr_size) {
+ as = &ct3d->hostvmem_as;
+ } else if (dpa_offset < vmr_size + pmr_size) {
as = &ct3d->hostpmem_as;
+ dpa_offset -= vmr_size;
+ } else {
+ as = &ct3d->dc.host_dc_as;
+ dpa_offset -= (vmr_size + pmr_size);
}
address_space_write(as, dpa_offset, MEMTXATTRS_UNSPECIFIED, &data,
diff --git a/include/hw/cxl/cxl_device.h b/include/hw/cxl/cxl_device.h
index f7f56b44e3..c2c3df0d2a 100644
--- a/include/hw/cxl/cxl_device.h
+++ b/include/hw/cxl/cxl_device.h
@@ -467,6 +467,14 @@ struct CXLType3Dev {
uint64_t poison_list_overflow_ts;
struct dynamic_capacity {
+ HostMemoryBackend *host_dc;
+ AddressSpace host_dc_as;
+ /*
+ * total_capacity is equivalent to the dynamic capacity
+ * memory region size.
+ */
+ uint64_t total_capacity; /* 256M aligned */
+
uint8_t num_regions; /* 0-8 regions */
CXLDCRegion regions[DCD_MAX_NUM_REGION];
} dc;
--
2.43.0
Thread overview: 28+ messages
2024-05-23 17:44 [PATCH v8 00/14] Enabling DCD emulation support in Qemu nifan.cxl
2024-05-23 17:44 ` [PATCH v8 01/14] hw/cxl/mailbox: change CCI cmd set structure to be a member, not a reference nifan.cxl
2024-05-23 17:44 ` [PATCH v8 02/14] hw/cxl/mailbox: interface to add CCI commands to an existing CCI nifan.cxl
2024-05-23 17:44 ` [PATCH v8 03/14] hw/cxl/cxl-mailbox-utils: Add dc_event_log_size field to output payload of identify memory device command nifan.cxl
2024-05-23 17:44 ` [PATCH v8 04/14] hw/cxl/cxl-mailbox-utils: Add dynamic capacity region representative and mailbox command support nifan.cxl
2024-05-23 17:44 ` [PATCH v8 05/14] include/hw/cxl/cxl_device: Rename mem_size as static_mem_size for type3 memory devices nifan.cxl
2024-05-23 17:44 ` [PATCH v8 06/14] hw/mem/cxl_type3: Add support to create DC regions to " nifan.cxl
2024-05-27 7:42 ` Zhijian Li (Fujitsu) via
2024-05-23 17:44 ` [PATCH v8 07/14] hw/mem/cxl-type3: Refactor ct3_build_cdat_entries_for_mr to take mr size instead of mr as argument nifan.cxl
2024-05-23 17:44 ` nifan.cxl [this message]
2024-06-03 12:27 ` [PATCH v8 08/14] hw/mem/cxl_type3: Add host backend and address space handling for DC regions Jonathan Cameron via
2024-06-03 15:04 ` Michael S. Tsirkin
2024-06-03 17:27 ` Jonathan Cameron via
2024-05-23 17:44 ` [PATCH v8 09/14] hw/mem/cxl_type3: Add DC extent list representative and get DC extent list mailbox support nifan.cxl
2024-05-23 17:44 ` [PATCH v8 10/14] hw/cxl/cxl-mailbox-utils: Add mailbox commands to support add/release dynamic capacity response nifan.cxl
2024-05-23 17:44 ` [PATCH v8 11/14] hw/cxl/events: Add qmp interfaces to add/release dynamic capacity extents nifan.cxl
2024-06-04 7:12 ` Markus Armbruster
2024-06-04 11:55 ` Jonathan Cameron via
2024-06-04 14:49 ` Markus Armbruster
2025-09-02 10:39 ` Alireza Sanaee via
2025-09-02 15:59 ` Ira Weiny
2025-09-04 8:44 ` Alireza Sanaee via
2024-05-23 17:44 ` [PATCH v8 12/14] hw/mem/cxl_type3: Add DPA range validation for accesses to DC regions nifan.cxl
2024-05-23 17:44 ` [PATCH v8 13/14] hw/cxl/cxl-mailbox-utils: Add superset extent release mailbox support nifan.cxl
2024-05-23 17:44 ` [PATCH v8 14/14] hw/mem/cxl_type3: Allow to release extent superset in QMP interface nifan.cxl
2024-06-03 13:51 ` [PATCH v8 00/14] Enabling DCD emulation support in Qemu Jonathan Cameron via
2025-06-25 14:22 ` Alireza Sanaee via
2025-06-26 16:39 ` Fan Ni