From: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
To: intel-xe@lists.freedesktop.org
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
"Himal Prasad Ghimiray" <himal.prasad.ghimiray@intel.com>,
dri-devel@lists.freedesktop.org, apopple@nvidia.com,
airlied@gmail.com, "Simona Vetter" <simona.vetter@ffwll.ch>,
felix.kuehling@amd.com, "Matthew Brost" <matthew.brost@intel.com>,
"Christian König" <christian.koenig@amd.com>,
dakr@kernel.org, "Mrozek, Michal" <michal.mrozek@intel.com>,
"Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>
Subject: [PATCH v5 13/24] drm/xe: Use the vma attribute drm_pagemap to select where to migrate
Date: Thu, 18 Dec 2025 17:20:50 +0100
Message-ID: <20251218162101.605379-14-thomas.hellstrom@linux.intel.com>
In-Reply-To: <20251218162101.605379-1-thomas.hellstrom@linux.intel.com>

Honor the drm_pagemap vma attribute when migrating SVM pages.
Ensure that when the desired placement is validated as device
memory, we also check that the requested drm_pagemap is
consistent with the current one.
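
As an illustration, the resulting target resolution order can be
sketched in simplified pseudo-C (the helper name resolve_preferred()
is illustrative only; the real logic lives in xe_vma_resolve_pagemap()
below, with fd-based multi-device lookup left as future work):

	static struct drm_pagemap *
	resolve_preferred(struct xe_vma *vma, struct xe_tile *tile)
	{
		/* An explicit drm_pagemap set via madvise takes precedence. */
		if (vma->attr.preferred_loc.dpagemap)
			return vma->attr.preferred_loc.dpagemap;

		/* Otherwise fall back to the devmem_fd encoding. */
		switch ((s32)vma->attr.preferred_loc.devmem_fd) {
		case DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM:
			return NULL;	/* Migrate to system memory. */
		case DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE:
			/* Local VRAM on dGFX; system memory on iGFX. */
			return IS_DGFX(tile_to_xe(tile)) ?
				xe_tile_local_pagemap(tile) : NULL;
		default:
			return NULL;	/* fd lookup is future work. */
		}
	}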
v2:
- Initialize a struct drm_pagemap pointer to NULL that could
otherwise be dereferenced uninitialized. (CI)
- Remove a redundant assignment (Matt Brost)
- Slightly improved commit message (Matt Brost)
- Extended drm_pagemap validation.
v3:
- Fix a compilation error if CONFIG_DRM_GPUSVM is not enabled.
(kernel test robot <lkp@intel.com>)
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
---
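(For reviewers: a simplified before/after of the fault-handler call
site, condensed from the hunks below.)

	/* Before: the VRAM target was implied by the faulting tile. */
	err = xe_svm_alloc_vram(tile, range, &ctx);

	/* After: the target is an explicit drm_pagemap, resolved first. */
	dpagemap = ctx.devmem_only ? xe_tile_local_pagemap(tile) :
				     xe_vma_resolve_pagemap(vma, tile);
	err = xe_svm_alloc_vram(range, &ctx, dpagemap);

The prefetch path changes the same way, storing a struct drm_pagemap
pointer instead of a struct xe_tile pointer in struct
xe_vma_op_prefetch_range.
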
drivers/gpu/drm/xe/xe_svm.c | 86 ++++++++++++++++++++------------
drivers/gpu/drm/xe/xe_svm.h | 14 +++---
drivers/gpu/drm/xe/xe_vm.c | 24 ++++-----
drivers/gpu/drm/xe/xe_vm_types.h | 6 +--
4 files changed, 72 insertions(+), 58 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index fba40bf76586..efc25a93d3da 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -909,13 +909,34 @@ void xe_svm_fini(struct xe_vm *vm)
drm_gpusvm_fini(&vm->svm.gpusvm);
}
+static bool xe_svm_range_has_pagemap_locked(const struct xe_svm_range *range,
+ const struct drm_pagemap *dpagemap)
+{
+ return range->base.pages.dpagemap == dpagemap;
+}
+
+static bool xe_svm_range_has_pagemap(struct xe_svm_range *range,
+ const struct drm_pagemap *dpagemap)
+{
+ struct xe_vm *vm = range_to_vm(&range->base);
+ bool ret;
+
+ xe_svm_notifier_lock(vm);
+ ret = xe_svm_range_has_pagemap_locked(range, dpagemap);
+ xe_svm_notifier_unlock(vm);
+
+ return ret;
+}
+
static bool xe_svm_range_is_valid(struct xe_svm_range *range,
struct xe_tile *tile,
- bool devmem_only)
+ bool devmem_only,
+ const struct drm_pagemap *dpagemap)
+
{
return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
range->tile_invalidated) &&
- (!devmem_only || xe_svm_range_in_vram(range)));
+ (!devmem_only || xe_svm_range_has_pagemap(range, dpagemap)));
}
/** xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
@@ -936,7 +957,8 @@ void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
* @vm: xe_vm pointer
* @range: Pointer to the SVM range structure
* @tile_mask: Mask representing the tiles to be checked
- * @devmem_preferred : if true range needs to be in devmem
+ * @dpagemap: if !%NULL, the range is expected to be present
+ * in device memory identified by this parameter.
*
* The xe_svm_range_validate() function checks if a range is
* valid and located in the desired memory region.
@@ -945,14 +967,15 @@ void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
*/
bool xe_svm_range_validate(struct xe_vm *vm,
struct xe_svm_range *range,
- u8 tile_mask, bool devmem_preferred)
+ u8 tile_mask, const struct drm_pagemap *dpagemap)
{
bool ret;
xe_svm_notifier_lock(vm);
- ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
- (devmem_preferred == range->base.pages.flags.has_devmem_pages);
+ ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask;
+ if (dpagemap)
+ ret = ret && xe_svm_range_has_pagemap_locked(range, dpagemap);
xe_svm_notifier_unlock(vm);
@@ -1066,22 +1089,22 @@ static bool supports_4K_migration(struct xe_device *xe)
* xe_svm_range_needs_migrate_to_vram() - SVM range needs migrate to VRAM or not
* @range: SVM range for which migration needs to be decided
* @vma: vma which has range
- * @preferred_region_is_vram: preferred region for range is vram
+ * @dpagemap: The preferred struct drm_pagemap to migrate to.
*
* Return: True for range needing migration and migration is supported else false
*/
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
- bool preferred_region_is_vram)
+ const struct drm_pagemap *dpagemap)
{
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
- if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
+ if (!range->base.pages.flags.migrate_devmem || !dpagemap)
return false;
xe_assert(vm->xe, IS_DGFX(vm->xe));
- if (xe_svm_range_in_vram(range)) {
+ if (xe_svm_range_has_pagemap(range, dpagemap)) {
drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
@@ -1178,9 +1201,9 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
if (err)
return err;
- dpagemap = xe_vma_resolve_pagemap(vma, tile);
- ctx.device_private_page_owner =
- xe_svm_private_page_owner(vm, !dpagemap && !ctx.devmem_only);
+ dpagemap = ctx.devmem_only ? xe_tile_local_pagemap(tile) :
+ xe_vma_resolve_pagemap(vma, tile);
+ ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap);
range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
if (IS_ERR(range))
@@ -1193,7 +1216,7 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
goto out;
}
- if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only, dpagemap)) {
xe_svm_range_valid_fault_count_stats_incr(gt, range);
range_debug(range, "PAGE FAULT - VALID");
goto out;
@@ -1202,16 +1225,11 @@ static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
range_debug(range, "PAGE FAULT");
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
+ xe_svm_range_needs_migrate_to_vram(range, vma, dpagemap)) {
ktime_t migrate_start = xe_gt_stats_ktime_get();
- /* TODO : For multi-device dpagemap will be used to find the
- * remote tile and remote device. Will need to modify
- * xe_svm_alloc_vram to use dpagemap for future multi-device
- * support.
- */
xe_svm_range_migrate_count_stats_incr(gt, range);
- err = xe_svm_alloc_vram(tile, range, &ctx);
+ err = xe_svm_alloc_vram(range, &ctx, dpagemap);
xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
@@ -1528,7 +1546,13 @@ u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
*/
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
- s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
+ struct drm_pagemap *dpagemap = vma->attr.preferred_loc.dpagemap;
+ s32 fd;
+
+ if (dpagemap)
+ return dpagemap;
+
+ fd = (s32)vma->attr.preferred_loc.devmem_fd;
if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
return NULL;
@@ -1536,28 +1560,24 @@ struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *t
if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
return IS_DGFX(tile_to_xe(tile)) ? xe_tile_local_pagemap(tile) : NULL;
- /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
return NULL;
}
/**
* xe_svm_alloc_vram()- Allocate device memory pages for range,
* migrating existing data.
- * @tile: tile to allocate vram from
* @range: SVM range
* @ctx: DRM GPU SVM context
+ * @dpagemap: The struct drm_pagemap representing the memory to allocate.
*
* Return: 0 on success, error code on failure.
*/
-int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
+ struct drm_pagemap *dpagemap)
{
- struct drm_pagemap *dpagemap;
-
- xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
+ xe_assert(range_to_vm(&range->base)->xe, range->base.pages.flags.migrate_devmem);
range_debug(range, "ALLOCATE VRAM");
- dpagemap = xe_tile_local_pagemap(tile);
return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
xe_svm_range_end(range),
range->base.gpusvm->mm,
@@ -1827,9 +1847,9 @@ int xe_pagemap_cache_create(struct xe_tile *tile)
return 0;
}
-int xe_svm_alloc_vram(struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+int xe_svm_alloc_vram(struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx,
+ struct drm_pagemap *dpagemap)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 5adce108f7eb..a003f571c82a 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -94,8 +94,8 @@ int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
-int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx);
+int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
+ struct drm_pagemap *dpagemap);
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
@@ -104,13 +104,13 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
struct drm_gpusvm_ctx *ctx);
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
- bool preferred_region_is_vram);
+ const struct drm_pagemap *dpagemap);
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
bool xe_svm_range_validate(struct xe_vm *vm,
struct xe_svm_range *range,
- u8 tile_mask, bool devmem_preferred);
+ u8 tile_mask, const struct drm_pagemap *dpagemap);
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
@@ -276,8 +276,8 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
}
static inline int
-xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
+ struct drm_pagemap *dpagemap)
{
return -EOPNOTSUPP;
}
@@ -318,7 +318,7 @@ static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
static inline
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
- u32 region)
+ const struct drm_pagemap *dpagemap)
{
return false;
}
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 351046c9587b..808b44fb0569 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -2342,7 +2342,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
struct xe_tile *tile;
struct xe_svm_range *svm_range;
struct drm_gpusvm_ctx ctx = {};
- struct drm_pagemap *dpagemap;
+ struct drm_pagemap *dpagemap = NULL;
u8 id, tile_mask = 0;
u32 i;
@@ -2360,23 +2360,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
op->prefetch_range.ranges_count = 0;
- tile = NULL;
if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
dpagemap = xe_vma_resolve_pagemap(vma,
xe_device_get_root_tile(vm->xe));
- /*
- * TODO: Once multigpu support is enabled will need
- * something to dereference tile from dpagemap.
- */
- if (dpagemap)
- tile = xe_device_get_root_tile(vm->xe);
} else if (prefetch_region) {
tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
XE_PL_VRAM0];
+ dpagemap = xe_tile_local_pagemap(tile);
}
- op->prefetch_range.tile = tile;
+ op->prefetch_range.dpagemap = dpagemap;
alloc_next_range:
svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
@@ -2395,7 +2389,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
goto unwind_prefetch_ops;
}
- if (xe_svm_range_validate(vm, svm_range, tile_mask, !!tile)) {
+ if (xe_svm_range_validate(vm, svm_range, tile_mask, dpagemap)) {
xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
goto check_next_range;
}
@@ -2917,7 +2911,7 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
{
bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- struct xe_tile *tile = op->prefetch_range.tile;
+ struct drm_pagemap *dpagemap = op->prefetch_range.dpagemap;
int err = 0;
struct xe_svm_range *svm_range;
@@ -2930,15 +2924,15 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
ctx.read_only = xe_vma_read_only(vma);
ctx.devmem_possible = devmem_possible;
ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
- ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !tile);
+ ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap);
/* TODO: Threading the migration */
xa_for_each(&op->prefetch_range.range, i, svm_range) {
- if (!tile)
+ if (!dpagemap)
xe_svm_range_migrate_to_smem(vm, svm_range);
- if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile)) {
- err = xe_svm_alloc_vram(tile, svm_range, &ctx);
+ if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, dpagemap)) {
+ err = xe_svm_alloc_vram(svm_range, &ctx, dpagemap);
if (err) {
drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 5876a966ed24..594555f1669a 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -408,10 +408,10 @@ struct xe_vma_op_prefetch_range {
/** @ranges_count: number of svm ranges to map */
u32 ranges_count;
/**
- * @tile: Pointer to the tile structure containing memory to prefetch.
- * NULL if prefetch requested region is smem
+ * @dpagemap: Pointer to the dpagemap structure containing memory to prefetch.
+ * NULL if prefetch requested region is smem
*/
- struct xe_tile *tile;
+ struct drm_pagemap *dpagemap;
};
/** enum xe_vma_op_flags - flags for VMA operation */
--
2.51.1