public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Michal Wajdeczko <michal.wajdeczko@intel.com>
To: "Michał Winiarski" <michal.winiarski@intel.com>,
	"Alex Williamson" <alex.williamson@redhat.com>,
	"Lucas De Marchi" <lucas.demarchi@intel.com>,
	"Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	"Rodrigo Vivi" <rodrigo.vivi@intel.com>,
	"Jason Gunthorpe" <jgg@ziepe.ca>,
	"Yishai Hadas" <yishaih@nvidia.com>,
	"Kevin Tian" <kevin.tian@intel.com>,
	intel-xe@lists.freedesktop.org, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org, "Matthew Brost" <matthew.brost@intel.com>
Cc: <dri-devel@lists.freedesktop.org>,
	Jani Nikula <jani.nikula@linux.intel.com>,
	Joonas Lahtinen <joonas.lahtinen@linux.intel.com>,
	Tvrtko Ursulin <tursulin@ursulin.net>,
	David Airlie <airlied@gmail.com>, Simona Vetter <simona@ffwll.ch>,
	"Lukasz Laguna" <lukasz.laguna@intel.com>
Subject: Re: [PATCH v2 05/26] drm/xe/pf: Add helpers for migration data allocation / free
Date: Thu, 23 Oct 2025 00:18:09 +0200	[thread overview]
Message-ID: <830ac907-684d-439e-9612-e8d2f32d97b6@intel.com> (raw)
In-Reply-To: <20251021224133.577765-6-michal.winiarski@intel.com>



On 10/22/2025 12:41 AM, Michał Winiarski wrote:
> Now that it's possible to free the packets - connect the restore
> handling logic with the ring.
> The helpers will also be used in upcoming changes that will start producing
> migration data packets.
> 
> Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
> ---
>  drivers/gpu/drm/xe/Makefile                   |   1 +
>  drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c   |   7 +
>  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c |  29 +++-
>  drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h |   1 +
>  drivers/gpu/drm/xe/xe_sriov_migration_data.c  | 127 ++++++++++++++++++
>  drivers/gpu/drm/xe/xe_sriov_migration_data.h  |  31 +++++
>  6 files changed, 195 insertions(+), 1 deletion(-)
>  create mode 100644 drivers/gpu/drm/xe/xe_sriov_migration_data.c
>  create mode 100644 drivers/gpu/drm/xe/xe_sriov_migration_data.h
> 
> diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
> index 89e5b26c27975..3d72db9e528e4 100644
> --- a/drivers/gpu/drm/xe/Makefile
> +++ b/drivers/gpu/drm/xe/Makefile
> @@ -173,6 +173,7 @@ xe-$(CONFIG_PCI_IOV) += \
>  	xe_lmtt_2l.o \
>  	xe_lmtt_ml.o \
>  	xe_pci_sriov.o \
> +	xe_sriov_migration_data.o \
>  	xe_sriov_pf.o \
>  	xe_sriov_pf_control.o \
>  	xe_sriov_pf_debugfs.o \
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> index cad73fdaee93c..dd9bc9c99f78c 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
> @@ -18,6 +18,7 @@
>  #include "xe_gt_sriov_printk.h"
>  #include "xe_guc_ct.h"
>  #include "xe_sriov.h"
> +#include "xe_sriov_migration_data.h"
>  #include "xe_sriov_pf_control.h"
>  #include "xe_sriov_pf_migration.h"
>  #include "xe_sriov_pf_service.h"
> @@ -851,6 +852,8 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
>  static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
>  {
>  	if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
> +		xe_gt_sriov_pf_migration_ring_free(gt, vfid);
> +
>  		pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
>  		pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA);
>  		pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
> @@ -1045,6 +1048,8 @@ int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid)
>  static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid)
>  {
>  	if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
> +		xe_gt_sriov_pf_migration_ring_free(gt, vfid);
> +
>  		pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
>  		pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA);
>  		pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE);
> @@ -1078,6 +1083,8 @@ pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid)
>  
>  	xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n", vfid, data->type);
>  
> +	xe_sriov_migration_data_free(data);
> +
>  	return 0;
>  }
>  
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> index b6ffd982d6007..8ba72165759b3 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
> @@ -14,6 +14,7 @@
>  #include "xe_guc.h"
>  #include "xe_guc_ct.h"
>  #include "xe_sriov.h"
> +#include "xe_sriov_migration_data.h"
>  #include "xe_sriov_pf_migration.h"
>  
>  #define XE_GT_SRIOV_PF_MIGRATION_RING_SIZE 5
> @@ -418,6 +419,25 @@ bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid)
>  	return ptr_ring_full(&pf_pick_gt_migration(gt, vfid)->ring);
>  }
>  
> +/**
> + * xe_gt_sriov_pf_migration_ring_free() - Consume and free all data in migration ring
> + * @gt: the &xe_gt
> + * @vfid: the VF identifier
> + */
> +void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid)
> +{
> +	struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid);
> +	struct xe_sriov_migration_data *data;
> +
> +	if (ptr_ring_empty(&migration->ring))
> +		return;
> +
> +	xe_gt_sriov_notice(gt, "VF%u unprocessed migration data left in the ring!\n", vfid);
> +
> +	while ((data = ptr_ring_consume(&migration->ring)))
> +		xe_sriov_migration_data_free(data);
> +}
> +
>  /**
>   * xe_gt_sriov_pf_migration_save_produce() - Add VF save data packet to migration ring.
>   * @gt: the &xe_gt
> @@ -543,11 +563,18 @@ xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid)
>  	return ERR_PTR(-EAGAIN);
>  }
>  
> +static void pf_mig_data_destroy(void *ptr)
> +{
> +	struct xe_sriov_migration_data *data = ptr;
> +
> +	xe_sriov_migration_data_free(data);
> +}
> +
>  static void action_ring_cleanup(struct drm_device *dev, void *arg)
>  {
>  	struct ptr_ring *r = arg;
>  
> -	ptr_ring_cleanup(r, NULL);
> +	ptr_ring_cleanup(r, pf_mig_data_destroy);
>  }
>  
>  /**
> diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> index 9e67f18ded205..1ed2248f0a17e 100644
> --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
> @@ -17,6 +17,7 @@ int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vf
>  
>  bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid);
>  bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid);
> +void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid);
>  
>  int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid,
>  					  struct xe_sriov_migration_data *data);
> diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.c b/drivers/gpu/drm/xe/xe_sriov_migration_data.c
> new file mode 100644
> index 0000000000000..b04f9be3b7fed
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.c
> @@ -0,0 +1,127 @@
> +// SPDX-License-Identifier: MIT
> +/*
> + * Copyright © 2025 Intel Corporation
> + */
> +
> +#include "xe_bo.h"
> +#include "xe_device.h"
> +#include "xe_sriov_migration_data.h"
> +
> +static bool data_needs_bo(struct xe_sriov_migration_data *data)
> +{
> +	return data->type == XE_SRIOV_MIGRATION_DATA_TYPE_VRAM;
> +}
> +
> +/**
> + * xe_sriov_migration_data_alloc() - Allocate migration data packet
> + * @xe: the &xe_device
> + *
> + * Only allocates the "outer" structure, without initializing the migration
> + * data backing storage.
> + *
> + * Return: Pointer to &xe_sriov_migration_data on success,
> + *         NULL in case of error.
> + */
> +struct xe_sriov_migration_data *

no line split

> +xe_sriov_migration_data_alloc(struct xe_device *xe)
> +{
> +	struct xe_sriov_migration_data *data;
> +
> +	data = kzalloc(sizeof(*data), GFP_KERNEL);
> +	if (!data)
> +		return NULL;
> +
> +	data->xe = xe;
> +	data->hdr_remaining = sizeof(data->hdr);
> +
> +	return data;
> +}
> +
> +/**
> + * xe_sriov_migration_data_free() - Free migration data packet.
> + * @data: the &xe_sriov_migration_data packet
> + */
> +void xe_sriov_migration_data_free(struct xe_sriov_migration_data *data)
> +{
> +	if (data_needs_bo(data))
> +		xe_bo_unpin_map_no_vm(data->bo);
> +	else
> +		kvfree(data->buff);
> +
> +	kfree(data);
> +}
> +
> +static int mig_data_init(struct xe_sriov_migration_data *data)
> +{
> +	struct xe_gt *gt = xe_device_get_gt(data->xe, data->gt);
> +
> +	if (data->size == 0)
> +		return 0;
> +
> +	if (data_needs_bo(data)) {

		struct xe_bo *bo;
then
		bo = ...

so will not have that long line

> +		struct xe_bo *bo = xe_bo_create_pin_map_novm(data->xe, gt->tile,
> +							     PAGE_ALIGN(data->size),
> +							     ttm_bo_type_kernel,
> +							     XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED,
> +							     false);
> +		if (IS_ERR(bo))
> +			return PTR_ERR(bo);
> +
> +		data->bo = bo;
> +		data->vaddr = bo->vmap.vaddr;
> +	} else {
> +		void *buff = kvzalloc(data->size, GFP_KERNEL);
> +
> +		if (!buff)
> +			return -ENOMEM;
> +
> +		data->buff = buff;
> +		data->vaddr = buff;
> +	}
> +
> +	return 0;
> +}
> +
> +#define XE_SRIOV_MIGRATION_DATA_SUPPORTED_VERSION 1
> +/**
> + * xe_sriov_migration_data_init() - Initialize the migration data header and backing storage.
> + * @data: the &xe_sriov_migration_data packet
> + * @tile_id: tile identifier
> + * @gt_id: GT identifier
> + * @type: &xe_sriov_migration_data_type
> + * @offset: offset of data packet payload (within wider resource)
> + * @size: size of data packet payload
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_sriov_migration_data_init(struct xe_sriov_migration_data *data, u8 tile_id, u8 gt_id,
> +				 enum xe_sriov_migration_data_type type, loff_t offset, size_t size)
> +{
> +	data->version = XE_SRIOV_MIGRATION_DATA_SUPPORTED_VERSION;
> +	data->type = type;
> +	data->tile = tile_id;
> +	data->gt = gt_id;
> +	data->offset = offset;
> +	data->size = size;
> +	data->remaining = size;
> +
> +	return mig_data_init(data);
> +}
> +
> +/**
> + * xe_sriov_migration_data_init_from_hdr() - Initialize the migration data backing storage based on header.
> + * @data: the &xe_sriov_migration_data packet
> + *
> + * Header data is expected to be filled prior to calling this function.
> + *
> + * Return: 0 on success or a negative error code on failure.
> + */
> +int xe_sriov_migration_data_init_from_hdr(struct xe_sriov_migration_data *data)
> +{
> +	if (data->version != XE_SRIOV_MIGRATION_DATA_SUPPORTED_VERSION)
> +		return -EINVAL;
> +
> +	data->remaining = data->size;
> +
> +	return mig_data_init(data);
> +}
> diff --git a/drivers/gpu/drm/xe/xe_sriov_migration_data.h b/drivers/gpu/drm/xe/xe_sriov_migration_data.h
> new file mode 100644
> index 0000000000000..ef65dccddc035
> --- /dev/null
> +++ b/drivers/gpu/drm/xe/xe_sriov_migration_data.h
> @@ -0,0 +1,31 @@
> +/* SPDX-License-Identifier: MIT */
> +/*
> + * Copyright © 2025 Intel Corporation
> + */
> +
> +#ifndef _XE_SRIOV_MIGRATION_DATA_H_
> +#define _XE_SRIOV_MIGRATION_DATA_H_
> +
> +#include <linux/types.h>
> +
> +struct xe_device;
> +
> +enum xe_sriov_migration_data_type {
> +	/* Skipping 0 to catch uninitialized data */
> +	XE_SRIOV_MIGRATION_DATA_TYPE_DESCRIPTOR = 1,
> +	XE_SRIOV_MIGRATION_DATA_TYPE_TRAILER,
> +	XE_SRIOV_MIGRATION_DATA_TYPE_GGTT,
> +	XE_SRIOV_MIGRATION_DATA_TYPE_MMIO,
> +	XE_SRIOV_MIGRATION_DATA_TYPE_GUC,
> +	XE_SRIOV_MIGRATION_DATA_TYPE_VRAM,
> +};
> +
> +struct xe_sriov_migration_data *

no need for line split here
> +xe_sriov_migration_data_alloc(struct xe_device *xe);
> +void xe_sriov_migration_data_free(struct xe_sriov_migration_data *snapshot);
> +
> +int xe_sriov_migration_data_init(struct xe_sriov_migration_data *data, u8 tile_id, u8 gt_id,
> +				 enum xe_sriov_migration_data_type type, loff_t offset, size_t size);
> +int xe_sriov_migration_data_init_from_hdr(struct xe_sriov_migration_data *snapshot);
> +
> +#endif

just a few nits, otherwise LGTM

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>


  reply	other threads:[~2025-10-22 22:18 UTC|newest]

Thread overview: 72+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-10-21 22:41 [PATCH v2 00/26] vfio/xe: Add driver variant for Xe VF migration Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 01/26] drm/xe/pf: Remove GuC version check for migration support Michał Winiarski
2025-10-28  2:33   ` Tian, Kevin
2025-10-28  8:06     ` Winiarski, Michal
2025-10-21 22:41 ` [PATCH v2 02/26] drm/xe: Move migration support to device-level struct Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 03/26] drm/xe/pf: Add save/restore control state stubs and connect to debugfs Michał Winiarski
2025-10-22 22:31   ` Michal Wajdeczko
2025-10-27 12:02     ` Michał Winiarski
2025-10-28  3:06   ` Tian, Kevin
2025-10-28  8:02     ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 04/26] drm/xe/pf: Add data structures and handlers for migration rings Michał Winiarski
2025-10-22 22:06   ` Michal Wajdeczko
2025-10-27 12:33     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 05/26] drm/xe/pf: Add helpers for migration data allocation / free Michał Winiarski
2025-10-22 22:18   ` Michal Wajdeczko [this message]
2025-10-27 12:47     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 06/26] drm/xe/pf: Add support for encap/decap of bitstream to/from packet Michał Winiarski
2025-10-22 22:34   ` Michal Wajdeczko
2025-10-27 13:27     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 07/26] drm/xe/pf: Add minimalistic migration descriptor Michał Winiarski
2025-10-22 22:49   ` Michal Wajdeczko
2025-10-27 14:52     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 08/26] drm/xe/pf: Expose VF migration data size over debugfs Michał Winiarski
2025-10-22 23:02   ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 09/26] drm/xe: Add sa/guc_buf_cache sync interface Michał Winiarski
2025-10-22 23:05   ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 10/26] drm/xe: Allow the caller to pass guc_buf_cache size Michał Winiarski
2025-10-22 23:13   ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 11/26] drm/xe/pf: Increase PF GuC Buffer Cache size and use it for VF migration Michał Winiarski
2025-10-23 17:37   ` Michal Wajdeczko
2025-10-28 10:46     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 12/26] drm/xe/pf: Remove GuC migration data save/restore from GT debugfs Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 13/26] drm/xe/pf: Don't save GuC VF migration data on pause Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 14/26] drm/xe/pf: Switch VF migration GuC save/restore to struct migration data Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 15/26] drm/xe/pf: Handle GuC migration data as part of PF control Michał Winiarski
2025-10-23 20:39   ` Michal Wajdeczko
2025-10-28 13:04     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 16/26] drm/xe/pf: Add helpers for VF GGTT migration data handling Michał Winiarski
2025-10-23 21:50   ` Michal Wajdeczko
2025-10-28 17:03     ` Michał Winiarski
2025-10-28  3:22   ` Tian, Kevin
2025-10-28  7:38     ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 17/26] drm/xe/pf: Handle GGTT migration data as part of PF control Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 18/26] drm/xe/pf: Add helpers for VF MMIO migration data handling Michał Winiarski
2025-10-23 22:10   ` Michal Wajdeczko
2025-10-28 23:37     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 19/26] drm/xe/pf: Handle MMIO migration data as part of PF control Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 20/26] drm/xe/pf: Add helper to retrieve VF's LMEM object Michał Winiarski
2025-10-23 20:25   ` Michal Wajdeczko
2025-10-28 23:40     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 21/26] drm/xe/migrate: Add function to copy of VRAM data in chunks Michał Winiarski
2025-10-23 19:29   ` Michal Wajdeczko
2025-10-30  6:07     ` Laguna, Lukasz
2025-10-21 22:41 ` [PATCH v2 22/26] drm/xe/pf: Handle VRAM migration data as part of PF control Michał Winiarski
2025-10-23 11:44   ` kernel test robot
2025-10-23 19:54   ` Michal Wajdeczko
2025-10-29  8:54     ` Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 23/26] drm/xe/pf: Add wait helper for VF FLR Michał Winiarski
2025-10-21 22:41 ` [PATCH v2 24/26] drm/xe/pf: Enable SR-IOV VF migration for PTL and BMG Michał Winiarski
2025-10-23 20:15   ` Michal Wajdeczko
2025-10-21 22:41 ` [PATCH v2 25/26] drm/xe/pf: Export helpers for VFIO Michał Winiarski
2025-10-28  3:28   ` Tian, Kevin
2025-10-21 22:41 ` [PATCH v2 26/26] vfio/xe: Add vendor-specific vfio_pci driver for Intel graphics Michał Winiarski
2025-10-22  7:12   ` Christoph Hellwig
2025-10-22  8:52     ` Michał Winiarski
2025-10-22  8:54       ` Christoph Hellwig
2025-10-22  9:12         ` Michał Winiarski
2025-10-22 11:33           ` Jason Gunthorpe
2025-10-22 13:27             ` Michał Winiarski
2025-10-27  7:24   ` Tian, Kevin
2025-10-29 20:46     ` Winiarski, Michal
2025-10-27  7:26   ` Tian, Kevin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=830ac907-684d-439e-9612-e8d2f32d97b6@intel.com \
    --to=michal.wajdeczko@intel.com \
    --cc=airlied@gmail.com \
    --cc=alex.williamson@redhat.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=intel-xe@lists.freedesktop.org \
    --cc=jani.nikula@linux.intel.com \
    --cc=jgg@ziepe.ca \
    --cc=joonas.lahtinen@linux.intel.com \
    --cc=kevin.tian@intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=lucas.demarchi@intel.com \
    --cc=lukasz.laguna@intel.com \
    --cc=matthew.brost@intel.com \
    --cc=michal.winiarski@intel.com \
    --cc=rodrigo.vivi@intel.com \
    --cc=simona@ffwll.ch \
    --cc=thomas.hellstrom@linux.intel.com \
    --cc=tursulin@ursulin.net \
    --cc=yishaih@nvidia.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox