linux-doc.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Pranjal Shrivastava <praan@google.com>
To: Nicolin Chen <nicolinc@nvidia.com>
Cc: jgg@nvidia.com, kevin.tian@intel.com, corbet@lwn.net,
	bagasdotme@gmail.com, will@kernel.org, robin.murphy@arm.com,
	joro@8bytes.org, thierry.reding@gmail.com, vdumpa@nvidia.com,
	jonathanh@nvidia.com, shuah@kernel.org, jsnitsel@redhat.com,
	nathan@kernel.org, peterz@infradead.org, yi.l.liu@intel.com,
	mshavit@google.com, zhangzekun11@huawei.com,
	iommu@lists.linux.dev, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-tegra@vger.kernel.org, linux-kselftest@vger.kernel.org,
	patches@lists.linux.dev, mochs@nvidia.com,
	alok.a.tiwari@oracle.com, vasant.hegde@amd.com,
	dwmw2@infradead.org, baolu.lu@linux.intel.com
Subject: Re: [PATCH v9 16/29] iommufd/selftest: Add coverage for IOMMUFD_CMD_HW_QUEUE_ALLOC
Date: Thu, 10 Jul 2025 11:36:15 +0000	[thread overview]
Message-ID: <aG-lr9nUhwff4GuJ@google.com> (raw)
In-Reply-To: <e8a194d187d7ef445f43e4a3c04fb39472050afd.1752126748.git.nicolinc@nvidia.com>

On Wed, Jul 09, 2025 at 10:59:08PM -0700, Nicolin Chen wrote:
> Some simple tests for IOMMUFD_CMD_HW_QUEUE_ALLOC infrastructure covering
> the new iommufd_hw_queue_depend/undepend() helpers.
> 
> Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>

Reviewed-by: Pranjal Shrivastava <praan@google.com>

> ---
>  drivers/iommu/iommufd/iommufd_test.h          |  3 +
>  tools/testing/selftests/iommu/iommufd_utils.h | 31 ++++++
>  drivers/iommu/iommufd/selftest.c              | 97 +++++++++++++++++++
>  tools/testing/selftests/iommu/iommufd.c       | 59 +++++++++++
>  .../selftests/iommu/iommufd_fail_nth.c        |  6 ++
>  5 files changed, 196 insertions(+)
> 
> diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
> index fbf9ecb35a13..51cd744a354f 100644
> --- a/drivers/iommu/iommufd/iommufd_test.h
> +++ b/drivers/iommu/iommufd/iommufd_test.h
> @@ -265,4 +265,7 @@ struct iommu_viommu_event_selftest {
>  	__u32 virt_id;
>  };
>  
> +#define IOMMU_HW_QUEUE_TYPE_SELFTEST 0xdeadbeef
> +#define IOMMU_TEST_HW_QUEUE_MAX 2
> +
>  #endif
> diff --git a/tools/testing/selftests/iommu/iommufd_utils.h b/tools/testing/selftests/iommu/iommufd_utils.h
> index a5d4cbd089ba..9a556f99d992 100644
> --- a/tools/testing/selftests/iommu/iommufd_utils.h
> +++ b/tools/testing/selftests/iommu/iommufd_utils.h
> @@ -956,6 +956,37 @@ static int _test_cmd_vdevice_alloc(int fd, __u32 viommu_id, __u32 idev_id,
>  		     _test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id,   \
>  					     virt_id, vdev_id))
>  
> +static int _test_cmd_hw_queue_alloc(int fd, __u32 viommu_id, __u32 type,
> +				    __u32 idx, __u64 base_addr, __u64 length,
> +				    __u32 *hw_queue_id)
> +{
> +	struct iommu_hw_queue_alloc cmd = {
> +		.size = sizeof(cmd),
> +		.viommu_id = viommu_id,
> +		.type = type,
> +		.index = idx,
> +		.nesting_parent_iova = base_addr,
> +		.length = length,
> +	};
> +	int ret;
> +
> +	ret = ioctl(fd, IOMMU_HW_QUEUE_ALLOC, &cmd);
> +	if (ret)
> +		return ret;
> +	if (hw_queue_id)
> +		*hw_queue_id = cmd.out_hw_queue_id;
> +	return 0;
> +}
> +
> +#define test_cmd_hw_queue_alloc(viommu_id, type, idx, base_addr, len, out_qid) \
> +	ASSERT_EQ(0, _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx,  \
> +					      base_addr, len, out_qid))
> +#define test_err_hw_queue_alloc(_errno, viommu_id, type, idx, base_addr, len, \
> +				out_qid)                                      \
> +	EXPECT_ERRNO(_errno,                                                  \
> +		     _test_cmd_hw_queue_alloc(self->fd, viommu_id, type, idx, \
> +					      base_addr, len, out_qid))
> +
>  static int _test_cmd_veventq_alloc(int fd, __u32 viommu_id, __u32 type,
>  				   __u32 *veventq_id, __u32 *veventq_fd)
>  {
> diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
> index 38066dfeb2e7..2189e9b119ee 100644
> --- a/drivers/iommu/iommufd/selftest.c
> +++ b/drivers/iommu/iommufd/selftest.c
> @@ -150,6 +150,8 @@ to_mock_nested(struct iommu_domain *domain)
>  struct mock_viommu {
>  	struct iommufd_viommu core;
>  	struct mock_iommu_domain *s2_parent;
> +	struct mock_hw_queue *hw_queue[IOMMU_TEST_HW_QUEUE_MAX];
> +	struct mutex queue_mutex;
>  };
>  
>  static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
> @@ -157,6 +159,19 @@ static inline struct mock_viommu *to_mock_viommu(struct iommufd_viommu *viommu)
>  	return container_of(viommu, struct mock_viommu, core);
>  }
>  
> +struct mock_hw_queue {
> +	struct iommufd_hw_queue core;
> +	struct mock_viommu *mock_viommu;
> +	struct mock_hw_queue *prev;
> +	u16 index;
> +};
> +
> +static inline struct mock_hw_queue *
> +to_mock_hw_queue(struct iommufd_hw_queue *hw_queue)
> +{
> +	return container_of(hw_queue, struct mock_hw_queue, core);
> +}
> +
>  enum selftest_obj_type {
>  	TYPE_IDEV,
>  };
> @@ -670,9 +685,11 @@ static void mock_viommu_destroy(struct iommufd_viommu *viommu)
>  {
>  	struct mock_iommu_device *mock_iommu = container_of(
>  		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
> +	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
>  
>  	if (refcount_dec_and_test(&mock_iommu->users))
>  		complete(&mock_iommu->complete);
> +	mutex_destroy(&mock_viommu->queue_mutex);
>  
>  	/* iommufd core frees mock_viommu and viommu */
>  }
> @@ -764,10 +781,86 @@ static int mock_viommu_cache_invalidate(struct iommufd_viommu *viommu,
>  	return rc;
>  }
>  
> +static size_t mock_viommu_get_hw_queue_size(struct iommufd_viommu *viommu,
> +					    enum iommu_hw_queue_type queue_type)
> +{
> +	if (queue_type != IOMMU_HW_QUEUE_TYPE_SELFTEST)
> +		return 0;
> +	return HW_QUEUE_STRUCT_SIZE(struct mock_hw_queue, core);
> +}
> +
> +static void mock_hw_queue_destroy(struct iommufd_hw_queue *hw_queue)
> +{
> +	struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
> +	struct mock_viommu *mock_viommu = mock_hw_queue->mock_viommu;
> +
> +	mutex_lock(&mock_viommu->queue_mutex);
> +	mock_viommu->hw_queue[mock_hw_queue->index] = NULL;
> +	if (mock_hw_queue->prev)
> +		iommufd_hw_queue_undepend(mock_hw_queue, mock_hw_queue->prev,
> +					  core);
> +	mutex_unlock(&mock_viommu->queue_mutex);
> +}
> +
> +/* Test iommufd_hw_queue_depend/undepend() */
> +static int mock_hw_queue_init_phys(struct iommufd_hw_queue *hw_queue, u32 index,
> +				   phys_addr_t base_addr_pa)
> +{
> +	struct mock_viommu *mock_viommu = to_mock_viommu(hw_queue->viommu);
> +	struct mock_hw_queue *mock_hw_queue = to_mock_hw_queue(hw_queue);
> +	struct mock_hw_queue *prev = NULL;
> +	int rc = 0;
> +
> +	if (index >= IOMMU_TEST_HW_QUEUE_MAX)
> +		return -EINVAL;
> +
> +	mutex_lock(&mock_viommu->queue_mutex);
> +
> +	if (mock_viommu->hw_queue[index]) {
> +		rc = -EEXIST;
> +		goto unlock;
> +	}
> +
> +	if (index) {
> +		prev = mock_viommu->hw_queue[index - 1];
> +		if (!prev) {
> +			rc = -EIO;
> +			goto unlock;
> +		}
> +	}
> +
> +	/*
> +	 * Test to catch a kernel bug if the core converted the physical address
> +	 * incorrectly. Let mock_domain_iova_to_phys() WARN_ON if it fails.
> +	 */
> +	if (base_addr_pa != iommu_iova_to_phys(&mock_viommu->s2_parent->domain,
> +					       hw_queue->base_addr)) {
> +		rc = -EFAULT;
> +		goto unlock;
> +	}
> +
> +	if (prev) {
> +		rc = iommufd_hw_queue_depend(mock_hw_queue, prev, core);
> +		if (rc)
> +			goto unlock;
> +	}
> +
> +	mock_hw_queue->prev = prev;
> +	mock_hw_queue->mock_viommu = mock_viommu;
> +	mock_viommu->hw_queue[index] = mock_hw_queue;
> +
> +	hw_queue->destroy = &mock_hw_queue_destroy;
> +unlock:
> +	mutex_unlock(&mock_viommu->queue_mutex);
> +	return rc;
> +}
> +
>  static struct iommufd_viommu_ops mock_viommu_ops = {
>  	.destroy = mock_viommu_destroy,
>  	.alloc_domain_nested = mock_viommu_alloc_domain_nested,
>  	.cache_invalidate = mock_viommu_cache_invalidate,
> +	.get_hw_queue_size = mock_viommu_get_hw_queue_size,
> +	.hw_queue_init_phys = mock_hw_queue_init_phys,
>  };
>  
>  static size_t mock_get_viommu_size(struct device *dev,
> @@ -784,6 +877,7 @@ static int mock_viommu_init(struct iommufd_viommu *viommu,
>  {
>  	struct mock_iommu_device *mock_iommu = container_of(
>  		viommu->iommu_dev, struct mock_iommu_device, iommu_dev);
> +	struct mock_viommu *mock_viommu = to_mock_viommu(viommu);
>  	struct iommu_viommu_selftest data;
>  	int rc;
>  
> @@ -801,6 +895,9 @@ static int mock_viommu_init(struct iommufd_viommu *viommu,
>  	}
>  
>  	refcount_inc(&mock_iommu->users);
> +	mutex_init(&mock_viommu->queue_mutex);
> +	mock_viommu->s2_parent = to_mock_domain(parent_domain);
> +
>  	viommu->ops = &mock_viommu_ops;
>  	return 0;
>  }
> diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
> index a9dfcce5e1b2..73426de77675 100644
> --- a/tools/testing/selftests/iommu/iommufd.c
> +++ b/tools/testing/selftests/iommu/iommufd.c
> @@ -3032,6 +3032,65 @@ TEST_F(iommufd_viommu, vdevice_cache)
>  	}
>  }
>  
> +TEST_F(iommufd_viommu, hw_queue)
> +{
> +	__u64 iova = MOCK_APERTURE_START, iova2;
> +	uint32_t viommu_id = self->viommu_id;
> +	uint32_t hw_queue_id[2];
> +
> +	if (!viommu_id)
> +		SKIP(return, "Skipping test for variant no_viommu");
> +
> +	/* Fail IOMMU_HW_QUEUE_TYPE_DEFAULT */
> +	test_err_hw_queue_alloc(EOPNOTSUPP, viommu_id,
> +				IOMMU_HW_QUEUE_TYPE_DEFAULT, 0, iova, PAGE_SIZE,
> +				&hw_queue_id[0]);
> +	/* Fail queue addr and length */
> +	test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
> +				0, iova, 0, &hw_queue_id[0]);
> +	test_err_hw_queue_alloc(EOVERFLOW, viommu_id,
> +				IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, ~(uint64_t)0,
> +				PAGE_SIZE, &hw_queue_id[0]);
> +	/* Fail missing iova */
> +	test_err_hw_queue_alloc(ENOENT, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
> +				0, iova, PAGE_SIZE, &hw_queue_id[0]);
> +
> +	/* Map iova */
> +	test_ioctl_ioas_map(buffer, PAGE_SIZE, &iova);
> +	test_ioctl_ioas_map(buffer + PAGE_SIZE, PAGE_SIZE, &iova2);
> +
> +	/* Fail index=1 and =MAX; must start from index=0 */
> +	test_err_hw_queue_alloc(EIO, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
> +				iova, PAGE_SIZE, &hw_queue_id[0]);
> +	test_err_hw_queue_alloc(EINVAL, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
> +				IOMMU_TEST_HW_QUEUE_MAX, iova, PAGE_SIZE,
> +				&hw_queue_id[0]);
> +
> +	/* Allocate index=0, declare ownership of the iova */
> +	test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 0,
> +				iova, PAGE_SIZE, &hw_queue_id[0]);
> +	/* Fail duplicated index */
> +	test_err_hw_queue_alloc(EEXIST, viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST,
> +				0, iova, PAGE_SIZE, &hw_queue_id[0]);
> +	/* Fail unmap, due to iova ownership */
> +	test_err_ioctl_ioas_unmap(EBUSY, iova, PAGE_SIZE);
> +	/* The 2nd page is not pinned, so it can be unmapped */
> +	test_ioctl_ioas_unmap(iova2, PAGE_SIZE);
> +
> +	/* Allocate index=1, with an unaligned case */
> +	test_cmd_hw_queue_alloc(viommu_id, IOMMU_HW_QUEUE_TYPE_SELFTEST, 1,
> +				iova + PAGE_SIZE / 2, PAGE_SIZE / 2,
> +				&hw_queue_id[1]);
> +	/* Fail to destroy, due to dependency */
> +	EXPECT_ERRNO(EBUSY, _test_ioctl_destroy(self->fd, hw_queue_id[0]));
> +
> +	/* Destroy in descending order */
> +	test_ioctl_destroy(hw_queue_id[1]);
> +	test_ioctl_destroy(hw_queue_id[0]);
> +	/* Now it can unmap the first page */
> +	test_ioctl_ioas_unmap(iova, PAGE_SIZE);
> +}
> +
>  FIXTURE(iommufd_device_pasid)
>  {
>  	int fd;
> diff --git a/tools/testing/selftests/iommu/iommufd_fail_nth.c b/tools/testing/selftests/iommu/iommufd_fail_nth.c
> index f7ccf1822108..41c685bbd252 100644
> --- a/tools/testing/selftests/iommu/iommufd_fail_nth.c
> +++ b/tools/testing/selftests/iommu/iommufd_fail_nth.c
> @@ -634,6 +634,7 @@ TEST_FAIL_NTH(basic_fail_nth, device)
>  	uint32_t idev_id;
>  	uint32_t hwpt_id;
>  	uint32_t viommu_id;
> +	uint32_t hw_queue_id;
>  	uint32_t vdev_id;
>  	__u64 iova;
>  
> @@ -696,6 +697,11 @@ TEST_FAIL_NTH(basic_fail_nth, device)
>  	if (_test_cmd_vdevice_alloc(self->fd, viommu_id, idev_id, 0, &vdev_id))
>  		return -1;
>  
> +	if (_test_cmd_hw_queue_alloc(self->fd, viommu_id,
> +				     IOMMU_HW_QUEUE_TYPE_SELFTEST, 0, iova,
> +				     PAGE_SIZE, &hw_queue_id))
> +		return -1;
> +
>  	if (_test_ioctl_fault_alloc(self->fd, &fault_id, &fault_fd))
>  		return -1;
>  	close(fault_fd);
> -- 
> 2.43.0
> 

  reply	other threads:[~2025-07-10 11:36 UTC|newest]

Thread overview: 42+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-07-10  5:58 [PATCH v9 00/29] iommufd: Add vIOMMU infrastructure (Part-4 HW QUEUE) Nicolin Chen
2025-07-10  5:58 ` [PATCH v9 01/29] iommufd: Report unmapped bytes in the error path of iopt_unmap_iova_range Nicolin Chen
2025-07-10  5:58 ` [PATCH v9 02/29] iommufd: Correct virt_id kdoc at struct iommu_vdevice_alloc Nicolin Chen
2025-07-10  5:58 ` [PATCH v9 03/29] iommufd/viommu: Explicitly define vdev->virt_id Nicolin Chen
2025-07-10  5:58 ` [PATCH v9 04/29] iommu: Use enum iommu_hw_info_type for type in hw_info op Nicolin Chen
2025-07-10  5:58 ` [PATCH v9 05/29] iommu: Add iommu_copy_struct_to_user helper Nicolin Chen
2025-07-10  5:58 ` [PATCH v9 06/29] iommu: Pass in a driver-level user data structure to viommu_init op Nicolin Chen
2025-07-10  5:58 ` [PATCH v9 07/29] iommufd/viommu: Allow driver-specific user data for a vIOMMU object Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 08/29] iommufd/selftest: Support user_data in mock_viommu_alloc Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 09/29] iommufd/selftest: Add coverage for viommu data Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 10/29] iommufd/access: Add internal APIs for HW queue to use Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 11/29] iommufd/access: Bypass access->ops->unmap for internal use Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 12/29] iommufd/viommu: Add driver-defined vDEVICE support Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 13/29] iommufd/viommu: Introduce IOMMUFD_OBJ_HW_QUEUE and its related struct Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 14/29] iommufd/viommu: Add IOMMUFD_CMD_HW_QUEUE_ALLOC ioctl Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 15/29] iommufd/driver: Add iommufd_hw_queue_depend/undepend() helpers Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 16/29] iommufd/selftest: Add coverage for IOMMUFD_CMD_HW_QUEUE_ALLOC Nicolin Chen
2025-07-10 11:36   ` Pranjal Shrivastava [this message]
2025-07-10  5:59 ` [PATCH v9 17/29] iommufd: Add mmap interface Nicolin Chen
2025-07-14  6:08   ` Arnd Bergmann
2025-07-14 12:03     ` Jason Gunthorpe
2025-07-10  5:59 ` [PATCH v9 18/29] iommufd/selftest: Add coverage for the new " Nicolin Chen
2025-07-10 11:14   ` Pranjal Shrivastava
2025-07-10  5:59 ` [PATCH v9 19/29] Documentation: userspace-api: iommufd: Update HW QUEUE Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 20/29] iommu: Allow an input type in hw_info op Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 21/29] iommufd: Allow an input data_type via iommu_hw_info Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 22/29] iommufd/selftest: Update hw_info coverage for an input data_type Nicolin Chen
2025-07-10 11:09   ` Pranjal Shrivastava
2025-07-10 15:32     ` Jason Gunthorpe
2025-07-10 16:43       ` Pranjal Shrivastava
2025-07-10 17:12         ` Jason Gunthorpe
2025-07-11  9:56           ` Pranjal Shrivastava
2025-07-10  5:59 ` [PATCH v9 23/29] iommu/arm-smmu-v3-iommufd: Add vsmmu_size/type and vsmmu_init impl ops Nicolin Chen
2025-07-11 16:14   ` Will Deacon
2025-07-15 18:42     ` Nicolin Chen
2025-07-17  8:49       ` Will Deacon
2025-07-10  5:59 ` [PATCH v9 24/29] iommu/arm-smmu-v3-iommufd: Add hw_info to impl_ops Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 25/29] iommu/tegra241-cmdqv: Use request_threaded_irq Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 26/29] iommu/tegra241-cmdqv: Simplify deinit flow in tegra241_cmdqv_remove_vintf() Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 27/29] iommu/tegra241-cmdqv: Do not statically map LVCMDQs Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 28/29] iommu/tegra241-cmdqv: Add user-space use support Nicolin Chen
2025-07-10  5:59 ` [PATCH v9 29/29] iommu/tegra241-cmdqv: Add IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV support Nicolin Chen

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=aG-lr9nUhwff4GuJ@google.com \
    --to=praan@google.com \
    --cc=alok.a.tiwari@oracle.com \
    --cc=bagasdotme@gmail.com \
    --cc=baolu.lu@linux.intel.com \
    --cc=corbet@lwn.net \
    --cc=dwmw2@infradead.org \
    --cc=iommu@lists.linux.dev \
    --cc=jgg@nvidia.com \
    --cc=jonathanh@nvidia.com \
    --cc=joro@8bytes.org \
    --cc=jsnitsel@redhat.com \
    --cc=kevin.tian@intel.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-doc@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-kselftest@vger.kernel.org \
    --cc=linux-tegra@vger.kernel.org \
    --cc=mochs@nvidia.com \
    --cc=mshavit@google.com \
    --cc=nathan@kernel.org \
    --cc=nicolinc@nvidia.com \
    --cc=patches@lists.linux.dev \
    --cc=peterz@infradead.org \
    --cc=robin.murphy@arm.com \
    --cc=shuah@kernel.org \
    --cc=thierry.reding@gmail.com \
    --cc=vasant.hegde@amd.com \
    --cc=vdumpa@nvidia.com \
    --cc=will@kernel.org \
    --cc=yi.l.liu@intel.com \
    --cc=zhangzekun11@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).