From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
stable@vger.kernel.org,
Demi Marie Obenour <demi@invisiblethingslab.com>,
Juergen Gross <jgross@suse.com>
Subject: [PATCH 5.4 29/58] xen/gntdev: Avoid blocking in unmap_grant_pages()
Date: Tue, 5 Jul 2022 13:58:05 +0200
Message-ID: <20220705115611.103876607@linuxfoundation.org>
In-Reply-To: <20220705115610.236040773@linuxfoundation.org>
From: Demi Marie Obenour <demi@invisiblethingslab.com>
commit dbe97cff7dd9f0f75c524afdd55ad46be3d15295 upstream.
unmap_grant_pages() currently waits for the pages to no longer be used.
In https://github.com/QubesOS/qubes-issues/issues/7481, this led to a
deadlock against i915: i915 was waiting for gntdev's MMU notifier to
finish, while gntdev was waiting for i915 to free its pages. I also
believe this is responsible for various deadlocks I have experienced in
the past.
Avoid these problems by making unmap_grant_pages() asynchronous. This
requires making it return void, as any errors will not be available
when the function returns. Fortunately, the only use of the return
value is a WARN_ON(), which can be replaced by a WARN_ON() at the
point where the error is actually detected. Additionally, a failed
call will not prevent further calls from being made, but this is
harmless.
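In sketch form, the change is: the synchronous gnttab_unmap_refs_sync()
call becomes a gnttab_unmap_refs_async() call with a completion
callback, and the per-operation status check (with its WARN_ON) moves
into that callback. The following is only a condensed illustration
using the names from the hunks below, not the exact backported code:

static void __unmap_grant_pages_done(int result,
                                     struct gntab_unmap_queue_data *data)
{
        struct gntdev_grant_map *map = data->data;
        unsigned int offset = data->unmap_ops - map->unmap_ops;
        unsigned int i;

        for (i = 0; i < data->count; i++) {
                /* Errors can only be reported here, not to the caller. */
                WARN_ON(map->unmap_ops[offset + i].status);
                map->unmap_ops[offset + i].handle = -1;
        }
}

static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
                                int pages)
{
        /* Describe the batch in the map itself to avoid an allocation. */
        map->unmap_data.unmap_ops = map->unmap_ops + offset;
        map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
        map->unmap_data.pages = map->pages + offset;
        map->unmap_data.count = pages;
        map->unmap_data.done = __unmap_grant_pages_done;
        map->unmap_data.data = map;

        gnttab_unmap_refs_async(&map->unmap_data);      /* returns immediately */
}
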
Because unmap_grant_pages() is now asynchronous, the grant handle will
be set to INVALID_GRANT_HANDLE too late to prevent multiple unmaps of
the same
handle. Instead, a separate bool array is allocated for this purpose.
This wastes memory, but stuffing this information in padding bytes is
too fragile. Furthermore, it is necessary to grab a reference to the
map before making the asynchronous call, and release the reference when
the call returns.
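Condensed, the double-unmap guard and the reference held across the
asynchronous call look roughly like this (fragments only, following
the shape of the hunks below):

        /* In unmap_grant_pages(): only queue ranges nobody has queued yet. */
        range = 0;
        while (range < pages && !map->being_removed[offset + range]) {
                map->being_removed[offset + range] = true;
                range++;
        }
        if (range)
                __unmap_grant_pages(map, offset, range);

        /* In __unmap_grant_pages(): keep the map alive during the async call. */
        refcount_inc(&map->users);
        gnttab_unmap_refs_async(&map->unmap_data);

        /* In __unmap_grant_pages_done(): drop the reference taken above. */
        gntdev_put_map(NULL, map);
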
It is also necessary to guard against reentrancy in gntdev_put_map(),
and to handle the case where userspace tries to map a mapping whose
contents have not all been freed yet.
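Those last two points reduce, roughly, to the following (again only
illustrative; see the gntdev_put_map() and gntdev_mmap() hunks below):

        /* In gntdev_put_map(), once the user count has dropped to zero: */
        if (map->pages && !use_ptemod) {
                /*
                 * Re-arm the count so that the async unmap path, which may
                 * call gntdev_put_map() again, returns early instead of
                 * recursing. refcount_inc() would WARN() on a zero count.
                 */
                refcount_set(&map->users, 1);
                unmap_grant_pages(map, 0, map->count);
                if (!refcount_dec_and_test(&map->users))
                        return;         /* async unmap still holds a reference */
        }

        /* In gntdev_mmap(), refuse to reuse a map still being torn down. */
        if (atomic_read(&map->live_grants)) {
                err = -EAGAIN;
                goto unlock_out;
        }
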
Fixes: 745282256c75 ("xen/gntdev: safely unmap grants in case they are still in use")
Cc: stable@vger.kernel.org
Signed-off-by: Demi Marie Obenour <demi@invisiblethingslab.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20220622022726.2538-1-demi@invisiblethingslab.com
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
drivers/xen/gntdev-common.h | 8 ++
drivers/xen/gntdev.c | 147 ++++++++++++++++++++++++++++++--------------
2 files changed, 110 insertions(+), 45 deletions(-)
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -15,6 +15,8 @@
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
+#include <xen/interface/event_channel.h>
+#include <xen/grant_table.h>
struct gntdev_dmabuf_priv;
@@ -61,6 +63,7 @@ struct gntdev_grant_map {
struct gnttab_unmap_grant_ref *unmap_ops;
struct gnttab_map_grant_ref *kmap_ops;
struct gnttab_unmap_grant_ref *kunmap_ops;
+ bool *being_removed;
struct page **pages;
unsigned long pages_vm_start;
@@ -78,6 +81,11 @@ struct gntdev_grant_map {
/* Needed to avoid allocation in gnttab_dma_free_pages(). */
xen_pfn_t *frames;
#endif
+
+ /* Number of live grants */
+ atomic_t live_grants;
+ /* Needed to avoid allocation in __unmap_grant_pages */
+ struct gntab_unmap_queue_data unmap_data;
};
struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -62,11 +63,12 @@ MODULE_PARM_DESC(limit, "Maximum number
static atomic_t pages_mapped = ATOMIC_INIT(0);
+/* True in PV mode, false otherwise */
static int use_ptemod;
#define populate_freeable_maps use_ptemod
-static int unmap_grant_pages(struct gntdev_grant_map *map,
- int offset, int pages);
+static void unmap_grant_pages(struct gntdev_grant_map *map,
+ int offset, int pages);
static struct miscdevice gntdev_miscdev;
@@ -123,6 +125,7 @@ static void gntdev_free_map(struct gntde
kfree(map->unmap_ops);
kfree(map->kmap_ops);
kfree(map->kunmap_ops);
+ kfree(map->being_removed);
kfree(map);
}
@@ -142,12 +145,15 @@ struct gntdev_grant_map *gntdev_alloc_ma
add->kmap_ops = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
add->pages = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
+ add->being_removed =
+ kcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL);
if (NULL == add->grants ||
NULL == add->map_ops ||
NULL == add->unmap_ops ||
NULL == add->kmap_ops ||
NULL == add->kunmap_ops ||
- NULL == add->pages)
+ NULL == add->pages ||
+ NULL == add->being_removed)
goto err;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
@@ -243,6 +249,35 @@ void gntdev_put_map(struct gntdev_priv *
return;
atomic_sub(map->count, &pages_mapped);
+ if (map->pages && !use_ptemod) {
+ /*
+ * Increment the reference count. This ensures that the
+ * subsequent call to unmap_grant_pages() will not wind up
+ * re-entering itself. It *can* wind up calling
+ * gntdev_put_map() recursively, but such calls will be with a
+ * reference count greater than 1, so they will return before
+ * this code is reached. The recursion depth is thus limited to
+ * 1. Do NOT use refcount_inc() here, as it will detect that
+ * the reference count is zero and WARN().
+ */
+ refcount_set(&map->users, 1);
+
+ /*
+ * Unmap the grants. This may or may not be asynchronous, so it
+ * is possible that the reference count is 1 on return, but it
+ * could also be greater than 1.
+ */
+ unmap_grant_pages(map, 0, map->count);
+
+ /* Check if the memory now needs to be freed */
+ if (!refcount_dec_and_test(&map->users))
+ return;
+
+ /*
+ * All pages have been returned to the hypervisor, so free the
+ * map.
+ */
+ }
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
@@ -298,6 +333,7 @@ static int set_grant_ptes_as_special(pte
int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
+ size_t alloced = 0;
int i, err = 0;
if (!use_ptemod) {
@@ -346,87 +382,109 @@ int gntdev_map_grant_pages(struct gntdev
map->pages, map->count);
for (i = 0; i < map->count; i++) {
- if (map->map_ops[i].status == GNTST_okay)
+ if (map->map_ops[i].status == GNTST_okay) {
map->unmap_ops[i].handle = map->map_ops[i].handle;
- else if (!err)
+ if (!use_ptemod)
+ alloced++;
+ } else if (!err)
err = -EINVAL;
if (map->flags & GNTMAP_device_map)
map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;
if (use_ptemod) {
- if (map->kmap_ops[i].status == GNTST_okay)
+ if (map->kmap_ops[i].status == GNTST_okay) {
+ if (map->map_ops[i].status == GNTST_okay)
+ alloced++;
map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
- else if (!err)
+ } else if (!err)
err = -EINVAL;
}
}
+ atomic_add(alloced, &map->live_grants);
return err;
}
-static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void __unmap_grant_pages_done(int result,
+ struct gntab_unmap_queue_data *data)
{
- int i, err = 0;
- struct gntab_unmap_queue_data unmap_data;
+ unsigned int i;
+ struct gntdev_grant_map *map = data->data;
+ unsigned int offset = data->unmap_ops - map->unmap_ops;
+
+ for (i = 0; i < data->count; i++) {
+ WARN_ON(map->unmap_ops[offset+i].status);
+ pr_debug("unmap handle=%d st=%d\n",
+ map->unmap_ops[offset+i].handle,
+ map->unmap_ops[offset+i].status);
+ map->unmap_ops[offset+i].handle = -1;
+ }
+ /*
+ * Decrease the live-grant counter. This must happen after the loop to
+ * prevent premature reuse of the grants by gnttab_mmap().
+ */
+ atomic_sub(data->count, &map->live_grants);
+ /* Release reference taken by __unmap_grant_pages */
+ gntdev_put_map(NULL, map);
+}
+
+static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
+{
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
+
if (pgno >= offset && pgno < offset + pages) {
/* No need for kmap, pages are in lowmem */
uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
+
tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
}
}
- unmap_data.unmap_ops = map->unmap_ops + offset;
- unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
- unmap_data.pages = map->pages + offset;
- unmap_data.count = pages;
+ map->unmap_data.unmap_ops = map->unmap_ops + offset;
+ map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
+ map->unmap_data.pages = map->pages + offset;
+ map->unmap_data.count = pages;
+ map->unmap_data.done = __unmap_grant_pages_done;
+ map->unmap_data.data = map;
+ refcount_inc(&map->users); /* to keep map alive during async call below */
- err = gnttab_unmap_refs_sync(&unmap_data);
- if (err)
- return err;
-
- for (i = 0; i < pages; i++) {
- if (map->unmap_ops[offset+i].status)
- err = -EINVAL;
- pr_debug("unmap handle=%d st=%d\n",
- map->unmap_ops[offset+i].handle,
- map->unmap_ops[offset+i].status);
- map->unmap_ops[offset+i].handle = -1;
- }
- return err;
+ gnttab_unmap_refs_async(&map->unmap_data);
}
-static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
- int pages)
+static void unmap_grant_pages(struct gntdev_grant_map *map, int offset,
+ int pages)
{
- int range, err = 0;
+ int range;
+
+ if (atomic_read(&map->live_grants) == 0)
+ return; /* Nothing to do */
pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
/* It is possible the requested range will have a "hole" where we
* already unmapped some of the grants. Only unmap valid ranges.
*/
- while (pages && !err) {
- while (pages && map->unmap_ops[offset].handle == -1) {
+ while (pages) {
+ while (pages && map->being_removed[offset]) {
offset++;
pages--;
}
range = 0;
while (range < pages) {
- if (map->unmap_ops[offset+range].handle == -1)
+ if (map->being_removed[offset + range])
break;
+ map->being_removed[offset + range] = true;
range++;
}
- err = __unmap_grant_pages(map, offset, range);
+ if (range)
+ __unmap_grant_pages(map, offset, range);
offset += range;
pages -= range;
}
-
- return err;
}
/* ------------------------------------------------------------------ */
@@ -496,7 +554,6 @@ static int unmap_if_in_range(struct gntd
bool blockable)
{
unsigned long mstart, mend;
- int err;
if (!in_range(map, start, end))
return 0;
@@ -510,10 +567,9 @@ static int unmap_if_in_range(struct gntd
map->index, map->count,
map->vma->vm_start, map->vma->vm_end,
start, end, mstart, mend);
- err = unmap_grant_pages(map,
+ unmap_grant_pages(map,
(mstart - map->vma->vm_start) >> PAGE_SHIFT,
(mend - mstart) >> PAGE_SHIFT);
- WARN_ON(err);
return 0;
}
@@ -554,7 +610,6 @@ static void mn_release(struct mmu_notifi
{
struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
struct gntdev_grant_map *map;
- int err;
mutex_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
@@ -563,8 +618,7 @@ static void mn_release(struct mmu_notifi
pr_debug("map %d+%d (%lx %lx)\n",
map->index, map->count,
map->vma->vm_start, map->vma->vm_end);
- err = unmap_grant_pages(map, /* offset */ 0, map->count);
- WARN_ON(err);
+ unmap_grant_pages(map, /* offset */ 0, map->count);
}
list_for_each_entry(map, &priv->freeable_maps, next) {
if (!map->vma)
@@ -572,8 +626,7 @@ static void mn_release(struct mmu_notifi
pr_debug("map %d+%d (%lx %lx)\n",
map->index, map->count,
map->vma->vm_start, map->vma->vm_end);
- err = unmap_grant_pages(map, /* offset */ 0, map->count);
- WARN_ON(err);
+ unmap_grant_pages(map, /* offset */ 0, map->count);
}
mutex_unlock(&priv->lock);
}
@@ -1102,6 +1155,10 @@ static int gntdev_mmap(struct file *flip
goto unlock_out;
}
+ if (atomic_read(&map->live_grants)) {
+ err = -EAGAIN;
+ goto unlock_out;
+ }
refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;