From: Eugenio Perez Martin <eperezma@redhat.com>
To: Si-Wei Liu <si-wei.liu@oracle.com>
Cc: qemu-devel@nongnu.org, Harpreet Singh Anand <hanand@xilinx.com>,
	 "Gonglei (Arei)" <arei.gonglei@huawei.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	 Jason Wang <jasowang@redhat.com>, Cindy Lu <lulu@redhat.com>,
	alvaro.karsz@solid-run.com,
	 Zhu Lingshan <lingshan.zhu@intel.com>,
	Lei Yang <leiyang@redhat.com>,
	 Liuxiangdong <liuxiangdong5@huawei.com>,
	Shannon Nelson <snelson@pensando.io>,
	 Parav Pandit <parav@mellanox.com>,
	Gautam Dawar <gdawar@xilinx.com>, Eli Cohen <eli@mellanox.com>,
	 Stefan Hajnoczi <stefanha@redhat.com>,
	Laurent Vivier <lvivier@redhat.com>,
	longpeng2@huawei.com, virtualization@lists.linux-foundation.org,
	 Stefano Garzarella <sgarzare@redhat.com>
Subject: Re: [PATCH v2 01/13] vdpa net: move iova tree creation from init to start
Date: Tue, 14 Feb 2023 20:07:19 +0100	[thread overview]
Message-ID: <CAJaqyWfTPHseAg9fdku00xtvC7kkJyAKMiN5wqoRVioN3zWDFw@mail.gmail.com> (raw)
In-Reply-To: <bdd4531e-1616-8513-bb33-80fabb7b2074@oracle.com>

On Tue, Feb 14, 2023 at 2:45 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
>
>
> On 2/13/2023 3:14 AM, Eugenio Perez Martin wrote:
> > On Mon, Feb 13, 2023 at 7:51 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
> >>
> >>
> >> On 2/8/2023 1:42 AM, Eugenio Pérez wrote:
> >>> Only create iova_tree if and when it is needed.
> >>>
> >>> The cleanup is still the responsibility of the last VQ, but this change
> >>> allows both cleanup functions to be merged.
> >>>
> >>> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> >>> Acked-by: Jason Wang <jasowang@redhat.com>
> >>> ---
> >>>    net/vhost-vdpa.c | 99 ++++++++++++++++++++++++++++++++++--------------
> >>>    1 file changed, 71 insertions(+), 28 deletions(-)
> >>>
> >>> diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
> >>> index de5ed8ff22..a9e6c8f28e 100644
> >>> --- a/net/vhost-vdpa.c
> >>> +++ b/net/vhost-vdpa.c
> >>> @@ -178,13 +178,9 @@ err_init:
> >>>    static void vhost_vdpa_cleanup(NetClientState *nc)
> >>>    {
> >>>        VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> >>> -    struct vhost_dev *dev = &s->vhost_net->dev;
> >>>
> >>>        qemu_vfree(s->cvq_cmd_out_buffer);
> >>>        qemu_vfree(s->status);
> >>> -    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
> >>> -        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> >>> -    }
> >>>        if (s->vhost_net) {
> >>>            vhost_net_cleanup(s->vhost_net);
> >>>            g_free(s->vhost_net);
> >>> @@ -234,10 +230,64 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
> >>>        return size;
> >>>    }
> >>>
> >>> +/** From any vdpa net client, get the netclient of first queue pair */
> >>> +static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
> >>> +{
> >>> +    NICState *nic = qemu_get_nic(s->nc.peer);
> >>> +    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
> >>> +
> >>> +    return DO_UPCAST(VhostVDPAState, nc, nc0);
> >>> +}
> >>> +
> >>> +static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
> >>> +{
> >>> +    struct vhost_vdpa *v = &s->vhost_vdpa;
> >>> +
> >>> +    if (v->shadow_vqs_enabled) {
> >>> +        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
> >>> +                                           v->iova_range.last);
> >>> +    }
> >>> +}
> >>> +
> >>> +static int vhost_vdpa_net_data_start(NetClientState *nc)
> >>> +{
> >>> +    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> >>> +    struct vhost_vdpa *v = &s->vhost_vdpa;
> >>> +
> >>> +    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> >>> +
> >>> +    if (v->index == 0) {
> >>> +        vhost_vdpa_net_data_start_first(s);
> >>> +        return 0;
> >>> +    }
> >>> +
> >>> +    if (v->shadow_vqs_enabled) {
> >>> +        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
> >>> +        v->iova_tree = s0->vhost_vdpa.iova_tree;
> >>> +    }
> >>> +
> >>> +    return 0;
> >>> +}
> >>> +
> >>> +static void vhost_vdpa_net_client_stop(NetClientState *nc)
> >>> +{
> >>> +    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
> >>> +    struct vhost_dev *dev;
> >>> +
> >>> +    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
> >>> +
> >>> +    dev = s->vhost_vdpa.dev;
> >>> +    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
> >>> +        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> >>> +    }
> >>> +}
> >>> +
> >>>    static NetClientInfo net_vhost_vdpa_info = {
> >>>            .type = NET_CLIENT_DRIVER_VHOST_VDPA,
> >>>            .size = sizeof(VhostVDPAState),
> >>>            .receive = vhost_vdpa_receive,
> >>> +        .start = vhost_vdpa_net_data_start,
> >>> +        .stop = vhost_vdpa_net_client_stop,
> >>>            .cleanup = vhost_vdpa_cleanup,
> >>>            .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
> >>>            .has_ufo = vhost_vdpa_has_ufo,
> >>> @@ -351,7 +401,7 @@ dma_map_err:
> >>>
> >>>    static int vhost_vdpa_net_cvq_start(NetClientState *nc)
> >>>    {
> >>> -    VhostVDPAState *s;
> >>> +    VhostVDPAState *s, *s0;
> >>>        struct vhost_vdpa *v;
> >>>        uint64_t backend_features;
> >>>        int64_t cvq_group;
> >>> @@ -425,6 +475,15 @@ out:
> >>>            return 0;
> >>>        }
> >>>
> >>> +    s0 = vhost_vdpa_net_first_nc_vdpa(s);
> >>> +    if (s0->vhost_vdpa.iova_tree) {
> >>> +        /* SVQ is already configured for all virtqueues */
> >>> +        v->iova_tree = s0->vhost_vdpa.iova_tree;
> >>> +    } else {
> >>> +        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
> >>> +                                           v->iova_range.last);
> >> I wonder how this case could happen: vhost_vdpa_net_data_start_first()
> >> should already have allocated an iova tree on the first data vq. Is having
> >> zero data vqs ever possible on net vhost-vdpa?
> >>
> > It's the case of current qemu master when only CVQ is being shadowed.
> > It's not that "there are no data vqs": if that case were possible, the
> > CVQ vhost-vdpa state would be s0.
> >
> > The point is that since the CVQ vhost-vdpa is the only one being
> > migrated, only CVQ has an iova tree.
> OK, so this corresponds to the case where live migration is not started
> and CVQ starts in its own address space of VHOST_VDPA_NET_CVQ_ASID.
> Thanks for explaining it!
>
> >
> > With this series applied and no migration running, the case is the same
> > as before: only CVQ gets shadowed. When migration starts, all vqs are
> > shadowed for migration and share the iova tree.
> I wonder what the reason is to share the iova tree when migration
> starts; I think CVQ may still stay in its own VHOST_VDPA_NET_CVQ_ASID?
>
> Actually there's a discrepancy in vhost_vdpa_net_log_global_enable(): I
> don't see explicit code to switch the CVQ from VHOST_VDPA_NET_CVQ_ASID
> to VHOST_VDPA_GUEST_PA_ASID. This is the address space collision I
> mentioned earlier:
>

There is no such change. This code only migrates devices with no CVQ,
since devices with CVQ have their own difficulties.

In the previous RFC there was no such change either. Since it's hard to
modify the passthrough device's IOVA tree, CVQ address space updates keep
using VHOST_VDPA_NET_CVQ_ASID.

They both share the same IOVA tree, though, just for simplicity. If
address space exhaustion becomes a problem we can make them independent,
but that complicates the code a little bit.
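
To make the sharing concrete, here is a tiny conceptual model (plain C,
not QEMU code; every name in it is illustrative): both ASIDs draw their
IOVA ranges from one allocator, so exhaustion is shared, while each
mapping is still tagged with its own ASID.

/* Conceptual sketch only -- not QEMU code, all names are illustrative. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { DATA_ASID = 0, CVQ_ASID = 1 };          /* CVQ keeps its own ASID */

/* One allocator stands in for the single shared VhostIOVATree. */
typedef struct {
    uint64_t next, last;
} SharedIovaAllocator;

static uint64_t iova_alloc(SharedIovaAllocator *a, uint64_t size)
{
    uint64_t iova = a->next;
    a->next += size;              /* both ASIDs consume the same IOVA range */
    return iova;
}

static void dma_map(uint32_t asid, uint64_t iova, uint64_t size)
{
    /* Stand-in for a per-ASID map call: the ASID differs, the tree does not. */
    printf("map asid=%" PRIu32 " iova=0x%" PRIx64 " size=0x%" PRIx64 "\n",
           asid, iova, size);
}

int main(void)
{
    SharedIovaAllocator tree = { .next = 0x1000, .last = UINT64_MAX };

    /* Data vq rings map in the guest ASID... */
    dma_map(DATA_ASID, iova_alloc(&tree, 0x2000), 0x2000);
    /* ...while CVQ buffers map in their own ASID, from the same allocator. */
    dma_map(CVQ_ASID, iova_alloc(&tree, 0x1000), 0x1000);
    return 0;
}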

> 9585@1676093788.259201:vhost_vdpa_dma_map vdpa:0x7ff13088a190 fd: 16
> msg_type: 2 asid: 0 iova: 0x1000 size: 0x2000 uaddr: 0x55a5a7ff3000
> perm: 0x1 type: 2
> 9585@1676093788.279923:vhost_vdpa_dma_map vdpa:0x7ff13088a190 fd: 16
> msg_type: 2 asid: 0 iova: 0x3000 size: 0x1000 uaddr: 0x55a5a7ff6000
> perm: 0x3 type: 2
> 9585@1676093788.290529:vhost_vdpa_set_vring_addr dev: 0x55a5a77cec20
> index: 0 flags: 0x0 desc_user_addr: 0x1000 used_user_addr: 0x3000
> avail_user_addr: 0x2000 log_guest_addr: 0x0
> :
> :
> 9585@1676093788.543567:vhost_vdpa_dma_map vdpa:0x7ff1302b6190 fd: 16
> msg_type: 2 asid: 0 iova: 0x16000 size: 0x2000 uaddr: 0x55a5a7959000
> perm: 0x1 type: 2
> 9585@1676093788.576923:vhost_vdpa_dma_map vdpa:0x7ff1302b6190 fd: 16
> msg_type: 2 asid: 0 iova: 0x18000 size: 0x1000 uaddr: 0x55a5a795c000
> perm: 0x3 type: 2
> 9585@1676093788.593881:vhost_vdpa_set_vring_addr dev: 0x55a5a7580930
> index: 7 flags: 0x0 desc_user_addr: 0x16000 used_user_addr: 0x18000
> avail_user_addr: 0x17000 log_guest_addr: 0x0
> 9585@1676093788.593904:vhost_vdpa_dma_map vdpa:0x7ff13026d190 fd: 16
> msg_type: 2 asid: 1 iova: 0x19000 size: 0x1000 uaddr: 0x55a5a77f8000
> perm: 0x1 type: 2
> 9585@1676093788.606448:vhost_vdpa_dma_map vdpa:0x7ff13026d190 fd: 16
> msg_type: 2 asid: 1 iova: 0x1a000 size: 0x1000 uaddr: 0x55a5a77fa000
> perm: 0x3 type: 2
> 9585@1676093788.616253:vhost_vdpa_dma_map vdpa:0x7ff13026d190 fd: 16
> msg_type: 2 asid: 1 iova: 0x1b000 size: 0x1000 uaddr: 0x55a5a795f000
> perm: 0x1 type: 2
> 9585@1676093788.625956:vhost_vdpa_dma_map vdpa:0x7ff13026d190 fd: 16
> msg_type: 2 asid: 1 iova: 0x1c000 size: 0x1000 uaddr: 0x55a5a7f4e000
> perm: 0x3 type: 2
> 9585@1676093788.635655:vhost_vdpa_set_vring_addr dev: 0x55a5a7580ec0
> index: 8 flags: 0x0 desc_user_addr: 0x1b000 used_user_addr: 0x1c000
> avail_user_addr: 0x1b400 log_guest_addr: 0x0
> 9585@1676093788.635667:vhost_vdpa_listener_region_add vdpa:
> 0x7ff13026d190 iova 0x0 llend 0xa0000 vaddr: 0x7fef1fe00000 read-only: 0
> 9585@1676093788.635670:vhost_vdpa_listener_begin_batch
> vdpa:0x7ff13026d190 fd: 16 msg_type: 2 type: 5
> 9585@1676093788.635677:vhost_vdpa_dma_map vdpa:0x7ff13026d190 fd: 16
> msg_type: 2 asid: 0 iova: 0x0 size: 0xa0000 uaddr: 0x7fef1fe00000 perm:
> 0x3 type: 2
> 2023-02-11T05:36:28.635686Z qemu-system-x86_64: failed to write, fd=16,
> errno=14 (Bad address)
> 2023-02-11T05:36:28.635721Z qemu-system-x86_64: vhost vdpa map fail!
> 2023-02-11T05:36:28.635744Z qemu-system-x86_64: vhost-vdpa: DMA mapping
> failed, unable to continue
>

I'm not sure how you got to this point. Maybe you were able to start the
migration because the CVQ migration blocker was not effectively added?
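
For reference, that blocker is meant to stop migration from starting at
all. A minimal sketch of the idea (not the actual patch 09/13; it assumes
QEMU's migrate_add_blocker(Error *, Error **) interface, and the storage
and function names here are made up):

/* Hedged sketch, not the actual patch 09/13; names other than       */
/* migrate_add_blocker() and error_setg() are illustrative only.     */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "migration/blocker.h"

static Error *cvq_migration_blocker;    /* made-up storage for this sketch */

static int vdpa_net_block_migration_for_cvq(Error **errp)
{
    error_setg(&cvq_migration_blocker,
               "net vdpa: migration with CVQ is not supported yet");
    /*
     * If this registration is skipped or silently fails, migration can
     * start and end up in exactly the kind of mapping failure shown in
     * the trace above.
     */
    return migrate_add_blocker(cvq_migration_blocker, errp);
}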

Thanks!


>
> Regards,
> -Siwei
> >
> > Thanks!
> >
> >> Thanks,
> >> -Siwei
> >>> +    }
> >>> +
> >>>        r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
> >>>                                   vhost_vdpa_net_cvq_cmd_page_len(), false);
> >>>        if (unlikely(r < 0)) {
> >>> @@ -449,15 +508,9 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
> >>>        if (s->vhost_vdpa.shadow_vqs_enabled) {
> >>>            vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
> >>>            vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
> >>> -        if (!s->always_svq) {
> >>> -            /*
> >>> -             * If only the CVQ is shadowed we can delete this safely.
> >>> -             * If all the VQs are shadows this will be needed by the time the
> >>> -             * device is started again to register SVQ vrings and similar.
> >>> -             */
> >>> -            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
> >>> -        }
> >>>        }
> >>> +
> >>> +    vhost_vdpa_net_client_stop(nc);
> >>>    }
> >>>
> >>>    static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
> >>> @@ -667,8 +720,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >>>                                           int nvqs,
> >>>                                           bool is_datapath,
> >>>                                           bool svq,
> >>> -                                       struct vhost_vdpa_iova_range iova_range,
> >>> -                                       VhostIOVATree *iova_tree)
> >>> +                                       struct vhost_vdpa_iova_range iova_range)
> >>>    {
> >>>        NetClientState *nc = NULL;
> >>>        VhostVDPAState *s;
> >>> @@ -690,7 +742,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
> >>>        s->vhost_vdpa.shadow_vqs_enabled = svq;
> >>>        s->vhost_vdpa.iova_range = iova_range;
> >>>        s->vhost_vdpa.shadow_data = svq;
> >>> -    s->vhost_vdpa.iova_tree = iova_tree;
> >>>        if (!is_datapath) {
> >>>            s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
> >>>                                                vhost_vdpa_net_cvq_cmd_page_len());
> >>> @@ -760,7 +811,6 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >>>        uint64_t features;
> >>>        int vdpa_device_fd;
> >>>        g_autofree NetClientState **ncs = NULL;
> >>> -    g_autoptr(VhostIOVATree) iova_tree = NULL;
> >>>        struct vhost_vdpa_iova_range iova_range;
> >>>        NetClientState *nc;
> >>>        int queue_pairs, r, i = 0, has_cvq = 0;
> >>> @@ -812,12 +862,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >>>            goto err;
> >>>        }
> >>>
> >>> -    if (opts->x_svq) {
> >>> -        if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
> >>> -            goto err_svq;
> >>> -        }
> >>> -
> >>> -        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
> >>> +    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
> >>> +        goto err;
> >>>        }
> >>>
> >>>        ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
> >>> @@ -825,7 +871,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >>>        for (i = 0; i < queue_pairs; i++) {
> >>>            ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> >>>                                         vdpa_device_fd, i, 2, true, opts->x_svq,
> >>> -                                     iova_range, iova_tree);
> >>> +                                     iova_range);
> >>>            if (!ncs[i])
> >>>                goto err;
> >>>        }
> >>> @@ -833,13 +879,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
> >>>        if (has_cvq) {
> >>>            nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
> >>>                                     vdpa_device_fd, i, 1, false,
> >>> -                                 opts->x_svq, iova_range, iova_tree);
> >>> +                                 opts->x_svq, iova_range);
> >>>            if (!nc)
> >>>                goto err;
> >>>        }
> >>>
> >>> -    /* iova_tree ownership belongs to last NetClientState */
> >>> -    g_steal_pointer(&iova_tree);
> >>>        return 0;
> >>>
> >>>    err:
> >>> @@ -849,7 +893,6 @@ err:
> >>>            }
> >>>        }
> >>>
> >>> -err_svq:
> >>>        qemu_close(vdpa_device_fd);
> >>>
> >>>        return -1;
>



Thread overview: 49+ messages
2023-02-08  9:42 [PATCH v2 00/13] Dynamycally switch to vhost shadow virtqueues at vdpa net migration Eugenio Pérez
2023-02-08  9:42 ` [PATCH v2 01/13] vdpa net: move iova tree creation from init to start Eugenio Pérez
2023-02-13  6:50   ` Si-Wei Liu
2023-02-13 11:14     ` Eugenio Perez Martin
2023-02-14  1:45       ` Si-Wei Liu
2023-02-14 19:07         ` Eugenio Perez Martin [this message]
2023-02-16  2:14           ` Si-Wei Liu
2023-02-16  7:35             ` Eugenio Perez Martin
2023-02-17  7:38               ` Si-Wei Liu
2023-02-17 13:55                 ` Eugenio Perez Martin
2023-02-08  9:42 ` [PATCH v2 02/13] vdpa: Negotiate _F_SUSPEND feature Eugenio Pérez
2023-02-08  9:42 ` [PATCH v2 03/13] vdpa: add vhost_vdpa_suspend Eugenio Pérez
2023-02-21  5:27   ` Jason Wang
2023-02-21  5:33     ` Jason Wang
2023-02-21  7:05       ` Eugenio Perez Martin
2023-02-08  9:42 ` [PATCH v2 04/13] vdpa: move vhost reset after get vring base Eugenio Pérez
2023-02-21  5:36   ` Jason Wang
2023-02-21  7:07     ` Eugenio Perez Martin
2023-02-22  3:43       ` Jason Wang
2023-02-08  9:42 ` [PATCH v2 05/13] vdpa: rewind at get_base, not set_base Eugenio Pérez
2023-02-21  5:40   ` Jason Wang
2023-02-08  9:42 ` [PATCH v2 06/13] vdpa net: allow VHOST_F_LOG_ALL Eugenio Pérez
2023-02-08  9:42 ` [PATCH v2 07/13] vdpa: add vdpa net migration state notifier Eugenio Pérez
2023-02-13  6:50   ` Si-Wei Liu
2023-02-13 15:51     ` Eugenio Perez Martin
2023-02-22  3:55   ` Jason Wang
2023-02-22  7:23     ` Eugenio Perez Martin
2023-02-08  9:42 ` [PATCH v2 08/13] vdpa: disable RAM block discard only for the first device Eugenio Pérez
2023-02-08  9:42 ` [PATCH v2 09/13] vdpa net: block migration if the device has CVQ Eugenio Pérez
2023-02-13  6:50   ` Si-Wei Liu
2023-02-14 18:06     ` Eugenio Perez Martin
2023-02-22  4:00   ` Jason Wang
2023-02-22  7:28     ` Eugenio Perez Martin
2023-02-23  2:41       ` Jason Wang
2023-02-08  9:42 ` [PATCH v2 10/13] vdpa: block migration if device has unsupported features Eugenio Pérez
2023-02-08  9:42 ` [PATCH v2 11/13] vdpa: block migration if dev does not have _F_SUSPEND Eugenio Pérez
2023-02-22  4:05   ` Jason Wang
2023-02-22 14:25     ` Eugenio Perez Martin
2023-02-23  2:38       ` Jason Wang
2023-02-23 11:06         ` Eugenio Perez Martin
2023-02-24  3:16           ` Jason Wang
2023-02-08  9:42 ` [PATCH v2 12/13] vdpa: block migration if SVQ does not admit a feature Eugenio Pérez
2023-02-08  9:42 ` [PATCH v2 13/13] vdpa: return VHOST_F_LOG_ALL in vhost-vdpa devices Eugenio Pérez
2023-02-22  4:07   ` Jason Wang
2023-02-08 10:29 ` [PATCH v2 00/13] Dynamycally switch to vhost shadow virtqueues at vdpa net migration Alvaro Karsz
2023-02-09 14:38   ` Lei Yang
2023-02-10 12:57 ` Gautam Dawar
2023-02-15 18:40   ` Eugenio Perez Martin
2023-02-16 13:50     ` Lei Yang
