From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
	Albert Esteve <aesteve@redhat.com>
Subject: [PULL 63/63] libvhost-user: handle shared_object msg
Date: Wed, 4 Oct 2023 04:46:32 -0400	[thread overview]
Message-ID: <dc1499091ca09db0ac7a5615a592e55f27d4965d.1696408966.git.mst@redhat.com> (raw)
In-Reply-To: <cover.1696408966.git.mst@redhat.com>

From: Albert Esteve <aesteve@redhat.com>

In the libvhost-user library, handle VHOST_USER_GET_SHARED_OBJECT
requests and add helper functions for sending the back-end messages
that interact with the virtio shared objects hash table.

Signed-off-by: Albert Esteve <aesteve@redhat.com>
Message-Id: <20231002065706.94707-5-aesteve@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 subprojects/libvhost-user/libvhost-user.h |  55 +++++++++-
 subprojects/libvhost-user/libvhost-user.c | 120 ++++++++++++++++++++++
 2 files changed, 174 insertions(+), 1 deletion(-)
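
For context (not part of the patch itself), a minimal sketch of how a
back-end might use the new interface. The MyDevice struct, its fields and
the my_* function names below are hypothetical; only UUID_LEN,
VuDevIface.get_shared_object and the vu_*_shared_object() helpers come
from this patch, and error handling is elided.

#include <string.h>

#include "libvhost-user.h"

/* Hypothetical back-end device: embeds the VuDev context and owns one
 * exported dma-buf together with its UUID. */
typedef struct MyDevice {
    VuDev parent;
    unsigned char exported_uuid[UUID_LEN];
    int exported_dmabuf_fd;
} MyDevice;

/* VHOST_USER_GET_SHARED_OBJECT requests land here via the new
 * get_shared_object callback: return the dma-buf fd backing the
 * requested UUID, or -1 if this back-end does not export it. */
static int
my_get_shared_object(VuDev *dev, const unsigned char *uuid)
{
    MyDevice *d = (MyDevice *)dev;    /* parent is the first member */

    if (memcmp(uuid, d->exported_uuid, UUID_LEN) == 0) {
        return d->exported_dmabuf_fd;
    }
    return -1;
}

static const VuDevIface my_iface = {
    /* ... the other device callbacks ... */
    .get_shared_object = my_get_shared_object,
};

/* Exporting: register this back-end's UUID with the front-end's shared
 * object table.  Returns false if VHOST_USER_PROTOCOL_F_SHARED_OBJECT
 * was not negotiated or the message could not be sent;
 * vu_rm_shared_object() removes the entry again. */
static bool
my_export(MyDevice *d)
{
    return vu_add_shared_object(&d->parent, d->exported_uuid);
}

/* Importing: retrieve the dma-buf fd behind a UUID exported elsewhere. */
static int
my_import(MyDevice *d, unsigned char uuid[UUID_LEN])
{
    int dmabuf_fd = -1;

    if (!vu_lookup_shared_object(&d->parent, uuid, &dmabuf_fd)) {
        return -1;
    }
    return dmabuf_fd;
}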

diff --git a/subprojects/libvhost-user/libvhost-user.h b/subprojects/libvhost-user/libvhost-user.h
index 708370c5f5..b36a42a7ca 100644
--- a/subprojects/libvhost-user/libvhost-user.h
+++ b/subprojects/libvhost-user/libvhost-user.h
@@ -64,7 +64,8 @@ enum VhostUserProtocolFeature {
     VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
     VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
     VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
-
+    /* Features 16 and 17 are reserved for F_STATUS and F_XEN_MMAP. */
+    VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
     VHOST_USER_PROTOCOL_F_MAX
 };
 
@@ -109,6 +110,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_GET_MAX_MEM_SLOTS = 36,
     VHOST_USER_ADD_MEM_REG = 37,
     VHOST_USER_REM_MEM_REG = 38,
+    VHOST_USER_GET_SHARED_OBJECT = 41,
     VHOST_USER_MAX
 } VhostUserRequest;
 
@@ -119,6 +121,9 @@ typedef enum VhostUserBackendRequest {
     VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
     VHOST_USER_BACKEND_VRING_CALL = 4,
     VHOST_USER_BACKEND_VRING_ERR = 5,
+    VHOST_USER_BACKEND_SHARED_OBJECT_ADD = 6,
+    VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE = 7,
+    VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
     VHOST_USER_BACKEND_MAX
 }  VhostUserBackendRequest;
 
@@ -172,6 +177,12 @@ typedef struct VhostUserInflight {
     uint16_t queue_size;
 } VhostUserInflight;
 
+#define UUID_LEN 16
+
+typedef struct VhostUserShared {
+    unsigned char uuid[UUID_LEN];
+} VhostUserShared;
+
 #if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
 # define VU_PACKED __attribute__((gcc_struct, packed))
 #else
@@ -199,6 +210,7 @@ typedef struct VhostUserMsg {
         VhostUserConfig config;
         VhostUserVringArea area;
         VhostUserInflight inflight;
+        VhostUserShared object;
     } payload;
 
     int fds[VHOST_MEMORY_BASELINE_NREGIONS];
@@ -232,6 +244,7 @@ typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
 typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
                                  uint32_t offset, uint32_t size,
                                  uint32_t flags);
+typedef int (*vu_get_shared_object_cb) (VuDev *dev, const unsigned char *uuid);
 
 typedef struct VuDevIface {
     /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
@@ -258,6 +271,8 @@ typedef struct VuDevIface {
     vu_get_config_cb get_config;
     /* set the config space of the device */
     vu_set_config_cb set_config;
+    /* get virtio shared object from the underlying vhost implementation. */
+    vu_get_shared_object_cb get_shared_object;
 } VuDevIface;
 
 typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);
@@ -541,6 +556,44 @@ void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
 bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                 int size, int offset);
 
+/**
+ * vu_lookup_shared_object:
+ * @dev: a VuDev context
+ * @uuid: UUID of the shared object
+ * @dmabuf_fd: output dma-buf file descriptor
+ *
+ * Looks up the virtio shared object (i.e., dma-buf fd) associated with the
+ * received UUID. If found, the result is stored in the dmabuf_fd argument.
+ *
+ * Returns: whether the virtio object was found.
+ */
+bool vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
+                             int *dmabuf_fd);
+
+/**
+ * vu_add_shared_object:
+ * @dev: a VuDev context
+ * @uuid: UUID of the shared object
+ *
+ * Registers this back-end as the exporter for the object associated with
+ * the received UUID.
+ *
+ * Returns: TRUE on success, FALSE on failure.
+ */
+bool vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);
+
+/**
+ * vu_rm_shared_object:
+ * @dev: a VuDev context
+ * @uuid: UUID of the shared object
+ *
+ * Removes a shared object entry (i.e., back-end entry) associated with the
+ * received UUID key from the hash table.
+ *
+ * Returns: TRUE on success, FALSE on failure.
+ */
+bool vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);
+
 /**
  * vu_queue_set_notification:
  * @dev: a VuDev context
diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c
index 49b57c7ef4..051a611da3 100644
--- a/subprojects/libvhost-user/libvhost-user.c
+++ b/subprojects/libvhost-user/libvhost-user.c
@@ -161,6 +161,7 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
         REQ(VHOST_USER_ADD_MEM_REG),
         REQ(VHOST_USER_REM_MEM_REG),
+        REQ(VHOST_USER_GET_SHARED_OBJECT),
         REQ(VHOST_USER_MAX),
     };
 #undef REQ
@@ -901,6 +902,24 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
     return false;
 }
 
+static bool
+vu_get_shared_object(VuDev *dev, VhostUserMsg *vmsg)
+{
+    int fd_num = 0;
+    int dmabuf_fd = -1;
+    if (dev->iface->get_shared_object) {
+        dmabuf_fd = dev->iface->get_shared_object(
+            dev, &vmsg->payload.object.uuid[0]);
+    }
+    if (dmabuf_fd != -1) {
+        DPRINT("dmabuf_fd found for requested UUID\n");
+        vmsg->fds[fd_num++] = dmabuf_fd;
+    }
+    vmsg->fd_num = fd_num;
+
+    return true;
+}
+
 static bool
 vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
 {
@@ -1404,6 +1423,105 @@ bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
     return vu_process_message_reply(dev, &vmsg);
 }
 
+bool
+vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
+                        int *dmabuf_fd)
+{
+    bool result = false;
+    VhostUserMsg msg_reply;
+    VhostUserMsg msg = {
+        .request = VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP,
+        .size = sizeof(msg.payload.object),
+        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
+    };
+
+    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+        return false;
+    }
+
+    pthread_mutex_lock(&dev->backend_mutex);
+    if (!vu_message_write(dev, dev->backend_fd, &msg)) {
+        goto out;
+    }
+
+    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
+        goto out;
+    }
+
+    if (msg_reply.request != msg.request) {
+        DPRINT("Received unexpected msg type. Expected %d, received %d",
+               msg.request, msg_reply.request);
+        goto out;
+    }
+
+    if (msg_reply.fd_num != 1) {
+        DPRINT("Received unexpected number of fds. Expected 1, received %d",
+               msg_reply.fd_num);
+        goto out;
+    }
+
+    *dmabuf_fd = msg_reply.fds[0];
+    result = *dmabuf_fd > 0 && msg_reply.payload.u64 == 0;
+out:
+    pthread_mutex_unlock(&dev->backend_mutex);
+
+    return result;
+}
+
+static bool
+vu_send_message(VuDev *dev, VhostUserMsg *vmsg)
+{
+    bool result = false;
+    pthread_mutex_lock(&dev->backend_mutex);
+    if (!vu_message_write(dev, dev->backend_fd, vmsg)) {
+        goto out;
+    }
+
+    result = true;
+out:
+    pthread_mutex_unlock(&dev->backend_mutex);
+
+    return result;
+}
+
+bool
+vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
+{
+    VhostUserMsg msg = {
+        .request = VHOST_USER_BACKEND_SHARED_OBJECT_ADD,
+        .size = sizeof(msg.payload.object),
+        .flags = VHOST_USER_VERSION,
+    };
+
+    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+        return false;
+    }
+
+    return vu_send_message(dev, &msg);
+}
+
+bool
+vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN])
+{
+    VhostUserMsg msg = {
+        .request = VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE,
+        .size = sizeof(msg.payload.object),
+        .flags = VHOST_USER_VERSION,
+    };
+
+    memcpy(msg.payload.object.uuid, uuid, sizeof(uuid[0]) * UUID_LEN);
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SHARED_OBJECT)) {
+        return false;
+    }
+
+    return vu_send_message(dev, &msg);
+}
+
 static bool
 vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
@@ -1944,6 +2062,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
         return vu_add_mem_reg(dev, vmsg);
     case VHOST_USER_REM_MEM_REG:
         return vu_rem_mem_reg(dev, vmsg);
+    case VHOST_USER_GET_SHARED_OBJECT:
+        return vu_get_shared_object(dev, vmsg);
     default:
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Unhandled request: %d", vmsg->request);
-- 
MST


