From: Zhenzhong Duan <zhenzhong.duan@intel.com>
To: qemu-devel@nongnu.org
Cc: alex.williamson@redhat.com, clg@redhat.com, jgg@nvidia.com,
nicolinc@nvidia.com, joao.m.martins@oracle.com,
eric.auger@redhat.com, peterx@redhat.com, jasowang@redhat.com,
kevin.tian@intel.com, yi.l.liu@intel.com, yi.y.sun@intel.com,
chao.p.peng@intel.com, Zhenzhong Duan <zhenzhong.duan@intel.com>
Subject: [PATCH v6 08/21] vfio/pci: Introduce a vfio pci hot reset interface
Date: Tue, 14 Nov 2023 18:09:42 +0800
Message-ID: <20231114100955.1961974-9-zhenzhong.duan@intel.com>
In-Reply-To: <20231114100955.1961974-1-zhenzhong.duan@intel.com>
Legacy VFIO PCI and the iommufd cdev backend use different processes to
hot reset a VFIO device. Expand the current code to abstract out a
pci_hot_reset callback for legacy VFIO; the same interface will also be
used by the iommufd cdev VFIO device.

Rename vfio_pci_hot_reset to vfio_legacy_pci_hot_reset and move it
into container.c.

vfio_pci_[pre/post]_reset and vfio_pci_host_match are exported so that
they can be called from both the legacy and iommufd pci_hot_reset
callbacks.
Suggested-by: Cédric Le Goater <clg@redhat.com>
Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
---
v6: pci_hot_reset returns -errno on failure
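
For illustration only (not part of this patch): with the new callback in
place, another backend only needs to supply its own handler and register
it in its VFIOIOMMUOps. A minimal sketch, assuming a hypothetical
vfio_iommufd_pci_hot_reset() and vfio_iommufd_ops (the actual iommufd
implementation is added in a later patch of this series):

static int vfio_iommufd_pci_hot_reset(VFIODevice *vbasedev, bool single)
{
    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);

    if (!single) {
        vfio_pci_pre_reset(vdev);          /* exported by this patch */
    }
    vbasedev->needs_reset = false;

    /*
     * iommufd-specific dependency checks and the
     * VFIO_DEVICE_PCI_HOT_RESET ioctl would go here,
     * returning -errno on failure.
     */

    if (!single) {
        vfio_pci_post_reset(vdev);         /* exported by this patch */
    }
    return 0;
}

const VFIOIOMMUOps vfio_iommufd_ops = {
    /* ... other callbacks ... */
    .pci_hot_reset = vfio_iommufd_pci_hot_reset,
};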
hw/vfio/pci.h | 3 +
include/hw/vfio/vfio-container-base.h | 3 +
hw/vfio/container.c | 170 ++++++++++++++++++++++++++
hw/vfio/pci.c | 168 +------------------------
4 files changed, 182 insertions(+), 162 deletions(-)
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index 1006061afb..6e64a2654e 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -218,6 +218,9 @@ void vfio_probe_igd_bar4_quirk(VFIOPCIDevice *vdev, int nr);
extern const PropertyInfo qdev_prop_nv_gpudirect_clique;
+void vfio_pci_pre_reset(VFIOPCIDevice *vdev);
+void vfio_pci_post_reset(VFIOPCIDevice *vdev);
+bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name);
int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
struct vfio_pci_hot_reset_info **info_p);
diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h
index 4b6f017c6f..45bb19c767 100644
--- a/include/hw/vfio/vfio-container-base.h
+++ b/include/hw/vfio/vfio-container-base.h
@@ -106,6 +106,9 @@ struct VFIOIOMMUOps {
int (*set_dirty_page_tracking)(VFIOContainerBase *bcontainer, bool start);
int (*query_dirty_bitmap)(VFIOContainerBase *bcontainer, VFIOBitmap *vbmap,
hwaddr iova, hwaddr size);
+ /* PCI specific */
+ int (*pci_hot_reset)(VFIODevice *vbasedev, bool single);
+
/* SPAPR specific */
int (*add_window)(VFIOContainerBase *bcontainer,
MemoryRegionSection *section,
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index ed2d721b2b..1dbf9b9a17 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -33,6 +33,7 @@
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"
+#include "pci.h"
VFIOGroupList vfio_group_list =
QLIST_HEAD_INITIALIZER(vfio_group_list);
@@ -922,6 +923,174 @@ static void vfio_legacy_detach_device(VFIODevice *vbasedev)
vfio_put_group(group);
}
+static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+ VFIOGroup *group;
+ struct vfio_pci_hot_reset_info *info = NULL;
+ struct vfio_pci_dependent_device *devices;
+ struct vfio_pci_hot_reset *reset;
+ int32_t *fds;
+ int ret, i, count;
+ bool multi = false;
+
+ trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
+
+ if (!single) {
+ vfio_pci_pre_reset(vdev);
+ }
+ vdev->vbasedev.needs_reset = false;
+
+ ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);
+
+ if (ret) {
+ goto out_single;
+ }
+ devices = &info->devices[0];
+
+ trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
+
+ /* Verify that we have all the groups required */
+ for (i = 0; i < info->count; i++) {
+ PCIHostDeviceAddress host;
+ VFIOPCIDevice *tmp;
+ VFIODevice *vbasedev_iter;
+
+ host.domain = devices[i].segment;
+ host.bus = devices[i].bus;
+ host.slot = PCI_SLOT(devices[i].devfn);
+ host.function = PCI_FUNC(devices[i].devfn);
+
+ trace_vfio_pci_hot_reset_dep_devices(host.domain,
+ host.bus, host.slot, host.function, devices[i].group_id);
+
+ if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
+ continue;
+ }
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ if (group->groupid == devices[i].group_id) {
+ break;
+ }
+ }
+
+ if (!group) {
+ if (!vdev->has_pm_reset) {
+ error_report("vfio: Cannot reset device %s, "
+ "depends on group %d which is not owned.",
+ vdev->vbasedev.name, devices[i].group_id);
+ }
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* Prep dependent devices for reset and clear our marker. */
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (!vbasedev_iter->dev->realized ||
+ vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
+ continue;
+ }
+ tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
+ if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
+ if (single) {
+ ret = -EINVAL;
+ goto out_single;
+ }
+ vfio_pci_pre_reset(tmp);
+ tmp->vbasedev.needs_reset = false;
+ multi = true;
+ break;
+ }
+ }
+ }
+
+ if (!single && !multi) {
+ ret = -EINVAL;
+ goto out_single;
+ }
+
+ /* Determine how many group fds need to be passed */
+ count = 0;
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ for (i = 0; i < info->count; i++) {
+ if (group->groupid == devices[i].group_id) {
+ count++;
+ break;
+ }
+ }
+ }
+
+ reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
+ reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
+ fds = &reset->group_fds[0];
+
+ /* Fill in group fds */
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ for (i = 0; i < info->count; i++) {
+ if (group->groupid == devices[i].group_id) {
+ fds[reset->count++] = group->fd;
+ break;
+ }
+ }
+ }
+
+ /* Bus reset! */
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
+ g_free(reset);
+ if (ret) {
+ ret = -errno;
+ }
+
+ trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
+ ret ? strerror(errno) : "Success");
+
+out:
+ /* Re-enable INTx on affected devices */
+ for (i = 0; i < info->count; i++) {
+ PCIHostDeviceAddress host;
+ VFIOPCIDevice *tmp;
+ VFIODevice *vbasedev_iter;
+
+ host.domain = devices[i].segment;
+ host.bus = devices[i].bus;
+ host.slot = PCI_SLOT(devices[i].devfn);
+ host.function = PCI_FUNC(devices[i].devfn);
+
+ if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
+ continue;
+ }
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ if (group->groupid == devices[i].group_id) {
+ break;
+ }
+ }
+
+ if (!group) {
+ break;
+ }
+
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (!vbasedev_iter->dev->realized ||
+ vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
+ continue;
+ }
+ tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
+ if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
+ vfio_pci_post_reset(tmp);
+ break;
+ }
+ }
+ }
+out_single:
+ if (!single) {
+ vfio_pci_post_reset(vdev);
+ }
+ g_free(info);
+
+ return ret;
+}
+
const VFIOIOMMUOps vfio_legacy_ops = {
.dma_map = vfio_legacy_dma_map,
.dma_unmap = vfio_legacy_dma_unmap,
@@ -929,4 +1098,5 @@ const VFIOIOMMUOps vfio_legacy_ops = {
.detach_device = vfio_legacy_detach_device,
.set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking,
.query_dirty_bitmap = vfio_legacy_query_dirty_bitmap,
+ .pci_hot_reset = vfio_legacy_pci_hot_reset,
};
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index eb55e8ae88..d00c3472c7 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -2374,7 +2374,7 @@ static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
return 0;
}
-static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
+void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
PCIDevice *pdev = &vdev->pdev;
uint16_t cmd;
@@ -2411,7 +2411,7 @@ static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
-static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
+void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
Error *err = NULL;
int nr;
@@ -2435,7 +2435,7 @@ static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
vfio_quirk_reset(vdev);
}
-static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
+bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
char tmp[13];
@@ -2485,166 +2485,10 @@ int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
{
- VFIOGroup *group;
- struct vfio_pci_hot_reset_info *info = NULL;
- struct vfio_pci_dependent_device *devices;
- struct vfio_pci_hot_reset *reset;
- int32_t *fds;
- int ret, i, count;
- bool multi = false;
-
- trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
-
- if (!single) {
- vfio_pci_pre_reset(vdev);
- }
- vdev->vbasedev.needs_reset = false;
-
- ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);
-
- if (ret) {
- goto out_single;
- }
- devices = &info->devices[0];
-
- trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
-
- /* Verify that we have all the groups required */
- for (i = 0; i < info->count; i++) {
- PCIHostDeviceAddress host;
- VFIOPCIDevice *tmp;
- VFIODevice *vbasedev_iter;
-
- host.domain = devices[i].segment;
- host.bus = devices[i].bus;
- host.slot = PCI_SLOT(devices[i].devfn);
- host.function = PCI_FUNC(devices[i].devfn);
-
- trace_vfio_pci_hot_reset_dep_devices(host.domain,
- host.bus, host.slot, host.function, devices[i].group_id);
-
- if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
- continue;
- }
-
- QLIST_FOREACH(group, &vfio_group_list, next) {
- if (group->groupid == devices[i].group_id) {
- break;
- }
- }
-
- if (!group) {
- if (!vdev->has_pm_reset) {
- error_report("vfio: Cannot reset device %s, "
- "depends on group %d which is not owned.",
- vdev->vbasedev.name, devices[i].group_id);
- }
- ret = -EPERM;
- goto out;
- }
-
- /* Prep dependent devices for reset and clear our marker. */
- QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
- if (!vbasedev_iter->dev->realized ||
- vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
- continue;
- }
- tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
- if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
- if (single) {
- ret = -EINVAL;
- goto out_single;
- }
- vfio_pci_pre_reset(tmp);
- tmp->vbasedev.needs_reset = false;
- multi = true;
- break;
- }
- }
- }
-
- if (!single && !multi) {
- ret = -EINVAL;
- goto out_single;
- }
-
- /* Determine how many group fds need to be passed */
- count = 0;
- QLIST_FOREACH(group, &vfio_group_list, next) {
- for (i = 0; i < info->count; i++) {
- if (group->groupid == devices[i].group_id) {
- count++;
- break;
- }
- }
- }
-
- reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
- reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
- fds = &reset->group_fds[0];
-
- /* Fill in group fds */
- QLIST_FOREACH(group, &vfio_group_list, next) {
- for (i = 0; i < info->count; i++) {
- if (group->groupid == devices[i].group_id) {
- fds[reset->count++] = group->fd;
- break;
- }
- }
- }
-
- /* Bus reset! */
- ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
- g_free(reset);
-
- trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
- ret ? strerror(errno) : "Success");
-
-out:
- /* Re-enable INTx on affected devices */
- for (i = 0; i < info->count; i++) {
- PCIHostDeviceAddress host;
- VFIOPCIDevice *tmp;
- VFIODevice *vbasedev_iter;
-
- host.domain = devices[i].segment;
- host.bus = devices[i].bus;
- host.slot = PCI_SLOT(devices[i].devfn);
- host.function = PCI_FUNC(devices[i].devfn);
-
- if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
- continue;
- }
-
- QLIST_FOREACH(group, &vfio_group_list, next) {
- if (group->groupid == devices[i].group_id) {
- break;
- }
- }
-
- if (!group) {
- break;
- }
-
- QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
- if (!vbasedev_iter->dev->realized ||
- vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
- continue;
- }
- tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
- if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
- vfio_pci_post_reset(tmp);
- break;
- }
- }
- }
-out_single:
- if (!single) {
- vfio_pci_post_reset(vdev);
- }
- g_free(info);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ const VFIOIOMMUOps *ops = vbasedev->bcontainer->ops;
- return ret;
+ return ops->pci_hot_reset(vbasedev, single);
}
/*
--
2.34.1