From: Klaus Jensen <its@irrelevant.dk>
To: qemu-devel@nongnu.org, Peter Maydell <peter.maydell@linaro.org>
Cc: Fam Zheng <fam@euphon.net>, Kevin Wolf <kwolf@redhat.com>,
	qemu-block@nongnu.org, Niklas Cassel <Niklas.Cassel@wdc.com>,
	Dmitry Fomichev <dmitry.fomichev@wdc.com>,
	Klaus Jensen <k.jensen@samsung.com>,
	Max Reitz <mreitz@redhat.com>, Klaus Jensen <its@irrelevant.dk>,
	Stefan Hajnoczi <stefanha@redhat.com>,
	Keith Busch <kbusch@kernel.org>
Subject: [PULL 12/56] hw/block/nvme: Add Commands Supported and Effects log
Date: Tue,  9 Feb 2021 08:30:17 +0100
Message-ID: <20210209073101.548811-13-its@irrelevant.dk>
In-Reply-To: <20210209073101.548811-1-its@irrelevant.dk>

From: Dmitry Fomichev <dmitry.fomichev@wdc.com>

Implementing this log page is necessary to allow the host to check
for Zone Append command support in the Zoned Namespace Command Set.

This commit adds the code to report this log page for the NVM Command
Set only. The parts that are specific to zoned operation will be
added later in the series.

Incoming admin and I/O commands are now only processed if their
corresponding support bits are set in this log. This provides an easy
way to control which commands are supported and which are not,
depending on the selected CC.CSS.
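
For illustration only (not part of this patch), below is a minimal
host-side sketch of how the log returned for LID 0x05 could be decoded
to check whether an I/O opcode is supported. It mirrors the
NvmeEffectsLog layout and the NVME_CMD_EFF_CSUPP flag added to
include/block/nvme.h in the diff below; the get_log_page() helper is
hypothetical and endianness conversion is omitted:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Mirrors the layout added to include/block/nvme.h in this patch. */
    typedef struct NvmeEffectsLog {
        uint32_t acs[256];   /* Admin Commands Supported and Effects */
        uint32_t iocs[256];  /* I/O Commands Supported and Effects */
        uint8_t  resv[2048];
    } NvmeEffectsLog;

    #define NVME_CMD_EFF_CSUPP   (1 << 0)
    #define NVME_LOG_CMD_EFFECTS 0x05

    /*
     * Hypothetical helper: issues Get Log Page for the given log
     * identifier and fills *buf. How the command is actually sent
     * (ioctl, raw queues, ...) is out of scope for this sketch.
     */
    int get_log_page(int fd, uint8_t lid, void *buf, size_t len);

    static bool io_opcode_supported(int fd, uint8_t opc)
    {
        NvmeEffectsLog log;

        if (get_log_page(fd, NVME_LOG_CMD_EFFECTS, &log, sizeof(log))) {
            return false;
        }
        /* Bit 0 (CSUPP) of the per-opcode entry indicates support. */
        return log.iocs[opc] & NVME_CMD_EFF_CSUPP;
    }

A guest can obtain the same information with any tool able to issue
Get Log Page with log identifier 0x05.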

Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
Reviewed-by: Niklas Cassel <Niklas.Cassel@wdc.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
 hw/block/nvme-ns.h    |   1 +
 include/block/nvme.h  |  19 ++++++++
 hw/block/nvme.c       | 102 ++++++++++++++++++++++++++++++++++++++----
 hw/block/trace-events |   1 +
 4 files changed, 114 insertions(+), 9 deletions(-)

diff --git a/hw/block/nvme-ns.h b/hw/block/nvme-ns.h
index aeca810fc7a8..bdeaf1c0de84 100644
--- a/hw/block/nvme-ns.h
+++ b/hw/block/nvme-ns.h
@@ -30,6 +30,7 @@ typedef struct NvmeNamespace {
     int32_t      bootindex;
     int64_t      size;
     NvmeIdNs     id_ns;
+    const uint32_t *iocs;
 
     NvmeNamespaceParams params;
 
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 11ac1c2b7dfb..397f7ca3b5cb 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -752,10 +752,27 @@ enum NvmeSmartWarn {
     NVME_SMART_FAILED_VOLATILE_MEDIA  = 1 << 4,
 };
 
+typedef struct NvmeEffectsLog {
+    uint32_t    acs[256];
+    uint32_t    iocs[256];
+    uint8_t     resv[2048];
+} NvmeEffectsLog;
+
+enum {
+    NVME_CMD_EFF_CSUPP      = 1 << 0,
+    NVME_CMD_EFF_LBCC       = 1 << 1,
+    NVME_CMD_EFF_NCC        = 1 << 2,
+    NVME_CMD_EFF_NIC        = 1 << 3,
+    NVME_CMD_EFF_CCC        = 1 << 4,
+    NVME_CMD_EFF_CSE_MASK   = 3 << 16,
+    NVME_CMD_EFF_UUID_SEL   = 1 << 19,
+};
+
 enum NvmeLogIdentifier {
     NVME_LOG_ERROR_INFO     = 0x01,
     NVME_LOG_SMART_INFO     = 0x02,
     NVME_LOG_FW_SLOT_INFO   = 0x03,
+    NVME_LOG_CMD_EFFECTS    = 0x05,
 };
 
 typedef struct QEMU_PACKED NvmePSD {
@@ -868,6 +885,7 @@ enum NvmeIdCtrlFrmw {
 
 enum NvmeIdCtrlLpa {
     NVME_LPA_NS_SMART = 1 << 0,
+    NVME_LPA_CSE      = 1 << 1,
     NVME_LPA_EXTENDED = 1 << 2,
 };
 
@@ -1076,6 +1094,7 @@ static inline void _nvme_check_size(void)
     QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64);
     QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512);
     QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512);
+    QEMU_BUILD_BUG_ON(sizeof(NvmeEffectsLog) != 4096);
     QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
     QEMU_BUILD_BUG_ON(sizeof(NvmeIdNs) != 4096);
     QEMU_BUILD_BUG_ON(sizeof(NvmeSglDescriptor) != 16);
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 4d1ca8c466c5..05e799623c41 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -112,6 +112,30 @@ static const uint32_t nvme_feature_cap[NVME_FID_MAX] = {
     [NVME_TIMESTAMP]                = NVME_FEAT_CAP_CHANGE,
 };
 
+static const uint32_t nvme_cse_acs[256] = {
+    [NVME_ADM_CMD_DELETE_SQ]        = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_CREATE_SQ]        = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_GET_LOG_PAGE]     = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_DELETE_CQ]        = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_CREATE_CQ]        = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_IDENTIFY]         = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_ABORT]            = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_SET_FEATURES]     = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_GET_FEATURES]     = NVME_CMD_EFF_CSUPP,
+    [NVME_ADM_CMD_ASYNC_EV_REQ]     = NVME_CMD_EFF_CSUPP,
+};
+
+static const uint32_t nvme_cse_iocs_none[256];
+
+static const uint32_t nvme_cse_iocs_nvm[256] = {
+    [NVME_CMD_FLUSH]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
+    [NVME_CMD_WRITE_ZEROES]         = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
+    [NVME_CMD_WRITE]                = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
+    [NVME_CMD_READ]                 = NVME_CMD_EFF_CSUPP,
+    [NVME_CMD_DSM]                  = NVME_CMD_EFF_CSUPP | NVME_CMD_EFF_LBCC,
+    [NVME_CMD_COMPARE]              = NVME_CMD_EFF_CSUPP,
+};
+
 static void nvme_process_sq(void *opaque);
 
 static uint16_t nvme_cid(NvmeRequest *req)
@@ -1306,10 +1330,6 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
     trace_pci_nvme_io_cmd(nvme_cid(req), nsid, nvme_sqid(req),
                           req->cmd.opcode, nvme_io_opc_str(req->cmd.opcode));
 
-    if (NVME_CC_CSS(n->bar.cc) == NVME_CC_CSS_ADMIN_ONLY) {
-        return NVME_INVALID_OPCODE | NVME_DNR;
-    }
-
     if (!nvme_nsid_valid(n, nsid)) {
         return NVME_INVALID_NSID | NVME_DNR;
     }
@@ -1319,6 +1339,11 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
         return NVME_INVALID_FIELD | NVME_DNR;
     }
 
+    if (!(req->ns->iocs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
+        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
+        return NVME_INVALID_OPCODE | NVME_DNR;
+    }
+
     switch (req->cmd.opcode) {
     case NVME_CMD_FLUSH:
         return nvme_flush(n, req);
@@ -1333,9 +1358,10 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
     case NVME_CMD_DSM:
         return nvme_dsm(n, req);
     default:
-        trace_pci_nvme_err_invalid_opc(req->cmd.opcode);
-        return NVME_INVALID_OPCODE | NVME_DNR;
+        assert(false);
     }
+
+    return NVME_INVALID_OPCODE | NVME_DNR;
 }
 
 static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
@@ -1570,6 +1596,37 @@ static uint16_t nvme_error_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
                     DMA_DIRECTION_FROM_DEVICE, req);
 }
 
+static uint16_t nvme_cmd_effects(NvmeCtrl *n, uint32_t buf_len,
+                                 uint64_t off, NvmeRequest *req)
+{
+    NvmeEffectsLog log = {};
+    const uint32_t *src_iocs = NULL;
+    uint32_t trans_len;
+
+    if (off >= sizeof(log)) {
+        trace_pci_nvme_err_invalid_log_page_offset(off, sizeof(log));
+        return NVME_INVALID_FIELD | NVME_DNR;
+    }
+
+    switch (NVME_CC_CSS(n->bar.cc)) {
+    case NVME_CC_CSS_NVM:
+        src_iocs = nvme_cse_iocs_nvm;
+    case NVME_CC_CSS_ADMIN_ONLY:
+        break;
+    }
+
+    memcpy(log.acs, nvme_cse_acs, sizeof(nvme_cse_acs));
+
+    if (src_iocs) {
+        memcpy(log.iocs, src_iocs, sizeof(log.iocs));
+    }
+
+    trans_len = MIN(sizeof(log) - off, buf_len);
+
+    return nvme_dma(n, ((uint8_t *)&log) + off, trans_len,
+                    DMA_DIRECTION_FROM_DEVICE, req);
+}
+
 static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
 {
     NvmeCmd *cmd = &req->cmd;
@@ -1613,6 +1670,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
         return nvme_smart_info(n, rae, len, off, req);
     case NVME_LOG_FW_SLOT_INFO:
         return nvme_fw_log_info(n, len, off, req);
+    case NVME_LOG_CMD_EFFECTS:
+        return nvme_cmd_effects(n, len, off, req);
     default:
         trace_pci_nvme_err_invalid_log_page(nvme_cid(req), lid);
         return NVME_INVALID_FIELD | NVME_DNR;
@@ -2229,6 +2288,11 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
     trace_pci_nvme_admin_cmd(nvme_cid(req), nvme_sqid(req), req->cmd.opcode,
                              nvme_adm_opc_str(req->cmd.opcode));
 
+    if (!(nvme_cse_acs[req->cmd.opcode] & NVME_CMD_EFF_CSUPP)) {
+        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
+        return NVME_INVALID_OPCODE | NVME_DNR;
+    }
+
     switch (req->cmd.opcode) {
     case NVME_ADM_CMD_DELETE_SQ:
         return nvme_del_sq(n, req);
@@ -2251,9 +2315,10 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeRequest *req)
     case NVME_ADM_CMD_ASYNC_EV_REQ:
         return nvme_aer(n, req);
     default:
-        trace_pci_nvme_err_invalid_admin_opc(req->cmd.opcode);
-        return NVME_INVALID_OPCODE | NVME_DNR;
+        assert(false);
     }
+
+    return NVME_INVALID_OPCODE | NVME_DNR;
 }
 
 static void nvme_process_sq(void *opaque)
@@ -2352,6 +2417,23 @@ static void nvme_ctrl_shutdown(NvmeCtrl *n)
     }
 }
 
+static void nvme_select_ns_iocs(NvmeCtrl *n)
+{
+    NvmeNamespace *ns;
+    int i;
+
+    for (i = 1; i <= n->num_namespaces; i++) {
+        ns = nvme_ns(n, i);
+        if (!ns) {
+            continue;
+        }
+        ns->iocs = nvme_cse_iocs_none;
+        if (NVME_CC_CSS(n->bar.cc) != NVME_CC_CSS_ADMIN_ONLY) {
+            ns->iocs = nvme_cse_iocs_nvm;
+        }
+    }
+}
+
 static int nvme_start_ctrl(NvmeCtrl *n)
 {
     uint32_t page_bits = NVME_CC_MPS(n->bar.cc) + 12;
@@ -2450,6 +2532,8 @@ static int nvme_start_ctrl(NvmeCtrl *n)
 
     QTAILQ_INIT(&n->aer_queue);
 
+    nvme_select_ns_iocs(n);
+
     return 0;
 }
 
@@ -3057,7 +3141,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
     id->acl = 3;
     id->aerl = n->params.aerl;
     id->frmw = (NVME_NUM_FW_SLOTS << 1) | NVME_FRMW_SLOT1_RO;
-    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_EXTENDED;
+    id->lpa = NVME_LPA_NS_SMART | NVME_LPA_CSE | NVME_LPA_EXTENDED;
 
     /* recommended default value (~70 C) */
     id->wctemp = cpu_to_le16(NVME_TEMPERATURE_WARNING);
diff --git a/hw/block/trace-events b/hw/block/trace-events
index 60262b03c901..9e1a17e62711 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -108,6 +108,7 @@ pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PR
 pci_nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8""
 pci_nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8""
 pci_nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64""
+pci_nvme_err_invalid_log_page_offset(uint64_t ofs, uint64_t size) "must be <= %"PRIu64", got %"PRIu64""
 pci_nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sid=%"PRIu16""
 pci_nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16""
 pci_nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16""
-- 
2.30.0



