From: Nilay Shroff <nilay@linux.ibm.com>
To: linux-nvme@lists.infradead.org
Cc: kbusch@kernel.org, hch@lst.de, sagi@grimberg.me, axboe@kernel.dk,
hare@suse.de, dwagner@suse.de, gjoyce@ibm.com
Subject: [RFC PATCH 3/5] nvme-multipath: add sysfs attribute for adaptive I/O policy
Date: Sun, 21 Sep 2025 16:42:23 +0530
Message-ID: <20250921111234.863853-4-nilay@linux.ibm.com>
In-Reply-To: <20250921111234.863853-1-nilay@linux.ibm.com>
This commit introduces a new sysfs attribute, "adp_stat", under the
nvme path block device. This attribute provides visibility into the
state of the adaptive I/O policy and is intended to aid debugging and
observability. We now also calculate the per-path aggregated smoothed
(EWMA) latency so that it can be reported under this new attribute.
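For reference, with the WEIGHT of 8 used by this series (i.e.
NVME_EWMA_SHIFT = 3) the smoothing step works out to
slat = (slat * 7 + sample) / 8; as a purely illustrative example, a
previous smoothed latency of 1000 ns and a new sample of 2000 ns give
(1000 * 7 + 2000) / 8 = 1125 ns.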
The attribute reports per-path aggregated statistics, including I/O
weight, smoothed (EWMA) latency, selection count, processed samples,
and ignored samples.
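For illustration only (the device name and values below are
hypothetical), reading the attribute yields ten space-separated
fields, e.g.:
  $ cat /sys/block/nvme0c1n1/adp_stat
  37 183520 1024 980 44 63 412300 2048 1995 53
where the first five fields are the READ statistics (weight, smoothed
latency in nanoseconds, selection count, processed samples, ignored
samples) and the last five are the corresponding WRITE statistics.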
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
---
drivers/nvme/host/multipath.c | 77 ++++++++++++++++++++++++++++++++++-
drivers/nvme/host/nvme.h | 2 +
drivers/nvme/host/sysfs.c | 5 +++
3 files changed, 82 insertions(+), 2 deletions(-)
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 4f56a2bf7ea3..84c64605d05c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -224,6 +224,22 @@ static inline u64 ewma_update(u64 old, u64 new)
return (old * ((1 << NVME_EWMA_SHIFT) - 1) + new) >> NVME_EWMA_SHIFT;
}
+static inline void path_ewma_update(atomic64_t *ptr, u64 new)
+{
+ u64 old, slat;
+
+ /*
+ * Since multiple CPUs may update the per-path smoothed (EWMA)
+ * latency concurrently, we use an atomic compare-and-exchange
+ * loop to safely apply the update without losing intermediate
+ * changes.
+ */
+ do {
+ old = atomic64_read(ptr);
+ slat = ewma_update(old, new);
+ } while (atomic64_cmpxchg(ptr, old, slat) != old);
+}
+
static void nvme_mpath_add_sample(struct request *rq, struct nvme_ns *ns)
{
int cpu, srcu_idx;
@@ -308,11 +324,18 @@ static void nvme_mpath_add_sample(struct request *rq, struct nvme_ns *ns)
* slat_ns = (prev_slat_ns * (WEIGHT - 1) + (latency)) / WEIGHT
* With WEIGHT = 8, this assigns 7/8 (~87.5 %) weight to the
* existing latency and 1/8 (~12.5%) weight to the new latency.
+ *
+ * Note that we also calculate the per-path smoothed (EWMA)
+ * latency here, which is then exposed in aggregate through
+ * sysfs for observability/debugging.
*/
- if (unlikely(!stat->slat_ns))
+ if (unlikely(!stat->slat_ns)) {
stat->slat_ns = avg_lat_ns;
- else
+ atomic64_set(&ns->slat_ns[rw], avg_lat_ns);
+ } else {
stat->slat_ns = ewma_update(stat->slat_ns, avg_lat_ns);
+ path_ewma_update(&ns->slat_ns[rw], avg_lat_ns);
+ }
stat->batch = stat->batch_count = 0;
@@ -437,6 +460,7 @@ static void nvme_mpath_reset_current_stat(struct nvme_ns *ns)
stat = per_cpu_ptr(ns->cpu_stat, cpu);
memset(stat, 0, 2 * sizeof(struct nvme_path_stat));
}
+ memset(ns->slat_ns, 0, sizeof(ns->slat_ns));
}
static bool nvme_mpath_set_current_adaptive_path(struct nvme_ns *ns)
@@ -1450,6 +1474,55 @@ static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr
}
DEVICE_ATTR_RO(numa_nodes);
+static void adp_stat_read_all(struct nvme_ns *ns, struct nvme_path_stat *batch)
+{
+ int i, cpu;
+ int ncpu[2] = {0};
+ struct nvme_path_stat *stat;
+
+ for_each_online_cpu(cpu) {
+ stat = per_cpu_ptr(ns->cpu_stat, cpu);
+
+ for (i = 0; i < 2; i++) {
+ if (stat[i].weight) {
+ batch[i].weight += stat[i].weight;
+ batch[i].sel += stat[i].sel;
+ batch[i].nr_samples += stat[i].nr_samples;
+ batch[i].nr_ignored += stat[i].nr_ignored;
+ ncpu[i]++;
+ }
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (!ncpu[i])
+ continue;
+ batch[i].weight = DIV_U64_ROUND_CLOSEST(batch[i].weight, ncpu[i]);
+ }
+}
+
+static ssize_t adp_stat_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_path_stat stat[2] = {0};
+ struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+ adp_stat_read_all(ns, stat);
+ return sysfs_emit(buf, "%u %llu %llu %llu %llu %u %llu %llu %llu %llu\n",
+ stat[READ].weight,
+ atomic64_read(&ns->slat_ns[READ]),
+ stat[READ].sel,
+ stat[READ].nr_samples,
+ stat[READ].nr_ignored,
+ stat[WRITE].weight,
+ atomic64_read(&ns->slat_ns[WRITE]),
+ stat[WRITE].sel,
+ stat[WRITE].nr_samples,
+ stat[WRITE].nr_ignored);
+
+}
+DEVICE_ATTR_RO(adp_stat);
+
static ssize_t delayed_removal_secs_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index aa3f681d7376..22445cf4f5d5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -548,6 +548,7 @@ struct nvme_ns {
#ifdef CONFIG_NVME_MULTIPATH
enum nvme_ana_state ana_state;
u32 ana_grpid;
+ atomic64_t slat_ns[2]; /* path smoothed (EWMA) latency in nanoseconds */
struct nvme_path_stat __percpu *cpu_stat;
#endif
struct list_head siblings;
@@ -1009,6 +1010,7 @@ extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute dev_attr_queue_depth;
extern struct device_attribute dev_attr_numa_nodes;
+extern struct device_attribute dev_attr_adp_stat;
extern struct device_attribute dev_attr_delayed_removal_secs;
extern struct device_attribute subsys_attr_iopolicy;
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 4f9607e9698a..cb04539e2e2c 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -260,6 +260,7 @@ static struct attribute *nvme_ns_attrs[] = {
&dev_attr_ana_state.attr,
&dev_attr_queue_depth.attr,
&dev_attr_numa_nodes.attr,
+ &dev_attr_adp_stat.attr,
&dev_attr_delayed_removal_secs.attr,
#endif
&dev_attr_io_passthru_err_log_enabled.attr,
@@ -303,6 +304,10 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
if (!nvme_disk_is_ns_head(disk))
return 0;
}
+ if (a == &dev_attr_adp_stat.attr) {
+ if (nvme_disk_is_ns_head(dev_to_disk(dev)))
+ return 0;
+ }
#endif
return a->mode;
}
--
2.51.0
Thread overview: 16+ messages
2025-09-21 11:12 [RFC PATCH 0/5] nvme-multipath: introduce adaptive I/O policy Nilay Shroff
2025-09-21 11:12 ` [RFC PATCH 1/5] block: expose blk_stat_{enable,disable}_accounting() to drivers Nilay Shroff
2025-09-21 11:12 ` [RFC PATCH 2/5] nvme-multipath: add support for adaptive I/O policy Nilay Shroff
2025-09-22 7:30 ` Hannes Reinecke
2025-09-23 3:43 ` Nilay Shroff
2025-09-23 7:03 ` Hannes Reinecke
2025-09-23 10:56 ` Nilay Shroff
2025-09-21 11:12 ` Nilay Shroff [this message]
2025-09-22 7:35 ` [RFC PATCH 3/5] nvme-multipath: add sysfs attribute " Hannes Reinecke
2025-09-23 3:53 ` Nilay Shroff
2025-09-21 11:12 ` [RFC PATCH 4/5] nvmf-tcp: add support for retrieving adapter link speed Nilay Shroff
2025-09-22 7:38 ` Hannes Reinecke
2025-09-23 9:33 ` Nilay Shroff
2025-09-23 10:27 ` Hannes Reinecke
2025-09-23 17:58 ` Nilay Shroff
2025-09-21 11:12 ` [RFC PATCH 5/5] nvme-multipath: factor fabric link speed into path score Nilay Shroff