From: Nilay Shroff <nilay@linux.ibm.com>
To: linux-nvme@lists.infradead.org
Cc: kbusch@kernel.org, hch@lst.de, sagi@grimberg.me, axboe@kernel.dk,
hare@suse.de, dwagner@suse.de, gjoyce@ibm.com
Subject: [RFC PATCH 4/5] nvmf-tcp: add support for retrieving adapter link speed
Date: Sun, 21 Sep 2025 16:42:24 +0530
Message-ID: <20250921111234.863853-5-nilay@linux.ibm.com>
In-Reply-To: <20250921111234.863853-1-nilay@linux.ibm.com>

Add support for retrieving the negotiated NIC link speed (in Mbps) on
the NVMe/TCP transport. The speed is read, under the rtnl lock, from
the network device backing the admin queue's socket: the cached dst
entry is used when available, with a fallback to an explicit route
lookup. This value can be factored into path scoring for the adaptive
I/O policy. For visibility and debugging, a new sysfs attribute
"speed" is also added under each NVMe path block device.
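
For example, once the admin queue is live, the attribute can be read
from sysfs (a hypothetical session; the path block device name will
vary by system):

  $ cat /sys/block/nvme0c1n1/speed
  25000

A value of 0 means the speed could not be determined, e.g. because the
queue is not live or the route lookup failed.
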
Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
---
 drivers/nvme/host/multipath.c | 11 ++++++
 drivers/nvme/host/nvme.h      |  3 ++
 drivers/nvme/host/sysfs.c     |  5 +++
 drivers/nvme/host/tcp.c       | 66 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 85 insertions(+)

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 84c64605d05c..bcceb0fceb94 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -548,6 +548,8 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
 		clear_bit(NVME_NS_READY, &ns->flags);
 		nvme_mpath_reset_current_stat(ns);
+		if (ns->ctrl->ops->get_link_speed)
+			ns->speed = ns->ctrl->ops->get_link_speed(ns->ctrl);
 	}
 	srcu_read_unlock(&head->srcu, srcu_idx);
@@ -1566,6 +1568,15 @@ static ssize_t delayed_removal_secs_store(struct device *dev,
 DEVICE_ATTR_RW(delayed_removal_secs);
+static ssize_t speed_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
+
+	return sysfs_emit(buf, "%u\n", ns->speed);
+}
+DEVICE_ATTR_RO(speed);
+
 static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
 		struct nvme_ana_group_desc *desc, void *data)
 {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 22445cf4f5d5..665f4a4cb52b 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -548,6 +548,7 @@ struct nvme_ns {
 #ifdef CONFIG_NVME_MULTIPATH
 	enum nvme_ana_state ana_state;
 	u32 ana_grpid;
+	u32 speed; /* path link speed (in Mbps) for fabrics */
 	atomic64_t slat_ns[2]; /* path smoothed (EWMA) latency in nanoseconds */
 	struct nvme_path_stat __percpu *cpu_stat;
 #endif
@@ -593,6 +594,7 @@ struct nvme_ctrl_ops {
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
 	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
+	u32 (*get_link_speed)(struct nvme_ctrl *ctrl);
 	void (*print_device_info)(struct nvme_ctrl *ctrl);
 	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
 };
@@ -1012,6 +1014,7 @@ extern struct device_attribute dev_attr_queue_depth;
 extern struct device_attribute dev_attr_numa_nodes;
 extern struct device_attribute dev_attr_adp_stat;
 extern struct device_attribute dev_attr_delayed_removal_secs;
+extern struct device_attribute dev_attr_speed;
 extern struct device_attribute subsys_attr_iopolicy;
 
 static inline bool nvme_disk_is_ns_head(struct gendisk *disk)
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index cb04539e2e2c..5858c2426efd 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -262,6 +262,7 @@ static struct attribute *nvme_ns_attrs[] = {
 	&dev_attr_numa_nodes.attr,
 	&dev_attr_adp_stat.attr,
 	&dev_attr_delayed_removal_secs.attr,
+	&dev_attr_speed.attr,
 #endif
 	&dev_attr_io_passthru_err_log_enabled.attr,
 	NULL,
@@ -308,6 +309,10 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
 		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
 			return 0;
 	}
+	if (a == &dev_attr_speed.attr) {
+		if (nvme_disk_is_ns_head(dev_to_disk(dev)))
+			return 0;
+	}
 #endif
 	return a->mode;
 }
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index c0fe8cfb7229..694f8cbe080d 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -11,6 +11,8 @@
 #include <linux/crc32.h>
 #include <linux/nvme-tcp.h>
 #include <linux/nvme-keyring.h>
+#include <linux/ethtool.h>
+#include <net/ip6_route.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <net/tls.h>
@@ -2825,6 +2827,69 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
 	return len;
 }
 
+static u32 nvme_tcp_get_link_speed(struct nvme_ctrl *ctrl)
+{
+	struct net *net;
+	struct sock *sk;
+	struct dst_entry *dst;
+	struct ethtool_link_ksettings cmd;
+	struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
+	u32 speed = 0;
+
+	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+		return 0;
+
+	rtnl_lock();
+	sk = queue->sock->sk;
+	/*
+	 * First try the cached dst entry; if it is not available, fall
+	 * back to a route lookup.
+	 */
+	dst = sk_dst_get(sk);
+	if (likely(dst)) {
+		if (!__ethtool_get_link_ksettings(dst->dev, &cmd))
+			speed = cmd.base.speed;
+		dst_release(dst);
+	} else {
+		net = sock_net(sk);
+
+		if (sk->sk_family == AF_INET) {
+			struct rtable *rt;
+			struct flowi4 fl4;
+			struct inet_sock *inet = inet_sk(sk);
+
+			inet_sk_init_flowi4(inet, &fl4);
+			rt = ip_route_output_flow(net, &fl4, sk);
+			if (IS_ERR(rt))
+				goto out;
+			if (!__ethtool_get_link_ksettings(rt->dst.dev, &cmd))
+				speed = cmd.base.speed;
+			ip_rt_put(rt);
+		}
+#if IS_ENABLED(CONFIG_IPV6)
+		else if (sk->sk_family == AF_INET6) {
+			struct flowi6 fl6;
+			struct ipv6_pinfo *np = inet6_sk(sk);
+
+			memset(&fl6, 0, sizeof(fl6));
+			fl6.saddr = np->saddr;
+			fl6.daddr = sk->sk_v6_daddr;
+			fl6.flowi6_oif = sk->sk_bound_dev_if;
+			fl6.flowi6_proto = sk->sk_protocol;
+
+			/*
+			 * ip6_route_output() always returns a dst entry;
+			 * release it even if the lookup failed.
+			 */
+			dst = ip6_route_output(net, sk, &fl6);
+			if (dst->error) {
+				dst_release(dst);
+				goto out;
+			}
+			if (!__ethtool_get_link_ksettings(dst->dev, &cmd))
+				speed = cmd.base.speed;
+			dst_release(dst);
+		}
+#endif
+	}
+out:
+	rtnl_unlock();
+	return speed;
+}
+
 static const struct blk_mq_ops nvme_tcp_mq_ops = {
 	.queue_rq	= nvme_tcp_queue_rq,
 	.commit_rqs	= nvme_tcp_commit_rqs,
@@ -2858,6 +2923,7 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
 	.submit_async_event	= nvme_tcp_submit_async_event,
 	.delete_ctrl		= nvme_tcp_delete_ctrl,
 	.get_address		= nvme_tcp_get_address,
+	.get_link_speed		= nvme_tcp_get_link_speed,
 	.stop_ctrl		= nvme_tcp_stop_ctrl,
 };
--
2.51.0