* [PATCH v2 0/2] ss: pretty-printing BPF socket-local storage
@ 2023-12-08 14:57 Quentin Deslandes
  2023-12-08 14:57 ` [PATCH v2 1/2] ss: add support for " Quentin Deslandes
  2023-12-08 14:57 ` [PATCH v2 2/2] ss: pretty-print " Quentin Deslandes
  0 siblings, 2 replies; 6+ messages in thread
From: Quentin Deslandes @ 2023-12-08 14:57 UTC (permalink / raw)
  To: netdev; +Cc: David Ahern, Martin KaFai Lau, Quentin Deslandes, kernel-team

BPF allows programs to store socket-specific data using
BPF_MAP_TYPE_SK_STORAGE maps. The data is attached to the socket itself,
and Martin added INET_DIAG_REQ_SK_BPF_STORAGES, so it can be fetched
using the INET_DIAG mechanism.
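
For context, such a map is typically declared on the BPF side along these
lines (a sketch only; the value struct and map name below are made up to
mirror the example output further down, they are not part of this series):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct my_sk_storage {
	char field_hh;
	union {
		int a;
		int b;
	};
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);	/* required for sk_storage */
	__type(key, int);			/* keyed by the socket */
	__type(value, struct my_sk_storage);
} my_sk_storage_map SEC(".maps");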

Currently, ss doesn't request the socket-local data; this series aims
to fix that.

The first patch requests the socket-local data for the requested map ID
(--bpf-map-id=) or all the maps (--bpf-maps). It then prints the map_id
in a dedicated column.

Patch #2 uses libbpf and BTF to pretty-print the map's content, like
`bpftool map dump` would do.

While I think it makes sense for ss to provide the socket-local storage
content for each socket, it's difficult to reconcile ss' column-based
output with readable socket-local data. Hence, the socket-local data is
printed in a readable fashion over multiple lines below its socket's
statistics, independently of the column-based layout.

Here is an example of ss' output with --bpf-maps:
[...]
ESTAB                  340116             0 [...]     
    map_id: 114 [
        (struct my_sk_storage){
            .field_hh = (char)3,
            (union){
                .a = (int)17,
                .b = (int)17,
            },
        }
    ]

Changes from v1:
* Remove the first patch from the series (fix) and submit it separately.
* Remove double allocation of struct rtattr.
* Close BPF map FDs on exit.
* If bpf_map_get_fd_by_id() fails with ENOENT, print an error message
  and continue to the next map ID.
* Fix typo in new command line option documentation.
* Only use bpf_map_info.btf_value_type_id and ignore
  bpf_map_info.btf_vmlinux_value_type_id (unused for socket-local storage).
* Use btf_dump__dump_type_data() instead of manually using BTF to
  pretty-print socket-local storage data. This change alone halves the
  size of the patch series.

Quentin Deslandes (2):
  ss: add support for BPF socket-local storage
  ss: pretty-print BPF socket-local storage

 misc/ss.c | 433 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 425 insertions(+), 8 deletions(-)

-- 
2.43.0


* [PATCH v2 1/2] ss: add support for BPF socket-local storage
  2023-12-08 14:57 [PATCH v2 0/2] ss: pretty-printing BPF socket-local storage Quentin Deslandes
@ 2023-12-08 14:57 ` Quentin Deslandes
  2023-12-12 22:49   ` Martin KaFai Lau
  2023-12-08 14:57 ` [PATCH v2 2/2] ss: pretty-print " Quentin Deslandes
  1 sibling, 1 reply; 6+ messages in thread
From: Quentin Deslandes @ 2023-12-08 14:57 UTC (permalink / raw)
  To: netdev; +Cc: David Ahern, Martin KaFai Lau, Quentin Deslandes, kernel-team

While sock_diag is able to return BPF socket-local storage in response
to INET_DIAG_REQ_SK_BPF_STORAGES requests, ss doesn't request it.

This change introduces the --bpf-maps and --bpf-map-id= options to request
BPF socket-local storage for all SK_STORAGE maps, or only specific ones.

The bigger part of this change checks the requested map IDs and
ensures they are valid. A new column named "Socket storage" is added
to print the list of map IDs a given socket has data defined for.
This column is disabled unless --bpf-maps or --bpf-map-id= is used.

Signed-off-by: Quentin Deslandes <qde@naccy.de>
Co-authored-by: Martin KaFai Lau <martin.lau@kernel.org>
---
 misc/ss.c | 290 +++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 287 insertions(+), 3 deletions(-)

diff --git a/misc/ss.c b/misc/ss.c
index 01d29dc5..f979b61d 100644
--- a/misc/ss.c
+++ b/misc/ss.c
@@ -51,6 +51,11 @@
 #include <linux/tls.h>
 #include <linux/mptcp.h>
 
+#ifdef HAVE_LIBBPF
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#endif
+
 #if HAVE_RPC
 #include <rpc/rpc.h>
 #include <rpc/xdr.h>
@@ -101,6 +106,7 @@ enum col_id {
 	COL_RADDR,
 	COL_RSERV,
 	COL_PROC,
+	COL_SKSTOR,
 	COL_EXT,
 	COL_MAX
 };
@@ -130,6 +136,7 @@ static struct column columns[] = {
 	{ ALIGN_RIGHT,	"Peer Address:",	" ",	0, 0, 0 },
 	{ ALIGN_LEFT,	"Port",			"",	0, 0, 0 },
 	{ ALIGN_LEFT,	"Process",		"",	0, 0, 0 },
+	{ ALIGN_LEFT,	"Socket storage",	"",	1, 0, 0 },
 	{ ALIGN_LEFT,	"",			"",	0, 0, 0 },
 };
 
@@ -3374,6 +3381,238 @@ static void parse_diag_msg(struct nlmsghdr *nlh, struct sockstat *s)
 	memcpy(s->remote.data, r->id.idiag_dst, s->local.bytelen);
 }
 
+#ifdef HAVE_LIBBPF
+
+#define MAX_NR_BPF_MAP_ID_OPTS 32
+
+struct btf;
+
+static struct bpf_map_opts {
+	unsigned int nr_maps;
+	struct bpf_sk_storage_map_info {
+		unsigned int id;
+		int fd;
+	} maps[MAX_NR_BPF_MAP_ID_OPTS];
+	bool show_all;
+	struct btf *kernel_btf;
+} bpf_map_opts;
+
+static void bpf_map_opts_mixed_error(void)
+{
+	fprintf(stderr,
+		"ss: --bpf-maps and --bpf-map-id cannot be used together\n");
+}
+
+static int bpf_map_opts_add_all(void)
+{
+	unsigned int i;
+	unsigned int fd;
+	uint32_t id = 0;
+	int r;
+
+	if (bpf_map_opts.nr_maps) {
+		bpf_map_opts_mixed_error();
+		return -1;
+	}
+
+	while (1) {
+		struct bpf_map_info info = {};
+		uint32_t len = sizeof(info);
+
+		r = bpf_map_get_next_id(id, &id);
+		if (r) {
+			if (errno == ENOENT)
+				break;
+
+			fprintf(stderr, "ss: failed to fetch BPF map ID\n");
+			goto err;
+		}
+
+		fd = bpf_map_get_fd_by_id(id);
+		if (fd == -1) {
+			if (errno == -ENOENT) {
+				fprintf(stderr, "ss: missing BPF map ID %u, skipping",
+					id);
+				continue;
+			}
+
+			fprintf(stderr, "ss: cannot get fd for BPF map ID %u%s\n",
+				id, errno == EPERM ?
+				": missing root permissions, CAP_BPF, or CAP_SYS_ADMIN" : "");
+			goto err;
+		}
+
+		r = bpf_obj_get_info_by_fd(fd, &info, &len);
+		if (r) {
+			fprintf(stderr, "ss: failed to get info for BPF map ID %u\n",
+				id);
+			close(fd);
+			goto err;
+		}
+
+		if (info.type != BPF_MAP_TYPE_SK_STORAGE) {
+			close(fd);
+			continue;
+		}
+
+		if (bpf_map_opts.nr_maps == MAX_NR_BPF_MAP_ID_OPTS) {
+			fprintf(stderr, "ss: too many (> %u) BPF socket-local storage maps found, skipping map ID %u\n",
+				MAX_NR_BPF_MAP_ID_OPTS, id);
+			close(fd);
+			continue;
+		}
+
+		bpf_map_opts.maps[bpf_map_opts.nr_maps].id = id;
+		bpf_map_opts.maps[bpf_map_opts.nr_maps++].fd = fd;
+	}
+
+	bpf_map_opts.show_all = true;
+
+	return 0;
+
+err:
+	for (i = 0; i < bpf_map_opts.nr_maps; ++i)
+		close(bpf_map_opts.maps[i].fd);
+
+	return -1;
+}
+
+static int bpf_map_opts_add_id(const char *optarg)
+{
+	struct bpf_map_info info = {};
+	uint32_t len = sizeof(info);
+	size_t optarg_len;
+	unsigned long id;
+	unsigned int i;
+	char *end;
+	int fd;
+	int r;
+
+	if (bpf_map_opts.show_all) {
+		bpf_map_opts_mixed_error();
+		return -1;
+	}
+
+	optarg_len = strlen(optarg);
+	id = strtoul(optarg, &end, 0);
+	if (end != optarg + optarg_len || id == 0 || id >= UINT32_MAX) {
+		fprintf(stderr, "ss: invalid BPF map ID %s\n", optarg);
+		return -1;
+	}
+
+	for (i = 0; i < bpf_map_opts.nr_maps; i++) {
+		if (bpf_map_opts.maps[i].id == id)
+			return 0;
+	}
+
+	if (bpf_map_opts.nr_maps == MAX_NR_BPF_MAP_ID_OPTS) {
+		fprintf(stderr, "ss: too many (> %u) BPF socket-local storage maps found, skipping map ID %lu\n",
+			MAX_NR_BPF_MAP_ID_OPTS, id);
+		return 0;
+	}
+
+	fd = bpf_map_get_fd_by_id(id);
+	if (fd == -1) {
+		if (errno == -ENOENT) {
+			fprintf(stderr, "ss: missing BPF map ID %lu, skipping",
+				id);
+			return 0;
+		}
+
+		fprintf(stderr, "ss: cannot get fd for BPF map ID %lu%s\n",
+			id, errno == EPERM ?
+			": missing root permissions, CAP_BPF, or CAP_SYS_ADMIN" : "");
+		return -1;
+	}
+
+	r = bpf_obj_get_info_by_fd(fd, &info, &len);
+	if (r) {
+		fprintf(stderr, "ss: failed to get info for BPF map ID %lu\n", id);
+		close(fd);
+		return -1;
+	}
+
+	if (info.type != BPF_MAP_TYPE_SK_STORAGE) {
+		fprintf(stderr, "ss: BPF map with ID %s has type '%s', expecting 'sk_storage'\n",
+			optarg, libbpf_bpf_map_type_str(info.type));
+		close(fd);
+		return -1;
+	}
+
+	bpf_map_opts.maps[bpf_map_opts.nr_maps].id = id;
+	bpf_map_opts.maps[bpf_map_opts.nr_maps++].fd = fd;
+
+	return 0;
+}
+
+static void bpf_map_opts_destroy(void)
+{
+	int i;
+	
+	for (i = 0; i < bpf_map_opts.nr_maps; ++i)
+		close(bpf_map_opts.maps[i].fd);
+}
+
+static inline bool bpf_map_opts_is_enabled(void)
+{
+	return bpf_map_opts.nr_maps;
+}
+
+static struct rtattr *bpf_map_opts_alloc_rta(void)
+{
+	size_t total_size = RTA_LENGTH(RTA_LENGTH(sizeof(int)) * bpf_map_opts.nr_maps);
+	struct rtattr *stgs_rta, *fd_rta;
+	unsigned int i;
+	void *buf;
+
+	buf = malloc(total_size);
+	if (!buf)
+		return NULL;
+
+	stgs_rta = buf;
+	stgs_rta->rta_type = INET_DIAG_REQ_SK_BPF_STORAGES | NLA_F_NESTED;
+	stgs_rta->rta_len = total_size;
+
+	buf = RTA_DATA(stgs_rta);
+	for (i = 0; i < bpf_map_opts.nr_maps; i++) {
+		int *fd;
+
+		fd_rta = buf;
+		fd_rta->rta_type = SK_DIAG_BPF_STORAGE_REQ_MAP_FD;
+		fd_rta->rta_len = RTA_LENGTH(sizeof(int));
+
+		fd = RTA_DATA(fd_rta);
+		*fd = bpf_map_opts.maps[i].fd;
+
+		buf += fd_rta->rta_len;
+	}
+
+	return stgs_rta;
+}
+
+static void show_sk_bpf_storages(struct rtattr *bpf_stgs)
+{
+	struct rtattr *tb[SK_DIAG_BPF_STORAGE_MAX + 1], *bpf_stg;
+	unsigned int rem;
+
+	for (bpf_stg = RTA_DATA(bpf_stgs), rem = RTA_PAYLOAD(bpf_stgs);
+		RTA_OK(bpf_stg, rem); bpf_stg = RTA_NEXT(bpf_stg, rem)) {
+
+		if ((bpf_stg->rta_type & NLA_TYPE_MASK) != SK_DIAG_BPF_STORAGE)
+			continue;
+
+		parse_rtattr_nested(tb, SK_DIAG_BPF_STORAGE_MAX,
+			(struct rtattr *)bpf_stg);
+
+		if (tb[SK_DIAG_BPF_STORAGE_MAP_ID]) {
+			out("map_id:%u",
+				rta_getattr_u32(tb[SK_DIAG_BPF_STORAGE_MAP_ID]));
+		}
+	}
+}
+
+#endif
+
 static int inet_show_sock(struct nlmsghdr *nlh,
 			  struct sockstat *s)
 {
@@ -3381,8 +3620,8 @@ static int inet_show_sock(struct nlmsghdr *nlh,
 	struct inet_diag_msg *r = NLMSG_DATA(nlh);
 	unsigned char v6only = 0;
 
-	parse_rtattr(tb, INET_DIAG_MAX, (struct rtattr *)(r+1),
-		     nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*r)));
+	parse_rtattr_flags(tb, INET_DIAG_MAX, (struct rtattr *)(r+1),
+		nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*r)), NLA_F_NESTED);
 
 	if (tb[INET_DIAG_PROTOCOL])
 		s->type = rta_getattr_u8(tb[INET_DIAG_PROTOCOL]);
@@ -3479,6 +3718,11 @@ static int inet_show_sock(struct nlmsghdr *nlh,
 	}
 	sctp_ino = s->ino;
 
+	if (tb[INET_DIAG_SK_BPF_STORAGES]) {
+		field_set(COL_SKSTOR);
+		show_sk_bpf_storages(tb[INET_DIAG_SK_BPF_STORAGES]);
+	}
+
 	return 0;
 }
 
@@ -3560,13 +3804,14 @@ static int sockdiag_send(int family, int fd, int protocol, struct filter *f)
 {
 	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
 	DIAG_REQUEST(req, struct inet_diag_req_v2 r);
+	struct rtattr *bpf_stgs_rta = NULL;
 	char    *bc = NULL;
 	int	bclen;
 	__u32	proto;
 	struct msghdr msg;
 	struct rtattr rta_bc;
 	struct rtattr rta_proto;
-	struct iovec iov[5];
+	struct iovec iov[6];
 	int iovlen = 1;
 
 	if (family == PF_UNSPEC)
@@ -3619,6 +3864,17 @@ static int sockdiag_send(int family, int fd, int protocol, struct filter *f)
 		iovlen += 2;
 	}
 
+	if (bpf_map_opts_is_enabled()) {
+		bpf_stgs_rta = bpf_map_opts_alloc_rta();
+		if (!bpf_stgs_rta) {
+			fprintf(stderr, "ss: cannot alloc request for --bpf-map\n");
+			return -1;
+		}
+
+		iov[iovlen++] = (struct iovec){ bpf_stgs_rta, bpf_stgs_rta->rta_len };
+		req.nlh.nlmsg_len += bpf_stgs_rta->rta_len;
+	}
+
 	msg = (struct msghdr) {
 		.msg_name = (void *)&nladdr,
 		.msg_namelen = sizeof(nladdr),
@@ -3627,10 +3883,13 @@ static int sockdiag_send(int family, int fd, int protocol, struct filter *f)
 	};
 
 	if (sendmsg(fd, &msg, 0) < 0) {
+		free(bpf_stgs_rta);
 		close(fd);
 		return -1;
 	}
 
+	free(bpf_stgs_rta);
+
 	return 0;
 }
 
@@ -5350,6 +5609,10 @@ static void _usage(FILE *dest)
 "       --tos           show tos and priority information\n"
 "       --cgroup        show cgroup information\n"
 "   -b, --bpf           show bpf filter socket information\n"
+#ifdef HAVE_LIBBPF
+"       --bpf-maps      show all BPF socket-local storage maps\n"
+"       --bpf-map-id=MAP-ID    show a BPF socket-local storage map\n"
+#endif
 "   -E, --events        continually display sockets as they are destroyed\n"
 "   -Z, --context       display task SELinux security contexts\n"
 "   -z, --contexts      display task and socket SELinux security contexts\n"
@@ -5466,6 +5729,9 @@ static int scan_state(const char *state)
 
 #define OPT_INET_SOCKOPT 262
 
+#define OPT_BPF_MAPS 263
+#define OPT_BPF_MAP_ID 264
+
 static const struct option long_opts[] = {
 	{ "numeric", 0, 0, 'n' },
 	{ "resolve", 0, 0, 'r' },
@@ -5510,6 +5776,10 @@ static const struct option long_opts[] = {
 	{ "mptcp", 0, 0, 'M' },
 	{ "oneline", 0, 0, 'O' },
 	{ "inet-sockopt", 0, 0, OPT_INET_SOCKOPT },
+#ifdef HAVE_LIBBPF
+	{ "bpf-maps", 0, 0, OPT_BPF_MAPS},
+	{ "bpf-map-id", 1, 0, OPT_BPF_MAP_ID},
+#endif
 	{ 0 }
 
 };
@@ -5712,6 +5982,16 @@ int main(int argc, char *argv[])
 		case OPT_INET_SOCKOPT:
 			show_inet_sockopt = 1;
 			break;
+#ifdef HAVE_LIBBPF
+		case OPT_BPF_MAPS:
+			if (bpf_map_opts_add_all())
+				exit(1);
+			break;
+		case OPT_BPF_MAP_ID:
+			if (bpf_map_opts_add_id(optarg))
+				exit(1);
+			break;
+#endif
 		case 'h':
 			help();
 		case '?':
@@ -5810,6 +6090,9 @@ int main(int argc, char *argv[])
 	if (!(current_filter.states & (current_filter.states - 1)))
 		columns[COL_STATE].disabled = 1;
 
+	if (bpf_map_opts.nr_maps)
+		columns[COL_SKSTOR].disabled = 0;
+
 	if (show_header)
 		print_header();
 
@@ -5845,6 +6128,7 @@ int main(int argc, char *argv[])
 
 	if (show_processes || show_threads || show_proc_ctx || show_sock_ctx)
 		user_ent_destroy();
+	bpf_map_opts_destroy();
 
 	render();
 
-- 
2.43.0


* [PATCH v2 2/2] ss: pretty-print BPF socket-local storage
  2023-12-08 14:57 [PATCH v2 0/2] ss: pretty-printing BPF socket-local storage Quentin Deslandes
  2023-12-08 14:57 ` [PATCH v2 1/2] ss: add support for " Quentin Deslandes
@ 2023-12-08 14:57 ` Quentin Deslandes
  2023-12-12 22:58   ` Martin KaFai Lau
  2023-12-13 17:46   ` Alan Maguire
  1 sibling, 2 replies; 6+ messages in thread
From: Quentin Deslandes @ 2023-12-08 14:57 UTC (permalink / raw)
  To: netdev; +Cc: David Ahern, Martin KaFai Lau, Quentin Deslandes, kernel-team

ss is able to print the map ID(s) for which a given socket has BPF
socket-local storage defined (using --bpf-maps or --bpf-map-id=). However,
the actual content of the map remains hidden.

This change aims to pretty-print the socket-local storage content following
the socket details, similar to what `bpftool map dump` would do. The exact
output format is inspired by drgn, while the BTF data processing is similar
to bpftool's.

ss will use libbpf's btf_dump__dump_type_data() to ease pretty-printing
of binary data. This requires out_bpf_sk_storage_print_fn(), a print
callback used by btf_dump__dump_type_data(). vout() is also introduced;
it is similar to out() but takes a va_list parameter.

COL_SKSTOR's header is replaced with an empty string, as it doesn't need to
be printed anymore; it's used as a "virtual" column to refer to the
socket-local storage dump, which will be printed under the socket information.
The column's width is fixed to 1, so it doesn't mess up ss' output.

ss' output remains unchanged unless --bpf-maps or --bpf-map-id= is used,
in which case each socket containing BPF local storage will be followed by
the content of the storage before the next socket's info is displayed.

Signed-off-by: Quentin Deslandes <qde@naccy.de>
---
 misc/ss.c | 157 +++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 145 insertions(+), 12 deletions(-)

diff --git a/misc/ss.c b/misc/ss.c
index f979b61d..6051b694 100644
--- a/misc/ss.c
+++ b/misc/ss.c
@@ -51,8 +51,13 @@
 #include <linux/tls.h>
 #include <linux/mptcp.h>
 
+#ifdef HAVE_LIBBPF
+#include <linux/btf.h>
+#endif
+
 #ifdef HAVE_LIBBPF
 #include <bpf/bpf.h>
+#include <bpf/btf.h>
 #include <bpf/libbpf.h>
 #endif
 
@@ -136,7 +141,7 @@ static struct column columns[] = {
 	{ ALIGN_RIGHT,	"Peer Address:",	" ",	0, 0, 0 },
 	{ ALIGN_LEFT,	"Port",			"",	0, 0, 0 },
 	{ ALIGN_LEFT,	"Process",		"",	0, 0, 0 },
-	{ ALIGN_LEFT,	"Socket storage",	"",	1, 0, 0 },
+	{ ALIGN_LEFT,	"",			"",	1, 0, 0 },
 	{ ALIGN_LEFT,	"",			"",	0, 0, 0 },
 };
 
@@ -1039,11 +1044,10 @@ static int buf_update(int len)
 }
 
 /* Append content to buffer as part of the current field */
-__attribute__((format(printf, 1, 2)))
-static void out(const char *fmt, ...)
+static void vout(const char *fmt, va_list args)
 {
 	struct column *f = current_field;
-	va_list args;
+	va_list _args;
 	char *pos;
 	int len;
 
@@ -1054,18 +1058,27 @@ static void out(const char *fmt, ...)
 		buffer.head = buf_chunk_new();
 
 again:	/* Append to buffer: if we have a new chunk, print again */
+	va_copy(_args, args);
 
 	pos = buffer.cur->data + buffer.cur->len;
-	va_start(args, fmt);
 
 	/* Limit to tail room. If we hit the limit, buf_update() will tell us */
 	len = vsnprintf(pos, buf_chunk_avail(buffer.tail), fmt, args);
-	va_end(args);
 
 	if (buf_update(len))
 		goto again;
 }
 
+__attribute__((format(printf, 1, 2)))
+static void out(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vout(fmt, args);
+	va_end(args);
+}
+
 static int print_left_spacing(struct column *f, int stored, int printed)
 {
 	int s;
@@ -1213,6 +1226,9 @@ static void render_calc_width(void)
 		 */
 		c->width = min(c->width, screen_width);
 
+		if (c == &columns[COL_SKSTOR])
+			c->width = 1;
+
 		if (c->width)
 			first = 0;
 	}
@@ -3392,6 +3408,8 @@ static struct bpf_map_opts {
 	struct bpf_sk_storage_map_info {
 		unsigned int id;
 		int fd;
+		struct bpf_map_info info;
+		struct btf *btf;
 	} maps[MAX_NR_BPF_MAP_ID_OPTS];
 	bool show_all;
 	struct btf *kernel_btf;
@@ -3403,6 +3421,22 @@ static void bpf_map_opts_mixed_error(void)
 		"ss: --bpf-maps and --bpf-map-id cannot be used together\n");
 }
 
+static int bpf_maps_opts_load_btf(struct bpf_map_info *info, struct btf **btf)
+{
+	if (info->btf_value_type_id) {
+		*btf = btf__load_from_kernel_by_id(info->btf_id);
+		if (!*btf) {
+			fprintf(stderr, "ss: failed to load BTF for map ID %u\n",
+				info->id);
+			return -1;
+		}
+	} else {
+		*btf = NULL;
+	}
+
+	return 0;
+}
+
 static int bpf_map_opts_add_all(void)
 {
 	unsigned int i;
@@ -3418,6 +3452,7 @@ static int bpf_map_opts_add_all(void)
 	while (1) {
 		struct bpf_map_info info = {};
 		uint32_t len = sizeof(info);
+		struct btf *btf;
 
 		r = bpf_map_get_next_id(id, &id);
 		if (r) {
@@ -3462,8 +3497,18 @@ static int bpf_map_opts_add_all(void)
 			continue;
 		}
 
+		r = bpf_maps_opts_load_btf(&info, &btf);
+		if (r) {
+			fprintf(stderr, "ss: failed to get BTF data for BPF map ID: %u\n",
+				id);
+			close(fd);
+			goto err;
+		}
+
 		bpf_map_opts.maps[bpf_map_opts.nr_maps].id = id;
-		bpf_map_opts.maps[bpf_map_opts.nr_maps++].fd = fd;
+		bpf_map_opts.maps[bpf_map_opts.nr_maps].fd = fd;
+		bpf_map_opts.maps[bpf_map_opts.nr_maps].info = info;
+		bpf_map_opts.maps[bpf_map_opts.nr_maps++].btf = btf;
 	}
 
 	bpf_map_opts.show_all = true;
@@ -3482,6 +3527,7 @@ static int bpf_map_opts_add_id(const char *optarg)
 	struct bpf_map_info info = {};
 	uint32_t len = sizeof(info);
 	size_t optarg_len;
+	struct btf *btf;
 	unsigned long id;
 	unsigned int i;
 	char *end;
@@ -3539,8 +3585,17 @@ static int bpf_map_opts_add_id(const char *optarg)
 		return -1;
 	}
 
+	r = bpf_maps_opts_load_btf(&info, &btf);
+	if (r) {
+		fprintf(stderr, "ss: failed to get BTF data for BPF map ID: %lu\n",
+			id);
+		return -1;
+	}
+
 	bpf_map_opts.maps[bpf_map_opts.nr_maps].id = id;
-	bpf_map_opts.maps[bpf_map_opts.nr_maps++].fd = fd;
+	bpf_map_opts.maps[bpf_map_opts.nr_maps].fd = fd;
+	bpf_map_opts.maps[bpf_map_opts.nr_maps].info = info;
+	bpf_map_opts.maps[bpf_map_opts.nr_maps++].btf = btf;
 
 	return 0;
 }
@@ -3549,8 +3604,23 @@ static void bpf_map_opts_destroy(void)
 {
 	int i;
 	
-	for (i = 0; i < bpf_map_opts.nr_maps; ++i)
+	for (i = 0; i < bpf_map_opts.nr_maps; ++i) {
+		btf__free(bpf_map_opts.maps[i].btf);
 		close(bpf_map_opts.maps[i].fd);
+	}
+}
+
+static const struct bpf_sk_storage_map_info *bpf_map_opts_get_info(
+	unsigned int map_id)
+{
+	unsigned int i;
+
+	for (i = 0; i < bpf_map_opts.nr_maps; ++i) {
+		if (bpf_map_opts.maps[i].id == map_id)
+			return &bpf_map_opts.maps[i];
+	}
+
+	return NULL;
 }
 
 static inline bool bpf_map_opts_is_enabled(void)
@@ -3590,10 +3660,63 @@ static struct rtattr *bpf_map_opts_alloc_rta(void)
 	return stgs_rta;
 }
 
+static void out_bpf_sk_storage_print_fn(void *ctx, const char *fmt, va_list args)
+{
+	vout(fmt, args);
+}
+
+#define SK_STORAGE_INDENT_STR "    "
+
+static void out_bpf_sk_storage(int map_id, const void *data, size_t len)
+{
+	uint32_t type_id;
+	const struct bpf_sk_storage_map_info *map_info;
+	struct btf_dump *dump;
+	struct btf_dump_type_data_opts opts = {
+		.sz = sizeof(struct btf_dump_type_data_opts),
+		.indent_str = SK_STORAGE_INDENT_STR,
+		.indent_level = 2,
+		.emit_zeroes = 1
+	};
+	struct btf_dump_opts dopts = {
+		.sz = sizeof(struct btf_dump_opts)
+	};
+	int r;
+
+	map_info = bpf_map_opts_get_info(map_id);
+	if (!map_info) {
+		fprintf(stderr, "map_id: %d: missing map info", map_id);
+		return;
+	}
+
+	if (map_info->info.value_size != len) {
+		fprintf(stderr, "map_id: %d: invalid value size, expecting %u, got %lu\n",
+			map_id, map_info->info.value_size, len);
+		return;
+	}
+
+	type_id = map_info->info.btf_value_type_id;
+
+	dump = btf_dump__new(map_info->btf, out_bpf_sk_storage_print_fn, NULL, &dopts);
+	if (!dump) {
+		fprintf(stderr, "Failed to create btf_dump object\n");
+		return;
+	}
+
+	out(SK_STORAGE_INDENT_STR "map_id: %d [\n", map_id);
+	r = btf_dump__dump_type_data(dump, type_id, data, len, &opts);
+	if (r < 0)
+		out(SK_STORAGE_INDENT_STR SK_STORAGE_INDENT_STR "failed to dump data: %d", r);
+	out("\n" SK_STORAGE_INDENT_STR "]");
+
+	btf_dump__free(dump);
+}
+
 static void show_sk_bpf_storages(struct rtattr *bpf_stgs)
 {
 	struct rtattr *tb[SK_DIAG_BPF_STORAGE_MAX + 1], *bpf_stg;
-	unsigned int rem;
+	unsigned int rem, map_id;
+	struct rtattr *value;
 
 	for (bpf_stg = RTA_DATA(bpf_stgs), rem = RTA_PAYLOAD(bpf_stgs);
 		RTA_OK(bpf_stg, rem); bpf_stg = RTA_NEXT(bpf_stg, rem)) {
@@ -3605,8 +3728,13 @@ static void show_sk_bpf_storages(struct rtattr *bpf_stgs)
 			(struct rtattr *)bpf_stg);
 
 		if (tb[SK_DIAG_BPF_STORAGE_MAP_ID]) {
-			out("map_id:%u",
-				rta_getattr_u32(tb[SK_DIAG_BPF_STORAGE_MAP_ID]));
+			out("\n");
+
+			map_id = rta_getattr_u32(tb[SK_DIAG_BPF_STORAGE_MAP_ID]);
+			value = tb[SK_DIAG_BPF_STORAGE_MAP_VALUE];
+
+			out_bpf_sk_storage(map_id, RTA_DATA(value),
+				RTA_PAYLOAD(value));
 		}
 	}
 }
@@ -6000,6 +6128,11 @@ int main(int argc, char *argv[])
 		}
 	}
 
+	if (oneline && (bpf_map_opts.nr_maps || bpf_map_opts.show_all)) {
+		fprintf(stderr, "ss: --oneline, --bpf-maps, and --bpf-map-id are incompatible\n");
+		exit(-1);
+	}
+
 	if (show_processes || show_threads || show_proc_ctx || show_sock_ctx)
 		user_ent_hash_build();
 
-- 
2.43.0


* Re: [PATCH v2 1/2] ss: add support for BPF socket-local storage
  2023-12-08 14:57 ` [PATCH v2 1/2] ss: add support for " Quentin Deslandes
@ 2023-12-12 22:49   ` Martin KaFai Lau
  0 siblings, 0 replies; 6+ messages in thread
From: Martin KaFai Lau @ 2023-12-12 22:49 UTC (permalink / raw)
  To: Quentin Deslandes; +Cc: David Ahern, Martin KaFai Lau, kernel-team, netdev

On 12/8/23 6:57 AM, Quentin Deslandes wrote:
> +static inline bool bpf_map_opts_is_enabled(void)

nit.

This is the only "inline" usage in this file. I would avoid it and depend on the 
compiler to decide.

> +{
> +	return bpf_map_opts.nr_maps;
> +}
> +

[ ... ]

>   static int inet_show_sock(struct nlmsghdr *nlh,
>   			  struct sockstat *s)
>   {
> @@ -3381,8 +3620,8 @@ static int inet_show_sock(struct nlmsghdr *nlh,
>   	struct inet_diag_msg *r = NLMSG_DATA(nlh);
>   	unsigned char v6only = 0;
>   
> -	parse_rtattr(tb, INET_DIAG_MAX, (struct rtattr *)(r+1),
> -		     nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*r)));
> +	parse_rtattr_flags(tb, INET_DIAG_MAX, (struct rtattr *)(r+1),
> +		nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*r)), NLA_F_NESTED);
>   
>   	if (tb[INET_DIAG_PROTOCOL])
>   		s->type = rta_getattr_u8(tb[INET_DIAG_PROTOCOL]);
> @@ -3479,6 +3718,11 @@ static int inet_show_sock(struct nlmsghdr *nlh,
>   	}
>   	sctp_ino = s->ino;
>   
> +	if (tb[INET_DIAG_SK_BPF_STORAGES]) {
> +		field_set(COL_SKSTOR);
> +		show_sk_bpf_storages(tb[INET_DIAG_SK_BPF_STORAGES]);
> +	}
> +
>   	return 0;
>   }
>   
> @@ -3560,13 +3804,14 @@ static int sockdiag_send(int family, int fd, int protocol, struct filter *f)
>   {
>   	struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK };
>   	DIAG_REQUEST(req, struct inet_diag_req_v2 r);
> +	struct rtattr *bpf_stgs_rta = NULL;
>   	char    *bc = NULL;
>   	int	bclen;
>   	__u32	proto;
>   	struct msghdr msg;
>   	struct rtattr rta_bc;
>   	struct rtattr rta_proto;
> -	struct iovec iov[5];
> +	struct iovec iov[6];
>   	int iovlen = 1;
>   
>   	if (family == PF_UNSPEC)
> @@ -3619,6 +3864,17 @@ static int sockdiag_send(int family, int fd, int protocol, struct filter *f)
>   		iovlen += 2;
>   	}
>   
> +	if (bpf_map_opts_is_enabled()) {

This will cause a compiler error when HAVE_LIBBPF is not set.

> +		bpf_stgs_rta = bpf_map_opts_alloc_rta();
> +		if (!bpf_stgs_rta) {
> +			fprintf(stderr, "ss: cannot alloc request for --bpf-map\n");
> +			return -1;
> +		}
> +
> +		iov[iovlen++] = (struct iovec){ bpf_stgs_rta, bpf_stgs_rta->rta_len };
> +		req.nlh.nlmsg_len += bpf_stgs_rta->rta_len;
> +	}
> +
>   	msg = (struct msghdr) {
>   		.msg_name = (void *)&nladdr,
>   		.msg_namelen = sizeof(nladdr),

[ ... ]

> @@ -5712,6 +5982,16 @@ int main(int argc, char *argv[])
>   		case OPT_INET_SOCKOPT:
>   			show_inet_sockopt = 1;
>   			break;
> +#ifdef HAVE_LIBBPF
> +		case OPT_BPF_MAPS:
> +			if (bpf_map_opts_add_all())
> +				exit(1);
> +			break;
> +		case OPT_BPF_MAP_ID:
> +			if (bpf_map_opts_add_id(optarg))
> +				exit(1);
> +			break;
> +#endif
>   		case 'h':
>   			help();
>   		case '?':
> @@ -5810,6 +6090,9 @@ int main(int argc, char *argv[])
>   	if (!(current_filter.states & (current_filter.states - 1)))
>   		columns[COL_STATE].disabled = 1;
>   
> +	if (bpf_map_opts.nr_maps)

same here when HAVE_LIBBPF is not set

> +		columns[COL_SKSTOR].disabled = 0;
> +
>   	if (show_header)
>   		print_header();
>   
> @@ -5845,6 +6128,7 @@ int main(int argc, char *argv[])
>   
>   	if (show_processes || show_threads || show_proc_ctx || show_sock_ctx)
>   		user_ent_destroy();
> +	bpf_map_opts_destroy();

same here.

An #ifdef is needed.
Another option is to create an empty or always-return-false function.
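
For example, a sketch of that second option could hang an #else branch
off the existing #ifdef HAVE_LIBBPF block (stub names follow the helpers
added in this patch):

#else /* !HAVE_LIBBPF */

static bool bpf_map_opts_is_enabled(void)
{
	return false;
}

static void bpf_map_opts_destroy(void)
{
}

#endif

The direct bpf_map_opts.nr_maps access above would still need the #ifdef
(or a small wrapper).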

>   
>   	render();
>   


* Re: [PATCH v2 2/2] ss: pretty-print BPF socket-local storage
  2023-12-08 14:57 ` [PATCH v2 2/2] ss: pretty-print " Quentin Deslandes
@ 2023-12-12 22:58   ` Martin KaFai Lau
  2023-12-13 17:46   ` Alan Maguire
  1 sibling, 0 replies; 6+ messages in thread
From: Martin KaFai Lau @ 2023-12-12 22:58 UTC (permalink / raw)
  To: Quentin Deslandes; +Cc: David Ahern, Martin KaFai Lau, kernel-team, netdev

On 12/8/23 6:57 AM, Quentin Deslandes wrote:
> @@ -1039,11 +1044,10 @@ static int buf_update(int len)
>   }
>   
>   /* Append content to buffer as part of the current field */
> -__attribute__((format(printf, 1, 2)))
> -static void out(const char *fmt, ...)
> +static void vout(const char *fmt, va_list args)
>   {
>   	struct column *f = current_field;
> -	va_list args;
> +	va_list _args;
>   	char *pos;
>   	int len;
>   
> @@ -1054,18 +1058,27 @@ static void out(const char *fmt, ...)
>   		buffer.head = buf_chunk_new();
>   
>   again:	/* Append to buffer: if we have a new chunk, print again */
> +	va_copy(_args, args);
>   
>   	pos = buffer.cur->data + buffer.cur->len;
> -	va_start(args, fmt);
>   
>   	/* Limit to tail room. If we hit the limit, buf_update() will tell us */
>   	len = vsnprintf(pos, buf_chunk_avail(buffer.tail), fmt, args);

hmm... based on the changes made in this function, I am pretty sure the 
intention is to pass the "_args" here instead of passing the "args". Please 
double check.
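
Something like this for the retry loop, I believe, so that every pass
consumes a fresh copy and the copy is released before retrying:

again:	/* Append to buffer: if we have a new chunk, print again */
	va_copy(_args, args);

	pos = buffer.cur->data + buffer.cur->len;

	/* Limit to tail room. If we hit the limit, buf_update() will tell us */
	len = vsnprintf(pos, buf_chunk_avail(buffer.tail), fmt, _args);
	va_end(_args);

	if (buf_update(len))
		goto again;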

> -	va_end(args);
>   
>   	if (buf_update(len))
>   		goto again;
>   }
>   
> +__attribute__((format(printf, 1, 2)))
> +static void out(const char *fmt, ...)
> +{
> +	va_list args;
> +
> +	va_start(args, fmt);
> +	vout(fmt, args);
> +	va_end(args);
> +}
> +
>   static int print_left_spacing(struct column *f, int stored, int printed)
>   {
>   	int s;
> @@ -1213,6 +1226,9 @@ static void render_calc_width(void)
>   		 */
>   		c->width = min(c->width, screen_width);
>   
> +		if (c == &columns[COL_SKSTOR])
> +			c->width = 1;
> +
>   		if (c->width)
>   			first = 0;
>   	}
> @@ -3392,6 +3408,8 @@ static struct bpf_map_opts {
>   	struct bpf_sk_storage_map_info {
>   		unsigned int id;
>   		int fd;
> +		struct bpf_map_info info;
> +		struct btf *btf;
>   	} maps[MAX_NR_BPF_MAP_ID_OPTS];
>   	bool show_all;
>   	struct btf *kernel_btf;
> @@ -3403,6 +3421,22 @@ static void bpf_map_opts_mixed_error(void)
>   		"ss: --bpf-maps and --bpf-map-id cannot be used together\n");
>   }
>   
> +static int bpf_maps_opts_load_btf(struct bpf_map_info *info, struct btf **btf)
> +{
> +	if (info->btf_value_type_id) {
> +		*btf = btf__load_from_kernel_by_id(info->btf_id);
> +		if (!*btf) {
> +			fprintf(stderr, "ss: failed to load BTF for map ID %u\n",
> +				info->id);
> +			return -1;
> +		}
> +	} else {
> +		*btf = NULL;
> +	}
> +
> +	return 0;
> +}
> +
>   static int bpf_map_opts_add_all(void)
>   {
>   	unsigned int i;
> @@ -3418,6 +3452,7 @@ static int bpf_map_opts_add_all(void)
>   	while (1) {
>   		struct bpf_map_info info = {};
>   		uint32_t len = sizeof(info);
> +		struct btf *btf;
>   
>   		r = bpf_map_get_next_id(id, &id);
>   		if (r) {
> @@ -3462,8 +3497,18 @@ static int bpf_map_opts_add_all(void)
>   			continue;
>   		}
>   
> +		r = bpf_maps_opts_load_btf(&info, &btf);
> +		if (r) {
> +			fprintf(stderr, "ss: failed to get BTF data for BPF map ID: %u\n",
> +				id);
> +			close(fd);
> +			goto err;
> +		}
> +
>   		bpf_map_opts.maps[bpf_map_opts.nr_maps].id = id;
> -		bpf_map_opts.maps[bpf_map_opts.nr_maps++].fd = fd;
> +		bpf_map_opts.maps[bpf_map_opts.nr_maps].fd = fd;
> +		bpf_map_opts.maps[bpf_map_opts.nr_maps].info = info;
> +		bpf_map_opts.maps[bpf_map_opts.nr_maps++].btf = btf;

The "err:" path of this function needs a btf__free().
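
i.e. roughly mirroring what bpf_map_opts_destroy() does:

err:
	for (i = 0; i < bpf_map_opts.nr_maps; ++i) {
		btf__free(bpf_map_opts.maps[i].btf);
		close(bpf_map_opts.maps[i].fd);
	}

	return -1;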

>   	}
>   
>   	bpf_map_opts.show_all = true;
> @@ -3482,6 +3527,7 @@ static int bpf_map_opts_add_id(const char *optarg)
>   	struct bpf_map_info info = {};
>   	uint32_t len = sizeof(info);
>   	size_t optarg_len;
> +	struct btf *btf;
>   	unsigned long id;
>   	unsigned int i;
>   	char *end;
> @@ -3539,8 +3585,17 @@ static int bpf_map_opts_add_id(const char *optarg)
>   		return -1;
>   	}
>   
> +	r = bpf_maps_opts_load_btf(&info, &btf);
> +	if (r) {
> +		fprintf(stderr, "ss: failed to get BTF data for BPF map ID: %lu\n",
> +			id);

close(fd);

> +		return -1;
> +	}
> +
>   	bpf_map_opts.maps[bpf_map_opts.nr_maps].id = id;
> -	bpf_map_opts.maps[bpf_map_opts.nr_maps++].fd = fd;
> +	bpf_map_opts.maps[bpf_map_opts.nr_maps].fd = fd;
> +	bpf_map_opts.maps[bpf_map_opts.nr_maps].info = info;
> +	bpf_map_opts.maps[bpf_map_opts.nr_maps++].btf = btf;
>   
>   	return 0;
>   }



* Re: [PATCH v2 2/2] ss: pretty-print BPF socket-local storage
  2023-12-08 14:57 ` [PATCH v2 2/2] ss: pretty-print " Quentin Deslandes
  2023-12-12 22:58   ` Martin KaFai Lau
@ 2023-12-13 17:46   ` Alan Maguire
  1 sibling, 0 replies; 6+ messages in thread
From: Alan Maguire @ 2023-12-13 17:46 UTC (permalink / raw)
  To: Quentin Deslandes, netdev; +Cc: David Ahern, Martin KaFai Lau, kernel-team

On 08/12/2023 14:57, Quentin Deslandes wrote:
> ss is able to print the map ID(s) for which a given socket has BPF
> socket-local storage defined (using --bpf-maps or --bpf-map-id=). However,
> the actual content of the map remains hidden.
> 
> This change aims to pretty-print the socket-local storage content following
> the socket details, similar to what `bpftool map dump` would do. The exact
> output format is inspired by drgn, while the BTF data processing is similar
> to bpftool's.
> 
> ss will use libbpf's btf_dump__dump_type_data() to ease pretty-printing
> of binary data. This requires out_bpf_sk_storage_print_fn(), a print
> callback used by btf_dump__dump_type_data(). vout() is also introduced;
> it is similar to out() but takes a va_list parameter.
> 
> COL_SKSTOR's header is replaced with an empty string, as it doesn't need to
> be printed anymore; it's used as a "virtual" column to refer to the
> socket-local storage dump, which will be printed under the socket information.
> The column's width is fixed to 1, so it doesn't mess up ss' output.
> 
> ss' output remains unchanged unless --bpf-maps or --bpf-map-id= is used,
> in which case each socket containing BPF local storage will be followed by
> the content of the storage before the next socket's info is displayed.
> 

this is great! one small idea below, but

> Signed-off-by: Quentin Deslandes <qde@naccy.de>

Reviewed-by: Alan Maguire <alan.maguire@oracle.com>

> ---
>  misc/ss.c | 157 +++++++++++++++++++++++++++++++++++++++++++++++++-----
>  1 file changed, 145 insertions(+), 12 deletions(-)
> 
> diff --git a/misc/ss.c b/misc/ss.c
> index f979b61d..6051b694 100644
> --- a/misc/ss.c
> +++ b/misc/ss.c
> @@ -51,8 +51,13 @@
>  #include <linux/tls.h>
>  #include <linux/mptcp.h>
>  
> +#ifdef HAVE_LIBBPF
> +#include <linux/btf.h>
> +#endif
> +
>  #ifdef HAVE_LIBBPF
>  #include <bpf/bpf.h>
> +#include <bpf/btf.h>
>  #include <bpf/libbpf.h>
>  #endif
>  
> @@ -136,7 +141,7 @@ static struct column columns[] = {
>  	{ ALIGN_RIGHT,	"Peer Address:",	" ",	0, 0, 0 },
>  	{ ALIGN_LEFT,	"Port",			"",	0, 0, 0 },
>  	{ ALIGN_LEFT,	"Process",		"",	0, 0, 0 },
> -	{ ALIGN_LEFT,	"Socket storage",	"",	1, 0, 0 },
> +	{ ALIGN_LEFT,	"",			"",	1, 0, 0 },
>  	{ ALIGN_LEFT,	"",			"",	0, 0, 0 },
>  };
>  
> @@ -1039,11 +1044,10 @@ static int buf_update(int len)
>  }
>  
>  /* Append content to buffer as part of the current field */
> -__attribute__((format(printf, 1, 2)))
> -static void out(const char *fmt, ...)
> +static void vout(const char *fmt, va_list args)
>  {
>  	struct column *f = current_field;
> -	va_list args;
> +	va_list _args;
>  	char *pos;
>  	int len;
>  
> @@ -1054,18 +1058,27 @@ static void out(const char *fmt, ...)
>  		buffer.head = buf_chunk_new();
>  
>  again:	/* Append to buffer: if we have a new chunk, print again */
> +	va_copy(_args, args);
>  
>  	pos = buffer.cur->data + buffer.cur->len;
> -	va_start(args, fmt);
>  
>  	/* Limit to tail room. If we hit the limit, buf_update() will tell us */
>  	len = vsnprintf(pos, buf_chunk_avail(buffer.tail), fmt, args);
> -	va_end(args);
>  
>  	if (buf_update(len))
>  		goto again;
>  }
>  
> +__attribute__((format(printf, 1, 2)))
> +static void out(const char *fmt, ...)
> +{
> +	va_list args;
> +
> +	va_start(args, fmt);
> +	vout(fmt, args);
> +	va_end(args);
> +}
> +
>  static int print_left_spacing(struct column *f, int stored, int printed)
>  {
>  	int s;
> @@ -1213,6 +1226,9 @@ static void render_calc_width(void)
>  		 */
>  		c->width = min(c->width, screen_width);
>  
> +		if (c == &columns[COL_SKSTOR])
> +			c->width = 1;
> +
>  		if (c->width)
>  			first = 0;
>  	}
> @@ -3392,6 +3408,8 @@ static struct bpf_map_opts {
>  	struct bpf_sk_storage_map_info {
>  		unsigned int id;
>  		int fd;
> +		struct bpf_map_info info;
> +		struct btf *btf;
>  	} maps[MAX_NR_BPF_MAP_ID_OPTS];
>  	bool show_all;
>  	struct btf *kernel_btf;
> @@ -3403,6 +3421,22 @@ static void bpf_map_opts_mixed_error(void)
>  		"ss: --bpf-maps and --bpf-map-id cannot be used together\n");
>  }
>  
> +static int bpf_maps_opts_load_btf(struct bpf_map_info *info, struct btf **btf)
> +{
> +	if (info->btf_value_type_id) {
> +		*btf = btf__load_from_kernel_by_id(info->btf_id);
> +		if (!*btf) {
> +			fprintf(stderr, "ss: failed to load BTF for map ID %u\n",
> +				info->id);
> +			return -1;
> +		}
> +	} else {
> +		*btf = NULL;
> +	}
> +
> +	return 0;
> +}
> +
>  static int bpf_map_opts_add_all(void)
>  {
>  	unsigned int i;
> @@ -3418,6 +3452,7 @@ static int bpf_map_opts_add_all(void)
>  	while (1) {
>  		struct bpf_map_info info = {};
>  		uint32_t len = sizeof(info);
> +		struct btf *btf;
>  
>  		r = bpf_map_get_next_id(id, &id);
>  		if (r) {
> @@ -3462,8 +3497,18 @@ static int bpf_map_opts_add_all(void)
>  			continue;
>  		}
>  
> +		r = bpf_maps_opts_load_btf(&info, &btf);
> +		if (r) {
> +			fprintf(stderr, "ss: failed to get BTF data for BPF map ID: %u\n",
> +				id);
> +			close(fd);
> +			goto err;
> +		}
> +
>  		bpf_map_opts.maps[bpf_map_opts.nr_maps].id = id;
> -		bpf_map_opts.maps[bpf_map_opts.nr_maps++].fd = fd;
> +		bpf_map_opts.maps[bpf_map_opts.nr_maps].fd = fd;
> +		bpf_map_opts.maps[bpf_map_opts.nr_maps].info = info;
> +		bpf_map_opts.maps[bpf_map_opts.nr_maps++].btf = btf;
>  	}
>  
>  	bpf_map_opts.show_all = true;
> @@ -3482,6 +3527,7 @@ static int bpf_map_opts_add_id(const char *optarg)
>  	struct bpf_map_info info = {};
>  	uint32_t len = sizeof(info);
>  	size_t optarg_len;
> +	struct btf *btf;
>  	unsigned long id;
>  	unsigned int i;
>  	char *end;
> @@ -3539,8 +3585,17 @@ static int bpf_map_opts_add_id(const char *optarg)
>  		return -1;
>  	}
>  
> +	r = bpf_maps_opts_load_btf(&info, &btf);
> +	if (r) {
> +		fprintf(stderr, "ss: failed to get BTF data for BPF map ID: %lu\n",
> +			id);
> +		return -1;
> +	}
> +
>  	bpf_map_opts.maps[bpf_map_opts.nr_maps].id = id;
> -	bpf_map_opts.maps[bpf_map_opts.nr_maps++].fd = fd;
> +	bpf_map_opts.maps[bpf_map_opts.nr_maps].fd = fd;
> +	bpf_map_opts.maps[bpf_map_opts.nr_maps].info = info;
> +	bpf_map_opts.maps[bpf_map_opts.nr_maps++].btf = btf;
>  
>  	return 0;
>  }
> @@ -3549,8 +3604,23 @@ static void bpf_map_opts_destroy(void)
>  {
>  	int i;
>  	
> -	for (i = 0; i < bpf_map_opts.nr_maps; ++i)
> +	for (i = 0; i < bpf_map_opts.nr_maps; ++i) {
> +		btf__free(bpf_map_opts.maps[i].btf);
>  		close(bpf_map_opts.maps[i].fd);
> +	}
> +}
> +
> +static const struct bpf_sk_storage_map_info *bpf_map_opts_get_info(
> +	unsigned int map_id)
> +{
> +	unsigned int i;
> +
> +	for (i = 0; i < bpf_map_opts.nr_maps; ++i) {
> +		if (bpf_map_opts.maps[i].id == map_id)
> +			return &bpf_map_opts.maps[i];
> +	}
> +
> +	return NULL;
>  }
>  
>  static inline bool bpf_map_opts_is_enabled(void)
> @@ -3590,10 +3660,63 @@ static struct rtattr *bpf_map_opts_alloc_rta(void)
>  	return stgs_rta;
>  }
>  
> +static void out_bpf_sk_storage_print_fn(void *ctx, const char *fmt, va_list args)
> +{
> +	vout(fmt, args);
> +}
> +
> +#define SK_STORAGE_INDENT_STR "    "
> +
> +static void out_bpf_sk_storage(int map_id, const void *data, size_t len)
> +{
> +	uint32_t type_id;
> +	const struct bpf_sk_storage_map_info *map_info;
> +	struct btf_dump *dump;
> +	struct btf_dump_type_data_opts opts = {
> +		.sz = sizeof(struct btf_dump_type_data_opts),
> +		.indent_str = SK_STORAGE_INDENT_STR,
> +		.indent_level = 2,
> +		.emit_zeroes = 1
> +	};
> +	struct btf_dump_opts dopts = {
> +		.sz = sizeof(struct btf_dump_opts)
> +	};
> +	int r;
> +
> +	map_info = bpf_map_opts_get_info(map_id);
> +	if (!map_info) {
> +		fprintf(stderr, "map_id: %d: missing map info", map_id);
> +		return;
> +	}
> +
> +	if (map_info->info.value_size != len) {
> +		fprintf(stderr, "map_id: %d: invalid value size, expecting %u, got %lu\n",
> +			map_id, map_info->info.value_size, len);
> +		return;
> +	}
> +
> +	type_id = map_info->info.btf_value_type_id;
> +
> +	dump = btf_dump__new(map_info->btf, out_bpf_sk_storage_print_fn, NULL, &dopts);
> +	if (!dump) {
> +		fprintf(stderr, "Failed to create btf_dump object\n");
> +		return;
> +	}
> +
> +	out(SK_STORAGE_INDENT_STR "map_id: %d [\n", map_id);
> +	r = btf_dump__dump_type_data(dump, type_id, data, len, &opts);
> +	if (r < 0)
> +		out(SK_STORAGE_INDENT_STR SK_STORAGE_INDENT_STR "failed to dump data: %d", r);
> +	out("\n" SK_STORAGE_INDENT_STR "]");
> +
> +	btf_dump__free(dump);
> +}
> +
>  static void show_sk_bpf_storages(struct rtattr *bpf_stgs)
>  {
>  	struct rtattr *tb[SK_DIAG_BPF_STORAGE_MAX + 1], *bpf_stg;
> -	unsigned int rem;
> +	unsigned int rem, map_id;
> +	struct rtattr *value;
>  
>  	for (bpf_stg = RTA_DATA(bpf_stgs), rem = RTA_PAYLOAD(bpf_stgs);
>  		RTA_OK(bpf_stg, rem); bpf_stg = RTA_NEXT(bpf_stg, rem)) {
> @@ -3605,8 +3728,13 @@ static void show_sk_bpf_storages(struct rtattr *bpf_stgs)
>  			(struct rtattr *)bpf_stg);
>  
>  		if (tb[SK_DIAG_BPF_STORAGE_MAP_ID]) {
> -			out("map_id:%u",
> -				rta_getattr_u32(tb[SK_DIAG_BPF_STORAGE_MAP_ID]));
> +			out("\n");
> +
> +			map_id = rta_getattr_u32(tb[SK_DIAG_BPF_STORAGE_MAP_ID]);
> +			value = tb[SK_DIAG_BPF_STORAGE_MAP_VALUE];
> +
> +			out_bpf_sk_storage(map_id, RTA_DATA(value),
> +				RTA_PAYLOAD(value));
>  		}
>  	}
>  }
> @@ -6000,6 +6128,11 @@ int main(int argc, char *argv[])
>  		}
>  	}
>  
> +	if (oneline && (bpf_map_opts.nr_maps || bpf_map_opts.show_all)) {
> +		fprintf(stderr, "ss: --oneline, --bpf-maps, and --bpf-map-id are incompatible\n");
> +		exit(-1);
> +	}
> +

I guess it would be possible to provide oneline output if we used
compact-mode + omit-zeros BTF data dumping. Did you try that or is the
output just too cluttered?
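
If you want to experiment with it, something along these lines should do
(field names as in libbpf's struct btf_dump_type_data_opts):

	struct btf_dump_type_data_opts opts = {
		.sz = sizeof(struct btf_dump_type_data_opts),
		.compact = 1,		/* no newlines/indentation */
		.emit_zeroes = 0,	/* omit zero-valued members (the default) */
	};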

>  	if (show_processes || show_threads || show_proc_ctx || show_sock_ctx)
>  		user_ent_hash_build();
>  
