* [PATCH v5 0/5] relayfs: misc changes
From: Jason Xing @ 2025-06-12 6:11 UTC (permalink / raw)
To: axboe, rostedt, mhiramat, mathieu.desnoyers, akpm
Cc: linux-kernel, linux-block, linux-trace-kernel, Jason Xing
From: Jason Xing <kernelxing@tencent.com>
The series mostly focuses on error counters, which help users debug
their own kernel modules.
---
v5
Link: https://lore.kernel.org/all/20250610004844.66688-1-kerneljasonxing@gmail.com/
1. add Masami's reviewed-by in the first patch
2. fix the wrong printk format specifiers in patch [4/5]
v4
Link: https://lore.kernel.org/all/20250518025734.61479-1-kerneljasonxing@gmail.com/
1. add [1] as the first/prep/clean-up commit in the series.
2. the remaining four patches are untouched compared to the previous version.
3. add Masami's reviewed-by tags to the last four patches.
[1]: https://lore.kernel.org/all/20250507134225.63248-1-kerneljasonxing@gmail.com/
Jason Xing (5):
relayfs: abolish prev_padding
relayfs: support a counter tracking if per-cpu buffers is full
relayfs: introduce getting relayfs statistics function
blktrace: use rbuf->stats.full as a drop indicator in relayfs
relayfs: support a counter tracking if data is too big to write
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 3 +-
drivers/net/wwan/iosm/iosm_ipc_trace.c | 3 +-
drivers/net/wwan/t7xx/t7xx_port_trace.c | 2 +-
include/linux/relay.h | 24 ++++++--
kernel/relay.c | 66 +++++++++++++++++-----
kernel/trace/blktrace.c | 22 +-------
6 files changed, 77 insertions(+), 43 deletions(-)
--
2.43.5
* [PATCH v5 1/5] relayfs: abolish prev_padding
From: Jason Xing @ 2025-06-12 6:11 UTC (permalink / raw)
To: axboe, rostedt, mhiramat, mathieu.desnoyers, akpm
Cc: linux-kernel, linux-block, linux-trace-kernel, Jason Xing,
Yushan Zhou
From: Jason Xing <kernelxing@tencent.com>
prev_padding represents the unused space at the end of a sub-buffer. If
the data passed in one relay_write() call exceeds the space remaining in
the current sub-buffer, relay_switch_subbuf() skips the rest of that
sub-buffer and records the amount of skipped space as buf->prev_padding.
Since buf is one big per-cpu buffer, keeping prev_padding as a global
value for the whole buffer rather than for a single sub-buffer (whose
padding info is already stored in buf->padding[]) has no real use case,
so don't bother recording it any more.
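For relay clients, the visible change is the subbuf_start() signature,
which loses its prev_padding argument. A minimal sketch of a
no-overwrite callback after this change (the example_* names are
illustrative, not part of this series):

#include <linux/relay.h>
#include <linux/printk.h>

/*
 * Hypothetical relay client: keep the default no-overwrite policy and
 * drop new data once the buffer is full. Note the callback no longer
 * receives a prev_padding argument after this patch.
 */
static int example_subbuf_start(struct rchan_buf *buf, void *subbuf,
				void *prev_subbuf)
{
	if (relay_buf_full(buf)) {
		pr_warn_ratelimited("example: relay buffer full, dropping\n");
		return 0;	/* stop logging into this sub-buffer */
	}
	return 1;		/* continue logging */
}

static const struct rchan_callbacks example_relay_callbacks = {
	.subbuf_start	= example_subbuf_start,
	/* .create_buf_file / .remove_buf_file omitted for brevity */
};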
Reviewed-by: Yushan Zhou <katrinzhou@tencent.com>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Jason Xing <kernelxing@tencent.com>
---
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 3 +--
drivers/net/wwan/iosm/iosm_ipc_trace.c | 3 +--
drivers/net/wwan/t7xx/t7xx_port_trace.c | 2 +-
include/linux/relay.h | 5 +----
kernel/relay.c | 14 ++++++++------
kernel/trace/blktrace.c | 2 +-
6 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index e8a04e476c57..09a64f224c49 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -220,8 +220,7 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable,
*/
static int subbuf_start_callback(struct rchan_buf *buf,
void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
+ void *prev_subbuf)
{
/*
* Use no-overwrite mode by default, where relay will stop accepting
diff --git a/drivers/net/wwan/iosm/iosm_ipc_trace.c b/drivers/net/wwan/iosm/iosm_ipc_trace.c
index eeecfa3d10c5..9656254c1c6c 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_trace.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_trace.c
@@ -51,8 +51,7 @@ static int ipc_trace_remove_buf_file_handler(struct dentry *dentry)
}
static int ipc_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
- void *prev_subbuf,
- size_t prev_padding)
+ void *prev_subbuf)
{
if (relay_buf_full(buf)) {
pr_err_ratelimited("Relay_buf full dropping traces");
diff --git a/drivers/net/wwan/t7xx/t7xx_port_trace.c b/drivers/net/wwan/t7xx/t7xx_port_trace.c
index 4ed8b4e29bf1..f16d3b01302c 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_trace.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_trace.c
@@ -33,7 +33,7 @@ static int t7xx_trace_remove_buf_file_handler(struct dentry *dentry)
}
static int t7xx_trace_subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
- void *prev_subbuf, size_t prev_padding)
+ void *prev_subbuf)
{
if (relay_buf_full(buf)) {
pr_err_ratelimited("Relay_buf full dropping traces");
diff --git a/include/linux/relay.h b/include/linux/relay.h
index b3224111d074..e10a0fdf4325 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -47,7 +47,6 @@ struct rchan_buf
unsigned int page_count; /* number of current buffer pages */
unsigned int finalized; /* buffer has been finalized */
size_t *padding; /* padding counts per sub-buffer */
- size_t prev_padding; /* temporary variable */
size_t bytes_consumed; /* bytes consumed in cur read subbuf */
size_t early_bytes; /* bytes consumed before VFS inited */
unsigned int cpu; /* this buf's cpu */
@@ -84,7 +83,6 @@ struct rchan_callbacks
* @buf: the channel buffer containing the new sub-buffer
* @subbuf: the start of the new sub-buffer
* @prev_subbuf: the start of the previous sub-buffer
- * @prev_padding: unused space at the end of previous sub-buffer
*
* The client should return 1 to continue logging, 0 to stop
* logging.
@@ -100,8 +98,7 @@ struct rchan_callbacks
*/
int (*subbuf_start) (struct rchan_buf *buf,
void *subbuf,
- void *prev_subbuf,
- size_t prev_padding);
+ void *prev_subbuf);
/*
* create_buf_file - create file to represent a relay channel buffer
diff --git a/kernel/relay.c b/kernel/relay.c
index c0c93a04d4ce..94f79f52d826 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -250,13 +250,13 @@ EXPORT_SYMBOL_GPL(relay_buf_full);
*/
static int relay_subbuf_start(struct rchan_buf *buf, void *subbuf,
- void *prev_subbuf, size_t prev_padding)
+ void *prev_subbuf)
{
if (!buf->chan->cb->subbuf_start)
return !relay_buf_full(buf);
return buf->chan->cb->subbuf_start(buf, subbuf,
- prev_subbuf, prev_padding);
+ prev_subbuf);
}
/**
@@ -302,7 +302,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
for (i = 0; i < buf->chan->n_subbufs; i++)
buf->padding[i] = 0;
- relay_subbuf_start(buf, buf->data, NULL, 0);
+ relay_subbuf_start(buf, buf->data, NULL);
}
/**
@@ -555,9 +555,11 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
goto toobig;
if (buf->offset != buf->chan->subbuf_size + 1) {
- buf->prev_padding = buf->chan->subbuf_size - buf->offset;
+ size_t prev_padding;
+
+ prev_padding = buf->chan->subbuf_size - buf->offset;
old_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
- buf->padding[old_subbuf] = buf->prev_padding;
+ buf->padding[old_subbuf] = prev_padding;
buf->subbufs_produced++;
if (buf->dentry)
d_inode(buf->dentry)->i_size +=
@@ -582,7 +584,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
new_subbuf = buf->subbufs_produced % buf->chan->n_subbufs;
new = buf->start + new_subbuf * buf->chan->subbuf_size;
buf->offset = 0;
- if (!relay_subbuf_start(buf, new, old, buf->prev_padding)) {
+ if (!relay_subbuf_start(buf, new, old)) {
buf->offset = buf->chan->subbuf_size + 1;
return 0;
}
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 3f6a7bdc6edf..d3083c88474e 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -461,7 +461,7 @@ static const struct file_operations blk_msg_fops = {
* the user space app in telling how many lost events there were.
*/
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
- void *prev_subbuf, size_t prev_padding)
+ void *prev_subbuf)
{
struct blk_trace *bt;
--
2.43.5
* [PATCH v5 2/5] relayfs: support a counter tracking if per-cpu buffers is full
From: Jason Xing @ 2025-06-12 6:11 UTC (permalink / raw)
To: axboe, rostedt, mhiramat, mathieu.desnoyers, akpm
Cc: linux-kernel, linux-block, linux-trace-kernel, Jason Xing,
Yushan Zhou
From: Jason Xing <kernelxing@tencent.com>
When using the relay mechanism, we often encounter cases where new data
are lost or old unconsumed data are overwritten because of a slow
reader. Add a 'full' counter to the per-cpu buffer structure to detect
when this happens. Relay has two modes: 1) non-overwrite mode and
2) overwrite mode. A full buffer respectively means: 1) relayfs refuses
to accept new data and simply drops it, or 2) relayfs starts over and
overwrites old unread data with new data.
Note: for performance reasons, this counter is not protected by any
explicit lock against concurrent modification. Writers calling
__relay_write()/relay_write() are expected to provide their own
serialization, so adding a new dedicated lock here is unnecessary.
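As a sketch of what writer-side serialization could look like in
practice (the lock and helper below are hypothetical, not part of this
patch), a module might wrap its writes like this:

#include <linux/relay.h>
#include <linux/spinlock.h>

/*
 * Hypothetical writer: events from several contexts are serialized by
 * the module's own lock, so the full_count update performed inside
 * relay_subbuf_start() is never done concurrently for this channel.
 */
static DEFINE_SPINLOCK(example_trace_lock);

static void example_trace_event(struct rchan *chan, const void *data,
				size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&example_trace_lock, flags);
	__relay_write(chan, data, len);		/* may bump stats.full_count */
	spin_unlock_irqrestore(&example_trace_lock, flags);
}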
Reviewed-by: Yushan Zhou <katrinzhou@tencent.com>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Jason Xing <kernelxing@tencent.com>
---
include/linux/relay.h | 9 +++++++++
kernel/relay.c | 8 +++++++-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/include/linux/relay.h b/include/linux/relay.h
index e10a0fdf4325..cd77eb285a48 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -28,6 +28,14 @@
*/
#define RELAYFS_CHANNEL_VERSION 7
+/*
+ * Relay buffer statistics
+ */
+struct rchan_buf_stats
+{
+ unsigned int full_count; /* counter for buffer full */
+};
+
/*
* Per-cpu relay channel buffer
*/
@@ -43,6 +51,7 @@ struct rchan_buf
struct irq_work wakeup_work; /* reader wakeup */
struct dentry *dentry; /* channel file dentry */
struct kref kref; /* channel buffer refcount */
+ struct rchan_buf_stats stats; /* buffer stats */
struct page **page_array; /* array of current buffer pages */
unsigned int page_count; /* number of current buffer pages */
unsigned int finalized; /* buffer has been finalized */
diff --git a/kernel/relay.c b/kernel/relay.c
index 94f79f52d826..eb3f630f3896 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -252,8 +252,13 @@ EXPORT_SYMBOL_GPL(relay_buf_full);
static int relay_subbuf_start(struct rchan_buf *buf, void *subbuf,
void *prev_subbuf)
{
+ int full = relay_buf_full(buf);
+
+ if (full)
+ buf->stats.full_count++;
+
if (!buf->chan->cb->subbuf_start)
- return !relay_buf_full(buf);
+ return !full;
return buf->chan->cb->subbuf_start(buf, subbuf,
prev_subbuf);
@@ -298,6 +303,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
buf->finalized = 0;
buf->data = buf->start;
buf->offset = 0;
+ buf->stats.full_count = 0;
for (i = 0; i < buf->chan->n_subbufs; i++)
buf->padding[i] = 0;
--
2.43.5
* [PATCH v5 3/5] relayfs: introduce getting relayfs statistics function
From: Jason Xing @ 2025-06-12 6:11 UTC (permalink / raw)
To: axboe, rostedt, mhiramat, mathieu.desnoyers, akpm
Cc: linux-kernel, linux-block, linux-trace-kernel, Jason Xing,
Yushan Zhou
From: Jason Xing <kernelxing@tencent.com>
For now, only support reading the buffer-full counter and implement the
framework around it.
Users pass a flag selecting which field/statistic they want to fetch.
Each call returns only one result, so do not pass multiple flags.
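A sketch of how a client module could consume the new helper, modelled
on the debugfs 'dropped' file that blktrace switches to in the next
patch (the example_* names are illustrative):

#include <linux/kernel.h>
#include <linux/relay.h>
#include <linux/fs.h>

/*
 * Hypothetical debugfs read handler: report how many times this
 * channel's per-cpu buffers were found full.
 */
static ssize_t example_dropped_read(struct file *filp, char __user *buffer,
				    size_t count, loff_t *ppos)
{
	struct rchan *chan = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%zu\n",
		 relay_stats(chan, RELAY_STATS_BUF_FULL));
	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}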
Reviewed-by: Yushan Zhou <katrinzhou@tencent.com>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Jason Xing <kernelxing@tencent.com>
---
include/linux/relay.h | 7 +++++++
kernel/relay.c | 30 ++++++++++++++++++++++++++++++
2 files changed, 37 insertions(+)
diff --git a/include/linux/relay.h b/include/linux/relay.h
index cd77eb285a48..5310967f9d74 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -31,6 +31,12 @@
/*
* Relay buffer statistics
*/
+enum {
+ RELAY_STATS_BUF_FULL = (1 << 0),
+
+ RELAY_STATS_LAST = RELAY_STATS_BUF_FULL,
+};
+
struct rchan_buf_stats
{
unsigned int full_count; /* counter for buffer full */
@@ -167,6 +173,7 @@ struct rchan *relay_open(const char *base_filename,
void *private_data);
extern void relay_close(struct rchan *chan);
extern void relay_flush(struct rchan *chan);
+size_t relay_stats(struct rchan *chan, int flags);
extern void relay_subbufs_consumed(struct rchan *chan,
unsigned int cpu,
size_t consumed);
diff --git a/kernel/relay.c b/kernel/relay.c
index eb3f630f3896..fd70d0e03216 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -701,6 +701,36 @@ void relay_flush(struct rchan *chan)
}
EXPORT_SYMBOL_GPL(relay_flush);
+/**
+ * relay_stats - get channel buffer statistics
+ * @chan: the channel
+ * @flags: select particular information to get
+ *
+ * Returns the count of certain field that caller specifies.
+ */
+size_t relay_stats(struct rchan *chan, int flags)
+{
+ unsigned int i, count = 0;
+ struct rchan_buf *rbuf;
+
+ if (!chan || flags > RELAY_STATS_LAST)
+ return 0;
+
+ if (chan->is_global) {
+ rbuf = *per_cpu_ptr(chan->buf, 0);
+ if (flags & RELAY_STATS_BUF_FULL)
+ count = rbuf->stats.full_count;
+ } else {
+ for_each_online_cpu(i) {
+ rbuf = *per_cpu_ptr(chan->buf, i);
+ if (rbuf && flags & RELAY_STATS_BUF_FULL)
+ count += rbuf->stats.full_count;
+ }
+ }
+
+ return count;
+}
+
/**
* relay_file_open - open file op for relay files
* @inode: the inode
--
2.43.5
* [PATCH v5 4/5] blktrace: use rbuf->stats.full as a drop indicator in relayfs
From: Jason Xing @ 2025-06-12 6:12 UTC (permalink / raw)
To: axboe, rostedt, mhiramat, mathieu.desnoyers, akpm
Cc: linux-kernel, linux-block, linux-trace-kernel, Jason Xing,
Yushan Zhou
From: Jason Xing <kernelxing@tencent.com>
Replace the internal subbuf_start callback in blktrace with the default
policy in relayfs.
Remove the 'dropped' counter from struct blk_trace and call the common
relay helper instead. Since relayfs increments full_count every time a
full buffer is encountered, user space can still tell how many events
were lost.
Reviewed-by: Yushan Zhou <katrinzhou@tencent.com>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Jason Xing <kernelxing@tencent.com>
---
kernel/trace/blktrace.c | 22 ++--------------------
1 file changed, 2 insertions(+), 20 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index d3083c88474e..5401b9006135 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -415,9 +415,10 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct blk_trace *bt = filp->private_data;
+ size_t dropped = relay_stats(bt->rchan, RELAY_STATS_BUF_FULL);
char buf[16];
- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+ snprintf(buf, sizeof(buf), "%zu\n", dropped);
return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}
@@ -456,23 +457,6 @@ static const struct file_operations blk_msg_fops = {
.llseek = noop_llseek,
};
-/*
- * Keep track of how many times we encountered a full subbuffer, to aid
- * the user space app in telling how many lost events there were.
- */
-static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
- void *prev_subbuf)
-{
- struct blk_trace *bt;
-
- if (!relay_buf_full(buf))
- return 1;
-
- bt = buf->chan->private_data;
- atomic_inc(&bt->dropped);
- return 0;
-}
-
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
debugfs_remove(dentry);
@@ -491,7 +475,6 @@ static struct dentry *blk_create_buf_file_callback(const char *filename,
}
static const struct rchan_callbacks blk_relay_callbacks = {
- .subbuf_start = blk_subbuf_start_callback,
.create_buf_file = blk_create_buf_file_callback,
.remove_buf_file = blk_remove_buf_file_callback,
};
@@ -580,7 +563,6 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
}
bt->dev = dev;
- atomic_set(&bt->dropped, 0);
INIT_LIST_HEAD(&bt->running_list);
ret = -EIO;
--
2.43.5
* [PATCH v5 5/5] relayfs: support a counter tracking if data is too big to write
From: Jason Xing @ 2025-06-12 6:12 UTC (permalink / raw)
To: axboe, rostedt, mhiramat, mathieu.desnoyers, akpm
Cc: linux-kernel, linux-block, linux-trace-kernel, Jason Xing,
Yushan Zhou
From: Jason Xing <kernelxing@tencent.com>
It doesn't really matter whether the user/admin knows what the last
too-big value was; recording how many times this case is triggered is
more helpful. This also solves the existing issue where relay_reset()
doesn't reset the value.
Store the counter in the per-cpu buffer structure instead of the global
channel structure. That avoids the race that can occur when several
per-cpu buffers hit the too-big case and update the global last_toobig
field without lock protection.
Remove the printk in relay_close(), since kernel modules can call
relay_stats() directly whenever they want.
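With the printk gone, a module that cares about oversized writes can
poll the counter itself; a minimal sketch (the helper name and message
are illustrative):

#include <linux/relay.h>
#include <linux/printk.h>

/*
 * Hypothetical check: warn (ratelimited) when writes larger than the
 * sub-buffer size have been dropped on this channel.
 */
static void example_check_toobig(struct rchan *chan)
{
	size_t big = relay_stats(chan, RELAY_STATS_WRT_BIG);

	if (big)
		pr_warn_ratelimited("example: %zu writes exceeded sub-buffer size (%zu)\n",
				    big, chan->subbuf_size);
}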
Reviewed-by: Yushan Zhou <katrinzhou@tencent.com>
Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Jason Xing <kernelxing@tencent.com>
---
include/linux/relay.h | 5 +++--
kernel/relay.c | 18 ++++++++++--------
2 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/include/linux/relay.h b/include/linux/relay.h
index 5310967f9d74..6772a7075840 100644
--- a/include/linux/relay.h
+++ b/include/linux/relay.h
@@ -33,13 +33,15 @@
*/
enum {
RELAY_STATS_BUF_FULL = (1 << 0),
+ RELAY_STATS_WRT_BIG = (1 << 1),
- RELAY_STATS_LAST = RELAY_STATS_BUF_FULL,
+ RELAY_STATS_LAST = RELAY_STATS_WRT_BIG,
};
struct rchan_buf_stats
{
unsigned int full_count; /* counter for buffer full */
+ unsigned int big_count; /* counter for too big to write */
};
/*
@@ -79,7 +81,6 @@ struct rchan
const struct rchan_callbacks *cb; /* client callbacks */
struct kref kref; /* channel refcount */
void *private_data; /* for user-defined data */
- size_t last_toobig; /* tried to log event > subbuf size */
struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
int is_global; /* One global buffer ? */
struct list_head list; /* for channel list */
diff --git a/kernel/relay.c b/kernel/relay.c
index fd70d0e03216..bcc889dc0970 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -304,6 +304,7 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
buf->data = buf->start;
buf->offset = 0;
buf->stats.full_count = 0;
+ buf->stats.big_count = 0;
for (i = 0; i < buf->chan->n_subbufs; i++)
buf->padding[i] = 0;
@@ -603,7 +604,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
return length;
toobig:
- buf->chan->last_toobig = length;
+ buf->stats.big_count++;
return 0;
}
EXPORT_SYMBOL_GPL(relay_switch_subbuf);
@@ -663,11 +664,6 @@ void relay_close(struct rchan *chan)
if ((buf = *per_cpu_ptr(chan->buf, i)))
relay_close_buf(buf);
- if (chan->last_toobig)
- printk(KERN_WARNING "relay: one or more items not logged "
- "[item size (%zd) > sub-buffer size (%zd)]\n",
- chan->last_toobig, chan->subbuf_size);
-
list_del(&chan->list);
kref_put(&chan->kref, relay_destroy_channel);
mutex_unlock(&relay_channels_mutex);
@@ -720,11 +716,17 @@ size_t relay_stats(struct rchan *chan, int flags)
rbuf = *per_cpu_ptr(chan->buf, 0);
if (flags & RELAY_STATS_BUF_FULL)
count = rbuf->stats.full_count;
+ else if (flags & RELAY_STATS_WRT_BIG)
+ count = rbuf->stats.big_count;
} else {
for_each_online_cpu(i) {
rbuf = *per_cpu_ptr(chan->buf, i);
- if (rbuf && flags & RELAY_STATS_BUF_FULL)
- count += rbuf->stats.full_count;
+ if (rbuf) {
+ if (flags & RELAY_STATS_BUF_FULL)
+ count += rbuf->stats.full_count;
+ else if (flags & RELAY_STATS_WRT_BIG)
+ count += rbuf->stats.big_count;
+ }
}
}
--
2.43.5