From: Vincent Donnefort <vdonnefort@google.com>
To: Steven Rostedt <rostedt@goodmis.org>
Cc: Linux Trace Devel <linux-trace-devel@vger.kernel.org>
Subject: Re: [PATCH] libtracefs: Add ring buffer memory mapping APIs
Date: Fri, 5 Jan 2024 09:17:53 +0000
Message-ID: <ZZfJQTOyl0dHiTU-@google.com>
In-Reply-To: <20231228201100.78aae259@rorschach.local.home>

[...]

> +EXAMPLE
> +-------
> +[source,c]
> +--
> +#include <stdlib.h>
> +#include <ctype.h>
> +#include <tracefs.h>
> +
> +static void read_page(struct tep_handle *tep, struct kbuffer *kbuf)

read_subbuf? It processes a whole sub-buffer, not a single page.

> +{
> +	static struct trace_seq seq;
> +	struct tep_record record;
> +
> +	if (seq.buffer)
> +		trace_seq_reset(&seq);
> +	else
> +		trace_seq_init(&seq);
> +
> +	while ((record.data = kbuffer_read_event(kbuf, &record.ts))) {
> +		record.size = kbuffer_event_size(kbuf);
> +		kbuffer_next_event(kbuf, NULL);
> +		tep_print_event(tep, &seq, &record,
> +				"%s-%d %9d\t%s: %s\n",
> +				TEP_PRINT_COMM,
> +				TEP_PRINT_PID,
> +				TEP_PRINT_TIME,
> +				TEP_PRINT_NAME,
> +				TEP_PRINT_INFO);
> +		trace_seq_do_printf(&seq);
> +		trace_seq_reset(&seq);
> +	}
> +}
> +

[...]

> +__hidden void *trace_mmap(int fd, struct kbuffer *kbuf)
> +{
> +	struct trace_mmap *tmap;
> +	int page_size;
> +	void *meta;
> +	void *data;
> +
> +	page_size = getpagesize();
> +	meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
> +	if (meta == MAP_FAILED)
> +		return NULL;
> +
> +	tmap = calloc(1, sizeof(*tmap));
> +	if (!tmap) {
> +		munmap(meta, page_size);
> +		return NULL;
> +	}
> +
> +	tmap->kbuf = kbuffer_dup(kbuf);
> +	if (!tmap->kbuf) {
> +		munmap(meta, page_size);
> +		free(tmap);
> +		return NULL;
> +	}
> +
> +	tmap->fd = fd;
> +
> +	tmap->map = meta;
> +	tmap->meta_len = tmap->map->meta_page_size;
> +
> +	if (tmap->meta_len > page_size) {
> +		munmap(meta, page_size);
> +		meta = mmap(NULL, tmap->meta_len, PROT_READ, MAP_SHARED, fd, 0);
> +		if (meta == MAP_FAILED) {
> +			kbuffer_free(tmap->kbuf);
> +			free(tmap);
> +			return NULL;
> +		}
> +		tmap->map = meta;
> +	}
> +
> +	tmap->data_pages = meta + tmap->meta_len;
> +
> +	tmap->data_len = tmap->map->subbuf_size * tmap->map->nr_subbufs;
> +
> +	tmap->data = mmap(NULL, tmap->data_len, PROT_READ, MAP_SHARED,
> +			  fd, tmap->meta_len);
> +	if (tmap->data == MAP_FAILED) {
> +		munmap(meta, tmap->meta_len);
> +		kbuffer_free(tmap->kbuf);
> +		free(tmap);
> +		return NULL;
> +	}
> +
> +	tmap->last_idx = tmap->map->reader.id;
> +
> +	data = tmap->data + tmap->map->subbuf_size * tmap->last_idx;
> +	kbuffer_load_subbuffer(kbuf, data);

Could it fast-forward through the events up to tmap->map->reader.read, so we
don't read the same events again?

Something like

  while (kbuf->curr < tmap->map->reader.read)
  	kbuffer_next_event(kbuf, NULL);
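
Note that kbuf->curr is private to kbuffer-parse.c, so outside of
libtraceevent the loop would need a public accessor. A rough equivalent
using kbuffer_curr_offset() (an assumption that reader.read and the
returned offset count from the same base, which needs checking):

  while (kbuffer_curr_offset(kbuf) < tmap->map->reader.read) {
  	if (!kbuffer_next_event(kbuf, NULL))
  		break;	/* don't spin if reader.read is past the last event */
  }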

> +
> +	return tmap;
> +}
> +
> +__hidden void trace_unmap(void *mapping)
> +{
> +	struct trace_mmap *tmap = mapping;
> +
> +	munmap(tmap->data, tmap->data_len);
> +	munmap(tmap->map, tmap->meta_len);
> +	kbuffer_free(tmap->kbuf);
> +	free(tmap);
> +}
> +
> +__hidden int trace_mmap_load_subbuf(void *mapping, struct kbuffer *kbuf)
> +{
> +	struct trace_mmap *tmap = mapping;
> +	void *data;
> +	int id;
> +
> +	id = tmap->map->reader.id;
> +	data = tmap->data + tmap->map->subbuf_size * id;
> +
> +	/*
> +	 * If kbuf doesn't point to the current sub-buffer
> +	 * just load it and return.
> +	 */
> +	if (data != kbuffer_subbuffer(kbuf)) {
> +		kbuffer_load_subbuffer(kbuf, data);
> +		return 1;
> +	}
> +
> +	/*
> +	 * Perhaps the reader page had a write that added
> +	 * more data.
> +	 */
> +	kbuffer_refresh(kbuf);
> +
> +	/* Are there still events to read? */
> +	if (kbuffer_curr_size(kbuf))
> +		return 1;

That does not seem to be enough: kbuffer_refresh() only updates kbuf->size,
while kbuffer_curr_size() returns next - curr.
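
In other words, once everything on the page has been consumed we have
curr == next == the old commit size; a refresh then only grows kbuf->size,
so next - curr stays 0 even though new events were committed. One possible
fix is for kbuffer_refresh() itself to notice that the reader was parked at
the old end and advance onto the new data (a sketch against the private
kbuffer fields, untested):

  old_size = kbuf->size;
  /* ... re-read the commit size into kbuf->size, as refresh does today ... */

  /* The reader had consumed everything up to the old end; step onto
   * the first newly committed event so that curr/next (and hence
   * kbuffer_curr_size()) reflect the refreshed data. */
  if (kbuf->size != old_size && kbuf->curr == old_size)
  	next_event(kbuf);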

> +
> +	/* See if a new page is ready? */
> +	if (ioctl(tmap->fd, TRACE_MMAP_IOCTL_GET_READER) < 0)
> +		return -1;

Maybe this ioctl should be called regardless of whether events are found on
the current reader page? That would at least update the reader.read field and
make sure subsequent readers do not get the same events we have already
consumed here.
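
Something like this, with the ioctl hoisted above the early returns (a
sketch of the idea, not the final patch):

  /* Always tell the kernel how far we have read: this keeps
   * reader.read current even when we bail out early below. */
  if (ioctl(tmap->fd, TRACE_MMAP_IOCTL_GET_READER) < 0)
  	return -1;

  kbuffer_refresh(kbuf);

  /* Are there still events to read on the current page? */
  if (kbuffer_curr_size(kbuf))
  	return 1;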

> +	id = tmap->map->reader.id;
> +	data = tmap->data + tmap->map->subbuf_size * id;
> +

[...]
