From: Arnaldo Carvalho de Melo <acme@kernel.org>
To: Ingo Molnar <mingo@kernel.org>
Cc: Clark Williams <williams@redhat.com>,
linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
Alexey Budankov <alexey.budankov@linux.intel.com>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Andi Kleen <ak@linux.intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Arnaldo Carvalho de Melo <acme@redhat.com>
Subject: [PATCH 19/75] perf record: Enable asynchronous trace writing
Date: Thu, 6 Dec 2018 18:25:06 -0300
Message-ID: <20181206212602.20474-20-acme@kernel.org>
In-Reply-To: <20181206212602.20474-1-acme@kernel.org>
From: Alexey Budankov <alexey.budankov@linux.intel.com>
The trace file offset is read once before the loop that iterates over the
mmaps and is written back after all performance data has been enqueued for
AIO writing. The trace file offset is advanced linearly after every
successful AIO write operation.
record__aio_sync() blocks until the started AIO operation completes and
then proceeds.
record__aio_mmap_read_sync() implements a barrier for all in-flight AIO
write requests.
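In outline, the per-mmap loop this patch adds to record__mmap_read_evlist()
does the following (a simplified sketch; error handling and the
!record__aio_enabled() fallback are omitted):

    off = record__aio_get_pos(trace_fd);     /* read the file offset once */
    for (i = 0; i < evlist->nr_mmaps; i++) {
            struct perf_mmap *map = &maps[i];

            if (!map->base)
                    continue;
            /* wait until map->aio.data is free after the previous request */
            record__aio_sync(map);
            /* copy the kernel buffer out and queue an aio write at 'off' */
            perf_mmap__aio_push(map, rec, record__aio_pushfn, &off);
    }
    record__aio_set_pos(trace_fd, off);      /* write the offset back once */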
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
Reviewed-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/ce2d45e9-d236-871c-7c8f-1bed2d37e8ac@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/Documentation/perf-record.txt | 5 +
tools/perf/builtin-record.c | 218 ++++++++++++++++++++++-
tools/perf/perf.h | 1 +
tools/perf/util/evlist.c | 6 +-
tools/perf/util/evlist.h | 2 +-
tools/perf/util/mmap.c | 77 +++++++-
tools/perf/util/mmap.h | 14 ++
7 files changed, 314 insertions(+), 9 deletions(-)
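For reference, the following is a minimal, self-contained illustration of the
Posix AIO sequence (aio_write/aio_error/aio_return/aio_suspend) that
record__aio_write(), record__aio_complete() and record__aio_sync() are built
around. It is an editor's sketch, not part of the patch; the file name is
made up and, depending on the libc version, linking may need -lrt:

    /* Queue one write, wait for completion, collect the result. */
    #include <aio.h>
    #include <errno.h>
    #include <fcntl.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[] = "one chunk of perf data\n";
            struct aiocb cb;
            const struct aiocb *list[1] = { &cb };
            struct timespec timeout = { 0, 1000 * 1000 }; /* 1ms, as in record__aio_sync() */
            int fd = open("aio-demo.data", O_CREAT | O_WRONLY | O_TRUNC, 0644);

            if (fd < 0)
                    return 1;

            memset(&cb, 0, sizeof(cb));
            cb.aio_fildes = fd;
            cb.aio_buf = buf;
            cb.aio_nbytes = sizeof(buf) - 1;
            cb.aio_offset = 0;                      /* explicit offset, as in record__aio_write() */
            cb.aio_sigevent.sigev_notify = SIGEV_NONE;

            if (aio_write(&cb)) {                   /* enqueue the write */
                    perror("aio_write");
                    return 1;
            }

            while (aio_error(&cb) == EINPROGRESS)   /* block until the request completes */
                    aio_suspend(list, 1, &timeout);

            printf("wrote %zd bytes\n", aio_return(&cb));
            close(fd);
            return 0;
    }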
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 246dee081efd..7efb4af88a68 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -435,6 +435,11 @@ Specify vmlinux path which has debuginfo.
--buildid-all::
Record build-id of all DSOs regardless whether it's actually hit or not.
+--aio::
+Enable asynchronous (Posix AIO) trace writing mode.
+Asynchronous mode is supported only when linking the perf tool with a libc
+that provides an implementation of the Posix AIO API.
+
--all-kernel::
Configure all used events to run in kernel space.
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 488779bc4c8d..408d6477c960 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -124,6 +124,183 @@ static int record__write(struct record *rec, struct perf_mmap *map __maybe_unuse
return 0;
}
+#ifdef HAVE_AIO_SUPPORT
+static int record__aio_write(struct aiocb *cblock, int trace_fd,
+ void *buf, size_t size, off_t off)
+{
+ int rc;
+
+ cblock->aio_fildes = trace_fd;
+ cblock->aio_buf = buf;
+ cblock->aio_nbytes = size;
+ cblock->aio_offset = off;
+ cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
+
+ do {
+ rc = aio_write(cblock);
+ if (rc == 0) {
+ break;
+ } else if (errno != EAGAIN) {
+ cblock->aio_fildes = -1;
+ pr_err("failed to queue perf data, error: %m\n");
+ break;
+ }
+ } while (1);
+
+ return rc;
+}
+
+static int record__aio_complete(struct perf_mmap *md, struct aiocb *cblock)
+{
+ void *rem_buf;
+ off_t rem_off;
+ size_t rem_size;
+ int rc, aio_errno;
+ ssize_t aio_ret, written;
+
+ aio_errno = aio_error(cblock);
+ if (aio_errno == EINPROGRESS)
+ return 0;
+
+ written = aio_ret = aio_return(cblock);
+ if (aio_ret < 0) {
+ if (aio_errno != EINTR)
+ pr_err("failed to write perf data, error: %m\n");
+ written = 0;
+ }
+
+ rem_size = cblock->aio_nbytes - written;
+
+ if (rem_size == 0) {
+ cblock->aio_fildes = -1;
+ /*
+ * md->refcount is incremented in perf_mmap__aio_push() for
+ * every enqueued aio write request so decrement it because
+ * the request is now complete.
+ */
+ perf_mmap__put(md);
+ rc = 1;
+ } else {
+ /*
+ * aio write request may require restart with the
+ * remainder if the kernel didn't write the whole
+ * chunk at once.
+ */
+ rem_off = cblock->aio_offset + written;
+ rem_buf = (void *)(cblock->aio_buf + written);
+ record__aio_write(cblock, cblock->aio_fildes,
+ rem_buf, rem_size, rem_off);
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static void record__aio_sync(struct perf_mmap *md)
+{
+ struct aiocb *cblock = &md->aio.cblock;
+ struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
+
+ do {
+ if (cblock->aio_fildes == -1 || record__aio_complete(md, cblock))
+ return;
+
+ while (aio_suspend((const struct aiocb**)&cblock, 1, &timeout)) {
+ if (!(errno == EAGAIN || errno == EINTR))
+ pr_err("failed to sync perf data, error: %m\n");
+ }
+ } while (1);
+}
+
+static int record__aio_pushfn(void *to, struct aiocb *cblock, void *bf, size_t size, off_t off)
+{
+ struct record *rec = to;
+ int ret, trace_fd = rec->session->data->file.fd;
+
+ rec->samples++;
+
+ ret = record__aio_write(cblock, trace_fd, bf, size, off);
+ if (!ret) {
+ rec->bytes_written += size;
+ if (switch_output_size(rec))
+ trigger_hit(&switch_output_trigger);
+ }
+
+ return ret;
+}
+
+static off_t record__aio_get_pos(int trace_fd)
+{
+ return lseek(trace_fd, 0, SEEK_CUR);
+}
+
+static void record__aio_set_pos(int trace_fd, off_t pos)
+{
+ lseek(trace_fd, pos, SEEK_SET);
+}
+
+static void record__aio_mmap_read_sync(struct record *rec)
+{
+ int i;
+ struct perf_evlist *evlist = rec->evlist;
+ struct perf_mmap *maps = evlist->mmap;
+
+ if (!rec->opts.nr_cblocks)
+ return;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ struct perf_mmap *map = &maps[i];
+
+ if (map->base)
+ record__aio_sync(map);
+ }
+}
+
+static int nr_cblocks_default = 1;
+
+static int record__aio_parse(const struct option *opt,
+ const char *str __maybe_unused,
+ int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (unset)
+ opts->nr_cblocks = 0;
+ else
+ opts->nr_cblocks = nr_cblocks_default;
+
+ return 0;
+}
+#else /* HAVE_AIO_SUPPORT */
+static void record__aio_sync(struct perf_mmap *md __maybe_unused)
+{
+}
+
+static int record__aio_pushfn(void *to __maybe_unused, struct aiocb *cblock __maybe_unused,
+ void *bf __maybe_unused, size_t size __maybe_unused, off_t off __maybe_unused)
+{
+ return -1;
+}
+
+static off_t record__aio_get_pos(int trace_fd __maybe_unused)
+{
+ return -1;
+}
+
+static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
+{
+}
+
+static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
+{
+}
+#endif
+
+static int record__aio_enabled(struct record *rec)
+{
+ return rec->opts.nr_cblocks > 0;
+}
+
static int process_synthesized_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -329,7 +506,7 @@ static int record__mmap_evlist(struct record *rec,
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode) < 0) {
+ opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -525,6 +702,8 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
int i;
int rc = 0;
struct perf_mmap *maps;
+ int trace_fd = rec->data.file.fd;
+ off_t off;
if (!evlist)
return 0;
@@ -536,13 +715,29 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
return 0;
+ if (record__aio_enabled(rec))
+ off = record__aio_get_pos(trace_fd);
+
for (i = 0; i < evlist->nr_mmaps; i++) {
struct perf_mmap *map = &maps[i];
if (map->base) {
- if (perf_mmap__push(map, rec, record__pushfn) != 0) {
- rc = -1;
- goto out;
+ if (!record__aio_enabled(rec)) {
+ if (perf_mmap__push(map, rec, record__pushfn) != 0) {
+ rc = -1;
+ goto out;
+ }
+ } else {
+ /*
+ * Call record__aio_sync() to wait till map->data buffer
+ * becomes available after previous aio write request.
+ */
+ record__aio_sync(map);
+ if (perf_mmap__aio_push(map, rec, record__aio_pushfn, &off) != 0) {
+ record__aio_set_pos(trace_fd, off);
+ rc = -1;
+ goto out;
+ }
}
}
@@ -553,6 +748,9 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
}
}
+ if (record__aio_enabled(rec))
+ record__aio_set_pos(trace_fd, off);
+
/*
* Mark the round finished in case we wrote
* at least one event.
@@ -658,6 +856,8 @@ record__switch_output(struct record *rec, bool at_exit)
/* Same Size: "2015122520103046"*/
char timestamp[] = "InvalidTimestamp";
+ record__aio_mmap_read_sync(rec);
+
record__synthesize(rec, true);
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
@@ -1168,6 +1368,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
record__synthesize_workload(rec, true);
out_child:
+ record__aio_mmap_read_sync(rec);
+
if (forks) {
int exit_status;
@@ -1706,6 +1908,11 @@ static struct option __record_options[] = {
"signal"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
"Parse options then exit"),
+#ifdef HAVE_AIO_SUPPORT
+ OPT_CALLBACK_NOOPT(0, "aio", &record.opts,
+ NULL, "Enable asynchronous trace writing mode",
+ record__aio_parse),
+#endif
OPT_END()
};
@@ -1898,6 +2105,9 @@ int cmd_record(int argc, const char **argv)
goto out;
}
+ if (verbose > 0)
+ pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+
err = __cmd_record(&record, argc, argv);
out:
perf_evlist__delete(rec->evlist);
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 0ed4a34c74c4..4d40baa45a5f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -83,6 +83,7 @@ struct record_opts {
clockid_t clockid;
u64 clockid_res_ns;
unsigned int proc_map_timeout;
+ int nr_cblocks;
};
struct option;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6f010b9f0a81..e90575192209 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1018,7 +1018,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite)
+ bool auxtrace_overwrite, int nr_cblocks)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1028,7 +1028,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = 0 };
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1060,7 +1060,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index d108d167eb36..868294491194 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -162,7 +162,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite);
+ bool auxtrace_overwrite, int nr_cblocks);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 47cdc3ad6546..61aa381d05d0 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -158,7 +158,8 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
int delta_max;
- if (mp->nr_cblocks) {
+ map->aio.nr_cblocks = mp->nr_cblocks;
+ if (map->aio.nr_cblocks) {
map->aio.data = malloc(perf_mmap__mmap_len(map));
if (!map->aio.data) {
pr_debug2("failed to allocate data buffer, error %m\n");
@@ -187,6 +188,80 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
if (map->aio.data)
zfree(&map->aio.data);
}
+
+int perf_mmap__aio_push(struct perf_mmap *md, void *to,
+ int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
+ off_t *off)
+{
+ u64 head = perf_mmap__read_head(md);
+ unsigned char *data = md->base + page_size;
+ unsigned long size, size0 = 0;
+ void *buf;
+ int rc = 0;
+
+ rc = perf_mmap__read_init(md);
+ if (rc < 0)
+ return (rc == -EAGAIN) ? 0 : -1;
+
+ /*
+ * md->base data is copied into the md->aio.data buffer to
+ * release space in the kernel buffer as fast as possible,
+ * through perf_mmap__consume() below.
+ *
+ * That lets the kernel proceed with storing more
+ * profiling data into the kernel buffer earlier than other
+ * per-cpu kernel buffers are handled.
+ *
+ * Copying can be done in two steps in case the chunk of
+ * profiling data crosses the upper bound of the kernel buffer.
+ * In this case we first move part of data from md->start
+ * till the upper bound and then the remainder from the
+ * beginning of the kernel buffer till the end of
+ * the data chunk.
+ */
+
+ size = md->end - md->start;
+
+ if ((md->start & md->mask) + size != (md->end & md->mask)) {
+ buf = &data[md->start & md->mask];
+ size = md->mask + 1 - (md->start & md->mask);
+ md->start += size;
+ memcpy(md->aio.data, buf, size);
+ size0 = size;
+ }
+
+ buf = &data[md->start & md->mask];
+ size = md->end - md->start;
+ md->start += size;
+ memcpy(md->aio.data + size0, buf, size);
+
+ /*
+ * Increment md->refcount to guard the md->aio.data buffer
+ * from premature deallocation because the md object can be
+ * released before the aio write request started on
+ * md->aio.data completes.
+ *
+ * perf_mmap__put() is done in record__aio_complete()
+ * after the started request completes.
+ */
+ perf_mmap__get(md);
+
+ md->prev = head;
+ perf_mmap__consume(md);
+
+ rc = push(to, &md->aio.cblock, md->aio.data, size0 + size, *off);
+ if (!rc) {
+ *off += size0 + size;
+ } else {
+ /*
+ * Decrement md->refcount back if aio write
+ * operation failed to start.
+ */
+ perf_mmap__put(md);
+ }
+
+ return rc;
+}
#else
static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused,
struct mmap_params *mp __maybe_unused)
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index 3f10ad030c5e..b99213ba11b5 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -12,6 +12,7 @@
#include "auxtrace.h"
#include "event.h"
+struct aiocb;
/**
* struct perf_mmap - perf's ring buffer mmap details
*
@@ -33,6 +34,7 @@ struct perf_mmap {
struct {
void *data;
struct aiocb cblock;
+ int nr_cblocks;
} aio;
#endif
};
@@ -94,6 +96,18 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
int perf_mmap__push(struct perf_mmap *md, void *to,
int push(struct perf_mmap *map, void *to, void *buf, size_t size));
+#ifdef HAVE_AIO_SUPPORT
+int perf_mmap__aio_push(struct perf_mmap *md, void *to,
+ int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
+ off_t *off);
+#else
+static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused,
+ int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
+ off_t *off __maybe_unused)
+{
+ return 0;
+}
+#endif
size_t perf_mmap__mmap_len(struct perf_mmap *map);
--
2.19.2