From: Ian Rogers <irogers@google.com>
To: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>,
Arnaldo Carvalho de Melo <acme@kernel.org>,
Mark Rutland <mark.rutland@arm.com>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Jiri Olsa <jolsa@kernel.org>, Namhyung Kim <namhyung@kernel.org>,
Ian Rogers <irogers@google.com>,
Adrian Hunter <adrian.hunter@intel.com>,
Nick Terrell <terrelln@fb.com>,
Kan Liang <kan.liang@linux.intel.com>,
Andi Kleen <ak@linux.intel.com>, Leo Yan <leo.yan@linaro.org>,
Song Liu <song@kernel.org>, Sandipan Das <sandipan.das@amd.com>,
James Clark <james.clark@arm.com>,
Anshuman Khandual <anshuman.khandual@arm.com>,
Miguel Ojeda <ojeda@kernel.org>,
Liam Howlett <liam.howlett@oracle.com>,
Yang Jihong <yangjihong1@huawei.com>,
Athira Rajeev <atrajeev@linux.vnet.ibm.com>,
Kajol Jain <kjain@linux.ibm.com>,
K Prateek Nayak <kprateek.nayak@amd.com>,
Sean Christopherson <seanjc@google.com>,
Yanteng Si <siyanteng@loongson.cn>,
Ravi Bangoria <ravi.bangoria@amd.com>,
German Gomez <german.gomez@arm.com>,
Changbin Du <changbin.du@huawei.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Masami Hiramatsu <mhiramat@kernel.org>,
liuwenyu <liuwenyu7@huawei.com>,
linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org
Subject: [PATCH v3 49/50] perf threads: Switch from rbtree to hashmap
Date: Tue, 24 Oct 2023 15:23:52 -0700 [thread overview]
Message-ID: <20231024222353.3024098-50-irogers@google.com> (raw)
In-Reply-To: <20231024222353.3024098-1-irogers@google.com>
The rbtree provides an ordering of entries, but this is unused. Switch
to using a hashmap for O(1) rather than O(log n) find/insert/remove
complexity.
Signed-off-by: Ian Rogers <irogers@google.com>
---
tools/perf/util/threads.c | 146 ++++++++++++--------------------------
tools/perf/util/threads.h | 6 +-
2 files changed, 47 insertions(+), 105 deletions(-)
diff --git a/tools/perf/util/threads.c b/tools/perf/util/threads.c
index d984ec939c7b..55923be53180 100644
--- a/tools/perf/util/threads.c
+++ b/tools/perf/util/threads.c
@@ -3,25 +3,30 @@
#include "machine.h"
#include "thread.h"
-struct thread_rb_node {
- struct rb_node rb_node;
- struct thread *thread;
-};
-
static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
{
/* Cast it to handle tid == -1 */
return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
}
+static size_t key_hash(long key, void *ctx __maybe_unused)
+{
+ /* The table lookup removes low bit entropy, but this is just ignored here. */
+ return key;
+}
+
+static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return key1 == key2;
+}
+
void threads__init(struct threads *threads)
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
- table->entries = RB_ROOT_CACHED;
+ hashmap__init(&table->shard, key_hash, key_equal, NULL);
init_rwsem(&table->lock);
- table->nr = 0;
table->last_match = NULL;
}
}
@@ -32,6 +37,7 @@ void threads__exit(struct threads *threads)
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
+ hashmap__clear(&table->shard);
exit_rwsem(&table->lock);
}
}
@@ -44,7 +50,7 @@ size_t threads__nr(struct threads *threads)
struct threads_table_entry *table = &threads->table[i];
down_read(&table->lock);
- nr += table->nr;
+ nr += hashmap__size(&table->shard);
up_read(&table->lock);
}
return nr;
@@ -86,28 +92,13 @@ static void threads_table_entry__set_last_match(struct threads_table_entry *tabl
struct thread *threads__find(struct threads *threads, pid_t tid)
{
struct threads_table_entry *table = threads__table(threads, tid);
- struct rb_node **p;
- struct thread *res = NULL;
+ struct thread *res;
down_read(&table->lock);
res = __threads_table_entry__get_last_match(table, tid);
- if (res)
- return res;
-
- p = &table->entries.rb_root.rb_node;
- while (*p != NULL) {
- struct rb_node *parent = *p;
- struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
- if (thread__tid(th) == tid) {
- res = thread__get(th);
- break;
- }
-
- if (tid < thread__tid(th))
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
+ if (!res) {
+ if (hashmap__find(&table->shard, tid, &res))
+ res = thread__get(res);
}
up_read(&table->lock);
if (res)
@@ -118,49 +109,25 @@ struct thread *threads__find(struct threads *threads, pid_t tid)
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
{
struct threads_table_entry *table = threads__table(threads, tid);
- struct rb_node **p;
- struct rb_node *parent = NULL;
struct thread *res = NULL;
- struct thread_rb_node *nd;
- bool leftmost = true;
*created = false;
down_write(&table->lock);
- p = &table->entries.rb_root.rb_node;
- while (*p != NULL) {
- struct thread *th;
-
- parent = *p;
- th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
- if (thread__tid(th) == tid) {
- __threads_table_entry__set_last_match(table, th);
- res = thread__get(th);
- goto out_unlock;
- }
-
- if (tid < thread__tid(th))
- p = &(*p)->rb_left;
- else {
- leftmost = false;
- p = &(*p)->rb_right;
- }
- }
- nd = malloc(sizeof(*nd));
- if (nd == NULL)
- goto out_unlock;
res = thread__new(pid, tid);
- if (!res)
- free(nd);
- else {
- *created = true;
- nd->thread = thread__get(res);
- rb_link_node(&nd->rb_node, parent, p);
- rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
- ++table->nr;
- __threads_table_entry__set_last_match(table, res);
+ if (res) {
+ if (hashmap__add(&table->shard, tid, res)) {
+ /* Add failed. Assume a race so find other entry. */
+ thread__put(res);
+ res = NULL;
+ if (hashmap__find(&table->shard, tid, &res))
+ res = thread__get(res);
+ } else {
+ res = thread__get(res);
+ *created = true;
+ }
+ if (res)
+ __threads_table_entry__set_last_match(table, res);
}
-out_unlock:
up_write(&table->lock);
return res;
}
@@ -169,57 +136,32 @@ void threads__remove_all_threads(struct threads *threads)
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
- struct rb_node *nd;
+ struct hashmap_entry *cur, *tmp;
+ size_t bkt;
down_write(&table->lock);
__threads_table_entry__set_last_match(table, NULL);
- nd = rb_first_cached(&table->entries);
- while (nd) {
- struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-
- nd = rb_next(nd);
- thread__put(trb->thread);
- rb_erase_cached(&trb->rb_node, &table->entries);
- RB_CLEAR_NODE(&trb->rb_node);
- --table->nr;
+ hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) {
+ struct thread *old_value;
- free(trb);
+ hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
+ thread__put(old_value);
}
- assert(table->nr == 0);
up_write(&table->lock);
}
}
void threads__remove(struct threads *threads, struct thread *thread)
{
- struct rb_node **p;
struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
- pid_t tid = thread__tid(thread);
+ struct thread *old_value;
down_write(&table->lock);
if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
__threads_table_entry__set_last_match(table, NULL);
- p = &table->entries.rb_root.rb_node;
- while (*p != NULL) {
- struct rb_node *parent = *p;
- struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
- struct thread *th = nd->thread;
-
- if (RC_CHK_EQUAL(th, thread)) {
- thread__put(nd->thread);
- rb_erase_cached(&nd->rb_node, &table->entries);
- RB_CLEAR_NODE(&nd->rb_node);
- --table->nr;
- free(nd);
- break;
- }
-
- if (tid < thread__tid(th))
- p = &(*p)->rb_left;
- else
- p = &(*p)->rb_right;
- }
+ hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
+ thread__put(old_value);
up_write(&table->lock);
}
@@ -229,11 +171,11 @@ int threads__for_each_thread(struct threads *threads,
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
- struct rb_node *nd;
+ struct hashmap_entry *cur;
+ size_t bkt;
- for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
- struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
- int rc = fn(trb->thread, data);
+ hashmap__for_each_entry((&table->shard), cur, bkt) {
+ int rc = fn((struct thread *)cur->pvalue, data);
if (rc != 0)
return rc;
diff --git a/tools/perf/util/threads.h b/tools/perf/util/threads.h
index ed67de627578..d03bd91a7769 100644
--- a/tools/perf/util/threads.h
+++ b/tools/perf/util/threads.h
@@ -2,7 +2,7 @@
#ifndef __PERF_THREADS_H
#define __PERF_THREADS_H
-#include <linux/rbtree.h>
+#include "hashmap.h"
#include "rwsem.h"
struct thread;
@@ -11,9 +11,9 @@ struct thread;
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
struct threads_table_entry {
- struct rb_root_cached entries;
+ /* Key is tid, value is struct thread. */
+ struct hashmap shard;
struct rw_semaphore lock;
- unsigned int nr;
struct thread *last_match;
};
--
2.42.0.758.gaed0368e0e-goog
next prev parent reply other threads:[~2023-10-24 22:36 UTC|newest]
Thread overview: 62+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-24 22:23 [PATCH v3 00/50] Improvements to memory use Ian Rogers
2023-10-24 22:23 ` [PATCH v3 01/50] perf rwsem: Add debug mode that uses a mutex Ian Rogers
2023-10-24 22:23 ` [PATCH v3 02/50] perf machine: Avoid out of bounds LBR memory read Ian Rogers
2023-10-24 22:23 ` [PATCH v3 03/50] libperf rc_check: Make implicit enabling work for GCC Ian Rogers
2023-10-24 22:23 ` [PATCH v3 04/50] libperf rc_check: Add RC_CHK_EQUAL Ian Rogers
2023-10-24 22:23 ` [PATCH v3 05/50] perf hist: Add missing puts to hist__account_cycles Ian Rogers
2023-10-24 22:23 ` [PATCH v3 06/50] perf threads: Remove unused dead thread list Ian Rogers
2023-10-24 22:23 ` [PATCH v3 07/50] perf offcpu: Add missed btf_free Ian Rogers
2023-10-24 22:23 ` [PATCH v3 08/50] perf callchain: Make display use of branch_type_stat const Ian Rogers
2023-10-24 22:23 ` [PATCH v3 09/50] perf callchain: Make brtype_stat in callchain_list optional Ian Rogers
2023-10-24 22:23 ` [PATCH v3 10/50] perf callchain: Minor layout changes to callchain_list Ian Rogers
2023-10-24 22:23 ` [PATCH v3 11/50] perf mem_info: Add and use map_symbol__exit and addr_map_symbol__exit Ian Rogers
2023-10-24 22:23 ` [PATCH v3 12/50] perf record: Lazy load kernel symbols Ian Rogers
2023-10-25 18:25 ` Namhyung Kim
2023-10-25 18:35 ` Adrian Hunter
2023-10-24 22:23 ` [PATCH v3 13/50] libperf: Lazily allocate mmap event copy Ian Rogers
2023-10-25 2:38 ` Yang Jihong
2023-10-25 3:28 ` Ian Rogers
2023-10-24 22:23 ` [PATCH v3 14/50] perf mmap: Lazily initialize zstd streams Ian Rogers
2023-10-24 22:23 ` [PATCH v3 15/50] perf machine thread: Remove exited threads by default Ian Rogers
2023-10-24 22:23 ` [PATCH v3 16/50] tools api fs: Switch filename__read_str to use io.h Ian Rogers
2023-10-24 22:23 ` [PATCH v3 17/50] tools api fs: Avoid reading whole file for a 1 byte bool Ian Rogers
2023-10-24 22:23 ` [PATCH v3 18/50] tools lib api: Add io_dir an allocation free readdir alternative Ian Rogers
2023-10-25 18:43 ` Namhyung Kim
2023-10-25 22:15 ` Ian Rogers
2023-10-24 22:23 ` [PATCH v3 19/50] perf maps: Switch modules tree walk to io_dir__readdir Ian Rogers
2023-10-24 22:23 ` [PATCH v3 20/50] perf record: Be lazier in allocating lost samples buffer Ian Rogers
2023-10-25 3:44 ` Yang Jihong
2023-10-25 17:00 ` Ian Rogers
2023-10-25 19:04 ` Namhyung Kim
2023-10-25 19:00 ` Namhyung Kim
2023-10-24 22:23 ` [PATCH v3 21/50] perf pmu: Switch to io_dir__readdir Ian Rogers
2023-10-24 22:23 ` [PATCH v3 22/50] perf bpf: Don't synthesize BPF events when disabled Ian Rogers
2023-10-24 22:23 ` [PATCH v3 23/50] perf header: Switch mem topology to io_dir__readdir Ian Rogers
2023-10-24 22:23 ` [PATCH v3 24/50] perf events: Remove scandir in thread synthesis Ian Rogers
2023-10-24 22:23 ` [PATCH v3 25/50] perf map: Simplify map_ip/unmap_ip and make map size smaller Ian Rogers
2023-10-24 22:23 ` [PATCH v3 26/50] perf maps: Move symbol maps functions to maps.c Ian Rogers
2023-10-24 22:23 ` [PATCH v3 27/50] perf thread: Add missing RC_CHK_ACCESS Ian Rogers
2023-10-24 22:23 ` [PATCH v3 28/50] perf maps: Add maps__for_each_map to call a function on each entry Ian Rogers
2023-10-24 22:23 ` [PATCH v3 29/50] perf maps: Add remove maps function to remove a map based on callback Ian Rogers
2023-10-24 22:23 ` [PATCH v3 30/50] perf debug: Expose debug file Ian Rogers
2023-10-24 22:23 ` [PATCH v3 31/50] perf maps: Refactor maps__fixup_overlappings Ian Rogers
2023-10-24 22:23 ` [PATCH v3 32/50] perf maps: Do simple merge if given map doesn't overlap Ian Rogers
2023-10-24 22:23 ` [PATCH v3 33/50] perf maps: Rename clone to copy from Ian Rogers
2023-10-24 22:23 ` [PATCH v3 34/50] perf maps: Add maps__load_first Ian Rogers
2023-10-24 22:23 ` [PATCH v3 35/50] perf maps: Add find next entry to give entry after the given map Ian Rogers
2023-10-24 22:23 ` [PATCH v3 36/50] perf maps: Reduce scope of map_rb_node and maps internals Ian Rogers
2023-10-24 22:23 ` [PATCH v3 37/50] perf maps: Fix up overlaps during fixup_end Ian Rogers
2023-10-24 22:23 ` [PATCH v3 38/50] perf maps: Switch from rbtree to lazily sorted array for addresses Ian Rogers
2023-10-24 22:23 ` [PATCH v3 39/50] perf maps: Get map before returning in maps__find Ian Rogers
2023-10-24 22:23 ` [PATCH v3 40/50] perf maps: Get map before returning in maps__find_by_name Ian Rogers
2023-10-24 22:23 ` [PATCH v3 41/50] perf maps: Get map before returning in maps__find_next_entry Ian Rogers
2023-10-24 22:23 ` [PATCH v3 42/50] perf maps: Hide maps internals Ian Rogers
2023-10-24 22:23 ` [PATCH v3 43/50] perf maps: Locking tidy up of nr_maps Ian Rogers
2023-10-24 22:23 ` [PATCH v3 44/50] perf dso: Reorder variables to save space in struct dso Ian Rogers
2023-10-24 22:23 ` [PATCH v3 45/50] perf report: Sort child tasks by tid Ian Rogers
2023-10-24 22:23 ` [PATCH v3 46/50] perf trace: Ignore thread hashing in summary Ian Rogers
2023-10-24 22:23 ` [PATCH v3 47/50] perf machine: Move fprintf to for_each loop and a callback Ian Rogers
2023-10-24 22:23 ` [PATCH v3 48/50] perf threads: Move threads to its own files Ian Rogers
2023-10-24 22:23 ` Ian Rogers [this message]
2023-10-24 22:23 ` [PATCH v3 50/50] perf threads: Reduce table size from 256 to 8 Ian Rogers
2023-10-26 17:11 ` (subset) [PATCH v3 00/50] Improvements to memory use Namhyung Kim
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231024222353.3024098-50-irogers@google.com \
--to=irogers@google.com \
--cc=acme@kernel.org \
--cc=adrian.hunter@intel.com \
--cc=ak@linux.intel.com \
--cc=alexander.shishkin@linux.intel.com \
--cc=anshuman.khandual@arm.com \
--cc=atrajeev@linux.vnet.ibm.com \
--cc=changbin.du@huawei.com \
--cc=german.gomez@arm.com \
--cc=james.clark@arm.com \
--cc=jolsa@kernel.org \
--cc=kan.liang@linux.intel.com \
--cc=kjain@linux.ibm.com \
--cc=kprateek.nayak@amd.com \
--cc=leo.yan@linaro.org \
--cc=liam.howlett@oracle.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-perf-users@vger.kernel.org \
--cc=liuwenyu7@huawei.com \
--cc=mark.rutland@arm.com \
--cc=mhiramat@kernel.org \
--cc=mingo@redhat.com \
--cc=namhyung@kernel.org \
--cc=ojeda@kernel.org \
--cc=pbonzini@redhat.com \
--cc=peterz@infradead.org \
--cc=ravi.bangoria@amd.com \
--cc=sandipan.das@amd.com \
--cc=seanjc@google.com \
--cc=siyanteng@loongson.cn \
--cc=song@kernel.org \
--cc=terrelln@fb.com \
--cc=yangjihong1@huawei.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox