* [PATCH net-next 0/6] rhashtable self-test improvements
@ 2015-04-30 22:37 Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 1/6] rhashtable-test: Remove unused TEST_NEXPANDS Thomas Graf
` (6 more replies)
0 siblings, 7 replies; 8+ messages in thread
From: Thomas Graf @ 2015-04-30 22:37 UTC (permalink / raw)
To: davem; +Cc: netdev, herbert
This series improves the rhashtable self-test to:
* Avoid allocation of test objects
* Measure the time of test runs
* Use the iterator to walk the table for consistency
* Account for failed insertions due to memory pressure or
utilization pressure
* Ignore failed insertions when checking for consistency
Thomas Graf (6):
rhashtable-test: Remove unused TEST_NEXPANDS
rhashtable-test: Measure time to insert, remove & traverse entries
rhashtable-test: Get rid of ptr in test_obj structure
rhashtable-test: Do not allocate individual test objects
rhashtable-test: Use walker to test bucket statistics
rhashtable-test: Detect insertion failures
lib/test_rhashtable.c | 214 +++++++++++++++++++++++++++++---------------------
1 file changed, 123 insertions(+), 91 deletions(-)
--
1.9.3
^ permalink raw reply [flat|nested] 8+ messages in thread
* [PATCH net-next 1/6] rhashtable-test: Remove unused TEST_NEXPANDS
2015-04-30 22:37 [PATCH net-next 0/6] rhashtable self-test improvements Thomas Graf
@ 2015-04-30 22:37 ` Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 2/6] rhashtable-test: Measure time to insert, remove & traverse entries Thomas Graf
` (5 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Thomas Graf @ 2015-04-30 22:37 UTC (permalink / raw)
To: davem; +Cc: netdev, herbert
Signed-off-by: Thomas Graf <tgraf@suug.ch>
---
lib/test_rhashtable.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index b295754..c60fd5d 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -30,7 +30,6 @@
#define TEST_HT_SIZE 8
#define TEST_ENTRIES 2048
#define TEST_PTR ((void *) 0xdeadbeef)
-#define TEST_NEXPANDS 4
struct test_obj {
void *ptr;
--
1.9.3
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH net-next 2/6] rhashtable-test: Measure time to insert, remove & traverse entries
2015-04-30 22:37 [PATCH net-next 0/6] rhashtable self-test improvements Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 1/6] rhashtable-test: Remove unused TEST_NEXPANDS Thomas Graf
@ 2015-04-30 22:37 ` Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 3/6] rhashtable-test: Get rid of ptr in test_obj structure Thomas Graf
` (4 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Thomas Graf @ 2015-04-30 22:37 UTC (permalink / raw)
To: davem; +Cc: netdev, herbert
Make test configurable by allowing to specify all relevant knobs
through module parameters.
Do several test runs and measure the average time it takes to
insert & remove all entries. Note, a deferred resize might still
continue to run in the background.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
---
lib/test_rhashtable.c | 100 ++++++++++++++++++++++++++++++++++----------------
1 file changed, 69 insertions(+), 31 deletions(-)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c60fd5d..e3d31bf 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -1,14 +1,9 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
- * Based on the following paper:
- * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
- *
- * Code partially derived from nft_hash
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -27,9 +22,28 @@
#include <linux/slab.h>
-#define TEST_HT_SIZE 8
-#define TEST_ENTRIES 2048
#define TEST_PTR ((void *) 0xdeadbeef)
+#define MAX_ENTRIES 1000000
+
+static int entries = 50000;
+module_param(entries, int, 0);
+MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)");
+
+static int runs = 4;
+module_param(runs, int, 0);
+MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)");
+
+static int max_size = 65536;
+module_param(max_size, int, 0);
+MODULE_PARM_DESC(max_size, "Maximum table size (default: 65536)");
+
+static bool shrinking = false;
+module_param(shrinking, bool, 0);
+MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)");
+
+static int size = 8;
+module_param(size, int, 0);
+MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
struct test_obj {
void *ptr;
@@ -37,8 +51,7 @@ struct test_obj {
struct rhash_head node;
};
-static const struct rhashtable_params test_rht_params = {
- .nelem_hint = TEST_HT_SIZE,
+static struct rhashtable_params test_rht_params = {
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
.key_len = sizeof(int),
@@ -50,7 +63,7 @@ static int __init test_rht_lookup(struct rhashtable *ht)
{
unsigned int i;
- for (i = 0; i < TEST_ENTRIES * 2; i++) {
+ for (i = 0; i < entries * 2; i++) {
struct test_obj *obj;
bool expected = !(i % 2);
u32 key = i;
@@ -110,26 +123,28 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
}
pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n",
- total, atomic_read(&ht->nelems), TEST_ENTRIES);
+ total, atomic_read(&ht->nelems), entries);
- if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
+ if (total != atomic_read(&ht->nelems) || total != entries)
pr_warn("Test failed: Total count mismatch ^^^");
}
-static int __init test_rhashtable(struct rhashtable *ht)
+static s64 __init test_rhashtable(struct rhashtable *ht)
{
struct bucket_table *tbl;
struct test_obj *obj;
struct rhash_head *pos, *next;
int err;
unsigned int i;
+ s64 start, end;
/*
* Insertion Test:
- * Insert TEST_ENTRIES into table with all keys even numbers
+ * Insert entries into table with all keys even numbers
*/
- pr_info(" Adding %d keys\n", TEST_ENTRIES);
- for (i = 0; i < TEST_ENTRIES; i++) {
+ pr_info(" Adding %d keys\n", entries);
+ start = ktime_get_ns();
+ for (i = 0; i < entries; i++) {
struct test_obj *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -157,8 +172,8 @@ static int __init test_rhashtable(struct rhashtable *ht)
test_bucket_stats(ht, true);
rcu_read_unlock();
- pr_info(" Deleting %d keys\n", TEST_ENTRIES);
- for (i = 0; i < TEST_ENTRIES; i++) {
+ pr_info(" Deleting %d keys\n", entries);
+ for (i = 0; i < entries; i++) {
u32 key = i * 2;
obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
@@ -168,7 +183,10 @@ static int __init test_rhashtable(struct rhashtable *ht)
kfree(obj);
}
- return 0;
+ end = ktime_get_ns();
+ pr_info(" Duration of test: %lld ns\n", end - start);
+
+ return end - start;
error:
tbl = rht_dereference_rcu(ht->tbl, ht);
@@ -183,22 +201,42 @@ static struct rhashtable ht;
static int __init test_rht_init(void)
{
- int err;
+ int i, err;
+ u64 total_time = 0;
- pr_info("Running resizable hashtable tests...\n");
+ entries = min(entries, MAX_ENTRIES);
- err = rhashtable_init(&ht, &test_rht_params);
- if (err < 0) {
- pr_warn("Test failed: Unable to initialize hashtable: %d\n",
- err);
- return err;
- }
+ test_rht_params.automatic_shrinking = shrinking;
+ test_rht_params.max_size = max_size;
+ test_rht_params.nelem_hint = size;
- err = test_rhashtable(&ht);
+ pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n",
+ size, max_size, shrinking);
- rhashtable_destroy(&ht);
+ for (i = 0; i < runs; i++) {
+ s64 time;
- return err;
+ pr_info("Test %02d:\n", i);
+ err = rhashtable_init(&ht, &test_rht_params);
+ if (err < 0) {
+ pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+ err);
+ continue;
+ }
+
+ time = test_rhashtable(&ht);
+ rhashtable_destroy(&ht);
+ if (time < 0) {
+ pr_warn("Test failed: return code %lld\n", time);
+ return -EINVAL;
+ }
+
+ total_time += time;
+ }
+
+ pr_info("Average test time: %llu\n", total_time / runs);
+
+ return 0;
}
static void __exit test_rht_exit(void)
--
1.9.3
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH net-next 3/6] rhashtable-test: Get rid of ptr in test_obj structure
2015-04-30 22:37 [PATCH net-next 0/6] rhashtable self-test improvements Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 1/6] rhashtable-test: Remove unused TEST_NEXPANDS Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 2/6] rhashtable-test: Measure time to insert, remove & traverse entries Thomas Graf
@ 2015-04-30 22:37 ` Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 4/6] rhashtable-test: Do not allocate individual test objects Thomas Graf
` (3 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Thomas Graf @ 2015-04-30 22:37 UTC (permalink / raw)
To: davem; +Cc: netdev, herbert
This only blows up the size of the test structure for no gain
in test coverage. Reduces size of test_obj from 24 to 16 bytes.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
---
lib/test_rhashtable.c | 9 +++------
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index e3d31bf..d6d6719 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -22,7 +22,6 @@
#include <linux/slab.h>
-#define TEST_PTR ((void *) 0xdeadbeef)
#define MAX_ENTRIES 1000000
static int entries = 50000;
@@ -46,7 +45,6 @@ module_param(size, int, 0);
MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)");
struct test_obj {
- void *ptr;
int value;
struct rhash_head node;
};
@@ -78,9 +76,9 @@ static int __init test_rht_lookup(struct rhashtable *ht)
key);
return -EEXIST;
} else if (expected && obj) {
- if (obj->ptr != TEST_PTR || obj->value != i) {
- pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
- obj->ptr, TEST_PTR, obj->value, i);
+ if (obj->value != i) {
+ pr_warn("Test failed: Lookup value mismatch %u!=%u\n",
+ obj->value, i);
return -EINVAL;
}
}
@@ -153,7 +151,6 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
goto error;
}
- obj->ptr = TEST_PTR;
obj->value = i * 2;
err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
--
1.9.3
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH net-next 4/6] rhashtable-test: Do not allocate individual test objects
2015-04-30 22:37 [PATCH net-next 0/6] rhashtable self-test improvements Thomas Graf
` (2 preceding siblings ...)
2015-04-30 22:37 ` [PATCH net-next 3/6] rhashtable-test: Get rid of ptr in test_obj structure Thomas Graf
@ 2015-04-30 22:37 ` Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 5/6] rhashtable-test: Use walker to test bucket statistics Thomas Graf
` (2 subsequent siblings)
6 siblings, 0 replies; 8+ messages in thread
From: Thomas Graf @ 2015-04-30 22:37 UTC (permalink / raw)
To: davem; +Cc: netdev, herbert
By far the most expensive part of the selftest was the allocation
of entries. Using a static array allows to measure the rhashtable
operations.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
---
lib/test_rhashtable.c | 28 ++++++----------------------
1 file changed, 6 insertions(+), 22 deletions(-)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index d6d6719..935693e 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -49,6 +49,8 @@ struct test_obj {
struct rhash_head node;
};
+static struct test_obj array[MAX_ENTRIES];
+
static struct rhashtable_params test_rht_params = {
.head_offset = offsetof(struct test_obj, node),
.key_offset = offsetof(struct test_obj, value),
@@ -129,9 +131,7 @@ static void test_bucket_stats(struct rhashtable *ht, bool quiet)
static s64 __init test_rhashtable(struct rhashtable *ht)
{
- struct bucket_table *tbl;
struct test_obj *obj;
- struct rhash_head *pos, *next;
int err;
unsigned int i;
s64 start, end;
@@ -143,21 +143,13 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
pr_info(" Adding %d keys\n", entries);
start = ktime_get_ns();
for (i = 0; i < entries; i++) {
- struct test_obj *obj;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj) {
- err = -ENOMEM;
- goto error;
- }
+ struct test_obj *obj = &array[i];
obj->value = i * 2;
err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
- if (err) {
- kfree(obj);
- goto error;
- }
+ if (err)
+ return err;
}
rcu_read_lock();
@@ -177,21 +169,12 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
BUG_ON(!obj);
rhashtable_remove_fast(ht, &obj->node, test_rht_params);
- kfree(obj);
}
end = ktime_get_ns();
pr_info(" Duration of test: %lld ns\n", end - start);
return end - start;
-
-error:
- tbl = rht_dereference_rcu(ht->tbl, ht);
- for (i = 0; i < tbl->size; i++)
- rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
- kfree(obj);
-
- return err;
}
static struct rhashtable ht;
@@ -214,6 +197,7 @@ static int __init test_rht_init(void)
s64 time;
pr_info("Test %02d:\n", i);
+ memset(&array, 0, sizeof(array));
err = rhashtable_init(&ht, &test_rht_params);
if (err < 0) {
pr_warn("Test failed: Unable to initialize hashtable: %d\n",
--
1.9.3
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH net-next 5/6] rhashtable-test: Use walker to test bucket statistics
2015-04-30 22:37 [PATCH net-next 0/6] rhashtable self-test improvements Thomas Graf
` (3 preceding siblings ...)
2015-04-30 22:37 ` [PATCH net-next 4/6] rhashtable-test: Do not allocate individual test objects Thomas Graf
@ 2015-04-30 22:37 ` Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 6/6] rhashtable-test: Detect insertion failures Thomas Graf
2015-05-04 3:09 ` [PATCH net-next 0/6] rhashtable self-test improvements David Miller
6 siblings, 0 replies; 8+ messages in thread
From: Thomas Graf @ 2015-04-30 22:37 UTC (permalink / raw)
To: davem; +Cc: netdev, herbert
As resizes may continue to run in the background, use walker to
ensure we see all entries. Also print the encountered number
of rehashes queued up while traversing.
This may lead to warnings due to entries being seen multiple
times. We consider them non-fatal.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
---
lib/test_rhashtable.c | 60 +++++++++++++++++++++++++--------------------------
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 935693e..3a9a3d9 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -89,41 +89,43 @@ static int __init test_rht_lookup(struct rhashtable *ht)
return 0;
}
-static void test_bucket_stats(struct rhashtable *ht, bool quiet)
+static void test_bucket_stats(struct rhashtable *ht)
{
- unsigned int cnt, rcu_cnt, i, total = 0;
+ unsigned int err, total = 0, chain_len = 0;
+ struct rhashtable_iter hti;
struct rhash_head *pos;
- struct test_obj *obj;
- struct bucket_table *tbl;
- tbl = rht_dereference_rcu(ht->tbl, ht);
- for (i = 0; i < tbl->size; i++) {
- rcu_cnt = cnt = 0;
+ err = rhashtable_walk_init(ht, &hti);
+ if (err) {
+ pr_warn("Test failed: allocation error");
+ return;
+ }
- if (!quiet)
- pr_info(" [%#4x/%u]", i, tbl->size);
+ err = rhashtable_walk_start(&hti);
+ if (err && err != -EAGAIN) {
+ pr_warn("Test failed: iterator failed: %d\n", err);
+ return;
+ }
- rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
- cnt++;
- total++;
- if (!quiet)
- pr_cont(" [%p],", obj);
+ while ((pos = rhashtable_walk_next(&hti))) {
+ if (PTR_ERR(pos) == -EAGAIN) {
+ pr_info("Info: encountered resize\n");
+ chain_len++;
+ continue;
+ } else if (IS_ERR(pos)) {
+ pr_warn("Test failed: rhashtable_walk_next() error: %ld\n",
+ PTR_ERR(pos));
+ break;
}
- rht_for_each_entry_rcu(obj, pos, tbl, i, node)
- rcu_cnt++;
-
- if (rcu_cnt != cnt)
- pr_warn("Test failed: Chain count mismach %d != %d",
- cnt, rcu_cnt);
-
- if (!quiet)
- pr_cont("\n [%#x] first element: %p, chain length: %u\n",
- i, tbl->buckets[i], cnt);
+ total++;
}
- pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n",
- total, atomic_read(&ht->nelems), entries);
+ rhashtable_walk_stop(&hti);
+ rhashtable_walk_exit(&hti);
+
+ pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n",
+ total, atomic_read(&ht->nelems), entries, chain_len);
if (total != atomic_read(&ht->nelems) || total != entries)
pr_warn("Test failed: Total count mismatch ^^^");
@@ -152,14 +154,12 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
return err;
}
+ test_bucket_stats(ht);
rcu_read_lock();
- test_bucket_stats(ht, true);
test_rht_lookup(ht);
rcu_read_unlock();
- rcu_read_lock();
- test_bucket_stats(ht, true);
- rcu_read_unlock();
+ test_bucket_stats(ht);
pr_info(" Deleting %d keys\n", entries);
for (i = 0; i < entries; i++) {
--
1.9.3
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [PATCH net-next 6/6] rhashtable-test: Detect insertion failures
2015-04-30 22:37 [PATCH net-next 0/6] rhashtable self-test improvements Thomas Graf
` (4 preceding siblings ...)
2015-04-30 22:37 ` [PATCH net-next 5/6] rhashtable-test: Use walker to test bucket statistics Thomas Graf
@ 2015-04-30 22:37 ` Thomas Graf
2015-05-04 3:09 ` [PATCH net-next 0/6] rhashtable self-test improvements David Miller
6 siblings, 0 replies; 8+ messages in thread
From: Thomas Graf @ 2015-04-30 22:37 UTC (permalink / raw)
To: davem; +Cc: netdev, herbert
Account for failed inserts due to memory pressure or EBUSY and
ignore failed entries during the consistency check.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
---
lib/test_rhashtable.c | 26 ++++++++++++++++++++------
1 file changed, 20 insertions(+), 6 deletions(-)
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 3a9a3d9..6893e57 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -21,8 +21,8 @@
#include <linux/rhashtable.h>
#include <linux/slab.h>
-
#define MAX_ENTRIES 1000000
+#define TEST_INSERT_FAIL INT_MAX
static int entries = 50000;
module_param(entries, int, 0);
@@ -68,6 +68,9 @@ static int __init test_rht_lookup(struct rhashtable *ht)
bool expected = !(i % 2);
u32 key = i;
+ if (array[i / 2].value == TEST_INSERT_FAIL)
+ expected = false;
+
obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
if (expected && !obj) {
@@ -135,7 +138,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
{
struct test_obj *obj;
int err;
- unsigned int i;
+ unsigned int i, insert_fails = 0;
s64 start, end;
/*
@@ -150,10 +153,19 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
obj->value = i * 2;
err = rhashtable_insert_fast(ht, &obj->node, test_rht_params);
- if (err)
+ if (err == -ENOMEM || err == -EBUSY) {
+ /* Mark failed inserts but continue */
+ obj->value = TEST_INSERT_FAIL;
+ insert_fails++;
+ } else if (err) {
return err;
+ }
}
+ if (insert_fails)
+ pr_info(" %u insertions failed due to memory pressure\n",
+ insert_fails);
+
test_bucket_stats(ht);
rcu_read_lock();
test_rht_lookup(ht);
@@ -165,10 +177,12 @@ static s64 __init test_rhashtable(struct rhashtable *ht)
for (i = 0; i < entries; i++) {
u32 key = i * 2;
- obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
- BUG_ON(!obj);
+ if (array[i].value != TEST_INSERT_FAIL) {
+ obj = rhashtable_lookup_fast(ht, &key, test_rht_params);
+ BUG_ON(!obj);
- rhashtable_remove_fast(ht, &obj->node, test_rht_params);
+ rhashtable_remove_fast(ht, &obj->node, test_rht_params);
+ }
}
end = ktime_get_ns();
--
1.9.3
^ permalink raw reply related [flat|nested] 8+ messages in thread
* Re: [PATCH net-next 0/6] rhashtable self-test improvements
2015-04-30 22:37 [PATCH net-next 0/6] rhashtable self-test improvements Thomas Graf
` (5 preceding siblings ...)
2015-04-30 22:37 ` [PATCH net-next 6/6] rhashtable-test: Detect insertion failures Thomas Graf
@ 2015-05-04 3:09 ` David Miller
6 siblings, 0 replies; 8+ messages in thread
From: David Miller @ 2015-05-04 3:09 UTC (permalink / raw)
To: tgraf; +Cc: netdev, herbert
From: Thomas Graf <tgraf@suug.ch>
Date: Thu, 30 Apr 2015 22:37:39 +0000
> This series improves the rhashtable self-test to:
> * Avoid allocation of test objects
> * Measure the time of test runs
> * Use the iterator to walk the table for consistency
> * Account for failed insertions due to memory pressure or
> utilization pressure
> * Ignore failed insertions when checking for consistency
This looks fine, series applied, thanks Thomas.
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2015-05-04 3:09 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-04-30 22:37 [PATCH net-next 0/6] rhashtable self-test improvements Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 1/6] rhashtable-test: Remove unused TEST_NEXPANDS Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 2/6] rhashtable-test: Measure time to insert, remove & traverse entries Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 3/6] rhashtable-test: Get rid of ptr in test_obj structure Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 4/6] rhashtable-test: Do not allocate individual test objects Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 5/6] rhashtable-test: Use walker to test bucket statistics Thomas Graf
2015-04-30 22:37 ` [PATCH net-next 6/6] rhashtable-test: Detect insertion failures Thomas Graf
2015-05-04 3:09 ` [PATCH net-next 0/6] rhashtable self-test improvements David Miller
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).