* [PATCH net-next 1/3] samples/bpf: add bpf map stress test
From: Alexei Starovoitov @ 2016-03-08 23:07 UTC
  To: David S. Miller; +Cc: Daniel Borkmann, netdev, linux-kernel, kernel-team

This test calls bpf programs from different contexts: from inside of
slub, from rcu, from pretty much everywhere, since it kprobes all
spin_lock functions. It stresses the bpf hash and percpu map
pre-allocation and deallocation logic and the call_rcu mechanisms.
The user space part adds more stress by walking and deleting map
elements.
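
(For context, not part of the patch: element frees in these maps are
deferred through call_rcu, so the pattern being exercised looks
roughly like the sketch below; the names are illustrative rather than
the actual htab code.)

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct elem {
		struct rcu_head rcu;
		long value;
	};

	static void elem_free_rcu(struct rcu_head *head)
	{
		/* runs only after a grace period, when no RCU reader
		 * can still hold a pointer to the element
		 */
		kfree(container_of(head, struct elem, rcu));
	}

	static void elem_delete(struct elem *e)
	{
		/* unlink e from the table first (not shown), then
		 * defer the actual free
		 */
		call_rcu(&e->rcu, elem_free_rcu);
	}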

Note that due to the nature of bpf_load.c, the earlier kprobe+bpf
programs are already active while the loader loads new programs,
creates new kprobes and attaches them.
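
Per program, the loader's flow is roughly the following (a simplified
sketch with approximate names, not the literal bpf_load.c code; error
handling omitted):

	prog_fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, insns, insns_cnt,
				license, kern_version);
	/* register a new kprobe on the target function ... */
	write(kprobe_events_fd, "p:<func> <func>", len);
	/* ... and attach the program through a perf event; it starts
	 * firing immediately, before the remaining programs load
	 */
	event_fd = perf_event_open(&attr, -1, 0, -1, 0);
	ioctl(event_fd, PERF_EVENT_IOC_ENABLE, 0);
	ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);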

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 samples/bpf/Makefile        |  4 +++
 samples/bpf/spintest_kern.c | 59 +++++++++++++++++++++++++++++++++++++++++++++
 samples/bpf/spintest_user.c | 50 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 113 insertions(+)
 create mode 100644 samples/bpf/spintest_kern.c
 create mode 100644 samples/bpf/spintest_user.c

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index c4f8ae0c8afe..75a13e742ab4 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -17,6 +17,7 @@ hostprogs-y += tracex6
 hostprogs-y += trace_output
 hostprogs-y += lathist
 hostprogs-y += offwaketime
+hostprogs-y += spintest
 
 test_verifier-objs := test_verifier.o libbpf.o
 test_maps-objs := test_maps.o libbpf.o
@@ -34,6 +35,7 @@ tracex6-objs := bpf_load.o libbpf.o tracex6_user.o
 trace_output-objs := bpf_load.o libbpf.o trace_output_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o
 offwaketime-objs := bpf_load.o libbpf.o offwaketime_user.o
+spintest-objs := bpf_load.o libbpf.o spintest_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -50,6 +52,7 @@ always += trace_output_kern.o
 always += tcbpf1_kern.o
 always += lathist_kern.o
 always += offwaketime_kern.o
+always += spintest_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 
@@ -67,6 +70,7 @@ HOSTLOADLIBES_tracex6 += -lelf
 HOSTLOADLIBES_trace_output += -lelf -lrt
 HOSTLOADLIBES_lathist += -lelf
 HOSTLOADLIBES_offwaketime += -lelf
+HOSTLOADLIBES_spintest += -lelf
 
 # point this to your LLVM backend with bpf support
 LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c
new file mode 100644
index 000000000000..ef8ac33bb2e9
--- /dev/null
+++ b/samples/bpf/spintest_kern.c
@@ -0,0 +1,59 @@
+/* Copyright (c) 2016, Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+struct bpf_map_def SEC("maps") my_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(long),
+	.value_size = sizeof(long),
+	.max_entries = 1024,
+};
+struct bpf_map_def SEC("maps") my_map2 = {
+	.type = BPF_MAP_TYPE_PERCPU_HASH,
+	.key_size = sizeof(long),
+	.value_size = sizeof(long),
+	.max_entries = 1024,
+};
+
+#define PROG(foo) \
+int foo(struct pt_regs *ctx) \
+{ \
+	long v = ctx->ip, *val; \
+\
+	val = bpf_map_lookup_elem(&my_map, &v); \
+	bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
+	bpf_map_update_elem(&my_map2, &v, &v, BPF_ANY); \
+	bpf_map_delete_elem(&my_map2, &v); \
+	return 0; \
+}
+
+/* add kprobes to all possible *spin* functions */
+SEC("kprobe/spin_unlock")PROG(p1)
+SEC("kprobe/spin_lock")PROG(p2)
+SEC("kprobe/mutex_spin_on_owner")PROG(p3)
+SEC("kprobe/rwsem_spin_on_owner")PROG(p4)
+SEC("kprobe/spin_unlock_irqrestore")PROG(p5)
+SEC("kprobe/_raw_spin_unlock_irqrestore")PROG(p6)
+SEC("kprobe/_raw_spin_unlock_bh")PROG(p7)
+SEC("kprobe/_raw_spin_unlock")PROG(p8)
+SEC("kprobe/_raw_spin_lock_irqsave")PROG(p9)
+SEC("kprobe/_raw_spin_trylock_bh")PROG(p10)
+SEC("kprobe/_raw_spin_lock_irq")PROG(p11)
+SEC("kprobe/_raw_spin_trylock")PROG(p12)
+SEC("kprobe/_raw_spin_lock")PROG(p13)
+SEC("kprobe/_raw_spin_lock_bh")PROG(p14)
+/* and to inner bpf helpers */
+SEC("kprobe/htab_map_update_elem")PROG(p15)
+SEC("kprobe/__htab_percpu_map_update_elem")PROG(p16)
+SEC("kprobe/htab_map_alloc")PROG(p17)
+
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c
new file mode 100644
index 000000000000..311ede532230
--- /dev/null
+++ b/samples/bpf/spintest_user.c
@@ -0,0 +1,50 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/bpf.h>
+#include <string.h>
+#include <assert.h>
+#include <sys/resource.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+int main(int ac, char **argv)
+{
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	long key, next_key, value;
+	char filename[256];
+	struct ksym *sym;
+	int i;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	setrlimit(RLIMIT_MEMLOCK, &r);
+
+	if (load_kallsyms()) {
+		printf("failed to process /proc/kallsyms\n");
+		return 2;
+	}
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	for (i = 0; i < 5; i++) {
+		key = 0;
+		printf("kprobing funcs:");
+		while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0) {
+			bpf_lookup_elem(map_fd[0], &next_key, &value);
+			assert(next_key == value);
+			sym = ksym_search(value);
+			printf(" %s", sym->name);
+			key = next_key;
+		}
+		if (key)
+			printf("\n");
+		key = 0;
+		while (bpf_get_next_key(map_fd[0], &key, &next_key) == 0)
+			bpf_delete_elem(map_fd[0], &next_key);
+		sleep(1);
+	}
+
+	return 0;
+}
-- 
2.8.0.rc1


* [PATCH net-next 2/3] samples/bpf: stress test bpf_get_stackid
From: Alexei Starovoitov @ 2016-03-08 23:07 UTC
  To: David S. Miller; +Cc: Daniel Borkmann, netdev, linux-kernel, kernel-team

Increase stress by also calling bpf_get_stackid() from various
*spin* functions.
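
The patch only writes stacks into the map; a hypothetical user-space
reader (not included here), built on the wrappers from
samples/bpf/libbpf.h, would look roughly like this, assuming
stackmap_fd and stack_id come from the loader and a prior map walk:

	__u64 ips[PERF_MAX_STACK_DEPTH];
	int i;

	if (bpf_lookup_elem(stackmap_fd, &stack_id, ips) == 0)
		for (i = 0; i < PERF_MAX_STACK_DEPTH && ips[i]; i++)
			printf("  %llx\n", ips[i]);

Each value is an array of raw kernel instruction pointers,
PERF_MAX_STACK_DEPTH entries deep and zero-padded when the stack is
shorter.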

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 samples/bpf/spintest_kern.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c
index ef8ac33bb2e9..4b27619d91a4 100644
--- a/samples/bpf/spintest_kern.c
+++ b/samples/bpf/spintest_kern.c
@@ -8,6 +8,7 @@
 #include <linux/netdevice.h>
 #include <linux/version.h>
 #include <uapi/linux/bpf.h>
+#include <uapi/linux/perf_event.h>
 #include "bpf_helpers.h"
 
 struct bpf_map_def SEC("maps") my_map = {
@@ -23,6 +24,13 @@ struct bpf_map_def SEC("maps") my_map2 = {
 	.max_entries = 1024,
 };
 
+struct bpf_map_def SEC("maps") stackmap = {
+	.type = BPF_MAP_TYPE_STACK_TRACE,
+	.key_size = sizeof(u32),
+	.value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
+	.max_entries = 10000,
+};
+
 #define PROG(foo) \
 int foo(struct pt_regs *ctx) \
 { \
@@ -32,6 +40,7 @@ int foo(struct pt_regs *ctx) \
 	bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \
 	bpf_map_update_elem(&my_map2, &v, &v, BPF_ANY); \
 	bpf_map_delete_elem(&my_map2, &v); \
+	bpf_get_stackid(ctx, &stackmap, BPF_F_REUSE_STACKID); \
 	return 0; \
 }
 
-- 
2.8.0.rc1


* [PATCH net-next 3/3] samples/bpf: add map performance test
From: Alexei Starovoitov @ 2016-03-08 23:07 UTC
  To: David S. Miller; +Cc: Daniel Borkmann, netdev, linux-kernel, kernel-team

Performance tests for the hash map and per-cpu hash map, with and
without pre-allocation.
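
The user space part selects tests through an argv[1] bitmask (bit 0
HASH_PREALLOC, bit 1 PERCPU_HASH_PREALLOC, bit 2 HASH_KMALLOC, bit 3
PERCPU_HASH_KMALLOC) and forks one process per cpu (argv[2], default
8), each triggering its kprobe via a getuid-family syscall loop. An
illustrative invocation:

	./map_perf_test 3 4

runs only the two pre-allocated hash tests on 4 cpus.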

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
---
 samples/bpf/Makefile             |   4 +
 samples/bpf/map_perf_test_kern.c | 100 +++++++++++++++++++++++++
 samples/bpf/map_perf_test_user.c | 156 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 260 insertions(+)
 create mode 100644 samples/bpf/map_perf_test_kern.c
 create mode 100644 samples/bpf/map_perf_test_user.c

diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 75a13e742ab4..502c9fc8db85 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -18,6 +18,7 @@ hostprogs-y += trace_output
 hostprogs-y += lathist
 hostprogs-y += offwaketime
 hostprogs-y += spintest
+hostprogs-y += map_perf_test
 
 test_verifier-objs := test_verifier.o libbpf.o
 test_maps-objs := test_maps.o libbpf.o
@@ -36,6 +37,7 @@ trace_output-objs := bpf_load.o libbpf.o trace_output_user.o
 lathist-objs := bpf_load.o libbpf.o lathist_user.o
 offwaketime-objs := bpf_load.o libbpf.o offwaketime_user.o
 spintest-objs := bpf_load.o libbpf.o spintest_user.o
+map_perf_test-objs := bpf_load.o libbpf.o map_perf_test_user.o
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
@@ -53,6 +55,7 @@ always += tcbpf1_kern.o
 always += lathist_kern.o
 always += offwaketime_kern.o
 always += spintest_kern.o
+always += map_perf_test_kern.o
 
 HOSTCFLAGS += -I$(objtree)/usr/include
 
@@ -71,6 +74,7 @@ HOSTLOADLIBES_trace_output += -lelf -lrt
 HOSTLOADLIBES_lathist += -lelf
 HOSTLOADLIBES_offwaketime += -lelf
 HOSTLOADLIBES_spintest += -lelf
+HOSTLOADLIBES_map_perf_test += -lelf -lrt
 
 # point this to your LLVM backend with bpf support
 LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
new file mode 100644
index 000000000000..311538e5a701
--- /dev/null
+++ b/samples/bpf/map_perf_test_kern.c
@@ -0,0 +1,100 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include "bpf_helpers.h"
+
+#define MAX_ENTRIES 1000
+
+struct bpf_map_def SEC("maps") hash_map = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(u32),
+	.value_size = sizeof(long),
+	.max_entries = MAX_ENTRIES,
+};
+
+struct bpf_map_def SEC("maps") percpu_hash_map = {
+	.type = BPF_MAP_TYPE_PERCPU_HASH,
+	.key_size = sizeof(u32),
+	.value_size = sizeof(long),
+	.max_entries = MAX_ENTRIES,
+};
+
+struct bpf_map_def SEC("maps") hash_map_alloc = {
+	.type = BPF_MAP_TYPE_HASH,
+	.key_size = sizeof(u32),
+	.value_size = sizeof(long),
+	.max_entries = MAX_ENTRIES,
+	.map_flags = BPF_F_NO_PREALLOC,
+};
+
+struct bpf_map_def SEC("maps") percpu_hash_map_alloc = {
+	.type = BPF_MAP_TYPE_PERCPU_HASH,
+	.key_size = sizeof(u32),
+	.value_size = sizeof(long),
+	.max_entries = MAX_ENTRIES,
+	.map_flags = BPF_F_NO_PREALLOC,
+};
+
+SEC("kprobe/sys_getuid")
+int stress_hmap(struct pt_regs *ctx)
+{
+	u32 key = bpf_get_current_pid_tgid();
+	long init_val = 1;
+	long *value;
+
+	bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
+	value = bpf_map_lookup_elem(&hash_map, &key);
+	if (value)
+		bpf_map_delete_elem(&hash_map, &key);
+	return 0;
+}
+
+SEC("kprobe/sys_geteuid")
+int stress_percpu_hmap(struct pt_regs *ctx)
+{
+	u32 key = bpf_get_current_pid_tgid();
+	long init_val = 1;
+	long *value;
+
+	bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
+	value = bpf_map_lookup_elem(&percpu_hash_map, &key);
+	if (value)
+		bpf_map_delete_elem(&percpu_hash_map, &key);
+	return 0;
+}
+SEC("kprobe/sys_getgid")
+int stress_hmap_alloc(struct pt_regs *ctx)
+{
+	u32 key = bpf_get_current_pid_tgid();
+	long init_val = 1;
+	long *value;
+
+	bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
+	value = bpf_map_lookup_elem(&hash_map_alloc, &key);
+	if (value)
+		bpf_map_delete_elem(&hash_map_alloc, &key);
+	return 0;
+}
+
+SEC("kprobe/sys_getegid")
+int stress_percpu_hmap_alloc(struct pt_regs *ctx)
+{
+	u32 key = bpf_get_current_pid_tgid();
+	long init_val = 1;
+	long *value;
+
+	bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
+	value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
+	if (value)
+		bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
+	return 0;
+}
+char _license[] SEC("license") = "GPL";
+u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
new file mode 100644
index 000000000000..95af56ec5739
--- /dev/null
+++ b/samples/bpf/map_perf_test_user.c
@@ -0,0 +1,156 @@
+/* Copyright (c) 2016 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <asm/unistd.h>
+#include <unistd.h>
+#include <assert.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <linux/bpf.h>
+#include <string.h>
+#include <time.h>
+#include <sys/resource.h>
+#include "libbpf.h"
+#include "bpf_load.h"
+
+#define MAX_CNT 1000000
+
+static __u64 time_get_ns(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+#define HASH_PREALLOC		(1 << 0)
+#define PERCPU_HASH_PREALLOC	(1 << 1)
+#define HASH_KMALLOC		(1 << 2)
+#define PERCPU_HASH_KMALLOC	(1 << 3)
+
+static int test_flags = ~0;
+
+static void test_hash_prealloc(int cpu)
+{
+	__u64 start_time;
+	int i;
+
+	start_time = time_get_ns();
+	for (i = 0; i < MAX_CNT; i++)
+		syscall(__NR_getuid);
+	printf("%d:hash_map_perf pre-alloc %lld events per sec\n",
+	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_percpu_hash_prealloc(int cpu)
+{
+	__u64 start_time;
+	int i;
+
+	start_time = time_get_ns();
+	for (i = 0; i < MAX_CNT; i++)
+		syscall(__NR_geteuid);
+	printf("%d:percpu_hash_map_perf pre-alloc %lld events per sec\n",
+	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_hash_kmalloc(int cpu)
+{
+	__u64 start_time;
+	int i;
+
+	start_time = time_get_ns();
+	for (i = 0; i < MAX_CNT; i++)
+		syscall(__NR_getgid);
+	printf("%d:hash_map_perf kmalloc %lld events per sec\n",
+	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void test_percpu_hash_kmalloc(int cpu)
+{
+	__u64 start_time;
+	int i;
+
+	start_time = time_get_ns();
+	for (i = 0; i < MAX_CNT; i++)
+		syscall(__NR_getegid);
+	printf("%d:percpu_hash_map_perf kmalloc %lld events per sec\n",
+	       cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+}
+
+static void loop(int cpu)
+{
+	cpu_set_t cpuset;
+
+	CPU_ZERO(&cpuset);
+	CPU_SET(cpu, &cpuset);
+	sched_setaffinity(0, sizeof(cpuset), &cpuset);
+
+	if (test_flags & HASH_PREALLOC)
+		test_hash_prealloc(cpu);
+
+	if (test_flags & PERCPU_HASH_PREALLOC)
+		test_percpu_hash_prealloc(cpu);
+
+	if (test_flags & HASH_KMALLOC)
+		test_hash_kmalloc(cpu);
+
+	if (test_flags & PERCPU_HASH_KMALLOC)
+		test_percpu_hash_kmalloc(cpu);
+}
+
+static void run_perf_test(int tasks)
+{
+	pid_t pid[tasks];
+	int i;
+
+	for (i = 0; i < tasks; i++) {
+		pid[i] = fork();
+		if (pid[i] == 0) {
+			loop(i);
+			exit(0);
+		} else if (pid[i] == -1) {
+			printf("couldn't spawn #%d process\n", i);
+			exit(1);
+		}
+	}
+	for (i = 0; i < tasks; i++) {
+		int status;
+
+		assert(waitpid(pid[i], &status, 0) == pid[i]);
+		assert(status == 0);
+	}
+}
+
+int main(int argc, char **argv)
+{
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+	char filename[256];
+	int num_cpu = 8;
+
+	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+	setrlimit(RLIMIT_MEMLOCK, &r);
+
+	if (argc > 1)
+		test_flags = atoi(argv[1]) ? : test_flags;
+
+	if (argc > 2)
+		num_cpu = atoi(argv[2]) ? : num_cpu;
+
+	if (load_bpf_file(filename)) {
+		printf("%s", bpf_log_buf);
+		return 1;
+	}
+
+	run_perf_test(num_cpu);
+
+	return 0;
+}
-- 
2.8.0.rc1


* Re: [PATCH net-next 1/3] samples/bpf: add bpf map stress test
From: David Miller @ 2016-03-09  4:22 UTC
  To: ast; +Cc: daniel, netdev, linux-kernel, kernel-team

From: Alexei Starovoitov <ast@fb.com>
Date: Tue, 8 Mar 2016 15:07:52 -0800

> This test calls bpf programs from different contexts: from inside of
> slub, from rcu, from pretty much everywhere, since it kprobes all
> spin_lock functions. It stresses the bpf hash and percpu map
> pre-allocation and deallocation logic and the call_rcu mechanisms.
> The user space part adds more stress by walking and deleting map
> elements.
> 
> Note that due to the nature of bpf_load.c, the earlier kprobe+bpf
> programs are already active while the loader loads new programs,
> creates new kprobes and attaches them.
> 
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Applied.


* Re: [PATCH net-next 2/3] samples/bpf: stress test bpf_get_stackid
From: David Miller @ 2016-03-09  4:22 UTC
  To: ast; +Cc: daniel, netdev, linux-kernel, kernel-team

From: Alexei Starovoitov <ast@fb.com>
Date: Tue, 8 Mar 2016 15:07:53 -0800

> Increase stress by also calling bpf_get_stackid() from various
> *spin* functions.
> 
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Applied.


* Re: [PATCH net-next 3/3] samples/bpf: add map performance test
From: David Miller @ 2016-03-09  4:22 UTC
  To: ast; +Cc: daniel, netdev, linux-kernel, kernel-team

From: Alexei Starovoitov <ast@fb.com>
Date: Tue, 8 Mar 2016 15:07:54 -0800

> Performance tests for the hash map and per-cpu hash map, with and
> without pre-allocation.
> 
> Signed-off-by: Alexei Starovoitov <ast@kernel.org>

Applied.
