* [PATCH net-next 1/5] net: move net_get_random_once to lib
2015-10-07 13:43 [PATCH net-next 0/5] BPF/random32 updates Daniel Borkmann
@ 2015-10-07 13:43 ` Daniel Borkmann
2015-10-07 13:43 ` [PATCH net-next 2/5] once: make helper generic for calling function once Daniel Borkmann
` (3 subsequent siblings)
4 siblings, 0 replies; 11+ messages in thread
From: Daniel Borkmann @ 2015-10-07 13:43 UTC (permalink / raw)
To: davem; +Cc: hannes, ast, netdev, Daniel Borkmann
From: Hannes Frederic Sowa <hannes@stressinduktion.org>
There's no good reason why users outside of networking should not
be using this facility, f.e. for initializing their seeds.
Therefore, make it accessible from there as get_random_once().
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
include/linux/net.h | 21 ++++----------------
include/linux/once.h | 24 +++++++++++++++++++++++
lib/Makefile | 3 ++-
lib/once.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++
net/core/utils.c | 49 -----------------------------------------------
5 files changed, 84 insertions(+), 67 deletions(-)
create mode 100644 include/linux/once.h
create mode 100644 lib/once.c
diff --git a/include/linux/net.h b/include/linux/net.h
index 049d4b0..70ac5e2 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,7 +24,8 @@
#include <linux/fcntl.h> /* For O_CLOEXEC and O_NONBLOCK */
#include <linux/kmemcheck.h>
#include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/once.h>
+
#include <uapi/linux/net.h>
struct poll_table_struct;
@@ -250,22 +251,8 @@ do { \
} while (0)
#endif
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
- struct static_key *done_key);
-
-#define net_get_random_once(buf, nbytes) \
- ({ \
- bool ___ret = false; \
- static bool ___done = false; \
- static struct static_key ___once_key = \
- STATIC_KEY_INIT_TRUE; \
- if (static_key_true(&___once_key)) \
- ___ret = __net_get_random_once(buf, \
- nbytes, \
- &___done, \
- &___once_key); \
- ___ret; \
- })
+#define net_get_random_once(buf, nbytes) \
+ get_random_once((buf), (nbytes))
int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
size_t num, size_t len);
diff --git a/include/linux/once.h b/include/linux/once.h
new file mode 100644
index 0000000..2a83b53
--- /dev/null
+++ b/include/linux/once.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_ONCE_H
+#define _LINUX_ONCE_H
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+
+bool __get_random_once(void *buf, int nbytes, bool *done,
+ struct static_key *once_key);
+
+#define get_random_once(buf, nbytes) \
+ ({ \
+ bool ___ret = false; \
+ static bool ___done = false; \
+ static struct static_key ___once_key = \
+ STATIC_KEY_INIT_TRUE; \
+ if (static_key_true(&___once_key)) \
+ ___ret = __get_random_once((buf), \
+ (nbytes), \
+ &___done, \
+ &___once_key); \
+ ___ret; \
+ })
+
+#endif /* _LINUX_ONCE_H */
diff --git a/lib/Makefile b/lib/Makefile
index 13a7c6a..8de3b01 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,8 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
+ percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+ once.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
diff --git a/lib/once.c b/lib/once.c
new file mode 100644
index 0000000..2d5a7de
--- /dev/null
+++ b/lib/once.c
@@ -0,0 +1,54 @@
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/once.h>
+#include <linux/random.h>
+
+struct __random_once_work {
+ struct work_struct work;
+ struct static_key *key;
+};
+
+static void __random_once_deferred(struct work_struct *w)
+{
+ struct __random_once_work *work;
+
+ work = container_of(w, struct __random_once_work, work);
+ BUG_ON(!static_key_enabled(work->key));
+ static_key_slow_dec(work->key);
+ kfree(work);
+}
+
+static void __random_once_disable_jump(struct static_key *key)
+{
+ struct __random_once_work *w;
+
+ w = kmalloc(sizeof(*w), GFP_ATOMIC);
+ if (!w)
+ return;
+
+ INIT_WORK(&w->work, __random_once_deferred);
+ w->key = key;
+ schedule_work(&w->work);
+}
+
+bool __get_random_once(void *buf, int nbytes, bool *done,
+ struct static_key *once_key)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock, flags);
+ if (*done) {
+ spin_unlock_irqrestore(&lock, flags);
+ return false;
+ }
+
+ get_random_bytes(buf, nbytes);
+ *done = true;
+ spin_unlock_irqrestore(&lock, flags);
+
+ __random_once_disable_jump(once_key);
+
+ return true;
+}
+EXPORT_SYMBOL(__get_random_once);
diff --git a/net/core/utils.c b/net/core/utils.c
index 3dffce9..3d17ca8 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -348,52 +348,3 @@ void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
}
}
EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
-
-struct __net_random_once_work {
- struct work_struct work;
- struct static_key *key;
-};
-
-static void __net_random_once_deferred(struct work_struct *w)
-{
- struct __net_random_once_work *work =
- container_of(w, struct __net_random_once_work, work);
- BUG_ON(!static_key_enabled(work->key));
- static_key_slow_dec(work->key);
- kfree(work);
-}
-
-static void __net_random_once_disable_jump(struct static_key *key)
-{
- struct __net_random_once_work *w;
-
- w = kmalloc(sizeof(*w), GFP_ATOMIC);
- if (!w)
- return;
-
- INIT_WORK(&w->work, __net_random_once_deferred);
- w->key = key;
- schedule_work(&w->work);
-}
-
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
- struct static_key *once_key)
-{
- static DEFINE_SPINLOCK(lock);
- unsigned long flags;
-
- spin_lock_irqsave(&lock, flags);
- if (*done) {
- spin_unlock_irqrestore(&lock, flags);
- return false;
- }
-
- get_random_bytes(buf, nbytes);
- *done = true;
- spin_unlock_irqrestore(&lock, flags);
-
- __net_random_once_disable_jump(once_key);
-
- return true;
-}
-EXPORT_SYMBOL(__net_get_random_once);
--
1.9.3
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH net-next 2/5] once: make helper generic for calling function once
2015-10-07 13:43 [PATCH net-next 0/5] BPF/random32 updates Daniel Borkmann
2015-10-07 13:43 ` [PATCH net-next 1/5] net: move net_get_random_once to lib Daniel Borkmann
@ 2015-10-07 13:43 ` Daniel Borkmann
2015-10-07 16:16 ` Alexei Starovoitov
2015-10-07 13:43 ` [PATCH net-next 3/5] random32: add prandom_seed_full_state helper Daniel Borkmann
` (2 subsequent siblings)
4 siblings, 1 reply; 11+ messages in thread
From: Daniel Borkmann @ 2015-10-07 13:43 UTC (permalink / raw)
To: davem; +Cc: hannes, ast, netdev, Daniel Borkmann
From: Hannes Frederic Sowa <hannes@stressinduktion.org>
Make the get_random_once() helper generic enough, so that functions
in general would only be called once, where one user of this is then
net_get_random_once().
The only implementation specific call is to get_random_bytes(), all
the rest of this *_once() facility would be duplicated among different
subsystems otherwise. The new do_once() helper will be used by prandom()
later on, but might also be useful for other scenarios as well where a
one-time initialization in often-called, possibly fast-path code could
occur.
Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
---
include/linux/once.h | 25 ++++++++++++++++++-------
lib/once.c | 34 +++++++++++++++++++++-------------
2 files changed, 39 insertions(+), 20 deletions(-)
diff --git a/include/linux/once.h b/include/linux/once.h
index 2a83b53..f7a51d5 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -3,22 +3,33 @@
#include <linux/types.h>
#include <linux/jump_label.h>
+#include <linux/uio.h>
-bool __get_random_once(void *buf, int nbytes, bool *done,
- struct static_key *once_key);
+bool __do_once(void (*func)(void *arg), void *arg, bool *done,
+ struct static_key *once_key);
-#define get_random_once(buf, nbytes) \
+#define do_once(func, arg) \
({ \
bool ___ret = false; \
static bool ___done = false; \
static struct static_key ___once_key = \
STATIC_KEY_INIT_TRUE; \
if (static_key_true(&___once_key)) \
- ___ret = __get_random_once((buf), \
- (nbytes), \
- &___done, \
- &___once_key); \
+ ___ret = __do_once((func), (arg), \
+ &___done, \
+ &___once_key); \
___ret; \
})
+void get_random_once_kvec(void *arg);
+
+#define get_random_once(buf, nbytes) \
+ ({ \
+ struct kvec __v = { \
+ .iov_base = (buf), \
+ .iov_len = (nbytes), \
+ }; \
+ do_once(get_random_once_kvec, &__v); \
+ })
+
#endif /* _LINUX_ONCE_H */
diff --git a/lib/once.c b/lib/once.c
index 2d5a7de..1e62944 100644
--- a/lib/once.c
+++ b/lib/once.c
@@ -3,36 +3,36 @@
#include <linux/once.h>
#include <linux/random.h>
-struct __random_once_work {
+struct __once_work {
struct work_struct work;
struct static_key *key;
};
-static void __random_once_deferred(struct work_struct *w)
+static void __once_deferred(struct work_struct *w)
{
- struct __random_once_work *work;
+ struct __once_work *work;
- work = container_of(w, struct __random_once_work, work);
+ work = container_of(w, struct __once_work, work);
BUG_ON(!static_key_enabled(work->key));
static_key_slow_dec(work->key);
kfree(work);
}
-static void __random_once_disable_jump(struct static_key *key)
+static void __once_disable_jump(struct static_key *key)
{
- struct __random_once_work *w;
+ struct __once_work *w;
w = kmalloc(sizeof(*w), GFP_ATOMIC);
if (!w)
return;
- INIT_WORK(&w->work, __random_once_deferred);
+ INIT_WORK(&w->work, __once_deferred);
w->key = key;
schedule_work(&w->work);
}
-bool __get_random_once(void *buf, int nbytes, bool *done,
- struct static_key *once_key)
+bool __do_once(void (*func)(void *arg), void *arg, bool *done,
+ struct static_key *once_key)
{
static DEFINE_SPINLOCK(lock);
unsigned long flags;
@@ -43,12 +43,20 @@ bool __get_random_once(void *buf, int nbytes, bool *done,
return false;
}
- get_random_bytes(buf, nbytes);
+ func(arg);
*done = true;
spin_unlock_irqrestore(&lock, flags);
- __random_once_disable_jump(once_key);
-
+ __once_disable_jump(once_key);
return true;
}
-EXPORT_SYMBOL(__get_random_once);
+EXPORT_SYMBOL(__do_once);
+
+/* Helper function for once users. */
+void get_random_once_kvec(void *arg)
+{
+ struct kvec *v = arg;
+
+ get_random_bytes(v->iov_base, v->iov_len);
+}
+EXPORT_SYMBOL(get_random_once_kvec);
--
1.9.3
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/5] once: make helper generic for calling function once
2015-10-07 13:43 ` [PATCH net-next 2/5] once: make helper generic for calling function once Daniel Borkmann
@ 2015-10-07 16:16 ` Alexei Starovoitov
2015-10-07 21:00 ` Daniel Borkmann
0 siblings, 1 reply; 11+ messages in thread
From: Alexei Starovoitov @ 2015-10-07 16:16 UTC (permalink / raw)
To: Daniel Borkmann, davem; +Cc: hannes, netdev
On 10/7/15 6:43 AM, Daniel Borkmann wrote:
> From: Hannes Frederic Sowa<hannes@stressinduktion.org>
>
> Make the get_random_once() helper generic enough, so that functions
> in general would only be called once, where one user of this is then
> net_get_random_once().
>
> The only implementation specific call is to get_random_bytes(), all
> the rest of this *_once() facility would be duplicated among different
> subsystems otherwise. The new do_once() helper will be used by prandom()
> later on, but might also be useful for other scenarios as well where a
> one-time initialization in often-called, possibly fast-path code could
> occur.
>
> Signed-off-by: Hannes Frederic Sowa<hannes@stressinduktion.org>
> Signed-off-by: Daniel Borkmann<daniel@iogearbox.net>
> ---
> include/linux/once.h | 25 ++++++++++++++++++-------
> lib/once.c | 34 +++++++++++++++++++++-------------
> 2 files changed, 39 insertions(+), 20 deletions(-)
looking at the patch 1 the once.c file name really looked out of place,
but this patch makes it fit. Interesting helper, though
get_random_once_kvec() and kvec are not pretty, since they take extra
stack and being inited even when static_key is disabled.
Instead is it possible to split do_once into two parts then
your macro can have varargs and kvec/extra_helper can be removed like:
#define do_once(func, ...) \
({ \
bool ___ret = false; \
static bool ___done = false; \
static struct static_key ___once_key = \
STATIC_KEY_INIT_TRUE; \
if (static_key_true(&___once_key)) { \
__do_once_lock(&___done); \
func(##__VA_ARGS__); \
__do_once_unlock(&_done, &___once_key);\
} \
___ret; \
})
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 2/5] once: make helper generic for calling function once
2015-10-07 16:16 ` Alexei Starovoitov
@ 2015-10-07 21:00 ` Daniel Borkmann
0 siblings, 0 replies; 11+ messages in thread
From: Daniel Borkmann @ 2015-10-07 21:00 UTC (permalink / raw)
To: Alexei Starovoitov, davem; +Cc: hannes, netdev
On 10/07/2015 06:16 PM, Alexei Starovoitov wrote:
> On 10/7/15 6:43 AM, Daniel Borkmann wrote:
>> From: Hannes Frederic Sowa<hannes@stressinduktion.org>
>>
>> Make the get_random_once() helper generic enough, so that functions
>> in general would only be called once, where one user of this is then
>> net_get_random_once().
>>
>> The only implementation specific call is to get_random_bytes(), all
>> the rest of this *_once() facility would be duplicated among different
>> subsystems otherwise. The new do_once() helper will be used by prandom()
>> later on, but might also be useful for other scenarios as well where a
>> one-time initialization in often-called, possibly fast-path code could
>> occur.
>>
>> Signed-off-by: Hannes Frederic Sowa<hannes@stressinduktion.org>
>> Signed-off-by: Daniel Borkmann<daniel@iogearbox.net>
>> ---
>> include/linux/once.h | 25 ++++++++++++++++++-------
>> lib/once.c | 34 +++++++++++++++++++++-------------
>> 2 files changed, 39 insertions(+), 20 deletions(-)
>
> looking at the patch 1 the once.c file name really looked out of place,
> but this patch makes it fit. Interesting helper, though
> get_random_once_kvec() and kvec are not pretty, since they take extra
> stack and being inited even when static_key is disabled.
> Instead is it possible to split do_once into two parts then
> your macro can have varags and kvec/extra_helper can be removed like:
> #define do_once(func, ...) \
> ({ \
> bool ___ret = false; \
> static bool ___done = false; \
> static struct static_key ___once_key = \
> STATIC_KEY_INIT_TRUE; \
> if (static_key_true(&___once_key)) { \
> __do_once_lock(&___done); \
> func(##__VA_ARGS__); \
> __do_once_unlock(&_done, &___once_key);\
> } \
> ___ret; \
> })
Thanks, good point, I do like it! After reworking this, the outcome of
the new DO_ONCE() is a bit different than above. I'll send out v2 very
soon to show the code result.
Thanks,
Daniel
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH net-next 3/5] random32: add prandom_seed_full_state helper
2015-10-07 13:43 [PATCH net-next 0/5] BPF/random32 updates Daniel Borkmann
2015-10-07 13:43 ` [PATCH net-next 1/5] net: move net_get_random_once to lib Daniel Borkmann
2015-10-07 13:43 ` [PATCH net-next 2/5] once: make helper generic for calling function once Daniel Borkmann
@ 2015-10-07 13:43 ` Daniel Borkmann
2015-10-07 13:43 ` [PATCH net-next 4/5] random32: add prandom_init_once helper for own rngs Daniel Borkmann
2015-10-07 13:43 ` [PATCH net-next 5/5] bpf: split state from prandom_u32() and consolidate {c,e}BPF prngs Daniel Borkmann
4 siblings, 0 replies; 11+ messages in thread
From: Daniel Borkmann @ 2015-10-07 13:43 UTC (permalink / raw)
To: davem; +Cc: hannes, ast, netdev, Daniel Borkmann
Factor out the full reseed handling code that populates the state
through get_random_bytes() and runs prandom_warmup(). The resulting
prandom_seed_full_state() will be used later on in more than the
current __prandom_reseed() user. Fix also two minor whitespace
issues along the way.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
---
lib/random32.c | 37 +++++++++++++++++++++----------------
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/lib/random32.c b/lib/random32.c
index 0bee183..36c09fb 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -181,7 +181,7 @@ void prandom_seed(u32 entropy)
* No locking on the CPUs, but then somewhat random results are, well,
* expected.
*/
- for_each_possible_cpu (i) {
+ for_each_possible_cpu(i) {
struct rnd_state *state = &per_cpu(net_rand_state, i);
state->s1 = __seed(state->s1 ^ entropy, 2U);
@@ -201,7 +201,7 @@ static int __init prandom_init(void)
prandom_state_selftest();
for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state,i);
+ struct rnd_state *state = &per_cpu(net_rand_state, i);
u32 weak_seed = (i + jiffies) ^ random_get_entropy();
prandom_seed_early(state, weak_seed, true);
@@ -238,13 +238,30 @@ static void __init __prandom_start_seed_timer(void)
add_timer(&seed_timer);
}
+static void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
+ u32 seeds[4];
+
+ get_random_bytes(&seeds, sizeof(seeds));
+ state->s1 = __seed(seeds[0], 2U);
+ state->s2 = __seed(seeds[1], 8U);
+ state->s3 = __seed(seeds[2], 16U);
+ state->s4 = __seed(seeds[3], 128U);
+
+ prandom_warmup(state);
+ }
+}
+
/*
* Generate better values after random number generator
* is fully initialized.
*/
static void __prandom_reseed(bool late)
{
- int i;
unsigned long flags;
static bool latch = false;
static DEFINE_SPINLOCK(lock);
@@ -266,19 +283,7 @@ static void __prandom_reseed(bool late)
goto out;
latch = true;
-
- for_each_possible_cpu(i) {
- struct rnd_state *state = &per_cpu(net_rand_state,i);
- u32 seeds[4];
-
- get_random_bytes(&seeds, sizeof(seeds));
- state->s1 = __seed(seeds[0], 2U);
- state->s2 = __seed(seeds[1], 8U);
- state->s3 = __seed(seeds[2], 16U);
- state->s4 = __seed(seeds[3], 128U);
-
- prandom_warmup(state);
- }
+ prandom_seed_full_state(&net_rand_state);
out:
spin_unlock_irqrestore(&lock, flags);
}
--
1.9.3
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH net-next 4/5] random32: add prandom_init_once helper for own rngs
2015-10-07 13:43 [PATCH net-next 0/5] BPF/random32 updates Daniel Borkmann
` (2 preceding siblings ...)
2015-10-07 13:43 ` [PATCH net-next 3/5] random32: add prandom_seed_full_state helper Daniel Borkmann
@ 2015-10-07 13:43 ` Daniel Borkmann
2015-10-07 16:32 ` Alexei Starovoitov
2015-10-07 13:43 ` [PATCH net-next 5/5] bpf: split state from prandom_u32() and consolidate {c,e}BPF prngs Daniel Borkmann
4 siblings, 1 reply; 11+ messages in thread
From: Daniel Borkmann @ 2015-10-07 13:43 UTC (permalink / raw)
To: davem; +Cc: hannes, ast, netdev, Daniel Borkmann
Add a prandom_init_once() facility that works on the rnd_state, so that
users that are keeping their own state independent from prandom_u32() can
initialize their taus113 per cpu states.
The motivation here is similar to net_get_random_once(): initialize the
state as late as possible in the hope that enough entropy has been
collected for the seeding. prandom_init_once() makes use of the recently
introduced prandom_seed_full_state() helper and is generic enough so that
it could also be used on fast-paths due to the do_once().
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
---
include/linux/random.h | 6 ++++++
lib/random32.c | 5 +++++
2 files changed, 11 insertions(+)
diff --git a/include/linux/random.h b/include/linux/random.h
index e651874..86d4aa7 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -7,6 +7,8 @@
#define _LINUX_RANDOM_H
#include <linux/list.h>
+#include <linux/once.h>
+
#include <uapi/linux/random.h>
struct random_ready_callback {
@@ -45,6 +47,10 @@ struct rnd_state {
u32 prandom_u32_state(struct rnd_state *state);
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_init_state_once(void *pcpu_state);
+
+#define prandom_init_once(pcpu_state) \
+ ({ do_once(prandom_init_state_once, (pcpu_state)); })
/**
* prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
diff --git a/lib/random32.c b/lib/random32.c
index 36c09fb..b166237 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -256,6 +256,11 @@ static void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
}
}
+void prandom_init_state_once(void *pcpu_state)
+{
+ prandom_seed_full_state((struct rnd_state __percpu *)pcpu_state);
+}
+
/*
* Generate better values after random number generator
* is fully initialized.
--
1.9.3
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 4/5] random32: add prandom_init_once helper for own rngs
2015-10-07 13:43 ` [PATCH net-next 4/5] random32: add prandom_init_once helper for own rngs Daniel Borkmann
@ 2015-10-07 16:32 ` Alexei Starovoitov
0 siblings, 0 replies; 11+ messages in thread
From: Alexei Starovoitov @ 2015-10-07 16:32 UTC (permalink / raw)
To: Daniel Borkmann, davem; +Cc: hannes, netdev
On 10/7/15 6:43 AM, Daniel Borkmann wrote:
> +void prandom_init_state_once(void *pcpu_state);
> +
> +#define prandom_init_once(pcpu_state) \
> + ({ do_once(prandom_init_state_once, (pcpu_state)); })
>
> /**
> * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
> diff --git a/lib/random32.c b/lib/random32.c
> index 36c09fb..b166237 100644
> --- a/lib/random32.c
> +++ b/lib/random32.c
> @@ -256,6 +256,11 @@ static void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
> }
> }
>
> +void prandom_init_state_once(void *pcpu_state)
> +{
> + prandom_seed_full_state((struct rnd_state __percpu *)pcpu_state);
> +}
> +
prandom_seed_full_state() is likely not inlined in the above.
Why introduce this helper instead of making prandom_seed_full_state()
global? with my other suggestion the void cast will be avoided as well
and compiler will do type checking.
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH net-next 5/5] bpf: split state from prandom_u32() and consolidate {c,e}BPF prngs
2015-10-07 13:43 [PATCH net-next 0/5] BPF/random32 updates Daniel Borkmann
` (3 preceding siblings ...)
2015-10-07 13:43 ` [PATCH net-next 4/5] random32: add prandom_init_once helper for own rngs Daniel Borkmann
@ 2015-10-07 13:43 ` Daniel Borkmann
2015-10-07 16:38 ` Alexei Starovoitov
4 siblings, 1 reply; 11+ messages in thread
From: Daniel Borkmann @ 2015-10-07 13:43 UTC (permalink / raw)
To: davem; +Cc: hannes, ast, netdev, Daniel Borkmann, Chema Gonzalez
While recently arguing on a seccomp discussion that raw prandom_u32()
access shouldn't be exposed to unprivileged user space, I forgot the
fact that SKF_AD_RANDOM extension actually already does it for some time
in cBPF via commit 4cd3675ebf74 ("filter: added BPF random opcode").
Since prandom_u32() is being used in a lot of critical networking code,
lets be more conservative and split their states. Furthermore, consolidate
eBPF and cBPF prandom handlers to use the new internal PRNG. For eBPF,
bpf_get_prandom_u32() was only accessible for privileged users, but
should that change one day, we also don't want to leak raw sequences
through things like eBPF maps.
One thought was also to have own per bpf_prog states, but due to ABI
reasons this is not easily possible, i.e. the program code currently
cannot access bpf_prog itself, and copying the rnd_state to/from the
stack scratch space whenever a program uses the prng seems not really
worth the trouble and seems too hacky. If needed, taus113 could in such
cases be implemented within eBPF using a map entry to keep the state
space, or get_random_bytes() could become a second helper in cases where
performance would not be critical.
Both sides can trigger a one-time late init via prandom_init_once() on
the shared state. Performance-wise, there should even be a tiny gain
as bpf_user_rnd_u32() saves one function call. The PRNG needs to live
inside the BPF core since kernels could have a NET-less config as well.
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Cc: Chema Gonzalez <chema@google.com>
---
include/linux/bpf.h | 4 ++++
kernel/bpf/core.c | 26 ++++++++++++++++++++++++++
kernel/bpf/helpers.c | 7 +------
kernel/bpf/syscall.c | 2 ++
net/core/filter.c | 9 ++-------
5 files changed, 35 insertions(+), 13 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index c915a6b..3697ad5 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -200,4 +200,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto;
extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
+/* Shared helpers among cBPF and eBPF. */
+void bpf_user_rnd_init_once(void);
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
#endif /* _LINUX_BPF_H */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c8855c2..8086471 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -731,6 +731,32 @@ void bpf_prog_free(struct bpf_prog *fp)
}
EXPORT_SYMBOL_GPL(bpf_prog_free);
+/* RNG for unpriviledged user space with separated state from prandom_u32(). */
+static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
+
+void bpf_user_rnd_init_once(void)
+{
+ prandom_init_once(&bpf_user_rnd_state);
+}
+
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+ /* Should someone ever have the rather unwise idea to use some
+ * of the registers passed into this function, then note that
+ * this function is called from native eBPF and classic-to-eBPF
+ * transformations. Register assignments from both sides are
+ * different, f.e. classic always sets fn(ctx, A, X) here.
+ */
+ struct rnd_state *state;
+ u32 res;
+
+ state = &get_cpu_var(bpf_user_rnd_state);
+ res = prandom_u32_state(state);
+ put_cpu_var(state);
+
+ return res;
+}
+
/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1447ec0..4504ca6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -93,13 +93,8 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = {
.arg2_type = ARG_PTR_TO_MAP_KEY,
};
-static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
- return prandom_u32();
-}
-
const struct bpf_func_proto bpf_get_prandom_u32_proto = {
- .func = bpf_get_prandom_u32,
+ .func = bpf_user_rnd_u32,
.gpl_only = false,
.ret_type = RET_INTEGER,
};
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5f35f42..c868caf 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -404,6 +404,8 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
if (insn->imm == BPF_FUNC_get_route_realm)
prog->dst_needed = 1;
+ if (insn->imm == BPF_FUNC_get_prandom_u32)
+ bpf_user_rnd_init_once();
if (insn->imm == BPF_FUNC_tail_call) {
/* mark bpf_tail_call as different opcode
* to avoid conditional branch in
diff --git a/net/core/filter.c b/net/core/filter.c
index 8f4603c..342e6c8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -149,12 +149,6 @@ static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
return raw_smp_processor_id();
}
-/* note that this only generates 32-bit random numbers */
-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
-{
- return prandom_u32();
-}
-
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
struct bpf_insn *insn_buf)
{
@@ -313,7 +307,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
break;
case SKF_AD_OFF + SKF_AD_RANDOM:
- *insn = BPF_EMIT_CALL(__get_random_u32);
+ *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
+ bpf_user_rnd_init_once();
break;
}
break;
--
1.9.3
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 5/5] bpf: split state from prandom_u32() and consolidate {c,e}BPF prngs
2015-10-07 13:43 ` [PATCH net-next 5/5] bpf: split state from prandom_u32() and consolidate {c,e}BPF prngs Daniel Borkmann
@ 2015-10-07 16:38 ` Alexei Starovoitov
2015-10-07 21:01 ` Daniel Borkmann
0 siblings, 1 reply; 11+ messages in thread
From: Alexei Starovoitov @ 2015-10-07 16:38 UTC (permalink / raw)
To: Daniel Borkmann, davem; +Cc: hannes, netdev, Chema Gonzalez
On 10/7/15 6:43 AM, Daniel Borkmann wrote:
> +void bpf_user_rnd_init_once(void)
> +{
> + prandom_init_once(&bpf_user_rnd_state);
> +}
here the helper is definitely needed, since it's called from two
places and we must make sure that prandom_init_once doesn't
duplicate its static_key in two places.
Probably makes sense to add a comment to do_once api that
do_once(func, arg);
do_once(func, arg);
is not equal to
void my_helper(void) { do_once(func, arg); }
my_helper();
my_helper();
For this patch:
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH net-next 5/5] bpf: split state from prandom_u32() and consolidate {c,e}BPF prngs
2015-10-07 16:38 ` Alexei Starovoitov
@ 2015-10-07 21:01 ` Daniel Borkmann
0 siblings, 0 replies; 11+ messages in thread
From: Daniel Borkmann @ 2015-10-07 21:01 UTC (permalink / raw)
To: Alexei Starovoitov, davem; +Cc: hannes, netdev, Chema Gonzalez
On 10/07/2015 06:38 PM, Alexei Starovoitov wrote:
> On 10/7/15 6:43 AM, Daniel Borkmann wrote:
>> +void bpf_user_rnd_init_once(void)
>> +{
>> + prandom_init_once(&bpf_user_rnd_state);
>> +}
>
> here the helper is definitely needed, since it's called from two
> places and we must make sure that prandom_init_once doesn't
> duplicate its static_key in two places.
Yes, sure.
> Probably makes sense to add a comment to do_once api that
>
> do_once(func, arg);
> do_once(func, arg);
>
> is not equal to
>
> void my_helper(void) { do_once(func, arg); }
> my_helper();
> my_helper();
Makes sense, added a comment.
> For this patch:
> Acked-by: Alexei Starovoitov <ast@plumgrid.com>
>
^ permalink raw reply [flat|nested] 11+ messages in thread