* [PATCH 0/12] random pt3: More core and accounting cleanups
@ 2005-01-19 8:17 Matt Mackall
2005-01-19 8:17 ` [PATCH 1/12] random pt3: More meaningful pool names Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
This is a third series of various cleanups for drivers/char/random.c.
It applies on top of the previous 10.
These bits greatly simplify the setup:
1 More meaningful pool names
2 Static allocation of pools
3 Static sysctl bits
These bits make the accounting safer and the code easier to follow:
4 Catastrophic reseed checks
5 Entropy reservation accounting
6 Reservation flag in pool struct
7 Reseed pointer in pool struct
8 Break up extract_user
These bits clean up the hashing functions:
9 Remove dead MD5 copy
10 Simplify hash folding
11 Clean up hash buffering
This bit drops a bunch of code and reduces lock hold times:
12 Remove entropy batching
The next series focuses on moving and sharing code more appropriately.
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 2/12] random pt3: Static allocation of pools
2005-01-19 8:17 ` [PATCH 1/12] random pt3: More meaningful pool names Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 3/12] random pt3: Static sysctl bits Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
As we no longer allow resizing of pools, it makes sense to allocate
and initialize them statically. Remove create_entropy_store and
simplify rand_initialize.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:22:06.531748727 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:34:46.175902338 -0800
@@ -295,42 +295,37 @@
int poolwords;
int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
+ /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
+ { 128, 103, 76, 51, 25, 1 },
+ /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+ { 32, 26, 20, 14, 7, 1 },
+#if 0
/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
{ 2048, 1638, 1231, 819, 411, 1 },
/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
{ 1024, 817, 615, 412, 204, 1 },
-#if 0 /* Alternate polynomial */
+
/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
{ 1024, 819, 616, 410, 207, 2 },
-#endif
/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
{ 512, 411, 308, 208, 104, 1 },
-#if 0 /* Alternates */
+
/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
{ 512, 409, 307, 206, 102, 2 },
/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
{ 512, 409, 309, 205, 103, 2 },
-#endif
/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
{ 256, 205, 155, 101, 52, 1 },
- /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
- { 128, 103, 76, 51, 25, 1 },
-#if 0 /* Alternate polynomial */
/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
{ 128, 103, 78, 51, 27, 2 },
-#endif
/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
{ 64, 52, 39, 26, 14, 1 },
-
- /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
- { 32, 26, 20, 14, 7, 1 },
-
- { 0, 0, 0, 0, 0, 0 },
+#endif
};
#define POOLBITS poolwords*32
@@ -382,9 +377,6 @@
/*
* Static global variables
*/
-static struct entropy_store *input_pool; /* The default global store */
-static struct entropy_store *blocking_pool; /* secondary store */
-static struct entropy_store *nonblocking_pool; /* For urandom */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
@@ -392,7 +384,8 @@
* Forward procedure declarations
*/
#ifdef CONFIG_SYSCTL
-static void sysctl_init_random(struct entropy_store *random_state);
+struct entropy_store;
+static void sysctl_init_random(struct entropy_store *pool);
#endif
static inline __u32 rol32(__u32 word, int shift)
@@ -406,9 +399,9 @@
#define DEBUG_ENT(fmt, arg...) do { if (debug) \
printk(KERN_DEBUG "random %04d %04d %04d: " \
fmt,\
- input_pool->entropy_count,\
- blocking_pool->entropy_count,\
- nonblocking_pool->entropy_count,\
+ input_pool.entropy_count,\
+ blocking_pool.entropy_count,\
+ nonblocking_pool.entropy_count,\
## arg); } while (0)
#else
#define DEBUG_ENT(fmt, arg...) do {} while (0)
@@ -423,7 +416,7 @@
struct entropy_store {
/* mostly-read data: */
- struct poolinfo poolinfo;
+ struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
@@ -434,48 +427,30 @@
int input_rotate;
};
-/*
- * Initialize the entropy store. The input argument is the size of
- * the random pool.
- *
- * Returns an negative error if there is a problem.
- */
-static int create_entropy_store(int size, const char *name,
- struct entropy_store **ret_bucket)
-{
- struct entropy_store *r;
- struct poolinfo *p;
- int poolwords;
-
- poolwords = (size + 3) / 4; /* Convert bytes->words */
- /* The pool size must be a multiple of 16 32-bit words */
- poolwords = ((poolwords + 15) / 16) * 16;
+static __u32 input_pool_data[INPUT_POOL_WORDS];
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
+
+static struct entropy_store input_pool = {
+ .poolinfo = &poolinfo_table[0],
+ .name = "input",
+ .lock = SPIN_LOCK_UNLOCKED,
+ .pool = input_pool_data
+};
- for (p = poolinfo_table; p->poolwords; p++) {
- if (poolwords == p->poolwords)
- break;
- }
- if (p->poolwords == 0)
- return -EINVAL;
+static struct entropy_store blocking_pool = {
+ .poolinfo = &poolinfo_table[1],
+ .name = "blocking",
+ .lock = SPIN_LOCK_UNLOCKED,
+ .pool = blocking_pool_data
+};
- r = kmalloc(sizeof(struct entropy_store), GFP_KERNEL);
- if (!r)
- return -ENOMEM;
-
- memset (r, 0, sizeof(struct entropy_store));
- r->poolinfo = *p;
-
- r->pool = kmalloc(POOLBYTES, GFP_KERNEL);
- if (!r->pool) {
- kfree(r);
- return -ENOMEM;
- }
- memset(r->pool, 0, POOLBYTES);
- r->lock = SPIN_LOCK_UNLOCKED;
- r->name = name;
- *ret_bucket = r;
- return 0;
-}
+static struct entropy_store nonblocking_pool = {
+ .poolinfo = &poolinfo_table[1],
+ .name = "nonblocking",
+ .lock = SPIN_LOCK_UNLOCKED,
+ .pool = nonblocking_pool_data
+};
/*
* This function adds a byte into the entropy "pool". It does not
@@ -495,16 +470,16 @@
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
int new_rotate, input_rotate;
- int wordmask = r->poolinfo.poolwords - 1;
+ int wordmask = r->poolinfo->poolwords - 1;
__u32 w, next_w;
unsigned long flags;
/* Taps are constant, so we can load them without holding r->lock. */
- tap1 = r->poolinfo.tap1;
- tap2 = r->poolinfo.tap2;
- tap3 = r->poolinfo.tap3;
- tap4 = r->poolinfo.tap4;
- tap5 = r->poolinfo.tap5;
+ tap1 = r->poolinfo->tap1;
+ tap2 = r->poolinfo->tap2;
+ tap3 = r->poolinfo->tap3;
+ tap4 = r->poolinfo->tap4;
+ tap5 = r->poolinfo->tap5;
next_w = *in++;
spin_lock_irqsave(&r->lock, flags);
@@ -570,8 +545,8 @@
DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
r->entropy_count, nbits);
r->entropy_count = 0;
- } else if (r->entropy_count + nbits > r->poolinfo.POOLBITS) {
- r->entropy_count = r->poolinfo.POOLBITS;
+ } else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) {
+ r->entropy_count = r->poolinfo->POOLBITS;
} else {
r->entropy_count += nbits;
if (nbits)
@@ -660,7 +635,7 @@
static void batch_entropy_process(void *private_)
{
struct entropy_store *r = (struct entropy_store *) private_, *p;
- int max_entropy = r->poolinfo.POOLBITS;
+ int max_entropy = r->poolinfo->POOLBITS;
unsigned head, tail;
/* Mixing into the pool is expensive, so copy over the batch
@@ -682,8 +657,9 @@
p = r;
while (head != tail) {
if (r->entropy_count >= max_entropy) {
- r = (r == blocking_pool) ? input_pool : blocking_pool;
- max_entropy = r->poolinfo.POOLBITS;
+ r = (r == &blocking_pool) ? &input_pool :
+ &blocking_pool;
+ max_entropy = r->poolinfo->POOLBITS;
}
add_entropy_words(r, batch_entropy_copy[tail].data, 2);
credit_entropy_store(r, batch_entropy_copy[tail].credit);
@@ -727,7 +703,7 @@
preempt_disable();
/* if over the trickle threshold, use only 1 in 4096 samples */
- if (input_pool->entropy_count > trickle_thresh &&
+ if (input_pool.entropy_count > trickle_thresh &&
(__get_cpu_var(trickle_count)++ & 0xfff))
goto out;
@@ -1226,7 +1202,7 @@
size_t nbytes, __u32 *tmp)
{
if (r->entropy_count < nbytes * 8 &&
- r->entropy_count < r->poolinfo.POOLBITS) {
+ r->entropy_count < r->poolinfo->POOLBITS) {
int bytes = max_t(int, random_read_wakeup_thresh / 8,
min_t(int, nbytes, TMP_BUF_SIZE));
@@ -1234,7 +1210,7 @@
"(%d of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
- bytes=extract_entropy(input_pool, tmp, bytes,
+ bytes=extract_entropy(&input_pool, tmp, bytes,
EXTRACT_ENTROPY_LIMIT);
add_entropy_words(r, tmp, bytes);
credit_entropy_store(r, bytes*8);
@@ -1263,8 +1239,8 @@
unsigned long cpuflags;
/* Redundant, but just in case... */
- if (r->entropy_count > r->poolinfo.POOLBITS)
- r->entropy_count = r->poolinfo.POOLBITS;
+ if (r->entropy_count > r->poolinfo->POOLBITS)
+ r->entropy_count = r->poolinfo->POOLBITS;
if (flags & EXTRACT_ENTROPY_SECONDARY)
xfer_secondary_pool(r, nbytes, tmp);
@@ -1323,7 +1299,7 @@
* attempts to find previous ouputs), unless the hash
* function can be inverted.
*/
- for (i = 0, x = 0; i < r->poolinfo.poolwords; i += 16, x+=2) {
+ for (i = 0, x = 0; i < r->poolinfo->poolwords; i += 16, x+=2) {
HASH_TRANSFORM(tmp, r->pool+i);
add_entropy_words(r, &tmp[x%HASH_BUFFER_SIZE], 1);
}
@@ -1377,21 +1353,8 @@
*/
void get_random_bytes(void *buf, int nbytes)
{
- struct entropy_store *r = nonblocking_pool;
- int flags = EXTRACT_ENTROPY_SECONDARY;
-
- if (!r)
- r = blocking_pool;
- if (!r) {
- r = input_pool;
- flags = 0;
- }
- if (!r) {
- printk(KERN_NOTICE "get_random_bytes called before "
- "random driver initialization\n");
- return;
- }
- extract_entropy(r, (char *) buf, nbytes, flags);
+ extract_entropy(&nonblocking_pool, (char *) buf, nbytes,
+ EXTRACT_ENTROPY_SECONDARY);
}
EXPORT_SYMBOL(get_random_bytes);
@@ -1422,21 +1385,13 @@
static int __init rand_initialize(void)
{
- if (create_entropy_store(INPUT_POOL_WORDS, "input", &input_pool))
- goto err;
- if (batch_entropy_init(BATCH_ENTROPY_SIZE, input_pool))
- goto err;
- if (create_entropy_store(OUTPUT_POOL_WORDS, "blocking",
- &blocking_pool))
- goto err;
- if (create_entropy_store(OUTPUT_POOL_WORDS, "nonblocking",
- &nonblocking_pool))
+ if (batch_entropy_init(BATCH_ENTROPY_SIZE, &input_pool))
goto err;
- init_std_data(input_pool);
- init_std_data(blocking_pool);
- init_std_data(nonblocking_pool);
+ init_std_data(&input_pool);
+ init_std_data(&blocking_pool);
+ init_std_data(&nonblocking_pool);
#ifdef CONFIG_SYSCTL
- sysctl_init_random(input_pool);
+ sysctl_init_random(&input_pool);
#endif
return 0;
err:
@@ -1492,7 +1447,7 @@
DEBUG_ENT("reading %d bits\n", n*8);
- n = extract_entropy(blocking_pool, buf, n,
+ n = extract_entropy(&blocking_pool, buf, n,
EXTRACT_ENTROPY_USER |
EXTRACT_ENTROPY_LIMIT |
EXTRACT_ENTROPY_SECONDARY);
@@ -1509,7 +1464,7 @@
DEBUG_ENT("sleeping?\n");
wait_event_interruptible(random_read_wait,
- input_pool->entropy_count >=
+ input_pool.entropy_count >=
random_read_wakeup_thresh);
DEBUG_ENT("awake\n");
@@ -1549,12 +1504,12 @@
int flags = EXTRACT_ENTROPY_USER;
unsigned long cpuflags;
- spin_lock_irqsave(&input_pool->lock, cpuflags);
- if (input_pool->entropy_count > input_pool->poolinfo.POOLBITS)
+ spin_lock_irqsave(&input_pool.lock, cpuflags);
+ if (input_pool.entropy_count > input_pool.poolinfo->POOLBITS)
flags |= EXTRACT_ENTROPY_SECONDARY;
- spin_unlock_irqrestore(&input_pool->lock, cpuflags);
+ spin_unlock_irqrestore(&input_pool.lock, cpuflags);
- return extract_entropy(nonblocking_pool, buf, nbytes, flags);
+ return extract_entropy(&nonblocking_pool, buf, nbytes, flags);
}
static unsigned int
@@ -1565,9 +1520,9 @@
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
- if (input_pool->entropy_count >= random_read_wakeup_thresh)
+ if (input_pool.entropy_count >= random_read_wakeup_thresh)
mask |= POLLIN | POLLRDNORM;
- if (input_pool->entropy_count < random_write_wakeup_thresh)
+ if (input_pool.entropy_count < random_write_wakeup_thresh)
mask |= POLLOUT | POLLWRNORM;
return mask;
}
@@ -1593,7 +1548,7 @@
c -= bytes;
p += bytes;
- add_entropy_words(input_pool, buf, (bytes + 3) / 4);
+ add_entropy_words(&input_pool, buf, (bytes + 3) / 4);
}
if (p == buffer) {
return (ssize_t)ret;
@@ -1614,7 +1569,7 @@
switch (cmd) {
case RNDGETENTCNT:
- ent_count = input_pool->entropy_count;
+ ent_count = input_pool.entropy_count;
if (put_user(ent_count, p))
return -EFAULT;
return 0;
@@ -1623,12 +1578,12 @@
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_store(input_pool, ent_count);
+ credit_entropy_store(&input_pool, ent_count);
/*
* Wake up waiting processes if we have enough
* entropy.
*/
- if (input_pool->entropy_count >= random_read_wakeup_thresh)
+ if (input_pool.entropy_count >= random_read_wakeup_thresh)
wake_up_interruptible(&random_read_wait);
return 0;
case RNDADDENTROPY:
@@ -1644,12 +1599,12 @@
size, &file->f_pos);
if (retval < 0)
return retval;
- credit_entropy_store(input_pool, ent_count);
+ credit_entropy_store(&input_pool, ent_count);
/*
* Wake up waiting processes if we have enough
* entropy.
*/
- if (input_pool->entropy_count >= random_read_wakeup_thresh)
+ if (input_pool.entropy_count >= random_read_wakeup_thresh)
wake_up_interruptible(&random_read_wait);
return 0;
case RNDZAPENTCNT:
@@ -1657,9 +1612,9 @@
/* Clear the entropy pool counters. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- init_std_data(input_pool);
- init_std_data(blocking_pool);
- init_std_data(nonblocking_pool);
+ init_std_data(&input_pool);
+ init_std_data(&blocking_pool);
+ init_std_data(&nonblocking_pool);
return 0;
default:
return -EINVAL;
@@ -1842,7 +1797,7 @@
{
min_read_thresh = 8;
min_write_thresh = 0;
- max_read_thresh = max_write_thresh = pool->poolinfo.POOLBITS;
+ max_read_thresh = max_write_thresh = pool->poolinfo->POOLBITS;
random_table[1].data = &pool->entropy_count;
}
#endif /* CONFIG_SYSCTL */
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 1/12] random pt3: More meaningful pool names
2005-01-19 8:17 [PATCH 0/12] random pt3: More core and accounting cleanups Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 2/12] random pt3: Static allocation of pools Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Give pools more meaningful names.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:21:12.250668976 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:21:12.314660818 -0800
@@ -256,8 +256,8 @@
/*
* Configuration information
*/
-#define DEFAULT_POOL_SIZE 512
-#define SECONDARY_POOL_SIZE 128
+#define INPUT_POOL_WORDS 128
+#define OUTPUT_POOL_WORDS 32
#define BATCH_ENTROPY_SIZE 256
#define USE_SHA
@@ -279,7 +279,7 @@
* samples to avoid wasting CPU time and reduce lock contention.
*/
-static int trickle_thresh = DEFAULT_POOL_SIZE * 7;
+static int trickle_thresh = INPUT_POOL_WORDS * 28;
static DEFINE_PER_CPU(int, trickle_count) = 0;
@@ -382,9 +382,9 @@
/*
* Static global variables
*/
-static struct entropy_store *random_state; /* The default global store */
-static struct entropy_store *sec_random_state; /* secondary store */
-static struct entropy_store *urandom_state; /* For urandom */
+static struct entropy_store *input_pool; /* The default global store */
+static struct entropy_store *blocking_pool; /* secondary store */
+static struct entropy_store *nonblocking_pool; /* For urandom */
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
@@ -406,9 +406,9 @@
#define DEBUG_ENT(fmt, arg...) do { if (debug) \
printk(KERN_DEBUG "random %04d %04d %04d: " \
fmt,\
- random_state->entropy_count,\
- sec_random_state->entropy_count,\
- urandom_state->entropy_count,\
+ input_pool->entropy_count,\
+ blocking_pool->entropy_count,\
+ nonblocking_pool->entropy_count,\
## arg); } while (0)
#else
#define DEBUG_ENT(fmt, arg...) do {} while (0)
@@ -653,9 +653,9 @@
}
/*
- * Flush out the accumulated entropy operations, adding entropy to the passed
- * store (normally random_state). If that store has enough entropy, alternate
- * between randomizing the data of the primary and secondary stores.
+ * Flush out the accumulated entropy operations, adding entropy to the
+ * input pool. If that pool has enough entropy, alternate
+ * between randomizing the data of all pools.
*/
static void batch_entropy_process(void *private_)
{
@@ -682,8 +682,7 @@
p = r;
while (head != tail) {
if (r->entropy_count >= max_entropy) {
- r = (r == sec_random_state) ? random_state :
- sec_random_state;
+ r = (r == blocking_pool) ? input_pool : blocking_pool;
max_entropy = r->poolinfo.POOLBITS;
}
add_entropy_words(r, batch_entropy_copy[tail].data, 2);
@@ -728,7 +727,7 @@
preempt_disable();
/* if over the trickle threshold, use only 1 in 4096 samples */
- if (random_state->entropy_count > trickle_thresh &&
+ if (input_pool->entropy_count > trickle_thresh &&
(__get_cpu_var(trickle_count)++ & 0xfff))
goto out;
@@ -1235,7 +1234,7 @@
"(%d of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
- bytes=extract_entropy(random_state, tmp, bytes,
+ bytes=extract_entropy(input_pool, tmp, bytes,
EXTRACT_ENTROPY_LIMIT);
add_entropy_words(r, tmp, bytes);
credit_entropy_store(r, bytes*8);
@@ -1378,13 +1377,13 @@
*/
void get_random_bytes(void *buf, int nbytes)
{
- struct entropy_store *r = urandom_state;
+ struct entropy_store *r = nonblocking_pool;
int flags = EXTRACT_ENTROPY_SECONDARY;
if (!r)
- r = sec_random_state;
+ r = blocking_pool;
if (!r) {
- r = random_state;
+ r = input_pool;
flags = 0;
}
if (!r) {
@@ -1423,21 +1422,21 @@
static int __init rand_initialize(void)
{
- if (create_entropy_store(DEFAULT_POOL_SIZE, "primary", &random_state))
+ if (create_entropy_store(INPUT_POOL_WORDS, "input", &input_pool))
goto err;
- if (batch_entropy_init(BATCH_ENTROPY_SIZE, random_state))
+ if (batch_entropy_init(BATCH_ENTROPY_SIZE, input_pool))
goto err;
- if (create_entropy_store(SECONDARY_POOL_SIZE, "secondary",
- &sec_random_state))
+ if (create_entropy_store(OUTPUT_POOL_WORDS, "blocking",
+ &blocking_pool))
goto err;
- if (create_entropy_store(SECONDARY_POOL_SIZE, "urandom",
- &urandom_state))
+ if (create_entropy_store(OUTPUT_POOL_WORDS, "nonblocking",
+ &nonblocking_pool))
goto err;
- init_std_data(random_state);
- init_std_data(sec_random_state);
- init_std_data(urandom_state);
+ init_std_data(input_pool);
+ init_std_data(blocking_pool);
+ init_std_data(nonblocking_pool);
#ifdef CONFIG_SYSCTL
- sysctl_init_random(random_state);
+ sysctl_init_random(input_pool);
#endif
return 0;
err:
@@ -1493,7 +1492,7 @@
DEBUG_ENT("reading %d bits\n", n*8);
- n = extract_entropy(sec_random_state, buf, n,
+ n = extract_entropy(blocking_pool, buf, n,
EXTRACT_ENTROPY_USER |
EXTRACT_ENTROPY_LIMIT |
EXTRACT_ENTROPY_SECONDARY);
@@ -1510,7 +1509,7 @@
DEBUG_ENT("sleeping?\n");
wait_event_interruptible(random_read_wait,
- random_state->entropy_count >=
+ input_pool->entropy_count >=
random_read_wakeup_thresh);
DEBUG_ENT("awake\n");
@@ -1550,12 +1549,12 @@
int flags = EXTRACT_ENTROPY_USER;
unsigned long cpuflags;
- spin_lock_irqsave(&random_state->lock, cpuflags);
- if (random_state->entropy_count > random_state->poolinfo.POOLBITS)
+ spin_lock_irqsave(&input_pool->lock, cpuflags);
+ if (input_pool->entropy_count > input_pool->poolinfo.POOLBITS)
flags |= EXTRACT_ENTROPY_SECONDARY;
- spin_unlock_irqrestore(&random_state->lock, cpuflags);
+ spin_unlock_irqrestore(&input_pool->lock, cpuflags);
- return extract_entropy(urandom_state, buf, nbytes, flags);
+ return extract_entropy(nonblocking_pool, buf, nbytes, flags);
}
static unsigned int
@@ -1566,9 +1565,9 @@
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
- if (random_state->entropy_count >= random_read_wakeup_thresh)
+ if (input_pool->entropy_count >= random_read_wakeup_thresh)
mask |= POLLIN | POLLRDNORM;
- if (random_state->entropy_count < random_write_wakeup_thresh)
+ if (input_pool->entropy_count < random_write_wakeup_thresh)
mask |= POLLOUT | POLLWRNORM;
return mask;
}
@@ -1594,7 +1593,7 @@
c -= bytes;
p += bytes;
- add_entropy_words(random_state, buf, (bytes + 3) / 4);
+ add_entropy_words(input_pool, buf, (bytes + 3) / 4);
}
if (p == buffer) {
return (ssize_t)ret;
@@ -1615,7 +1614,7 @@
switch (cmd) {
case RNDGETENTCNT:
- ent_count = random_state->entropy_count;
+ ent_count = input_pool->entropy_count;
if (put_user(ent_count, p))
return -EFAULT;
return 0;
@@ -1624,12 +1623,12 @@
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- credit_entropy_store(random_state, ent_count);
+ credit_entropy_store(input_pool, ent_count);
/*
* Wake up waiting processes if we have enough
* entropy.
*/
- if (random_state->entropy_count >= random_read_wakeup_thresh)
+ if (input_pool->entropy_count >= random_read_wakeup_thresh)
wake_up_interruptible(&random_read_wait);
return 0;
case RNDADDENTROPY:
@@ -1645,12 +1644,12 @@
size, &file->f_pos);
if (retval < 0)
return retval;
- credit_entropy_store(random_state, ent_count);
+ credit_entropy_store(input_pool, ent_count);
/*
* Wake up waiting processes if we have enough
* entropy.
*/
- if (random_state->entropy_count >= random_read_wakeup_thresh)
+ if (input_pool->entropy_count >= random_read_wakeup_thresh)
wake_up_interruptible(&random_read_wait);
return 0;
case RNDZAPENTCNT:
@@ -1658,9 +1657,9 @@
/* Clear the entropy pool counters. */
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- init_std_data(random_state);
- init_std_data(sec_random_state);
- init_std_data(urandom_state);
+ init_std_data(input_pool);
+ init_std_data(blocking_pool);
+ init_std_data(nonblocking_pool);
return 0;
default:
return -EINVAL;
@@ -1780,7 +1779,7 @@
return 1;
}
-static int sysctl_poolsize = DEFAULT_POOL_SIZE;
+static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
ctl_table random_table[] = {
{
.ctl_name = RANDOM_POOLSIZE,
@@ -1839,12 +1838,12 @@
{ .ctl_name = 0 }
};
-static void sysctl_init_random(struct entropy_store *random_state)
+static void sysctl_init_random(struct entropy_store *pool)
{
min_read_thresh = 8;
min_write_thresh = 0;
- max_read_thresh = max_write_thresh = random_state->poolinfo.POOLBITS;
- random_table[1].data = &random_state->entropy_count;
+ max_read_thresh = max_write_thresh = pool->poolinfo.POOLBITS;
+ random_table[1].data = &pool->entropy_count;
}
#endif /* CONFIG_SYSCTL */
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 4/12] random pt3: Catastrophic reseed checks
2005-01-19 8:17 ` [PATCH 3/12] random pt3: Static sysctl bits Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 5/12] random pt3: Entropy reservation accounting Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
When reseeding, we must always do a "catastrophic reseed" where we
pull enough new bits to make the new state unguessable from outputs
even if we knew the old state. So we must do the checks against the
minimum reseed amount under the pool lock in extract_entropy.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:37:42.989360541 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:39:17.538306576 -0800
@@ -1183,7 +1183,7 @@
#define SEC_XFER_SIZE (TMP_BUF_SIZE*4)
static ssize_t extract_entropy(struct entropy_store *r, void * buf,
- size_t nbytes, int flags);
+ size_t nbytes, int min, int flags);
/*
* This utility inline function is responsible for transfering entropy
@@ -1203,6 +1203,7 @@
r->name, bytes * 8, nbytes * 8, r->entropy_count);
bytes=extract_entropy(&input_pool, tmp, bytes,
+ random_read_wakeup_thresh / 8,
EXTRACT_ENTROPY_LIMIT);
add_entropy_words(r, tmp, bytes);
credit_entropy_store(r, bytes*8);
@@ -1220,10 +1221,13 @@
* extracting entropy from the secondary pool, and can refill from the
* primary pool if needed.
*
+ * If we have less than min bytes of entropy available, exit without
+ * transferring any. This helps avoid racing when reseeding.
+ *
* Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
*/
static ssize_t extract_entropy(struct entropy_store *r, void * buf,
- size_t nbytes, int flags)
+ size_t nbytes, int min, int flags)
{
ssize_t ret, i;
__u32 tmp[TMP_BUF_SIZE], data[16];
@@ -1243,16 +1247,21 @@
DEBUG_ENT("trying to extract %d bits from %s\n",
nbytes * 8, r->name);
- if (flags & EXTRACT_ENTROPY_LIMIT && nbytes >= r->entropy_count / 8)
- nbytes = r->entropy_count / 8;
-
- if (r->entropy_count / 8 >= nbytes)
- r->entropy_count -= nbytes*8;
- else
- r->entropy_count = 0;
+ if (r->entropy_count / 8 < min) {
+ nbytes = 0;
+ } else {
+ if (flags & EXTRACT_ENTROPY_LIMIT &&
+ nbytes >= r->entropy_count / 8)
+ nbytes = r->entropy_count / 8;
+
+ if (r->entropy_count / 8 >= nbytes)
+ r->entropy_count -= nbytes*8;
+ else
+ r->entropy_count = 0;
- if (r->entropy_count < random_write_wakeup_thresh)
- wake_up_interruptible(&random_write_wait);
+ if (r->entropy_count < random_write_wakeup_thresh)
+ wake_up_interruptible(&random_write_wait);
+ }
DEBUG_ENT("debiting %d entropy credits from %s%s\n",
nbytes * 8, r->name,
@@ -1345,7 +1354,7 @@
*/
void get_random_bytes(void *buf, int nbytes)
{
- extract_entropy(&nonblocking_pool, (char *) buf, nbytes,
+ extract_entropy(&nonblocking_pool, (char *) buf, nbytes, 0,
EXTRACT_ENTROPY_SECONDARY);
}
@@ -1435,7 +1444,7 @@
DEBUG_ENT("reading %d bits\n", n*8);
- n = extract_entropy(&blocking_pool, buf, n,
+ n = extract_entropy(&blocking_pool, buf, n, 0,
EXTRACT_ENTROPY_USER |
EXTRACT_ENTROPY_LIMIT |
EXTRACT_ENTROPY_SECONDARY);
@@ -1497,7 +1506,7 @@
flags |= EXTRACT_ENTROPY_SECONDARY;
spin_unlock_irqrestore(&input_pool.lock, cpuflags);
- return extract_entropy(&nonblocking_pool, buf, nbytes, flags);
+ return extract_entropy(&nonblocking_pool, buf, nbytes, 0, flags);
}
static unsigned int
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 3/12] random pt3: Static sysctl bits
2005-01-19 8:17 ` [PATCH 2/12] random pt3: Static allocation of pools Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 4/12] random pt3: Catastrophic reseed checks Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Static initialization for sysctl support
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:36:10.542146558 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:37:42.989360541 -0800
@@ -380,14 +380,6 @@
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-/*
- * Forward procedure declarations
- */
-#ifdef CONFIG_SYSCTL
-struct entropy_store;
-static void sysctl_init_random(struct entropy_store *pool);
-#endif
-
static inline __u32 rol32(__u32 word, int shift)
{
return (word << shift) | (word >> (32 - shift));
@@ -1386,16 +1378,12 @@
static int __init rand_initialize(void)
{
if (batch_entropy_init(BATCH_ENTROPY_SIZE, &input_pool))
- goto err;
+ return -1;
+
init_std_data(&input_pool);
init_std_data(&blocking_pool);
init_std_data(&nonblocking_pool);
-#ifdef CONFIG_SYSCTL
- sysctl_init_random(&input_pool);
-#endif
return 0;
-err:
- return -1;
}
module_init(rand_initialize);
@@ -1665,8 +1653,9 @@
#include <linux/sysctl.h>
-static int min_read_thresh, max_read_thresh;
-static int min_write_thresh, max_write_thresh;
+static int min_read_thresh = 8, min_write_thresh;
+static int max_read_thresh = INPUT_POOL_WORDS * 32;
+static int max_write_thresh = INPUT_POOL_WORDS * 32;
static char sysctl_bootid[16];
/*
@@ -1750,6 +1739,7 @@
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = &proc_dointvec,
+ .data = &input_pool.entropy_count,
},
{
.ctl_name = RANDOM_READ_THRESH,
@@ -1792,14 +1782,6 @@
},
{ .ctl_name = 0 }
};
-
-static void sysctl_init_random(struct entropy_store *pool)
-{
- min_read_thresh = 8;
- min_write_thresh = 0;
- max_read_thresh = max_write_thresh = pool->poolinfo->POOLBITS;
- random_table[1].data = &pool->entropy_count;
-}
#endif /* CONFIG_SYSCTL */
/********************************************************************
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 6/12] random pt3: Reservation flag in pool struct
2005-01-19 8:17 ` [PATCH 5/12] random pt3: Entropy reservation accounting Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 7/12] random pt3: Reseed pointer " Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Move the limit flag to the pool struct, and begin the process of
eliminating the extract flags.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:39:25.713264357 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:39:34.550137752 -0800
@@ -411,6 +411,7 @@
struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
+ int limit;
/* read-write data: */
spinlock_t lock ____cacheline_aligned_in_smp;
@@ -426,6 +427,7 @@
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
.name = "input",
+ .limit = 1,
.lock = SPIN_LOCK_UNLOCKED,
.pool = input_pool_data
};
@@ -433,6 +435,7 @@
static struct entropy_store blocking_pool = {
.poolinfo = &poolinfo_table[1],
.name = "blocking",
+ .limit = 1,
.lock = SPIN_LOCK_UNLOCKED,
.pool = blocking_pool_data
};
@@ -1178,7 +1181,6 @@
#define EXTRACT_ENTROPY_USER 1
#define EXTRACT_ENTROPY_SECONDARY 2
-#define EXTRACT_ENTROPY_LIMIT 4
#define TMP_BUF_SIZE (HASH_BUFFER_SIZE + HASH_EXTRA_SIZE)
#define SEC_XFER_SIZE (TMP_BUF_SIZE*4)
@@ -1197,14 +1199,14 @@
r->entropy_count < r->poolinfo->POOLBITS) {
int bytes = max_t(int, random_read_wakeup_thresh / 8,
min_t(int, nbytes, TMP_BUF_SIZE));
+ int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
DEBUG_ENT("going to reseed %s with %d bits "
"(%d of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
bytes=extract_entropy(&input_pool, tmp, bytes,
- random_read_wakeup_thresh / 8, 0,
- EXTRACT_ENTROPY_LIMIT);
+ random_read_wakeup_thresh / 8, rsvd, 0);
add_entropy_words(r, tmp, bytes);
credit_entropy_store(r, bytes*8);
}
@@ -1254,8 +1256,7 @@
nbytes = 0;
} else {
/* If limited, never pull more than available */
- if (flags & EXTRACT_ENTROPY_LIMIT &&
- nbytes + reserved >= r->entropy_count / 8)
+ if (r->limit && nbytes + reserved >= r->entropy_count / 8)
nbytes = r->entropy_count/8 - reserved;
if(r->entropy_count / 8 >= nbytes + reserved)
@@ -1268,8 +1269,7 @@
}
DEBUG_ENT("debiting %d entropy credits from %s%s\n",
- nbytes * 8, r->name,
- flags & EXTRACT_ENTROPY_LIMIT ? "" : " (unlimited)");
+ nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
spin_unlock_irqrestore(&r->lock, cpuflags);
@@ -1450,7 +1450,6 @@
n = extract_entropy(&blocking_pool, buf, n, 0, 0,
EXTRACT_ENTROPY_USER |
- EXTRACT_ENTROPY_LIMIT |
EXTRACT_ENTROPY_SECONDARY);
DEBUG_ENT("read got %d bits (%d still needed)\n",
@@ -1502,15 +1501,8 @@
urandom_read(struct file * file, char __user * buf,
size_t nbytes, loff_t *ppos)
{
- int flags = EXTRACT_ENTROPY_USER;
- unsigned long cpuflags;
-
- spin_lock_irqsave(&input_pool.lock, cpuflags);
- if (input_pool.entropy_count > input_pool.poolinfo->POOLBITS)
- flags |= EXTRACT_ENTROPY_SECONDARY;
- spin_unlock_irqrestore(&input_pool.lock, cpuflags);
-
- return extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0, flags);
+ return extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0,
+ EXTRACT_ENTROPY_USER);
}
static unsigned int
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 5/12] random pt3: Entropy reservation accounting
2005-01-19 8:17 ` [PATCH 4/12] random pt3: Catastrophic reseed checks Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 6/12] random pt3: Reservation flag in pool struct Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Additional parameter to allow keeping an entropy reserve in the input
pool. Groundwork for proper /dev/urandom vs /dev/random starvation prevention.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:39:17.538306576 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:39:25.713264357 -0800
@@ -1183,7 +1183,7 @@
#define SEC_XFER_SIZE (TMP_BUF_SIZE*4)
static ssize_t extract_entropy(struct entropy_store *r, void * buf,
- size_t nbytes, int min, int flags);
+ size_t nbytes, int min, int rsvd, int flags);
/*
* This utility inline function is responsible for transfering entropy
@@ -1203,7 +1203,7 @@
r->name, bytes * 8, nbytes * 8, r->entropy_count);
bytes=extract_entropy(&input_pool, tmp, bytes,
- random_read_wakeup_thresh / 8,
+ random_read_wakeup_thresh / 8, 0,
EXTRACT_ENTROPY_LIMIT);
add_entropy_words(r, tmp, bytes);
credit_entropy_store(r, bytes*8);
@@ -1221,13 +1221,15 @@
* extracting entropy from the secondary pool, and can refill from the
* primary pool if needed.
*
- * If we have less than min bytes of entropy available, exit without
- * transferring any. This helps avoid racing when reseeding.
+ * The min parameter specifies the minimum amount we can pull before
+ * failing to avoid races that defeat catastrophic reseeding while the
+ * reserved parameter indicates how much entropy we must leave in the
+ * pool after each pull to avoid starving other readers.
*
* Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
*/
static ssize_t extract_entropy(struct entropy_store *r, void * buf,
- size_t nbytes, int min, int flags)
+ size_t nbytes, int min, int reserved, int flags)
{
ssize_t ret, i;
__u32 tmp[TMP_BUF_SIZE], data[16];
@@ -1247,17 +1249,19 @@
DEBUG_ENT("trying to extract %d bits from %s\n",
nbytes * 8, r->name);
- if (r->entropy_count / 8 < min) {
+ /* Can we pull enough? */
+ if (r->entropy_count / 8 < min + reserved) {
nbytes = 0;
} else {
+ /* If limited, never pull more than available */
if (flags & EXTRACT_ENTROPY_LIMIT &&
- nbytes >= r->entropy_count / 8)
- nbytes = r->entropy_count / 8;
+ nbytes + reserved >= r->entropy_count / 8)
+ nbytes = r->entropy_count/8 - reserved;
- if (r->entropy_count / 8 >= nbytes)
+ if(r->entropy_count / 8 >= nbytes + reserved)
r->entropy_count -= nbytes*8;
else
- r->entropy_count = 0;
+ r->entropy_count = reserved;
if (r->entropy_count < random_write_wakeup_thresh)
wake_up_interruptible(&random_write_wait);
@@ -1354,7 +1358,7 @@
*/
void get_random_bytes(void *buf, int nbytes)
{
- extract_entropy(&nonblocking_pool, (char *) buf, nbytes, 0,
+ extract_entropy(&nonblocking_pool, (char *) buf, nbytes, 0, 0,
EXTRACT_ENTROPY_SECONDARY);
}
@@ -1444,7 +1448,7 @@
DEBUG_ENT("reading %d bits\n", n*8);
- n = extract_entropy(&blocking_pool, buf, n, 0,
+ n = extract_entropy(&blocking_pool, buf, n, 0, 0,
EXTRACT_ENTROPY_USER |
EXTRACT_ENTROPY_LIMIT |
EXTRACT_ENTROPY_SECONDARY);
@@ -1506,7 +1510,7 @@
flags |= EXTRACT_ENTROPY_SECONDARY;
spin_unlock_irqrestore(&input_pool.lock, cpuflags);
- return extract_entropy(&nonblocking_pool, buf, nbytes, 0, flags);
+ return extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0, flags);
}
static unsigned int
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 7/12] random pt3: Reseed pointer in pool struct
2005-01-19 8:17 ` [PATCH 6/12] random pt3: Reservation flag in pool struct Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 8/12] random pt3: Break up extract_user Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Put pointer to reseed pool in pool struct and automatically pull
entropy from it if it is set. This lets us remove the
EXTRACT_ENTROPY_SECONDARY flag.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:39:34.550137752 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:39:47.360504569 -0800
@@ -406,12 +406,14 @@
*
**********************************************************************/
+struct entropy_store;
struct entropy_store {
/* mostly-read data: */
struct poolinfo *poolinfo;
__u32 *pool;
const char *name;
int limit;
+ struct entropy_store *pull;
/* read-write data: */
spinlock_t lock ____cacheline_aligned_in_smp;
@@ -436,6 +438,7 @@
.poolinfo = &poolinfo_table[1],
.name = "blocking",
.limit = 1,
+ .pull = &input_pool,
.lock = SPIN_LOCK_UNLOCKED,
.pool = blocking_pool_data
};
@@ -443,6 +446,7 @@
static struct entropy_store nonblocking_pool = {
.poolinfo = &poolinfo_table[1],
.name = "nonblocking",
+ .pull = &input_pool,
.lock = SPIN_LOCK_UNLOCKED,
.pool = nonblocking_pool_data
};
@@ -1180,7 +1184,6 @@
*********************************************************************/
#define EXTRACT_ENTROPY_USER 1
-#define EXTRACT_ENTROPY_SECONDARY 2
#define TMP_BUF_SIZE (HASH_BUFFER_SIZE + HASH_EXTRA_SIZE)
#define SEC_XFER_SIZE (TMP_BUF_SIZE*4)
@@ -1195,7 +1198,7 @@
static inline void xfer_secondary_pool(struct entropy_store *r,
size_t nbytes, __u32 *tmp)
{
- if (r->entropy_count < nbytes * 8 &&
+ if (r->pull && r->entropy_count < nbytes * 8 &&
r->entropy_count < r->poolinfo->POOLBITS) {
int bytes = max_t(int, random_read_wakeup_thresh / 8,
min_t(int, nbytes, TMP_BUF_SIZE));
@@ -1205,7 +1208,7 @@
"(%d of %d requested)\n",
r->name, bytes * 8, nbytes * 8, r->entropy_count);
- bytes=extract_entropy(&input_pool, tmp, bytes,
+ bytes=extract_entropy(r->pull, tmp, bytes,
random_read_wakeup_thresh / 8, rsvd, 0);
add_entropy_words(r, tmp, bytes);
credit_entropy_store(r, bytes*8);
@@ -1219,10 +1222,6 @@
* number of bytes that are actually obtained. If the EXTRACT_ENTROPY_USER
* flag is given, then the buf pointer is assumed to be in user space.
*
- * If the EXTRACT_ENTROPY_SECONDARY flag is given, then we are actually
- * extracting entropy from the secondary pool, and can refill from the
- * primary pool if needed.
- *
* The min parameter specifies the minimum amount we can pull before
* failing to avoid races that defeat catastrophic reseeding while the
* reserved parameter indicates how much entropy we must leave in the
@@ -1242,8 +1241,7 @@
if (r->entropy_count > r->poolinfo->POOLBITS)
r->entropy_count = r->poolinfo->POOLBITS;
- if (flags & EXTRACT_ENTROPY_SECONDARY)
- xfer_secondary_pool(r, nbytes, tmp);
+ xfer_secondary_pool(r, nbytes, tmp);
/* Hold lock while accounting */
spin_lock_irqsave(&r->lock, cpuflags);
@@ -1358,8 +1356,7 @@
*/
void get_random_bytes(void *buf, int nbytes)
{
- extract_entropy(&nonblocking_pool, (char *) buf, nbytes, 0, 0,
- EXTRACT_ENTROPY_SECONDARY);
+ extract_entropy(&nonblocking_pool, (char *) buf, nbytes, 0, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
@@ -1449,8 +1446,7 @@
DEBUG_ENT("reading %d bits\n", n*8);
n = extract_entropy(&blocking_pool, buf, n, 0, 0,
- EXTRACT_ENTROPY_USER |
- EXTRACT_ENTROPY_SECONDARY);
+ EXTRACT_ENTROPY_USER);
DEBUG_ENT("read got %d bits (%d still needed)\n",
n*8, (nbytes-n)*8);
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 8/12] random pt3: Break up extract_user
2005-01-19 8:17 ` [PATCH 7/12] random pt3: Reseed pointer " Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 9/12] random pt3: Remove dead MD5 copy Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Break apart extract_entropy into kernel and user versions, remove last
extract flag and some unnecessary variables. This makes the code more
readable and amenable to sparse.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:39:47.360504569 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:40:00.654809689 -0800
@@ -1183,19 +1183,18 @@
*
*********************************************************************/
-#define EXTRACT_ENTROPY_USER 1
#define TMP_BUF_SIZE (HASH_BUFFER_SIZE + HASH_EXTRA_SIZE)
#define SEC_XFER_SIZE (TMP_BUF_SIZE*4)
static ssize_t extract_entropy(struct entropy_store *r, void * buf,
- size_t nbytes, int min, int rsvd, int flags);
+ size_t nbytes, int min, int rsvd);
/*
* This utility inline function is responsible for transfering entropy
* from the primary pool to the secondary extraction pool. We make
* sure we pull enough for a 'catastrophic reseed'.
*/
-static inline void xfer_secondary_pool(struct entropy_store *r,
+static void xfer_secondary_pool(struct entropy_store *r,
size_t nbytes, __u32 *tmp)
{
if (r->pull && r->entropy_count < nbytes * 8 &&
@@ -1209,18 +1208,15 @@
r->name, bytes * 8, nbytes * 8, r->entropy_count);
bytes=extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_thresh / 8, rsvd, 0);
+ random_read_wakeup_thresh / 8, rsvd);
add_entropy_words(r, tmp, bytes);
credit_entropy_store(r, bytes*8);
}
}
/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a buffer. This function computes how many remaining
- * bits of entropy are left in the pool, but it does not restrict the
- * number of bytes that are actually obtained. If the EXTRACT_ENTROPY_USER
- * flag is given, then the buf pointer is assumed to be in user space.
+ * These functions extracts randomness from the "entropy pool", and
+ * returns it in a buffer.
*
* The min parameter specifies the minimum amount we can pull before
* failing to avoid races that defeat catastrophic reseeding while the
@@ -1229,22 +1225,16 @@
*
* Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
*/
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
- size_t nbytes, int min, int reserved, int flags)
+
+static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ int reserved)
{
- ssize_t ret, i;
- __u32 tmp[TMP_BUF_SIZE], data[16];
- __u32 x;
- unsigned long cpuflags;
-
- /* Redundant, but just in case... */
- if (r->entropy_count > r->poolinfo->POOLBITS)
- r->entropy_count = r->poolinfo->POOLBITS;
+ unsigned long flags;
- xfer_secondary_pool(r, nbytes, tmp);
+ BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
/* Hold lock while accounting */
- spin_lock_irqsave(&r->lock, cpuflags);
+ spin_lock_irqsave(&r->lock, flags);
DEBUG_ENT("trying to extract %d bits from %s\n",
nbytes * 8, r->name);
@@ -1269,75 +1259,111 @@
DEBUG_ENT("debiting %d entropy credits from %s%s\n",
nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
- spin_unlock_irqrestore(&r->lock, cpuflags);
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ return nbytes;
+}
+
+static void extract_buf(struct entropy_store *r, __u32 *buf)
+{
+ int i, x;
+ __u32 data[16];
+
+ /* Hash the pool to get the output */
+ buf[0] = 0x67452301;
+ buf[1] = 0xefcdab89;
+ buf[2] = 0x98badcfe;
+ buf[3] = 0x10325476;
+#ifdef USE_SHA
+ buf[4] = 0xc3d2e1f0;
+#endif
+
+ /*
+ * As we hash the pool, we mix intermediate values of
+ * the hash back into the pool. This eliminates
+ * backtracking attacks (where the attacker knows
+ * the state of the pool plus the current outputs, and
+ * attempts to find previous ouputs), unless the hash
+ * function can be inverted.
+ */
+ for (i = 0, x = 0; i < r->poolinfo->poolwords; i += 16, x+=2) {
+ HASH_TRANSFORM(buf, r->pool+i);
+ add_entropy_words(r, &buf[x%HASH_BUFFER_SIZE], 1);
+ }
+
+ /*
+ * To avoid duplicates, we atomically extract a
+ * portion of the pool while mixing, and hash one
+ * final time.
+ */
+ __add_entropy_words(r, &buf[x%HASH_BUFFER_SIZE], 1, data);
+ HASH_TRANSFORM(buf, data);
+
+ /*
+ * In case the hash function has some recognizable
+ * output pattern, we fold it in half.
+ */
+ for (i = 0; i < HASH_BUFFER_SIZE / 2; i++)
+ buf[i] ^= buf[i + (HASH_BUFFER_SIZE + 1) / 2];
+
+ if (HASH_BUFFER_SIZE & 1) {
+ /* There's a middle word to deal with */
+ x = buf[HASH_BUFFER_SIZE/2];
+ x ^= (x >> 16); /* Fold it in half */
+ ((__u16 *)buf)[HASH_BUFFER_SIZE - 1] = (__u16)x;
+ }
+}
+
+static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+ size_t nbytes, int min, int reserved)
+{
+ ssize_t ret = 0, i;
+ __u32 tmp[TMP_BUF_SIZE];
+
+ xfer_secondary_pool(r, nbytes, tmp);
+ nbytes = account(r, nbytes, min, reserved);
- ret = 0;
while (nbytes) {
- /*
- * Check if we need to break out or reschedule....
- */
- if ((flags & EXTRACT_ENTROPY_USER) && need_resched()) {
+ extract_buf(r, tmp);
+ i = min(nbytes, HASH_BUFFER_SIZE * sizeof(__u32) / 2);
+ memcpy(buf, (__u8 const *)tmp, i);
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ /* Wipe data just returned from memory */
+ memset(tmp, 0, sizeof(tmp));
+
+ return ret;
+}
+
+static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ size_t nbytes)
+{
+ ssize_t ret = 0, i;
+ __u32 tmp[TMP_BUF_SIZE];
+
+ xfer_secondary_pool(r, nbytes, tmp);
+ nbytes = account(r, nbytes, 0, 0);
+
+ while (nbytes) {
+ if (need_resched()) {
if (signal_pending(current)) {
if (ret == 0)
ret = -ERESTARTSYS;
break;
}
-
schedule();
}
- /* Hash the pool to get the output */
- tmp[0] = 0x67452301;
- tmp[1] = 0xefcdab89;
- tmp[2] = 0x98badcfe;
- tmp[3] = 0x10325476;
-#ifdef USE_SHA
- tmp[4] = 0xc3d2e1f0;
-#endif
- /*
- * As we hash the pool, we mix intermediate values of
- * the hash back into the pool. This eliminates
- * backtracking attacks (where the attacker knows
- * the state of the pool plus the current outputs, and
- * attempts to find previous ouputs), unless the hash
- * function can be inverted.
- */
- for (i = 0, x = 0; i < r->poolinfo->poolwords; i += 16, x+=2) {
- HASH_TRANSFORM(tmp, r->pool+i);
- add_entropy_words(r, &tmp[x%HASH_BUFFER_SIZE], 1);
+ extract_buf(r, tmp);
+ i = min(nbytes, HASH_BUFFER_SIZE * sizeof(__u32) / 2);
+ if (copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
}
- /*
- * To avoid duplicates, we atomically extract a
- * portion of the pool while mixing, and hash one
- * final time.
- */
- __add_entropy_words(r, &tmp[x%HASH_BUFFER_SIZE], 1, data);
- HASH_TRANSFORM(tmp, data);
-
- /*
- * In case the hash function has some recognizable
- * output pattern, we fold it in half.
- */
- for (i = 0; i < HASH_BUFFER_SIZE/2; i++)
- tmp[i] ^= tmp[i + (HASH_BUFFER_SIZE+1)/2];
-#if HASH_BUFFER_SIZE & 1 /* There's a middle word to deal with */
- x = tmp[HASH_BUFFER_SIZE/2];
- x ^= (x >> 16); /* Fold it in half */
- ((__u16 *)tmp)[HASH_BUFFER_SIZE-1] = (__u16)x;
-#endif
-
- /* Copy data to destination buffer */
- i = min(nbytes, HASH_BUFFER_SIZE*sizeof(__u32)/2);
- if (flags & EXTRACT_ENTROPY_USER) {
- i -= copy_to_user(buf, (__u8 const *)tmp, i);
- if (!i) {
- ret = -EFAULT;
- break;
- }
- } else
- memcpy(buf, (__u8 const *)tmp, i);
-
nbytes -= i;
buf += i;
ret += i;
@@ -1356,7 +1382,7 @@
*/
void get_random_bytes(void *buf, int nbytes)
{
- extract_entropy(&nonblocking_pool, (char *) buf, nbytes, 0, 0, 0);
+ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
}
EXPORT_SYMBOL(get_random_bytes);
@@ -1445,8 +1471,7 @@
DEBUG_ENT("reading %d bits\n", n*8);
- n = extract_entropy(&blocking_pool, buf, n, 0, 0,
- EXTRACT_ENTROPY_USER);
+ n = extract_entropy_user(&blocking_pool, buf, n);
DEBUG_ENT("read got %d bits (%d still needed)\n",
n*8, (nbytes-n)*8);
@@ -1497,8 +1522,7 @@
urandom_read(struct file * file, char __user * buf,
size_t nbytes, loff_t *ppos)
{
- return extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0,
- EXTRACT_ENTROPY_USER);
+ return extract_entropy_user(&nonblocking_pool, buf, nbytes);
}
static unsigned int
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 10/12] random pt3: Simplify hash folding
2005-01-19 8:17 ` [PATCH 9/12] random pt3: Remove dead MD5 copy Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 11/12] random pt3: Clean up hash buffering Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Simplify output hash folding
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:42:17.993300522 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:42:39.078612373 -0800
@@ -1166,15 +1166,10 @@
* In case the hash function has some recognizable
* output pattern, we fold it in half.
*/
- for (i = 0; i < HASH_BUFFER_SIZE / 2; i++)
- buf[i] ^= buf[i + (HASH_BUFFER_SIZE + 1) / 2];
- if (HASH_BUFFER_SIZE & 1) {
- /* There's a middle word to deal with */
- x = buf[HASH_BUFFER_SIZE/2];
- x ^= (x >> 16); /* Fold it in half */
- ((__u16 *)buf)[HASH_BUFFER_SIZE - 1] = (__u16)x;
- }
+ buf[0] ^= buf[3];
+ buf[1] ^= buf[4];
+ buf[0] ^= rol32(buf[3], 16);
}
static ssize_t extract_entropy(struct entropy_store *r, void * buf,
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 9/12] random pt3: Remove dead MD5 copy
2005-01-19 8:17 ` [PATCH 8/12] random pt3: Break up extract_user Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 10/12] random pt3: Simplify hash folding Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Remove long-dead md5 code.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:40:00.654809689 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:42:17.993300522 -0800
@@ -220,10 +220,6 @@
*
* The code for SHA transform was taken from Peter Gutmann's
* implementation, which has been placed in the public domain.
- * The code for MD5 transform was taken from Colin Plumb's
- * implementation, which has been placed in the public domain.
- * The MD5 cryptographic checksum was devised by Ronald Rivest, and is
- * documented in RFC 1321, "The MD5 Message Digest Algorithm".
*
* Further background information on this topic may be obtained from
* RFC 1750, "Randomness Recommendations for Security", by Donald
@@ -259,7 +255,6 @@
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
#define BATCH_ENTROPY_SIZE 256
-#define USE_SHA
/*
* The minimum number of bits of entropy before we wake up a read on
@@ -802,7 +797,7 @@
/*
* This chunk of code defines a function
- * void HASH_TRANSFORM(__u32 digest[HASH_BUFFER_SIZE + HASH_EXTRA_SIZE],
+ * void sha_transform(__u32 digest[HASH_BUFFER_SIZE + HASH_EXTRA_SIZE],
* __u32 const data[16])
*
* The function hashes the input data to produce a digest in the first
@@ -812,24 +807,13 @@
* and tacking it onto the end of the digest[] array is the quick and
* dirty way of doing it.)
*
- * It so happens that MD5 and SHA share most of the initial vector
- * used to initialize the digest[] array before the first call:
- * 1) 0x67452301
- * 2) 0xefcdab89
- * 3) 0x98badcfe
- * 4) 0x10325476
- * 5) 0xc3d2e1f0 (SHA only)
- *
* For /dev/random purposes, the length of the data being hashed is
* fixed in length, so appending a bit count in the usual way is not
* cryptographically necessary.
*/
-#ifdef USE_SHA
-
#define HASH_BUFFER_SIZE 5
#define HASH_EXTRA_SIZE 80
-#define HASH_TRANSFORM SHATransform
/* Various size/speed tradeoffs are available. Choose 0..3. */
#define SHA_CODE_SIZE 0
@@ -856,7 +840,7 @@
#define subRound(a, b, c, d, e, f, k, data) \
(e += rol32(a, 5) + f(b, c, d) + k + data, b = rol32(b, 30))
-static void SHATransform(__u32 digest[85], __u32 const data[16])
+static void sha_transform(__u32 digest[85], __u32 const data[16])
{
__u32 A, B, C, D, E; /* Local vars */
__u32 TEMP;
@@ -1058,125 +1042,6 @@
#undef K4
#undef subRound
-#else /* !USE_SHA - Use MD5 */
-
-#define HASH_BUFFER_SIZE 4
-#define HASH_EXTRA_SIZE 0
-#define HASH_TRANSFORM MD5Transform
-
-/*
- * MD5 transform algorithm, taken from code written by Colin Plumb,
- * and put into the public domain
- */
-
-/* The four core functions - F1 is optimized somewhat */
-
-/* #define F1(x, y, z) (x & y | ~x & z) */
-#define F1(x, y, z) (z ^ (x & (y ^ z)))
-#define F2(x, y, z) F1(z, x, y)
-#define F3(x, y, z) (x ^ y ^ z)
-#define F4(x, y, z) (y ^ (x | ~z))
-
-/* This is the central step in the MD5 algorithm. */
-#define MD5STEP(f, w, x, y, z, data, s) \
- (w += f(x, y, z) + data, w = w << s | w >> (32 - s), w += x )
-
-/*
- * The core of the MD5 algorithm, this alters an existing MD5 hash to
- * reflect the addition of 16 longwords of new data. MD5Update blocks
- * the data and converts bytes into longwords for this routine.
- */
-static void MD5Transform(__u32 buf[HASH_BUFFER_SIZE], __u32 const in[16])
-{
- __u32 a, b, c, d;
-
- a = buf[0];
- b = buf[1];
- c = buf[2];
- d = buf[3];
-
- MD5STEP(F1, a, b, c, d, in[ 0]+0xd76aa478, 7);
- MD5STEP(F1, d, a, b, c, in[ 1]+0xe8c7b756, 12);
- MD5STEP(F1, c, d, a, b, in[ 2]+0x242070db, 17);
- MD5STEP(F1, b, c, d, a, in[ 3]+0xc1bdceee, 22);
- MD5STEP(F1, a, b, c, d, in[ 4]+0xf57c0faf, 7);
- MD5STEP(F1, d, a, b, c, in[ 5]+0x4787c62a, 12);
- MD5STEP(F1, c, d, a, b, in[ 6]+0xa8304613, 17);
- MD5STEP(F1, b, c, d, a, in[ 7]+0xfd469501, 22);
- MD5STEP(F1, a, b, c, d, in[ 8]+0x698098d8, 7);
- MD5STEP(F1, d, a, b, c, in[ 9]+0x8b44f7af, 12);
- MD5STEP(F1, c, d, a, b, in[10]+0xffff5bb1, 17);
- MD5STEP(F1, b, c, d, a, in[11]+0x895cd7be, 22);
- MD5STEP(F1, a, b, c, d, in[12]+0x6b901122, 7);
- MD5STEP(F1, d, a, b, c, in[13]+0xfd987193, 12);
- MD5STEP(F1, c, d, a, b, in[14]+0xa679438e, 17);
- MD5STEP(F1, b, c, d, a, in[15]+0x49b40821, 22);
-
- MD5STEP(F2, a, b, c, d, in[ 1]+0xf61e2562, 5);
- MD5STEP(F2, d, a, b, c, in[ 6]+0xc040b340, 9);
- MD5STEP(F2, c, d, a, b, in[11]+0x265e5a51, 14);
- MD5STEP(F2, b, c, d, a, in[ 0]+0xe9b6c7aa, 20);
- MD5STEP(F2, a, b, c, d, in[ 5]+0xd62f105d, 5);
- MD5STEP(F2, d, a, b, c, in[10]+0x02441453, 9);
- MD5STEP(F2, c, d, a, b, in[15]+0xd8a1e681, 14);
- MD5STEP(F2, b, c, d, a, in[ 4]+0xe7d3fbc8, 20);
- MD5STEP(F2, a, b, c, d, in[ 9]+0x21e1cde6, 5);
- MD5STEP(F2, d, a, b, c, in[14]+0xc33707d6, 9);
- MD5STEP(F2, c, d, a, b, in[ 3]+0xf4d50d87, 14);
- MD5STEP(F2, b, c, d, a, in[ 8]+0x455a14ed, 20);
- MD5STEP(F2, a, b, c, d, in[13]+0xa9e3e905, 5);
- MD5STEP(F2, d, a, b, c, in[ 2]+0xfcefa3f8, 9);
- MD5STEP(F2, c, d, a, b, in[ 7]+0x676f02d9, 14);
- MD5STEP(F2, b, c, d, a, in[12]+0x8d2a4c8a, 20);
-
- MD5STEP(F3, a, b, c, d, in[ 5]+0xfffa3942, 4);
- MD5STEP(F3, d, a, b, c, in[ 8]+0x8771f681, 11);
- MD5STEP(F3, c, d, a, b, in[11]+0x6d9d6122, 16);
- MD5STEP(F3, b, c, d, a, in[14]+0xfde5380c, 23);
- MD5STEP(F3, a, b, c, d, in[ 1]+0xa4beea44, 4);
- MD5STEP(F3, d, a, b, c, in[ 4]+0x4bdecfa9, 11);
- MD5STEP(F3, c, d, a, b, in[ 7]+0xf6bb4b60, 16);
- MD5STEP(F3, b, c, d, a, in[10]+0xbebfbc70, 23);
- MD5STEP(F3, a, b, c, d, in[13]+0x289b7ec6, 4);
- MD5STEP(F3, d, a, b, c, in[ 0]+0xeaa127fa, 11);
- MD5STEP(F3, c, d, a, b, in[ 3]+0xd4ef3085, 16);
- MD5STEP(F3, b, c, d, a, in[ 6]+0x04881d05, 23);
- MD5STEP(F3, a, b, c, d, in[ 9]+0xd9d4d039, 4);
- MD5STEP(F3, d, a, b, c, in[12]+0xe6db99e5, 11);
- MD5STEP(F3, c, d, a, b, in[15]+0x1fa27cf8, 16);
- MD5STEP(F3, b, c, d, a, in[ 2]+0xc4ac5665, 23);
-
- MD5STEP(F4, a, b, c, d, in[ 0]+0xf4292244, 6);
- MD5STEP(F4, d, a, b, c, in[ 7]+0x432aff97, 10);
- MD5STEP(F4, c, d, a, b, in[14]+0xab9423a7, 15);
- MD5STEP(F4, b, c, d, a, in[ 5]+0xfc93a039, 21);
- MD5STEP(F4, a, b, c, d, in[12]+0x655b59c3, 6);
- MD5STEP(F4, d, a, b, c, in[ 3]+0x8f0ccc92, 10);
- MD5STEP(F4, c, d, a, b, in[10]+0xffeff47d, 15);
- MD5STEP(F4, b, c, d, a, in[ 1]+0x85845dd1, 21);
- MD5STEP(F4, a, b, c, d, in[ 8]+0x6fa87e4f, 6);
- MD5STEP(F4, d, a, b, c, in[15]+0xfe2ce6e0, 10);
- MD5STEP(F4, c, d, a, b, in[ 6]+0xa3014314, 15);
- MD5STEP(F4, b, c, d, a, in[13]+0x4e0811a1, 21);
- MD5STEP(F4, a, b, c, d, in[ 4]+0xf7537e82, 6);
- MD5STEP(F4, d, a, b, c, in[11]+0xbd3af235, 10);
- MD5STEP(F4, c, d, a, b, in[ 2]+0x2ad7d2bb, 15);
- MD5STEP(F4, b, c, d, a, in[ 9]+0xeb86d391, 21);
-
- buf[0] += a;
- buf[1] += b;
- buf[2] += c;
- buf[3] += d;
-}
-
-#undef F1
-#undef F2
-#undef F3
-#undef F4
-#undef MD5STEP
-
-#endif /* !USE_SHA */
-
/*********************************************************************
*
* Entropy extraction routines
@@ -1274,9 +1139,7 @@
buf[1] = 0xefcdab89;
buf[2] = 0x98badcfe;
buf[3] = 0x10325476;
-#ifdef USE_SHA
buf[4] = 0xc3d2e1f0;
-#endif
/*
* As we hash the pool, we mix intermediate values of
@@ -1287,7 +1150,7 @@
* function can be inverted.
*/
for (i = 0, x = 0; i < r->poolinfo->poolwords; i += 16, x+=2) {
- HASH_TRANSFORM(buf, r->pool+i);
+ sha_transform(buf, r->pool+i);
add_entropy_words(r, &buf[x%HASH_BUFFER_SIZE], 1);
}
@@ -1297,7 +1160,7 @@
* final time.
*/
__add_entropy_words(r, &buf[x%HASH_BUFFER_SIZE], 1, data);
- HASH_TRANSFORM(buf, data);
+ sha_transform(buf, data);
/*
* In case the hash function has some recognizable
@@ -2159,7 +2022,7 @@
tmp[0]=saddr;
tmp[1]=daddr;
tmp[2]=(sport << 16) + dport;
- HASH_TRANSFORM(tmp+16, tmp);
+ sha_transform(tmp+16, tmp);
seq = tmp[17] + sseq + (count << COOKIEBITS);
memcpy(tmp + 3, syncookie_secret[1], sizeof(syncookie_secret[1]));
@@ -2167,7 +2030,7 @@
tmp[1]=daddr;
tmp[2]=(sport << 16) + dport;
tmp[3] = count; /* minute counter */
- HASH_TRANSFORM(tmp + 16, tmp);
+ sha_transform(tmp + 16, tmp);
/* Add in the second hash and the data */
return seq + ((tmp[17] + data) & COOKIEMASK);
@@ -2196,7 +2059,7 @@
tmp[0]=saddr;
tmp[1]=daddr;
tmp[2]=(sport << 16) + dport;
- HASH_TRANSFORM(tmp + 16, tmp);
+ sha_transform(tmp + 16, tmp);
cookie -= tmp[17] + sseq;
/* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */
@@ -2209,7 +2072,7 @@
tmp[1] = daddr;
tmp[2] = (sport << 16) + dport;
tmp[3] = count - diff; /* minute counter */
- HASH_TRANSFORM(tmp + 16, tmp);
+ sha_transform(tmp + 16, tmp);
return (cookie - tmp[17]) & COOKIEMASK; /* Leaving the data behind */
}
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 12/12] random pt3: Remove entropy batching
2005-01-19 8:17 ` [PATCH 11/12] random pt3: Clean up hash buffering Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
0 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Rather than batching up entropy samples, resulting in longer lock hold
times when we actually process the samples, mix in samples
immediately. The trickle code should eliminate almost all the
additional interrupt-time overhead this would otherwise incur, with or
without locking.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:45:13.176966505 -0800
+++ rnd/drivers/char/random.c 2005-01-18 11:01:30.616353586 -0800
@@ -238,7 +238,6 @@
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
-#include <linux/workqueue.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
@@ -254,7 +253,6 @@
*/
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
-#define BATCH_ENTROPY_SIZE 256
#define SEC_XFER_SIZE 512
/*
@@ -552,118 +550,6 @@
spin_unlock_irqrestore(&r->lock, flags);
}
-/**********************************************************************
- *
- * Entropy batch input management
- *
- * We batch entropy to be added to avoid increasing interrupt latency
- *
- **********************************************************************/
-
-struct sample {
- __u32 data[2];
- int credit;
-};
-
-static struct sample *batch_entropy_pool, *batch_entropy_copy;
-static int batch_head, batch_tail;
-static spinlock_t batch_lock = SPIN_LOCK_UNLOCKED;
-
-static int batch_max;
-static void batch_entropy_process(void *private_);
-static DECLARE_WORK(batch_work, batch_entropy_process, NULL);
-
-/* note: the size must be a power of 2 */
-static int __init batch_entropy_init(int size, struct entropy_store *r)
-{
- batch_entropy_pool = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
- if (!batch_entropy_pool)
- return -1;
- batch_entropy_copy = kmalloc(size*sizeof(struct sample), GFP_KERNEL);
- if (!batch_entropy_copy) {
- kfree(batch_entropy_pool);
- return -1;
- }
- batch_head = batch_tail = 0;
- batch_work.data = r;
- batch_max = size;
- return 0;
-}
-
-/*
- * Changes to the entropy data is put into a queue rather than being added to
- * the entropy counts directly. This is presumably to avoid doing heavy
- * hashing calculations during an interrupt in add_timer_randomness().
- * Instead, the entropy is only added to the pool by keventd.
- */
-static void batch_entropy_store(u32 a, u32 b, int num)
-{
- int new;
- unsigned long flags;
-
- if (!batch_max)
- return;
-
- spin_lock_irqsave(&batch_lock, flags);
-
- batch_entropy_pool[batch_head].data[0] = a;
- batch_entropy_pool[batch_head].data[1] = b;
- batch_entropy_pool[batch_head].credit = num;
-
- if (((batch_head - batch_tail) & (batch_max - 1)) >= (batch_max / 2))
- schedule_delayed_work(&batch_work, 1);
-
- new = (batch_head + 1) & (batch_max - 1);
- if (new == batch_tail)
- DEBUG_ENT("batch entropy buffer full\n");
- else
- batch_head = new;
-
- spin_unlock_irqrestore(&batch_lock, flags);
-}
-
-/*
- * Flush out the accumulated entropy operations, adding entropy to the
- * input pool. If that pool has enough entropy, alternate
- * between randomizing the data of all pools.
- */
-static void batch_entropy_process(void *private_)
-{
- struct entropy_store *r = (struct entropy_store *) private_, *p;
- int max_entropy = r->poolinfo->POOLBITS;
- unsigned head, tail;
-
- /* Mixing into the pool is expensive, so copy over the batch
- * data and release the batch lock. The pool is at least half
- * full, so don't worry too much about copying only the used
- * part.
- */
- spin_lock_irq(&batch_lock);
-
- memcpy(batch_entropy_copy, batch_entropy_pool,
- batch_max * sizeof(struct sample));
-
- head = batch_head;
- tail = batch_tail;
- batch_tail = batch_head;
-
- spin_unlock_irq(&batch_lock);
-
- p = r;
- while (head != tail) {
- if (r->entropy_count >= max_entropy) {
- r = (r == &blocking_pool) ? &input_pool :
- &blocking_pool;
- max_entropy = r->poolinfo->POOLBITS;
- }
- add_entropy_words(r, batch_entropy_copy[tail].data, 2);
- credit_entropy_store(r, batch_entropy_copy[tail].credit);
- tail = (tail + 1) & (batch_max - 1);
- }
- if (p->entropy_count >= random_read_wakeup_thresh)
- wake_up_interruptible(&random_read_wait);
-}
-
/*********************************************************************
*
* Entropy input management
@@ -692,9 +578,12 @@
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
- cycles_t data;
- long delta, delta2, delta3, time;
- int entropy = 0;
+ struct {
+ cycles_t cycles;
+ long jiffies;
+ unsigned num;
+ } sample;
+ long delta, delta2, delta3;
preempt_disable();
/* if over the trickle threshold, use only 1 in 4096 samples */
@@ -702,16 +591,20 @@
(__get_cpu_var(trickle_count)++ & 0xfff))
goto out;
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+ sample.num = num;
+ add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4);
+
/*
* Calculate number of bits of randomness we probably added.
* We take into account the first, second and third-order deltas
* in order to make our estimate.
*/
- time = jiffies;
if (!state->dont_count_entropy) {
- delta = time - state->last_time;
- state->last_time = time;
+ delta = sample.jiffies - state->last_time;
+ state->last_time = sample.jiffies;
delta2 = delta - state->last_delta;
state->last_delta = delta;
@@ -735,20 +628,13 @@
* Round down by 1 bit on general principles,
 * and limit entropy estimate to 12 bits.
*/
- entropy = min_t(int, fls(delta>>1), 11);
+ credit_entropy_store(&input_pool,
+ min_t(int, fls(delta>>1), 11));
}
- /*
- * Use get_cycles() if implemented, otherwise fall back to
- * jiffies.
- */
- data = get_cycles();
- if (data)
- num ^= (u32)((data >> 31) >> 1);
- else
- data = time;
+ if(input_pool.entropy_count >= random_read_wakeup_thresh)
+ wake_up_interruptible(&random_read_wait);
- batch_entropy_store(num, data, entropy);
out:
preempt_enable();
}
@@ -1273,9 +1159,6 @@
static int __init rand_initialize(void)
{
- if (batch_entropy_init(BATCH_ENTROPY_SIZE, &input_pool))
- return -1;
-
init_std_data(&input_pool);
init_std_data(&blocking_pool);
init_std_data(&nonblocking_pool);
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH 11/12] random pt3: Clean up hash buffering
2005-01-19 8:17 ` [PATCH 10/12] random pt3: Simplify hash folding Matt Mackall
@ 2005-01-19 8:17 ` Matt Mackall
2005-01-19 8:17 ` [PATCH 12/12] random pt3: Remove entropy batching Matt Mackall
0 siblings, 1 reply; 13+ messages in thread
From: Matt Mackall @ 2005-01-19 8:17 UTC (permalink / raw)
To: Andrew Morton, Theodore Ts'o; +Cc: linux-kernel
Clean up buffer usage for SHA and reseed. This makes the code more
readable and reduces worst-case stack usage.
Signed-off-by: Matt Mackall <mpm@selenic.com>
Index: rnd/drivers/char/random.c
===================================================================
--- rnd.orig/drivers/char/random.c 2005-01-18 10:42:39.078612373 -0800
+++ rnd/drivers/char/random.c 2005-01-18 10:45:13.176966505 -0800
@@ -255,6 +255,7 @@
#define INPUT_POOL_WORDS 128
#define OUTPUT_POOL_WORDS 32
#define BATCH_ENTROPY_SIZE 256
+#define SEC_XFER_SIZE 512
/*
* The minimum number of bits of entropy before we wake up a read on
@@ -813,6 +814,7 @@
*/
#define HASH_BUFFER_SIZE 5
+#define EXTRACT_SIZE 10
#define HASH_EXTRA_SIZE 80
/* Various size/speed tradeoffs are available. Choose 0..3. */
@@ -1048,9 +1050,6 @@
*
*********************************************************************/
-#define TMP_BUF_SIZE (HASH_BUFFER_SIZE + HASH_EXTRA_SIZE)
-#define SEC_XFER_SIZE (TMP_BUF_SIZE*4)
-
static ssize_t extract_entropy(struct entropy_store *r, void * buf,
size_t nbytes, int min, int rsvd);
@@ -1059,13 +1058,14 @@
* from the primary pool to the secondary extraction pool. We make
* sure we pull enough for a 'catastrophic reseed'.
*/
-static void xfer_secondary_pool(struct entropy_store *r,
- size_t nbytes, __u32 *tmp)
+static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
+ __u32 tmp[OUTPUT_POOL_WORDS];
+
if (r->pull && r->entropy_count < nbytes * 8 &&
r->entropy_count < r->poolinfo->POOLBITS) {
int bytes = max_t(int, random_read_wakeup_thresh / 8,
- min_t(int, nbytes, TMP_BUF_SIZE));
+ min_t(int, nbytes, sizeof(tmp)));
int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
DEBUG_ENT("going to reseed %s with %d bits "
@@ -1129,10 +1129,10 @@
return nbytes;
}
-static void extract_buf(struct entropy_store *r, __u32 *buf)
+static void extract_buf(struct entropy_store *r, __u8 *out)
{
int i, x;
- __u32 data[16];
+ __u32 data[16], buf[85];
/* Hash the pool to get the output */
buf[0] = 0x67452301;
@@ -1151,7 +1151,7 @@
*/
for (i = 0, x = 0; i < r->poolinfo->poolwords; i += 16, x+=2) {
sha_transform(buf, r->pool+i);
- add_entropy_words(r, &buf[x%HASH_BUFFER_SIZE], 1);
+ add_entropy_words(r, &buf[x % 5], 1);
}
/*
@@ -1159,7 +1159,7 @@
* portion of the pool while mixing, and hash one
* final time.
*/
- __add_entropy_words(r, &buf[x%HASH_BUFFER_SIZE], 1, data);
+ __add_entropy_words(r, &buf[x % 5], 1, data);
sha_transform(buf, data);
/*
@@ -1170,21 +1170,23 @@
buf[0] ^= buf[3];
buf[1] ^= buf[4];
buf[0] ^= rol32(buf[3], 16);
+ memcpy(out, buf, EXTRACT_SIZE);
+ memset(buf, 0, sizeof(buf));
}
static ssize_t extract_entropy(struct entropy_store *r, void * buf,
size_t nbytes, int min, int reserved)
{
ssize_t ret = 0, i;
- __u32 tmp[TMP_BUF_SIZE];
+ __u8 tmp[EXTRACT_SIZE];
- xfer_secondary_pool(r, nbytes, tmp);
+ xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
while (nbytes) {
extract_buf(r, tmp);
- i = min(nbytes, HASH_BUFFER_SIZE * sizeof(__u32) / 2);
- memcpy(buf, (__u8 const *)tmp, i);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+ memcpy(buf, tmp, i);
nbytes -= i;
buf += i;
ret += i;
@@ -1200,9 +1202,9 @@
size_t nbytes)
{
ssize_t ret = 0, i;
- __u32 tmp[TMP_BUF_SIZE];
+ __u8 tmp[EXTRACT_SIZE];
- xfer_secondary_pool(r, nbytes, tmp);
+ xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
while (nbytes) {
@@ -1216,7 +1218,7 @@
}
extract_buf(r, tmp);
- i = min(nbytes, HASH_BUFFER_SIZE * sizeof(__u32) / 2);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
if (copy_to_user(buf, tmp, i)) {
ret = -EFAULT;
break;
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2005-01-19 8:43 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2005-01-19 8:17 [PATCH 0/12] random pt3: More core and accounting cleanups Matt Mackall
2005-01-19 8:17 ` [PATCH 1/12] random pt3: More meaningful pool names Matt Mackall
2005-01-19 8:17 ` [PATCH 2/12] random pt3: Static allocation of pools Matt Mackall
2005-01-19 8:17 ` [PATCH 3/12] random pt3: Static sysctl bits Matt Mackall
2005-01-19 8:17 ` [PATCH 4/12] random pt3: Catastrophic reseed checks Matt Mackall
2005-01-19 8:17 ` [PATCH 5/12] random pt3: Entropy reservation accounting Matt Mackall
2005-01-19 8:17 ` [PATCH 6/12] random pt3: Reservation flag in pool struct Matt Mackall
2005-01-19 8:17 ` [PATCH 7/12] random pt3: Reseed pointer " Matt Mackall
2005-01-19 8:17 ` [PATCH 8/12] random pt3: Break up extract_user Matt Mackall
2005-01-19 8:17 ` [PATCH 9/12] random pt3: Remove dead MD5 copy Matt Mackall
2005-01-19 8:17 ` [PATCH 10/12] random pt3: Simplify hash folding Matt Mackall
2005-01-19 8:17 ` [PATCH 11/12] random pt3: Clean up hash buffering Matt Mackall
2005-01-19 8:17 ` [PATCH 12/12] random pt3: Remove entropy batching Matt Mackall
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox