public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 00 of 12] random cleanups for 2.6.25
@ 2008-01-18  2:33 Matt Mackall
  2008-01-18  2:33 ` [PATCH 01 of 12] random: clean up checkpatch complaints Matt Mackall
                   ` (11 more replies)
  0 siblings, 12 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

This is a collection of drivers/char/random.c cleanups and fixes for
2.6.25. Please apply.


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 01 of 12] random: clean up checkpatch complaints
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 02 of 12] random: consolidate wakeup logic Matt Mackall
                   ` (10 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r 5595adaea70f -r 905475c480bd drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 13:26:54 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -272,7 +272,7 @@
 
 static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
 
-static DEFINE_PER_CPU(int, trickle_count) = 0;
+static DEFINE_PER_CPU(int, trickle_count);
 
 /*
  * A pool of size .poolwords is stirred with a primitive polynomial
@@ -372,15 +372,16 @@
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
 
 #if 0
-static int debug = 0;
+static int debug;
 module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { if (debug) \
-	printk(KERN_DEBUG "random %04d %04d %04d: " \
-	fmt,\
-	input_pool.entropy_count,\
-	blocking_pool.entropy_count,\
-	nonblocking_pool.entropy_count,\
-	## arg); } while (0)
+#define DEBUG_ENT(fmt, arg...) do { \
+	if (debug) \
+		printk(KERN_DEBUG "random %04d %04d %04d: " \
+		fmt,\
+		input_pool.entropy_count,\
+		blocking_pool.entropy_count,\
+		nonblocking_pool.entropy_count,\
+		## arg); } while (0)
 #else
 #define DEBUG_ENT(fmt, arg...) do {} while (0)
 #endif
@@ -551,7 +552,7 @@
 /* There is one of these per entropy source */
 struct timer_rand_state {
 	cycles_t last_time;
-	long last_delta,last_delta2;
+	long last_delta, last_delta2;
 	unsigned dont_count_entropy:1;
 };
 
@@ -624,7 +625,7 @@
 				     min_t(int, fls(delta>>1), 11));
 	}
 
-	if(input_pool.entropy_count >= random_read_wakeup_thresh)
+	if (input_pool.entropy_count >= random_read_wakeup_thresh)
 		wake_up_interruptible(&random_read_wait);
 
 out:
@@ -667,7 +668,6 @@
 	add_timer_randomness(disk->random,
 			     0x100 + MKDEV(disk->major, disk->first_minor));
 }
-
 EXPORT_SYMBOL(add_disk_randomness);
 #endif
 
@@ -679,7 +679,7 @@
  *
  *********************************************************************/
 
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 			       size_t nbytes, int min, int rsvd);
 
 /*
@@ -706,8 +706,8 @@
 			  "(%d of %d requested)\n",
 			  r->name, bytes * 8, nbytes * 8, r->entropy_count);
 
-		bytes=extract_entropy(r->pull, tmp, bytes,
-				      random_read_wakeup_thresh / 8, rsvd);
+		bytes = extract_entropy(r->pull, tmp, bytes,
+					random_read_wakeup_thresh / 8, rsvd);
 		add_entropy_words(r, tmp, (bytes + 3) / 4);
 		credit_entropy_store(r, bytes*8);
 	}
@@ -746,7 +746,7 @@
 		if (r->limit && nbytes + reserved >= r->entropy_count / 8)
 			nbytes = r->entropy_count/8 - reserved;
 
-		if(r->entropy_count / 8 >= nbytes + reserved)
+		if (r->entropy_count / 8 >= nbytes + reserved)
 			r->entropy_count -= nbytes*8;
 		else
 			r->entropy_count = reserved;
@@ -804,7 +804,7 @@
 	memset(buf, 0, sizeof(buf));
 }
 
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 			       size_t nbytes, int min, int reserved)
 {
 	ssize_t ret = 0, i;
@@ -874,7 +874,6 @@
 {
 	extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
 }
-
 EXPORT_SYMBOL(get_random_bytes);
 
 /*
@@ -942,7 +941,7 @@
 #endif
 
 static ssize_t
-random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	ssize_t n, retval = 0, count = 0;
 
@@ -1004,8 +1003,7 @@
 }
 
 static ssize_t
-urandom_read(struct file * file, char __user * buf,
-		      size_t nbytes, loff_t *ppos)
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
 	return extract_entropy_user(&nonblocking_pool, buf, nbytes);
 }
@@ -1047,9 +1045,8 @@
 	return 0;
 }
 
-static ssize_t
-random_write(struct file * file, const char __user * buffer,
-	     size_t count, loff_t *ppos)
+static ssize_t random_write(struct file *file, const char __user *buffer,
+			    size_t count, loff_t *ppos)
 {
 	size_t ret;
 	struct inode *inode = file->f_path.dentry->d_inode;
@@ -1066,9 +1063,8 @@
 	return (ssize_t)count;
 }
 
-static int
-random_ioctl(struct inode * inode, struct file * file,
-	     unsigned int cmd, unsigned long arg)
+static int random_ioctl(struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg)
 {
 	int size, ent_count;
 	int __user *p = (int __user *)arg;
@@ -1159,7 +1155,6 @@
 	/* Set the UUID variant to DCE */
 	uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
 }
-
 EXPORT_SYMBOL(generate_random_uuid);
 
 /********************************************************************
@@ -1334,14 +1329,14 @@
  * Rotation is separate from addition to prevent recomputation
  */
 #define ROUND(f, a, b, c, d, x, s)	\
-	(a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
+	(a += f(b, c, d) + in[x], a = (a << s) | (a >> (32 - s)))
 #define K1 0
 #define K2 013240474631UL
 #define K3 015666365641UL
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 
-static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12])
+static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
 {
 	__u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
 
@@ -1489,8 +1484,8 @@
 	 */
 
 	memcpy(hash, saddr, 16);
-	hash[4]=((__force u16)sport << 16) + (__force u16)dport;
-	memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+	hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
+	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
 
 	seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
 	seq += keyptr->count;
@@ -1540,10 +1535,10 @@
 	 *  Note that the words are placed into the starting vector, which is
 	 *  then mixed with a partial MD4 over random data.
 	 */
-	hash[0]=(__force u32)saddr;
-	hash[1]=(__force u32)daddr;
-	hash[2]=((__force u16)sport << 16) + (__force u16)dport;
-	hash[3]=keyptr->secret[11];
+	hash[0] = (__force u32)saddr;
+	hash[1] = (__force u32)daddr;
+	hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+	hash[3] = keyptr->secret[11];
 
 	seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
 	seq += keyptr->count;
@@ -1558,10 +1553,7 @@
 	 *	Choosing a clock of 64 ns period is OK. (period of 274 s)
 	 */
 	seq += ktime_to_ns(ktime_get_real()) >> 6;
-#if 0
-	printk("init_seq(%lx, %lx, %d, %d) = %d\n",
-	       saddr, daddr, sport, dport, seq);
-#endif
+
 	return seq;
 }
 
@@ -1584,14 +1576,15 @@
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+			       __be16 dport)
 {
 	struct keydata *keyptr = get_keyptr();
 	u32 hash[12];
 
 	memcpy(hash, saddr, 16);
 	hash[4] = (__force u32)dport;
-	memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+	memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
 
 	return twothirdsMD4Transform((const __u32 *)daddr, hash);
 }
@@ -1619,13 +1612,9 @@
 
 	seq += ktime_to_ns(ktime_get_real());
 	seq &= (1ull << 48) - 1;
-#if 0
-	printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
-	       saddr, daddr, sport, dport, seq);
-#endif
+
 	return seq;
 }
-
 EXPORT_SYMBOL(secure_dccp_sequence_number);
 #endif
 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 02 of 12] random: consolidate wakeup logic
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
  2008-01-18  2:33 ` [PATCH 01 of 12] random: clean up checkpatch complaints Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 03 of 12] random: use unlocked_ioctl Matt Mackall
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r 905475c480bd -r f814137b0bfc drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -540,6 +540,10 @@
 				  nbits, r->name);
 	}
 
+	/* should we wake readers? */
+	if (r == &input_pool && r->entropy_count >= random_read_wakeup_thresh)
+		wake_up_interruptible(&random_read_wait);
+
 	spin_unlock_irqrestore(&r->lock, flags);
 }
 
@@ -624,10 +628,6 @@
 		credit_entropy_store(&input_pool,
 				     min_t(int, fls(delta>>1), 11));
 	}
-
-	if (input_pool.entropy_count >= random_read_wakeup_thresh)
-		wake_up_interruptible(&random_read_wait);
-
 out:
 	preempt_enable();
 }
@@ -1082,12 +1082,6 @@
 		if (get_user(ent_count, p))
 			return -EFAULT;
 		credit_entropy_store(&input_pool, ent_count);
-		/*
-		 * Wake up waiting processes if we have enough
-		 * entropy.
-		 */
-		if (input_pool.entropy_count >= random_read_wakeup_thresh)
-			wake_up_interruptible(&random_read_wait);
 		return 0;
 	case RNDADDENTROPY:
 		if (!capable(CAP_SYS_ADMIN))
@@ -1103,12 +1097,6 @@
 		if (retval < 0)
 			return retval;
 		credit_entropy_store(&input_pool, ent_count);
-		/*
-		 * Wake up waiting processes if we have enough
-		 * entropy.
-		 */
-		if (input_pool.entropy_count >= random_read_wakeup_thresh)
-			wake_up_interruptible(&random_read_wait);
 		return 0;
 	case RNDZAPENTCNT:
 	case RNDCLEARPOOL:

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 03 of 12] random: use unlocked_ioctl
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
  2008-01-18  2:33 ` [PATCH 01 of 12] random: clean up checkpatch complaints Matt Mackall
  2008-01-18  2:33 ` [PATCH 02 of 12] random: consolidate wakeup logic Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 04 of 12] random: reuse rand_initialize Matt Mackall
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

No locking actually needed.

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r f814137b0bfc -r 6f8bed9c59f7 drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -1063,8 +1063,7 @@
 	return (ssize_t)count;
 }
 
-static int random_ioctl(struct inode *inode, struct file *file,
-			unsigned int cmd, unsigned long arg)
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 {
 	int size, ent_count;
 	int __user *p = (int __user *)arg;
@@ -1072,8 +1071,8 @@
 
 	switch (cmd) {
 	case RNDGETENTCNT:
-		ent_count = input_pool.entropy_count;
-		if (put_user(ent_count, p))
+		/* inherently racy, no point locking */
+		if (put_user(input_pool.entropy_count, p))
 			return -EFAULT;
 		return 0;
 	case RNDADDTOENTCNT:
@@ -1116,13 +1115,13 @@
 	.read  = random_read,
 	.write = random_write,
 	.poll  = random_poll,
-	.ioctl = random_ioctl,
+	.unlocked_ioctl = random_ioctl,
 };
 
 const struct file_operations urandom_fops = {
 	.read  = urandom_read,
 	.write = random_write,
-	.ioctl = random_ioctl,
+	.unlocked_ioctl = random_ioctl,
 };
 
 /***************************************************************

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 04 of 12] random: reuse rand_initialize
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (2 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 03 of 12] random: use unlocked_ioctl Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 05 of 12] random: improve variable naming, clear extract buffer Matt Mackall
                   ` (7 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r 6f8bed9c59f7 -r 3e0b0226df90 drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -900,7 +900,7 @@
 			  sizeof(*(utsname()))/4);
 }
 
-static int __init rand_initialize(void)
+static int rand_initialize(void)
 {
 	init_std_data(&input_pool);
 	init_std_data(&blocking_pool);
@@ -1102,9 +1102,7 @@
 		/* Clear the entropy pool counters. */
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		init_std_data(&input_pool);
-		init_std_data(&blocking_pool);
-		init_std_data(&nonblocking_pool);
+		rand_initialize();
 		return 0;
 	default:
 		return -EINVAL;

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 05 of 12] random: improve variable naming, clear extract buffer
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (3 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 04 of 12] random: reuse rand_initialize Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 06 of 12] random: make backtracking attacks harder Matt Mackall
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

- split the SHA variables apart into hash and workspace
- rename data to extract
- wipe extract and workspace after hashing

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r 3e0b0226df90 -r 42aa9f950f97 drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -766,9 +766,9 @@
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
 	int i;
-	__u32 data[16], buf[5 + SHA_WORKSPACE_WORDS];
+	__u32 extract[16], hash[5], workspace[SHA_WORKSPACE_WORDS];
 
-	sha_init(buf);
+	sha_init(hash);
 	/*
 	 * As we hash the pool, we mix intermediate values of
 	 * the hash back into the pool.  This eliminates
@@ -779,9 +779,9 @@
 	 */
 	for (i = 0; i < r->poolinfo->poolwords; i += 16) {
 		/* hash blocks of 16 words = 512 bits */
-		sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
+		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
 		/* feed back portion of the resulting hash */
-		add_entropy_words(r, &buf[i % 5], 1);
+		add_entropy_words(r, &hash[i % 5], 1);
 	}
 
 	/*
@@ -789,19 +789,21 @@
 	 * portion of the pool while mixing, and hash one
 	 * final time.
 	 */
-	__add_entropy_words(r, &buf[i % 5], 1, data);
-	sha_transform(buf, (__u8 *)data, buf + 5);
+	__add_entropy_words(r, &hash[i % 5], 1, extract);
+	sha_transform(hash, (__u8 *)extract, workspace);
+	memset(extract, 0, sizeof(extract));
+	memset(workspace, 0, sizeof(workspace));
 
 	/*
 	 * In case the hash function has some recognizable
 	 * output pattern, we fold it in half.
 	 */
 
-	buf[0] ^= buf[3];
-	buf[1] ^= buf[4];
-	buf[2] ^= rol32(buf[2], 16);
-	memcpy(out, buf, EXTRACT_SIZE);
-	memset(buf, 0, sizeof(buf));
+	hash[0] ^= hash[3];
+	hash[1] ^= hash[4];
+	hash[2] ^= rol32(hash[2], 16);
+	memcpy(out, hash, EXTRACT_SIZE);
+	memset(hash, 0, sizeof(hash));
 }
 
 static ssize_t extract_entropy(struct entropy_store *r, void *buf,

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 06 of 12] random: make backtracking attacks harder
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (4 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 05 of 12] random: improve variable naming, clear extract buffer Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 07 of 12] random: remove cacheline alignment for locks Matt Mackall
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

At each extraction, we change (poolbits / 16) + 32 bits in the pool,
or 96 bits in the case of the secondary pools. Thus, a brute-force
backtracking attack on the pool state is less difficult than breaking
the hash. In certain cases, this difficulty may be reduced to 2^64
iterations.

Instead, hash the entire pool in one go, then feed back the whole hash
(160 bits) in one go. This will make backtracking at least as hard as
inverting the hash.

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r 42aa9f950f97 -r 9569d3011032 drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -768,37 +768,35 @@
 	int i;
 	__u32 extract[16], hash[5], workspace[SHA_WORKSPACE_WORDS];
 
+	/* Generate a hash across the pool, 16 words (512 bits) at a time */
 	sha_init(hash);
-	/*
-	 * As we hash the pool, we mix intermediate values of
-	 * the hash back into the pool.  This eliminates
-	 * backtracking attacks (where the attacker knows
-	 * the state of the pool plus the current outputs, and
-	 * attempts to find previous ouputs), unless the hash
-	 * function can be inverted.
-	 */
-	for (i = 0; i < r->poolinfo->poolwords; i += 16) {
-		/* hash blocks of 16 words = 512 bits */
+	for (i = 0; i < r->poolinfo->poolwords; i += 16)
 		sha_transform(hash, (__u8 *)(r->pool + i), workspace);
-		/* feed back portion of the resulting hash */
-		add_entropy_words(r, &hash[i % 5], 1);
-	}
 
 	/*
-	 * To avoid duplicates, we atomically extract a
-	 * portion of the pool while mixing, and hash one
-	 * final time.
+	 * We mix the hash back into the pool to prevent backtracking
+	 * attacks (where the attacker knows the state of the pool
+	 * plus the current outputs, and attempts to find previous
+	 * ouputs), unless the hash function can be inverted. By
+	 * mixing at least a SHA1 worth of hash data back, we make
+	 * brute-forcing the feedback as hard as brute-forcing the
+	 * hash.
 	 */
-	__add_entropy_words(r, &hash[i % 5], 1, extract);
+	__add_entropy_words(r, hash, 5, extract);
+
+	/*
+	 * To avoid duplicates, we atomically extract a portion of the
+	 * pool while mixing, and hash one final time.
+	 */
 	sha_transform(hash, (__u8 *)extract, workspace);
 	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
 	/*
-	 * In case the hash function has some recognizable
-	 * output pattern, we fold it in half.
+	 * In case the hash function has some recognizable output
+	 * pattern, we fold it in half. Thus, we always feed back
+	 * twice as much data as we output.
 	 */
-
 	hash[0] ^= hash[3];
 	hash[1] ^= hash[4];
 	hash[2] ^= rol32(hash[2], 16);

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 07 of 12] random: remove cacheline alignment for locks
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (5 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 06 of 12] random: make backtracking attacks harder Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 08 of 12] random: eliminate redundant new_rotate variable Matt Mackall
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

Earlier changes greatly reduce the number of times we grab the lock
per output byte, so we shouldn't need this particular hack any more.

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r 9569d3011032 -r 70f981257057 drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -395,7 +395,7 @@
 
 struct entropy_store;
 struct entropy_store {
-	/* mostly-read data: */
+	/* read-only data: */
 	struct poolinfo *poolinfo;
 	__u32 *pool;
 	const char *name;
@@ -403,7 +403,7 @@
 	struct entropy_store *pull;
 
 	/* read-write data: */
-	spinlock_t lock ____cacheline_aligned_in_smp;
+	spinlock_t lock;
 	unsigned add_ptr;
 	int entropy_count;
 	int input_rotate;

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 08 of 12] random: eliminate redundant new_rotate variable
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (6 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 07 of 12] random: remove cacheline alignment for locks Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 09 of 12] random: remove some prefetch logic Matt Mackall
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

- eliminate new_rotate
- move input_rotate masking
- simplify input_rotate update
- move input_rotate update to end of inner loop for readability

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r 70f981257057 -r f06a2e1b4d58 drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -455,7 +455,7 @@
 		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 	unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
-	int new_rotate, input_rotate;
+	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
 	__u32 w, next_w;
 	unsigned long flags;
@@ -474,20 +474,10 @@
 	add_ptr = r->add_ptr;
 
 	while (nwords--) {
-		w = rol32(next_w, input_rotate);
+		w = rol32(next_w, input_rotate & 31);
 		if (nwords > 0)
 			next_w = *in++;
 		i = add_ptr = (add_ptr - 1) & wordmask;
-		/*
-		 * Normally, we add 7 bits of rotation to the pool.
-		 * At the beginning of the pool, add an extra 7 bits
-		 * rotation, so that successive passes spread the
-		 * input bits across the pool evenly.
-		 */
-		new_rotate = input_rotate + 14;
-		if (i)
-			new_rotate = input_rotate + 7;
-		input_rotate = new_rotate & 31;
 
 		/* XOR in the various taps */
 		w ^= r->pool[(i + tap1) & wordmask];
@@ -497,6 +487,14 @@
 		w ^= r->pool[(i + tap5) & wordmask];
 		w ^= r->pool[i];
 		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
+		/*
+		 * Normally, we add 7 bits of rotation to the pool.
+		 * At the beginning of the pool, add an extra 7 bits
+		 * rotation, so that successive passes spread the
+		 * input bits across the pool evenly.
+		 */
+		input_rotate += i ? 7 : 14;
 	}
 
 	r->input_rotate = input_rotate;

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 09 of 12] random: remove some prefetch logic
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (7 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 08 of 12] random: eliminate redundant new_rotate variable Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 10 of 12] random: simplify add_ptr logic Matt Mackall
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

The urandom output pool (ie the fast path) fits in one cacheline, so
this is pretty unnecessary. Further, the output path has already
fetched the entire pool to hash it before calling in here.

(This was the only user of prefetch_range in the kernel, and it passed
in words rather than bytes!)

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r f06a2e1b4d58 -r bc31fa097d34 drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -457,7 +457,7 @@
 	unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
-	__u32 w, next_w;
+	__u32 w;
 	unsigned long flags;
 
 	/* Taps are constant, so we can load them without holding r->lock.  */
@@ -466,17 +466,13 @@
 	tap3 = r->poolinfo->tap3;
 	tap4 = r->poolinfo->tap4;
 	tap5 = r->poolinfo->tap5;
-	next_w = *in++;
 
 	spin_lock_irqsave(&r->lock, flags);
-	prefetch_range(r->pool, wordmask);
 	input_rotate = r->input_rotate;
 	add_ptr = r->add_ptr;
 
 	while (nwords--) {
-		w = rol32(next_w, input_rotate & 31);
-		if (nwords > 0)
-			next_w = *in++;
+		w = rol32(*in++, input_rotate & 31);
 		i = add_ptr = (add_ptr - 1) & wordmask;
 
 		/* XOR in the various taps */

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 10 of 12] random: simplify add_ptr logic
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (8 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 09 of 12] random: remove some prefetch logic Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 11 of 12] random: make mixing interface byte-oriented Matt Mackall
  2008-01-18  2:33 ` [PATCH 12 of 12] random: simplify and rename credit_entropy_store Matt Mackall
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

The add_ptr variable wasn't used in a sensible way; use only i instead.
i got reused later for a different purpose; use j instead.

While we're here, put tap0 first in the tap list and add a comment.

Signed-off-by: Matt Mackall <mpm@selenic.com>

diff -r bc31fa097d34 -r 8f286e22c84d drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
@@ -454,7 +454,7 @@
 	static __u32 const twist_table[8] = {
 		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
 		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
-	unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
+	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
 	__u32 w;
@@ -469,19 +469,21 @@
 
 	spin_lock_irqsave(&r->lock, flags);
 	input_rotate = r->input_rotate;
-	add_ptr = r->add_ptr;
+	i = r->add_ptr;
 
 	while (nwords--) {
 		w = rol32(*in++, input_rotate & 31);
-		i = add_ptr = (add_ptr - 1) & wordmask;
+		i = (i - 1) & wordmask;
 
 		/* XOR in the various taps */
+		w ^= r->pool[i];
 		w ^= r->pool[(i + tap1) & wordmask];
 		w ^= r->pool[(i + tap2) & wordmask];
 		w ^= r->pool[(i + tap3) & wordmask];
 		w ^= r->pool[(i + tap4) & wordmask];
 		w ^= r->pool[(i + tap5) & wordmask];
-		w ^= r->pool[i];
+
+		/* Mix the result back in with a twist */
 		r->pool[i] = (w >> 3) ^ twist_table[w & 7];
 
 		/*
@@ -494,14 +496,11 @@
 	}
 
 	r->input_rotate = input_rotate;
-	r->add_ptr = add_ptr;
+	r->add_ptr = i;
 
-	if (out) {
-		for (i = 0; i < 16; i++) {
-			out[i] = r->pool[add_ptr];
-			add_ptr = (add_ptr - 1) & wordmask;
-		}
-	}
+	if (out)
+		for (j = 0; j < 16; j++)
+			out[j] = r->pool[(i - j) & wordmask];
 
 	spin_unlock_irqrestore(&r->lock, flags);
 }

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 11 of 12] random: make mixing interface byte-oriented
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (9 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 10 of 12] random: simplify add_ptr logic Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  2008-01-18  2:33 ` [PATCH 12 of 12] random: simplify and rename credit_entropy_store Matt Mackall
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

Switch add_entropy_words to a byte-oriented interface, eliminating
numerous casts and byte/word size rounding issues. This also
reduces the overall bit/byte/word confusion in this code.

We now mix a byte at a time into the word-based pool. This takes four
times as many iterations, but should be negligible compared to hashing
overhead. This also increases our pool churn, which adds some depth
against some theoretical failure modes.

The function name is changed to emphasize pool mixing and deemphasize
entropy (the samples mixed in may not contain any). extract is added
to the core function to make it clear that it extracts from the pool.

diff -r 8f286e22c84d -r a0689714301a drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:23 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:24 2008 -0600
@@ -439,7 +439,7 @@
 };
 
 /*
- * This function adds a byte into the entropy "pool".  It does not
+ * This function adds bytes into the entropy "pool".  It does not
  * update the entropy estimate.  The caller should call
  * credit_entropy_store if this is appropriate.
  *
@@ -448,8 +448,8 @@
  * it's cheap to do so and helps slightly in the expected case where
  * the entropy is concentrated in the low-order bits.
  */
-static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
-				int nwords, __u32 out[16])
+static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+				   int nbytes, __u8 out[64])
 {
 	static __u32 const twist_table[8] = {
 		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
@@ -457,6 +457,7 @@
 	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
+	const char *bytes = in;
 	__u32 w;
 	unsigned long flags;
 
@@ -471,8 +472,9 @@
 	input_rotate = r->input_rotate;
 	i = r->add_ptr;
 
-	while (nwords--) {
-		w = rol32(*in++, input_rotate & 31);
+	/* mix one byte at a time to simplify size handling and churn faster */
+	while (nbytes--) {
+		w = rol32(*bytes++, input_rotate & 31);
 		i = (i - 1) & wordmask;
 
 		/* XOR in the various taps */
@@ -500,15 +502,14 @@
 
 	if (out)
 		for (j = 0; j < 16; j++)
-			out[j] = r->pool[(i - j) & wordmask];
+			((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
 
 	spin_unlock_irqrestore(&r->lock, flags);
 }
 
-static inline void add_entropy_words(struct entropy_store *r, const __u32 *in,
-				     int nwords)
+static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
 {
-	__add_entropy_words(r, in, nwords, NULL);
+       mix_pool_bytes_extract(r, in, bytes, NULL);
 }
 
 /*
@@ -584,7 +585,7 @@
 	sample.jiffies = jiffies;
 	sample.cycles = get_cycles();
 	sample.num = num;
-	add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4);
+	mix_pool_bytes(&input_pool, &sample, sizeof(sample));
 
 	/*
 	 * Calculate number of bits of randomness we probably added.
@@ -701,7 +702,7 @@
 
 		bytes = extract_entropy(r->pull, tmp, bytes,
 					random_read_wakeup_thresh / 8, rsvd);
-		add_entropy_words(r, tmp, (bytes + 3) / 4);
+		mix_pool_bytes(r, tmp, bytes);
 		credit_entropy_store(r, bytes*8);
 	}
 }
@@ -759,7 +760,8 @@
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
 	int i;
-	__u32 extract[16], hash[5], workspace[SHA_WORKSPACE_WORDS];
+	__u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+	__u8 extract[64];
 
 	/* Generate a hash across the pool, 16 words (512 bits) at a time */
 	sha_init(hash);
@@ -775,13 +777,13 @@
 	 * brute-forcing the feedback as hard as brute-forcing the
 	 * hash.
 	 */
-	__add_entropy_words(r, hash, 5, extract);
+	mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
 
 	/*
 	 * To avoid duplicates, we atomically extract a portion of the
 	 * pool while mixing, and hash one final time.
 	 */
-	sha_transform(hash, (__u8 *)extract, workspace);
+	sha_transform(hash, extract, workspace);
 	memset(extract, 0, sizeof(extract));
 	memset(workspace, 0, sizeof(workspace));
 
@@ -888,9 +890,8 @@
 	spin_unlock_irqrestore(&r->lock, flags);
 
 	now = ktime_get_real();
-	add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
-	add_entropy_words(r, (__u32 *)utsname(),
-			  sizeof(*(utsname()))/4);
+	mix_pool_bytes(r, &now, sizeof(now));
+	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
 }
 
 static int rand_initialize(void)
@@ -1031,7 +1032,7 @@
 		count -= bytes;
 		p += bytes;
 
-		add_entropy_words(r, buf, (bytes + 3) / 4);
+		mix_pool_bytes(r, buf, bytes);
 		cond_resched();
 	}
 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 12 of 12] random: simplify and rename credit_entropy_store
  2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
                   ` (10 preceding siblings ...)
  2008-01-18  2:33 ` [PATCH 11 of 12] random: make mixing interface byte-oriented Matt Mackall
@ 2008-01-18  2:33 ` Matt Mackall
  11 siblings, 0 replies; 13+ messages in thread
From: Matt Mackall @ 2008-01-18  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-kernel

- emphasize bits in the name
- make zero bits lock-free
- simplify logic

diff -r a0689714301a -r 218e9dac90b1 drivers/char/random.c
--- a/drivers/char/random.c	Thu Jan 17 20:25:24 2008 -0600
+++ b/drivers/char/random.c	Thu Jan 17 20:25:24 2008 -0600
@@ -441,7 +441,7 @@
 /*
  * This function adds bytes into the entropy "pool".  It does not
  * update the entropy estimate.  The caller should call
- * credit_entropy_store if this is appropriate.
+ * credit_entropy_bits if this is appropriate.
  *
  * The pool is stirred with a primitive polynomial of the appropriate
  * degree, and then twisted.  We twist by three bits at a time because
@@ -515,24 +515,22 @@
 /*
  * Credit (or debit) the entropy store with n bits of entropy
  */
-static void credit_entropy_store(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
 	unsigned long flags;
 
+	if (!nbits)
+		return;
+
 	spin_lock_irqsave(&r->lock, flags);
 
-	if (r->entropy_count + nbits < 0) {
-		DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
-			  r->entropy_count, nbits);
+	DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+	r->entropy_count += nbits;
+	if (r->entropy_count < 0) {
+		DEBUG_ENT("negative entropy/overflow\n");
 		r->entropy_count = 0;
-	} else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) {
+	} else if (r->entropy_count > r->poolinfo->POOLBITS)
 		r->entropy_count = r->poolinfo->POOLBITS;
-	} else {
-		r->entropy_count += nbits;
-		if (nbits)
-			DEBUG_ENT("added %d entropy credits to %s\n",
-				  nbits, r->name);
-	}
 
 	/* should we wake readers? */
 	if (r == &input_pool && r->entropy_count >= random_read_wakeup_thresh)
@@ -619,8 +617,8 @@
 		 * Round down by 1 bit on general principles,
 		 * and limit entropy entimate to 12 bits.
 		 */
-		credit_entropy_store(&input_pool,
-				     min_t(int, fls(delta>>1), 11));
+		credit_entropy_bits(&input_pool,
+				    min_t(int, fls(delta>>1), 11));
 	}
 out:
 	preempt_enable();
@@ -703,7 +701,7 @@
 		bytes = extract_entropy(r->pull, tmp, bytes,
 					random_read_wakeup_thresh / 8, rsvd);
 		mix_pool_bytes(r, tmp, bytes);
-		credit_entropy_store(r, bytes*8);
+		credit_entropy_bits(r, bytes*8);
 	}
 }
 
@@ -1074,7 +1072,7 @@
 			return -EPERM;
 		if (get_user(ent_count, p))
 			return -EFAULT;
-		credit_entropy_store(&input_pool, ent_count);
+		credit_entropy_bits(&input_pool, ent_count);
 		return 0;
 	case RNDADDENTROPY:
 		if (!capable(CAP_SYS_ADMIN))
@@ -1089,7 +1087,7 @@
 				    size);
 		if (retval < 0)
 			return retval;
-		credit_entropy_store(&input_pool, ent_count);
+		credit_entropy_bits(&input_pool, ent_count);
 		return 0;
 	case RNDZAPENTCNT:
 	case RNDCLEARPOOL:

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2008-01-18  2:41 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-01-18  2:33 [PATCH 00 of 12] random cleanups for 2.6.25 Matt Mackall
2008-01-18  2:33 ` [PATCH 01 of 12] random: clean up checkpatch complaints Matt Mackall
2008-01-18  2:33 ` [PATCH 02 of 12] random: consolidate wakeup logic Matt Mackall
2008-01-18  2:33 ` [PATCH 03 of 12] random: use unlocked_ioctl Matt Mackall
2008-01-18  2:33 ` [PATCH 04 of 12] random: reuse rand_initialize Matt Mackall
2008-01-18  2:33 ` [PATCH 05 of 12] random: improve variable naming, clear extract buffer Matt Mackall
2008-01-18  2:33 ` [PATCH 06 of 12] random: make backtracking attacks harder Matt Mackall
2008-01-18  2:33 ` [PATCH 07 of 12] random: remove cacheline alignment for locks Matt Mackall
2008-01-18  2:33 ` [PATCH 08 of 12] random: eliminate redundant new_rotate variable Matt Mackall
2008-01-18  2:33 ` [PATCH 09 of 12] random: remove some prefetch logic Matt Mackall
2008-01-18  2:33 ` [PATCH 10 of 12] random: simplify add_ptr logic Matt Mackall
2008-01-18  2:33 ` [PATCH 11 of 12] random: make mixing interface byte-oriented Matt Mackall
2008-01-18  2:33 ` [PATCH 12 of 12] random: simplify and rename credit_entropy_store Matt Mackall

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox