author     Theodore Ts'o <tytso@mit.edu>                      2012-07-02 07:52:16 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2012-08-15 08:10:10 -0700
commit     0110bbfbc8ed1b5240a51e8c767c44a856424139 (patch)
tree       f3ac162b4925c7e841413db59dabff88324640ae
parent     cabf5b0af54cec6a2f9ce674cbc96fc8aa2fc468 (diff)
random: make 'add_interrupt_randomness()' do something sane
commit 775f4b297b780601e61787b766f306ed3e1d23eb upstream.
We've been moving away from add_interrupt_randomness() for various
reasons: it's too expensive to do on every interrupt, and flooding the
CPU with interrupts could theoretically cause bogus floods of entropy
from a somewhat externally controllable source.
This solves both problems by limiting the actual randomness addition
to just once a second or after 64 interrupts, whichever comes first.
During that time, the interrupt cycle data is buffered up in a per-cpu
pool. Also, we make sure the nonblocking pool used by urandom is
initialized before we start feeding the normal input pool. This
ensures that /dev/urandom returns unpredictable data as soon as
possible.
(Based on an original patch by Linus, but significantly modified by
tytso.)
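To make the described flow concrete, here is a minimal userspace sketch (not kernel code) of the buffering and rate-limiting above: interrupt samples are stirred into a small per-CPU-style pool and only flushed into the main entropy pool once a second, or after a fixed number of events, whichever comes first. All names here (MAX_EVENTS, fast_pool_sim, push_to_input_pool, on_interrupt) are illustrative stand-ins; the real logic is in add_interrupt_randomness() in the diff below.

```c
/*
 * Userspace sketch of the gating logic from the commit message: buffer
 * samples, then flush to the "input pool" at most once a second or after
 * MAX_EVENTS samples, whichever comes first.  Everything is a stand-in
 * for the kernel's fast_pool / mix_pool_bytes() machinery.
 */
#include <stdio.h>
#include <time.h>

#define MAX_EVENTS 64	/* flush after this many buffered samples */

struct fast_pool_sim {
	unsigned int pool[4];	/* stand-in for the 128-bit fast pool */
	time_t last_flush;	/* last time we fed the main pool */
	unsigned int count;	/* samples buffered since the last flush */
};

/* Stand-in for mixing into the input pool and crediting entropy. */
static void push_to_input_pool(const struct fast_pool_sim *f)
{
	printf("flushing %u buffered samples: %08x %08x %08x %08x\n",
	       f->count, f->pool[0], f->pool[1], f->pool[2], f->pool[3]);
}

/* Called once per simulated interrupt with a timing/IRQ-derived sample. */
static void on_interrupt(struct fast_pool_sim *f, unsigned int sample)
{
	time_t now = time(NULL);

	/* cheap mixing stand-in for fast_mix() */
	f->pool[f->count & 3] ^= sample * 2654435761u;
	f->count++;

	/* rate limit: at most one flush per second, or every MAX_EVENTS */
	if (f->count < MAX_EVENTS && now < f->last_flush + 1)
		return;

	push_to_input_pool(f);
	f->last_flush = now;
	f->count = 0;
}

int main(void)
{
	struct fast_pool_sim f = { .last_flush = time(NULL) };

	for (unsigned int i = 0; i < 200; i++)
		on_interrupt(&f, i);
	return 0;
}
```

The kernel version additionally routes the flush to the nonblocking pool until that pool is initialized, so /dev/urandom gets seeded first; that selection is visible in the add_interrupt_randomness() hunk below.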
Tested-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Eric Wustrow <ewust@umich.edu>
Reported-by: Nadia Heninger <nadiah@cs.ucsd.edu>
Reported-by: Zakir Durumeric <zakir@umich.edu>
Reported-by: J. Alex Halderman <jhalderm@umich.edu>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  drivers/char/random.c      | 103
-rw-r--r--  drivers/mfd/ab3100-core.c  |   2
-rw-r--r--  include/linux/random.h     |   2
-rw-r--r--  kernel/irq/handle.c        |   7
4 files changed, 90 insertions(+), 24 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 4ec04a75473..c59dbc6e758 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -127,19 +127,15 @@
  *
  * void add_input_randomness(unsigned int type, unsigned int code,
  *				unsigned int value);
- * void add_interrupt_randomness(int irq);
+ * void add_interrupt_randomness(int irq, int irq_flags);
  * void add_disk_randomness(struct gendisk *disk);
  *
  * add_input_randomness() uses the input layer interrupt timing, as well as
  * the event type information from the hardware.
  *
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool.  Note that not all interrupts are good
- * sources of randomness!  For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker.  Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
  *
  * add_disk_randomness() uses what amounts to the seek time of block
  * layer request events, on a per-disk_devt basis, as input to the
@@ -248,6 +244,7 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
+#include <linux/ptrace.h>
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -256,6 +253,7 @@
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/io.h>
 
 /*
@@ -421,7 +419,9 @@ struct entropy_store {
 	spinlock_t lock;
 	unsigned add_ptr;
 	int entropy_count;
+	int entropy_total;
 	int input_rotate;
+	unsigned int initialized:1;
 	__u8 last_data[EXTRACT_SIZE];
 };
 
@@ -454,6 +454,10 @@ static struct entropy_store nonblocking_pool = {
 	.pool = nonblocking_pool_data
 };
 
+static __u32 const twist_table[8] = {
+	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
 /*
  * This function adds bytes into the entropy "pool".  It does not
  * update the entropy estimate.  The caller should call
@@ -467,9 +471,6 @@ static struct entropy_store nonblocking_pool = {
 static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
 				   int nbytes, __u8 out[64])
 {
-	static __u32 const twist_table[8] = {
-		0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
-		0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
 	unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
 	int input_rotate;
 	int wordmask = r->poolinfo->poolwords - 1;
@@ -528,6 +529,36 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
 	mix_pool_bytes_extract(r, in, bytes, NULL);
 }
 
+struct fast_pool {
+	__u32		pool[4];
+	unsigned long	last;
+	unsigned short	count;
+	unsigned char	rotate;
+	unsigned char	last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector.  It's hardcoded for an 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+{
+	const char	*bytes = in;
+	__u32		w;
+	unsigned	i = f->count;
+	unsigned	input_rotate = f->rotate;
+
+	while (nbytes--) {
+		w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+			f->pool[(i + 1) & 3];
+		f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+		input_rotate += (i++ & 3) ? 7 : 14;
+	}
+	f->count = i;
+	f->rotate = input_rotate;
+}
+
 /*
  * Credit (or debit) the entropy store with n bits of entropy
  */
@@ -551,6 +582,12 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
 		entropy_count = r->poolinfo->POOLBITS;
 	r->entropy_count = entropy_count;
 
+	if (!r->initialized && nbits > 0) {
+		r->entropy_total += nbits;
+		if (r->entropy_total > 128)
+			r->initialized = 1;
+	}
+
 	/* should we wake readers? */
 	if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
 		wake_up_interruptible(&random_read_wait);
@@ -700,17 +737,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
 {
-	struct timer_rand_state *state;
+	struct entropy_store	*r;
+	struct fast_pool	*fast_pool = &__get_cpu_var(irq_randomness);
+	struct pt_regs		*regs = get_irq_regs();
+	unsigned long		now = jiffies;
+	__u32			input[4], cycles = get_cycles();
+
+	input[0] = cycles ^ jiffies;
+	input[1] = irq;
+	if (regs) {
+		__u64 ip = instruction_pointer(regs);
+		input[2] = ip;
+		input[3] = ip >> 32;
+	}
 
-	state = get_timer_rand_state(irq);
+	fast_mix(fast_pool, input, sizeof(input));
 
-	if (state == NULL)
+	if ((fast_pool->count & 1023) &&
+	    !time_after(now, fast_pool->last + HZ))
 		return;
 
-	DEBUG_ENT("irq event %d\n", irq);
-	add_timer_randomness(state, 0x100 + irq);
+	fast_pool->last = now;
+
+	r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+	mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+	/*
+	 * If we don't have a valid cycle counter, and we see
+	 * back-to-back timer interrupts, then skip giving credit for
+	 * any entropy.
+	 */
+	if (cycles == 0) {
+		if (irq_flags & __IRQF_TIMER) {
+			if (fast_pool->last_timer_intr)
+				return;
+			fast_pool->last_timer_intr = 1;
+		} else
+			fast_pool->last_timer_intr = 0;
+	}
+	credit_entropy_bits(r, 1);
 }
 
 #ifdef CONFIG_BLOCK
@@ -971,6 +1039,7 @@ static void init_std_data(struct entropy_store *r)
 
 	spin_lock_irqsave(&r->lock, flags);
 	r->entropy_count = 0;
+	r->entropy_total = 0;
 	spin_unlock_irqrestore(&r->lock, flags);
 
 	now = ktime_get_real();
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 1efad20fb17..9522d6bda4f 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -409,8 +409,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
 	u32 fatevent;
 	int err;
 
-	add_interrupt_randomness(irq);
-
 	err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
 		event_regs, 3);
 	if (err)
diff --git a/include/linux/random.h b/include/linux/random.h
index 8f74538c96d..6ef39d7f2db 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -52,7 +52,7 @@ extern void rand_initialize_irq(int irq);
 
 extern void add_input_randomness(unsigned int type, unsigned int code,
 				unsigned int value);
-extern void add_interrupt_randomness(int irq);
+extern void add_interrupt_randomness(int irq, int irq_flags);
 
 extern void get_random_bytes(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index bdb18032555..131ca176b49 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,7 +133,7 @@ irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
 	irqreturn_t retval = IRQ_NONE;
-	unsigned int random = 0, irq = desc->irq_data.irq;
+	unsigned int flags = 0, irq = desc->irq_data.irq;
 
 	do {
 		irqreturn_t res;
@@ -161,7 +161,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 			/* Fall through to add to randomness */
 		case IRQ_HANDLED:
-			random |= action->flags;
+			flags |= action->flags;
 			break;
 
 		default:
@@ -172,8 +172,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 		action = action->next;
 	} while (action);
 
-	if (random & IRQF_SAMPLE_RANDOM)
-		add_interrupt_randomness(irq);
+	add_interrupt_randomness(irq, flags);
 
 	if (!noirqdebug)
 		note_interrupt(irq, desc, retval);
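For readers who want to poke at the new mixing primitive itself, the fast_mix() routine and twist table from the patch above can be lifted into a standalone program with userspace stand-ins for __u32 and rol32(). This is a sketch for experimentation, not the in-tree code: the rol32() helper, the main() driver, and its sample input are illustrative stand-ins, while the pool layout, twist table, and mixing loop are taken from the diff.

```c
/*
 * Standalone sketch of fast_mix() from the patch above.  The pool layout,
 * twist table, and mixing loop match the diff; rol32() and main() are
 * userspace stand-ins added so the routine can be compiled and exercised
 * outside the kernel.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's rol32(); handles a shift of 0 safely. */
static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << shift) | (word >> ((32 - shift) & 31));
}

static const uint32_t twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

struct fast_pool {
	uint32_t pool[4];	/* the 128-bit per-CPU pool */
	unsigned short count;
	unsigned char rotate;
};

/* Mix nbytes of input into the 128-bit pool, as in the patch. */
static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
	const char *bytes = in;
	uint32_t w;
	unsigned i = f->count;
	unsigned input_rotate = f->rotate;

	while (nbytes--) {
		w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
			f->pool[(i + 1) & 3];
		f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
		input_rotate += (i++ & 3) ? 7 : 14;
	}
	f->count = i;
	f->rotate = input_rotate;
}

int main(void)
{
	struct fast_pool f;
	uint32_t input[4] = { 0x12345678, 42, 0xdeadbeef, 0 };	/* arbitrary sample */

	memset(&f, 0, sizeof(f));
	fast_mix(&f, input, sizeof(input));
	printf("%08x %08x %08x %08x\n",
	       (unsigned)f.pool[0], (unsigned)f.pool[1],
	       (unsigned)f.pool[2], (unsigned)f.pool[3]);
	return 0;
}
```

As the in-diff comment notes, the routine is hardcoded for a 128-bit pool and assumes any needed locking is done by the caller, which is why the kernel keeps one such pool per CPU.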