Diffstat (limited to 'drivers/char/hw_random/via-rng.c')
-rw-r--r--  drivers/char/hw_random/via-rng.c  42
1 file changed, 36 insertions, 6 deletions
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index f7feae4ebb5..de5a6dcfb3e 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -11,7 +11,7 @@
  * derived from
  *
  * Hardware driver for the AMD 768 Random Number Generator (RNG)
- * (c) Copyright 2001 Red Hat Inc <alan@redhat.com>
+ * (c) Copyright 2001 Red Hat Inc
  *
  * derived from
  *
@@ -24,16 +24,18 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/hw_random.h>
 #include <linux/delay.h>
+#include <asm/cpu_device_id.h>
 #include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/cpufeature.h>
+#include <asm/i387.h>
 
 
-#define PFX	KBUILD_MODNAME ": "
 
 
 enum {
@@ -67,23 +69,31 @@ enum {
  * Another possible performance boost may come from simply buffering
  * until we have 4 bytes, thus returning a u32 at a time,
  * instead of the current u8-at-a-time.
+ *
+ * Padlock instructions can generate a spurious DNA fault, so
+ * we have to call them in the context of irq_ts_save/restore()
  */
 
 static inline u32 xstore(u32 *addr, u32 edx_in)
 {
 	u32 eax_out;
+	int ts_state;
+
+	ts_state = irq_ts_save();
 
 	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
-		:"=m"(*addr), "=a"(eax_out)
-		:"D"(addr), "d"(edx_in));
+		: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
 
+	irq_ts_restore(ts_state);
 	return eax_out;
 }
 
 static int via_rng_data_present(struct hwrng *rng, int wait)
 {
+	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	u32 bytes_out;
-	u32 *via_rng_datum = (u32 *)(&rng->priv);
 	int i;
 
 	/* We choose the recommended 1-byte-per-instruction RNG rate,
@@ -107,6 +117,7 @@ static int via_rng_data_present(struct hwrng *rng, int wait)
 			break;
 		udelay(10);
 	}
+	rng->priv = *via_rng_datum;
 	return bytes_out ? 1 : 0;
 }
 
@@ -124,6 +135,19 @@ static int via_rng_init(struct hwrng *rng)
 	struct cpuinfo_x86 *c = &cpu_data(0);
 	u32 lo, hi, old_lo;
 
+	/* VIA Nano CPUs don't have the MSR_VIA_RNG anymore.  The RNG
+	 * is always enabled if CPUID rng_en is set.  There is no
+	 * RNG configuration like it used to be the case in this
+	 * register */
+	if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
+		if (!cpu_has_xstore_enabled) {
+			printk(KERN_ERR PFX "can't enable hardware RNG "
+				"if XSTORE is not enabled\n");
+			return -ENODEV;
+		}
+		return 0;
+	}
+
 	/* Control the RNG via MSR.  Tread lightly and pay very close
 	 * close attention to values written, as the reserved fields
 	 * are documented to be "undefined and unpredictable"; but it
@@ -197,5 +221,11 @@ static void __exit mod_exit(void)
 module_init(mod_init);
 module_exit(mod_exit);
 
-MODULE_DESCRIPTION("H/W RNG driver for VIA chipsets");
+static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
+	X86_FEATURE_MATCH(X86_FEATURE_XSTORE),
+	{}
+};
+
+MODULE_DESCRIPTION("H/W RNG driver for VIA CPU with PadLock");
 MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);
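
In via_rng_data_present(), the random data is no longer written straight into rng->priv; XSTORE now targets a PADLOCK_ALIGNMENT-aligned scratch window carved out of an oversized stack buffer, and the result is copied into rng->priv afterwards. Below is a minimal sketch of that alignment trick only; the function name, the DEMO_ALIGNMENT constant, and the simplified size calculation are illustrative stand-ins, not the patch's exact `16 + PADLOCK_ALIGNMENT - STACK_ALIGN` arithmetic.

```c
#include <linux/kernel.h>	/* PTR_ALIGN() */
#include <linux/types.h>

#define DEMO_ALIGNMENT	16	/* stand-in for PADLOCK_ALIGNMENT from <crypto/padlock.h> */

static u32 demo_aligned_slot(void)
{
	/* Over-allocate so an aligned u32 window always fits inside buf. */
	char buf[sizeof(u32) + DEMO_ALIGNMENT - 1];
	u32 *datum = (u32 *)PTR_ALIGN(&buf[0], DEMO_ALIGNMENT);

	*datum = 0;	/* in the driver, xstore() fills this slot instead */
	return *datum;
}
```

Copying the value out of the aligned slot into rng->priv (the new `rng->priv = *via_rng_datum;` line) keeps the hwrng core interface unchanged.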

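The final hunk adds CPU-feature based module autoloading: exporting an x86_cpu_id table with MODULE_DEVICE_TABLE(x86cpu, ...) gives the module a "cpu:" modalias that udev matches against CPUs advertising X86_FEATURE_XSTORE. The sketch below shows the same pattern in a hypothetical demo module (names are assumptions); the explicit x86_match_cpu() check is likewise illustrative, since via-rng keeps its own XSTORE checks and only marks the table __maybe_unused for autoloading.

```c
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

static struct x86_cpu_id demo_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_XSTORE),	/* any vendor/family/model with XSTORE */
	{}
};
/* Emits a "cpu:" modalias so udev can load the module when the flag is present. */
MODULE_DEVICE_TABLE(x86cpu, demo_cpu_id);

static int __init demo_init(void)
{
	/* Optional runtime check against the same table. */
	if (!x86_match_cpu(demo_cpu_id))
		return -ENODEV;
	pr_info("demo: XSTORE-capable VIA CPU detected\n");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

With the device table in place, udev's CPU modalias events pull the module in automatically on matching hardware, which is why the driver never needs to reference the table from its probe path.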