Loading...
1// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2/*
3 * Copyright (C) 2017-2024 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
5 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
6 *
7 * This driver produces cryptographically secure pseudorandom data. It is divided
8 * into roughly six sections, each with a section header:
9 *
10 * - Initialization and readiness waiting.
11 * - Fast key erasure RNG, the "crng".
12 * - Entropy accumulation and extraction routines.
13 * - Entropy collection routines.
14 * - Userspace reader/writer interfaces.
15 * - Sysctl interface.
16 *
17 * The high level overview is that there is one input pool, into which
18 * various pieces of data are hashed. Prior to initialization, some of that
19 * data is then "credited" as having a certain number of bits of entropy.
20 * When enough bits of entropy are available, the hash is finalized and
21 * handed as a key to a stream cipher that expands it indefinitely for
22 * various consumers. This key is periodically refreshed as the various
23 * entropy collectors, described below, add data to the input pool.
24 */
25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28#include <linux/utsname.h>
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/major.h>
32#include <linux/string.h>
33#include <linux/fcntl.h>
34#include <linux/slab.h>
35#include <linux/random.h>
36#include <linux/poll.h>
37#include <linux/init.h>
38#include <linux/fs.h>
39#include <linux/blkdev.h>
40#include <linux/interrupt.h>
41#include <linux/mm.h>
42#include <linux/nodemask.h>
43#include <linux/spinlock.h>
44#include <linux/kthread.h>
45#include <linux/percpu.h>
46#include <linux/ptrace.h>
47#include <linux/workqueue.h>
48#include <linux/irq.h>
49#include <linux/ratelimit.h>
50#include <linux/syscalls.h>
51#include <linux/completion.h>
52#include <linux/uuid.h>
53#include <linux/uaccess.h>
54#include <linux/suspend.h>
55#include <linux/siphash.h>
56#include <linux/sched/isolation.h>
57#include <crypto/chacha.h>
58#include <crypto/blake2s.h>
59#ifdef CONFIG_VDSO_GETRANDOM
60#include <vdso/getrandom.h>
61#include <vdso/datapage.h>
62#include <vdso/vsyscall.h>
63#endif
64#include <asm/archrandom.h>
65#include <asm/processor.h>
66#include <asm/irq.h>
67#include <asm/irq_regs.h>
68#include <asm/io.h>
69
70/*********************************************************************
71 *
72 * Initialization and readiness waiting.
73 *
74 * Much of the RNG infrastructure is devoted to various dependencies
75 * being able to wait until the RNG has collected enough entropy and
76 * is ready for safe consumption.
77 *
78 *********************************************************************/
79
80/*
81 * crng_init is protected by base_crng->lock, and only increases
82 * its value (from empty->early->ready).
83 */
84static enum {
85 CRNG_EMPTY = 0, /* Little to no entropy collected */
86 CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
87 CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
88} crng_init __read_mostly = CRNG_EMPTY;
89static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
90#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
91/* Various types of waiters for crng_init->CRNG_READY transition. */
92static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
93static struct fasync_struct *fasync;
94static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
95
96/* Control how we warn userspace. */
97static struct ratelimit_state urandom_warning =
98 RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
99static int ratelimit_disable __read_mostly =
100 IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
101module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
102MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
103
104/*
105 * Returns whether or not the input pool has been seeded and thus guaranteed
106 * to supply cryptographically secure random numbers. This applies to: the
107 * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
108 * u16,u32,u64,long} family of functions.
109 *
110 * Returns: true if the input pool has been seeded.
111 * false if the input pool has not been seeded.
112 */
113bool rng_is_initialized(void)
114{
115 return crng_ready();
116}
117EXPORT_SYMBOL(rng_is_initialized);
118
119static void __cold crng_set_ready(struct work_struct *work)
120{
121 static_branch_enable(&crng_is_ready);
122}
123
124/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
125static void try_to_generate_entropy(void);
126
127/*
128 * Wait for the input pool to be seeded and thus guaranteed to supply
129 * cryptographically secure random numbers. This applies to: the /dev/urandom
130 * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
131 * long} family of functions. Using any of these functions without first
132 * calling this function forfeits the guarantee of security.
133 *
134 * Returns: 0 if the input pool has been seeded.
135 * -ERESTARTSYS if the function was interrupted by a signal.
136 */
137int wait_for_random_bytes(void)
138{
139 while (!crng_ready()) {
140 int ret;
141
142 try_to_generate_entropy();
143 ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
144 if (ret)
145 return ret > 0 ? 0 : ret;
146 }
147 return 0;
148}
149EXPORT_SYMBOL(wait_for_random_bytes);
150
151/*
152 * Add a callback function that will be invoked when the crng is initialised,
153 * or immediately if it already has been. Only use this is you are absolutely
154 * sure it is required. Most users should instead be able to test
155 * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
156 */
157int __cold execute_with_initialized_rng(struct notifier_block *nb)
158{
159 unsigned long flags;
160 int ret = 0;
161
162 spin_lock_irqsave(&random_ready_notifier.lock, flags);
163 if (crng_ready())
164 nb->notifier_call(nb, 0, NULL);
165 else
166 ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
167 spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
168 return ret;
169}
170
171#define warn_unseeded_randomness() \
172 if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
173 printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
174 __func__, (void *)_RET_IP_, crng_init)
175
176
177/*********************************************************************
178 *
179 * Fast key erasure RNG, the "crng".
180 *
181 * These functions expand entropy from the entropy extractor into
182 * long streams for external consumption using the "fast key erasure"
183 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
184 *
185 * There are a few exported interfaces for use by other drivers:
186 *
187 * void get_random_bytes(void *buf, size_t len)
188 * u8 get_random_u8()
189 * u16 get_random_u16()
190 * u32 get_random_u32()
191 * u32 get_random_u32_below(u32 ceil)
192 * u32 get_random_u32_above(u32 floor)
193 * u32 get_random_u32_inclusive(u32 floor, u32 ceil)
194 * u64 get_random_u64()
195 * unsigned long get_random_long()
196 *
197 * These interfaces will return the requested number of random bytes
198 * into the given buffer or as a return value. This is equivalent to
199 * a read from /dev/urandom. The u8, u16, u32, u64, long family of
200 * functions may be higher performance for one-off random integers,
201 * because they do a bit of buffering and do not invoke reseeding
202 * until the buffer is emptied.
203 *
204 *********************************************************************/
205
206enum {
207 CRNG_RESEED_START_INTERVAL = HZ,
208 CRNG_RESEED_INTERVAL = 60 * HZ
209};
210
211static struct {
212 u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
213 unsigned long generation;
214 spinlock_t lock;
215} base_crng = {
216 .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
217};
218
219struct crng {
220 u8 key[CHACHA_KEY_SIZE];
221 unsigned long generation;
222 local_lock_t lock;
223};
224
225static DEFINE_PER_CPU(struct crng, crngs) = {
226 .generation = ULONG_MAX,
227 .lock = INIT_LOCAL_LOCK(crngs.lock),
228};
229
230/*
231 * Return the interval until the next reseeding, which is normally
232 * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
233 * proportional to the uptime.
234 */
235static unsigned int crng_reseed_interval(void)
236{
237 static bool early_boot = true;
238
239 if (unlikely(READ_ONCE(early_boot))) {
240 time64_t uptime = ktime_get_seconds();
241 if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
242 WRITE_ONCE(early_boot, false);
243 else
244 return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
245 (unsigned int)uptime / 2 * HZ);
246 }
247 return CRNG_RESEED_INTERVAL;
248}
249
250/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
251static void extract_entropy(void *buf, size_t len);
252
253/* This extracts a new crng key from the input pool. */
254static void crng_reseed(struct work_struct *work)
255{
256 static DECLARE_DELAYED_WORK(next_reseed, crng_reseed);
257 unsigned long flags;
258 unsigned long next_gen;
259 u8 key[CHACHA_KEY_SIZE];
260
261 /* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
262 if (likely(system_unbound_wq))
263 queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
264
265 extract_entropy(key, sizeof(key));
266
267 /*
268 * We copy the new key into the base_crng, overwriting the old one,
269 * and update the generation counter. We avoid hitting ULONG_MAX,
270 * because the per-cpu crngs are initialized to ULONG_MAX, so this
271 * forces new CPUs that come online to always initialize.
272 */
273 spin_lock_irqsave(&base_crng.lock, flags);
274 memcpy(base_crng.key, key, sizeof(base_crng.key));
275 next_gen = base_crng.generation + 1;
276 if (next_gen == ULONG_MAX)
277 ++next_gen;
278 WRITE_ONCE(base_crng.generation, next_gen);
279#ifdef CONFIG_VDSO_GETRANDOM
280 /* base_crng.generation's invalid value is ULONG_MAX, while
281 * _vdso_rng_data.generation's invalid value is 0, so add one to the
282 * former to arrive at the latter. Use smp_store_release so that this
283 * is ordered with the write above to base_crng.generation. Pairs with
284 * the smp_rmb() before the syscall in the vDSO code.
285 *
286 * Cast to unsigned long for 32-bit architectures, since atomic 64-bit
287 * operations are not supported on those architectures. This is safe
288 * because base_crng.generation is a 32-bit value. On big-endian
289 * architectures it will be stored in the upper 32 bits, but that's okay
290 * because the vDSO side only checks whether the value changed, without
291 * actually using or interpreting the value.
292 */
293 smp_store_release((unsigned long *)&__arch_get_k_vdso_rng_data()->generation, next_gen + 1);
294#endif
295 if (!static_branch_likely(&crng_is_ready))
296 crng_init = CRNG_READY;
297 spin_unlock_irqrestore(&base_crng.lock, flags);
298 memzero_explicit(key, sizeof(key));
299}
300
301/*
302 * This generates a ChaCha block using the provided key, and then
303 * immediately overwrites that key with half the block. It returns
304 * the resultant ChaCha state to the user, along with the second
305 * half of the block containing 32 bytes of random data that may
306 * be used; random_data_len may not be greater than 32.
307 *
308 * The returned ChaCha state contains within it a copy of the old
309 * key value, at index 4, so the state should always be zeroed out
310 * immediately after using in order to maintain forward secrecy.
311 * If the state cannot be erased in a timely manner, then it is
312 * safer to set the random_data parameter to &chacha_state[4] so
313 * that this function overwrites it before returning.
314 */
315static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
316 u32 chacha_state[CHACHA_STATE_WORDS],
317 u8 *random_data, size_t random_data_len)
318{
319 u8 first_block[CHACHA_BLOCK_SIZE];
320
321 BUG_ON(random_data_len > 32);
322
323 chacha_init_consts(chacha_state);
324 memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
325 memset(&chacha_state[12], 0, sizeof(u32) * 4);
326 chacha20_block(chacha_state, first_block);
327
328 memcpy(key, first_block, CHACHA_KEY_SIZE);
329 memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
330 memzero_explicit(first_block, sizeof(first_block));
331}
332
333/*
334 * This function returns a ChaCha state that you may use for generating
335 * random data. It also returns up to 32 bytes on its own of random data
336 * that may be used; random_data_len may not be greater than 32.
337 */
338static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
339 u8 *random_data, size_t random_data_len)
340{
341 unsigned long flags;
342 struct crng *crng;
343
344 BUG_ON(random_data_len > 32);
345
346 /*
347 * For the fast path, we check whether we're ready, unlocked first, and
348 * then re-check once locked later. In the case where we're really not
349 * ready, we do fast key erasure with the base_crng directly, extracting
350 * when crng_init is CRNG_EMPTY.
351 */
352 if (!crng_ready()) {
353 bool ready;
354
355 spin_lock_irqsave(&base_crng.lock, flags);
356 ready = crng_ready();
357 if (!ready) {
358 if (crng_init == CRNG_EMPTY)
359 extract_entropy(base_crng.key, sizeof(base_crng.key));
360 crng_fast_key_erasure(base_crng.key, chacha_state,
361 random_data, random_data_len);
362 }
363 spin_unlock_irqrestore(&base_crng.lock, flags);
364 if (!ready)
365 return;
366 }
367
368 local_lock_irqsave(&crngs.lock, flags);
369 crng = raw_cpu_ptr(&crngs);
370
371 /*
372 * If our per-cpu crng is older than the base_crng, then it means
373 * somebody reseeded the base_crng. In that case, we do fast key
374 * erasure on the base_crng, and use its output as the new key
375 * for our per-cpu crng. This brings us up to date with base_crng.
376 */
377 if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
378 spin_lock(&base_crng.lock);
379 crng_fast_key_erasure(base_crng.key, chacha_state,
380 crng->key, sizeof(crng->key));
381 crng->generation = base_crng.generation;
382 spin_unlock(&base_crng.lock);
383 }
384
385 /*
386 * Finally, when we've made it this far, our per-cpu crng has an up
387 * to date key, and we can do fast key erasure with it to produce
388 * some random data and a ChaCha state for the caller. All other
389 * branches of this function are "unlikely", so most of the time we
390 * should wind up here immediately.
391 */
392 crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
393 local_unlock_irqrestore(&crngs.lock, flags);
394}
395
396static void _get_random_bytes(void *buf, size_t len)
397{
398 u32 chacha_state[CHACHA_STATE_WORDS];
399 u8 tmp[CHACHA_BLOCK_SIZE];
400 size_t first_block_len;
401
402 if (!len)
403 return;
404
405 first_block_len = min_t(size_t, 32, len);
406 crng_make_state(chacha_state, buf, first_block_len);
407 len -= first_block_len;
408 buf += first_block_len;
409
410 while (len) {
411 if (len < CHACHA_BLOCK_SIZE) {
412 chacha20_block(chacha_state, tmp);
413 memcpy(buf, tmp, len);
414 memzero_explicit(tmp, sizeof(tmp));
415 break;
416 }
417
418 chacha20_block(chacha_state, buf);
419 if (unlikely(chacha_state[12] == 0))
420 ++chacha_state[13];
421 len -= CHACHA_BLOCK_SIZE;
422 buf += CHACHA_BLOCK_SIZE;
423 }
424
425 memzero_explicit(chacha_state, sizeof(chacha_state));
426}
427
428/*
429 * This returns random bytes in arbitrary quantities. The quality of the
430 * random bytes is good as /dev/urandom. In order to ensure that the
431 * randomness provided by this function is okay, the function
432 * wait_for_random_bytes() should be called and return 0 at least once
433 * at any point prior.
434 */
435void get_random_bytes(void *buf, size_t len)
436{
437 warn_unseeded_randomness();
438 _get_random_bytes(buf, len);
439}
440EXPORT_SYMBOL(get_random_bytes);
441
442static ssize_t get_random_bytes_user(struct iov_iter *iter)
443{
444 u32 chacha_state[CHACHA_STATE_WORDS];
445 u8 block[CHACHA_BLOCK_SIZE];
446 size_t ret = 0, copied;
447
448 if (unlikely(!iov_iter_count(iter)))
449 return 0;
450
451 /*
452 * Immediately overwrite the ChaCha key at index 4 with random
453 * bytes, in case userspace causes copy_to_iter() below to sleep
454 * forever, so that we still retain forward secrecy in that case.
455 */
456 crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
457 /*
458 * However, if we're doing a read of len <= 32, we don't need to
459 * use chacha_state after, so we can simply return those bytes to
460 * the user directly.
461 */
462 if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
463 ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
464 goto out_zero_chacha;
465 }
466
467 for (;;) {
468 chacha20_block(chacha_state, block);
469 if (unlikely(chacha_state[12] == 0))
470 ++chacha_state[13];
471
472 copied = copy_to_iter(block, sizeof(block), iter);
473 ret += copied;
474 if (!iov_iter_count(iter) || copied != sizeof(block))
475 break;
476
477 BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
478 if (ret % PAGE_SIZE == 0) {
479 if (signal_pending(current))
480 break;
481 cond_resched();
482 }
483 }
484
485 memzero_explicit(block, sizeof(block));
486out_zero_chacha:
487 memzero_explicit(chacha_state, sizeof(chacha_state));
488 return ret ? ret : -EFAULT;
489}
490
491/*
492 * Batched entropy returns random integers. The quality of the random
493 * number is good as /dev/urandom. In order to ensure that the randomness
494 * provided by this function is okay, the function wait_for_random_bytes()
495 * should be called and return 0 at least once at any point prior.
496 */
497
498#define DEFINE_BATCHED_ENTROPY(type) \
499struct batch_ ##type { \
500 /* \
501 * We make this 1.5x a ChaCha block, so that we get the \
502 * remaining 32 bytes from fast key erasure, plus one full \
503 * block from the detached ChaCha state. We can increase \
504 * the size of this later if needed so long as we keep the \
505 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
506 */ \
507 type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
508 local_lock_t lock; \
509 unsigned long generation; \
510 unsigned int position; \
511}; \
512 \
513static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
514 .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
515 .position = UINT_MAX \
516}; \
517 \
518type get_random_ ##type(void) \
519{ \
520 type ret; \
521 unsigned long flags; \
522 struct batch_ ##type *batch; \
523 unsigned long next_gen; \
524 \
525 warn_unseeded_randomness(); \
526 \
527 if (!crng_ready()) { \
528 _get_random_bytes(&ret, sizeof(ret)); \
529 return ret; \
530 } \
531 \
532 local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
533 batch = raw_cpu_ptr(&batched_entropy_##type); \
534 \
535 next_gen = READ_ONCE(base_crng.generation); \
536 if (batch->position >= ARRAY_SIZE(batch->entropy) || \
537 next_gen != batch->generation) { \
538 _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
539 batch->position = 0; \
540 batch->generation = next_gen; \
541 } \
542 \
543 ret = batch->entropy[batch->position]; \
544 batch->entropy[batch->position] = 0; \
545 ++batch->position; \
546 local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
547 return ret; \
548} \
549EXPORT_SYMBOL(get_random_ ##type);
550
551DEFINE_BATCHED_ENTROPY(u8)
552DEFINE_BATCHED_ENTROPY(u16)
553DEFINE_BATCHED_ENTROPY(u32)
554DEFINE_BATCHED_ENTROPY(u64)
555
556u32 __get_random_u32_below(u32 ceil)
557{
558 /*
559 * This is the slow path for variable ceil. It is still fast, most of
560 * the time, by doing traditional reciprocal multiplication and
561 * opportunistically comparing the lower half to ceil itself, before
562 * falling back to computing a larger bound, and then rejecting samples
563 * whose lower half would indicate a range indivisible by ceil. The use
564 * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
565 * in 32-bits.
566 */
567 u32 rand = get_random_u32();
568 u64 mult;
569
570 /*
571 * This function is technically undefined for ceil == 0, and in fact
572 * for the non-underscored constant version in the header, we build bug
573 * on that. But for the non-constant case, it's convenient to have that
574 * evaluate to being a straight call to get_random_u32(), so that
575 * get_random_u32_inclusive() can work over its whole range without
576 * undefined behavior.
577 */
578 if (unlikely(!ceil))
579 return rand;
580
581 mult = (u64)ceil * rand;
582 if (unlikely((u32)mult < ceil)) {
583 u32 bound = -ceil % ceil;
584 while (unlikely((u32)mult < bound))
585 mult = (u64)ceil * get_random_u32();
586 }
587 return mult >> 32;
588}
589EXPORT_SYMBOL(__get_random_u32_below);
590
591#ifdef CONFIG_SMP
592/*
593 * This function is called when the CPU is coming up, with entry
594 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
595 */
596int __cold random_prepare_cpu(unsigned int cpu)
597{
598 /*
599 * When the cpu comes back online, immediately invalidate both
600 * the per-cpu crng and all batches, so that we serve fresh
601 * randomness.
602 */
603 per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
604 per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
605 per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
606 per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
607 per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
608 return 0;
609}
610#endif
611
612
613/**********************************************************************
614 *
615 * Entropy accumulation and extraction routines.
616 *
617 * Callers may add entropy via:
618 *
619 * static void mix_pool_bytes(const void *buf, size_t len)
620 *
621 * After which, if added entropy should be credited:
622 *
623 * static void credit_init_bits(size_t bits)
624 *
625 * Finally, extract entropy via:
626 *
627 * static void extract_entropy(void *buf, size_t len)
628 *
629 **********************************************************************/
630
631enum {
632 POOL_BITS = BLAKE2S_HASH_SIZE * 8,
633 POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
634 POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
635};
636
637static struct {
638 struct blake2s_state hash;
639 spinlock_t lock;
640 unsigned int init_bits;
641} input_pool = {
642 .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
643 BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
644 BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
645 .hash.outlen = BLAKE2S_HASH_SIZE,
646 .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
647};
648
649static void _mix_pool_bytes(const void *buf, size_t len)
650{
651 blake2s_update(&input_pool.hash, buf, len);
652}
653
654/*
655 * This function adds bytes into the input pool. It does not
656 * update the initialization bit counter; the caller should call
657 * credit_init_bits if this is appropriate.
658 */
659static void mix_pool_bytes(const void *buf, size_t len)
660{
661 unsigned long flags;
662
663 spin_lock_irqsave(&input_pool.lock, flags);
664 _mix_pool_bytes(buf, len);
665 spin_unlock_irqrestore(&input_pool.lock, flags);
666}
667
668/*
669 * This is an HKDF-like construction for using the hashed collected entropy
670 * as a PRF key, that's then expanded block-by-block.
671 */
672static void extract_entropy(void *buf, size_t len)
673{
674 unsigned long flags;
675 u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
676 struct {
677 unsigned long rdseed[32 / sizeof(long)];
678 size_t counter;
679 } block;
680 size_t i, longs;
681
682 for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
683 longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
684 if (longs) {
685 i += longs;
686 continue;
687 }
688 longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
689 if (longs) {
690 i += longs;
691 continue;
692 }
693 block.rdseed[i++] = random_get_entropy();
694 }
695
696 spin_lock_irqsave(&input_pool.lock, flags);
697
698 /* seed = HASHPRF(last_key, entropy_input) */
699 blake2s_final(&input_pool.hash, seed);
700
701 /* next_key = HASHPRF(seed, RDSEED || 0) */
702 block.counter = 0;
703 blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
704 blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
705
706 spin_unlock_irqrestore(&input_pool.lock, flags);
707 memzero_explicit(next_key, sizeof(next_key));
708
709 while (len) {
710 i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
711 /* output = HASHPRF(seed, RDSEED || ++counter) */
712 ++block.counter;
713 blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
714 len -= i;
715 buf += i;
716 }
717
718 memzero_explicit(seed, sizeof(seed));
719 memzero_explicit(&block, sizeof(block));
720}
721
722#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
723
724static void __cold _credit_init_bits(size_t bits)
725{
726 static DECLARE_WORK(set_ready, crng_set_ready);
727 unsigned int new, orig, add;
728 unsigned long flags;
729
730 if (!bits)
731 return;
732
733 add = min_t(size_t, bits, POOL_BITS);
734
735 orig = READ_ONCE(input_pool.init_bits);
736 do {
737 new = min_t(unsigned int, POOL_BITS, orig + add);
738 } while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
739
740 if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
741 crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
742 if (static_key_initialized && system_unbound_wq)
743 queue_work(system_unbound_wq, &set_ready);
744 atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
745#ifdef CONFIG_VDSO_GETRANDOM
746 WRITE_ONCE(__arch_get_k_vdso_rng_data()->is_ready, true);
747#endif
748 wake_up_interruptible(&crng_init_wait);
749 kill_fasync(&fasync, SIGIO, POLL_IN);
750 pr_notice("crng init done\n");
751 if (urandom_warning.missed)
752 pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
753 urandom_warning.missed);
754 } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
755 spin_lock_irqsave(&base_crng.lock, flags);
756 /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
757 if (crng_init == CRNG_EMPTY) {
758 extract_entropy(base_crng.key, sizeof(base_crng.key));
759 crng_init = CRNG_EARLY;
760 }
761 spin_unlock_irqrestore(&base_crng.lock, flags);
762 }
763}
764
765
766/**********************************************************************
767 *
768 * Entropy collection routines.
769 *
770 * The following exported functions are used for pushing entropy into
771 * the above entropy accumulation routines:
772 *
773 * void add_device_randomness(const void *buf, size_t len);
774 * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
775 * void add_bootloader_randomness(const void *buf, size_t len);
776 * void add_vmfork_randomness(const void *unique_vm_id, size_t len);
777 * void add_interrupt_randomness(int irq);
778 * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
779 * void add_disk_randomness(struct gendisk *disk);
780 *
781 * add_device_randomness() adds data to the input pool that
782 * is likely to differ between two devices (or possibly even per boot).
783 * This would be things like MAC addresses or serial numbers, or the
784 * read-out of the RTC. This does *not* credit any actual entropy to
785 * the pool, but it initializes the pool to different values for devices
786 * that might otherwise be identical and have very little entropy
787 * available to them (particularly common in the embedded world).
788 *
789 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
790 * entropy as specified by the caller. If the entropy pool is full it will
791 * block until more entropy is needed.
792 *
793 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
794 * and device tree, and credits its input depending on whether or not the
795 * command line option 'random.trust_bootloader'.
796 *
797 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
798 * representing the current instance of a VM to the pool, without crediting,
799 * and then force-reseeds the crng so that it takes effect immediately.
800 *
801 * add_interrupt_randomness() uses the interrupt timing as random
802 * inputs to the entropy pool. Using the cycle counters and the irq source
803 * as inputs, it feeds the input pool roughly once a second or after 64
804 * interrupts, crediting 1 bit of entropy for whichever comes first.
805 *
806 * add_input_randomness() uses the input layer interrupt timing, as well
807 * as the event type information from the hardware.
808 *
809 * add_disk_randomness() uses what amounts to the seek time of block
810 * layer request events, on a per-disk_devt basis, as input to the
811 * entropy pool. Note that high-speed solid state drives with very low
812 * seek times do not make for good sources of entropy, as their seek
813 * times are usually fairly consistent.
814 *
815 * The last two routines try to estimate how many bits of entropy
816 * to credit. They do this by keeping track of the first and second
817 * order deltas of the event timings.
818 *
819 **********************************************************************/
820
821static bool trust_cpu __initdata = true;
822static bool trust_bootloader __initdata = true;
823static int __init parse_trust_cpu(char *arg)
824{
825 return kstrtobool(arg, &trust_cpu);
826}
827static int __init parse_trust_bootloader(char *arg)
828{
829 return kstrtobool(arg, &trust_bootloader);
830}
831early_param("random.trust_cpu", parse_trust_cpu);
832early_param("random.trust_bootloader", parse_trust_bootloader);
833
834static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
835{
836 unsigned long flags, entropy = random_get_entropy();
837
838 /*
839 * Encode a representation of how long the system has been suspended,
840 * in a way that is distinct from prior system suspends.
841 */
842 ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };
843
844 spin_lock_irqsave(&input_pool.lock, flags);
845 _mix_pool_bytes(&action, sizeof(action));
846 _mix_pool_bytes(stamps, sizeof(stamps));
847 _mix_pool_bytes(&entropy, sizeof(entropy));
848 spin_unlock_irqrestore(&input_pool.lock, flags);
849
850 if (crng_ready() && (action == PM_RESTORE_PREPARE ||
851 (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
852 !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
853 crng_reseed(NULL);
854 pr_notice("crng reseeded on system resumption\n");
855 }
856 return 0;
857}
858
859static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
860
861/*
862 * This is called extremely early, before time keeping functionality is
863 * available, but arch randomness is. Interrupts are not yet enabled.
864 */
865void __init random_init_early(const char *command_line)
866{
867 unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
868 size_t i, longs, arch_bits;
869
870#if defined(LATENT_ENTROPY_PLUGIN)
871 static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
872 _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
873#endif
874
875 for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
876 longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
877 if (longs) {
878 _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
879 i += longs;
880 continue;
881 }
882 longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
883 if (longs) {
884 _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
885 i += longs;
886 continue;
887 }
888 arch_bits -= sizeof(*entropy) * 8;
889 ++i;
890 }
891
892 _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
893 _mix_pool_bytes(command_line, strlen(command_line));
894
895 /* Reseed if already seeded by earlier phases. */
896 if (crng_ready())
897 crng_reseed(NULL);
898 else if (trust_cpu)
899 _credit_init_bits(arch_bits);
900}
901
902/*
903 * This is called a little bit after the prior function, and now there is
904 * access to timestamps counters. Interrupts are not yet enabled.
905 */
906void __init random_init(void)
907{
908 unsigned long entropy = random_get_entropy();
909 ktime_t now = ktime_get_real();
910
911 _mix_pool_bytes(&now, sizeof(now));
912 _mix_pool_bytes(&entropy, sizeof(entropy));
913 add_latent_entropy();
914
915 /*
916 * If we were initialized by the cpu or bootloader before jump labels
917 * or workqueues are initialized, then we should enable the static
918 * branch here, where it's guaranteed that these have been initialized.
919 */
920 if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
921 crng_set_ready(NULL);
922
923 /* Reseed if already seeded by earlier phases. */
924 if (crng_ready())
925 crng_reseed(NULL);
926
927 WARN_ON(register_pm_notifier(&pm_notifier));
928
929 WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
930 "entropy collection will consequently suffer.");
931}
932
933/*
934 * Add device- or boot-specific data to the input pool to help
935 * initialize it.
936 *
937 * None of this adds any entropy; it is meant to avoid the problem of
938 * the entropy pool having similar initial state across largely
939 * identical devices.
940 */
941void add_device_randomness(const void *buf, size_t len)
942{
943 unsigned long entropy = random_get_entropy();
944 unsigned long flags;
945
946 spin_lock_irqsave(&input_pool.lock, flags);
947 _mix_pool_bytes(&entropy, sizeof(entropy));
948 _mix_pool_bytes(buf, len);
949 spin_unlock_irqrestore(&input_pool.lock, flags);
950}
951EXPORT_SYMBOL(add_device_randomness);
952
953/*
954 * Interface for in-kernel drivers of true hardware RNGs. Those devices
955 * may produce endless random bits, so this function will sleep for
956 * some amount of time after, if the sleep_after parameter is true.
957 */
958void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after)
959{
960 mix_pool_bytes(buf, len);
961 credit_init_bits(entropy);
962
963 /*
964 * Throttle writing to once every reseed interval, unless we're not yet
965 * initialized or no entropy is credited.
966 */
967 if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy))
968 schedule_timeout_interruptible(crng_reseed_interval());
969}
970EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
971
972/*
973 * Handle random seed passed by bootloader, and credit it depending
974 * on the command line option 'random.trust_bootloader'.
975 */
976void __init add_bootloader_randomness(const void *buf, size_t len)
977{
978 mix_pool_bytes(buf, len);
979 if (trust_bootloader)
980 credit_init_bits(len * 8);
981}
982
983#if IS_ENABLED(CONFIG_VMGENID)
984static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
985
986/*
987 * Handle a new unique VM ID, which is unique, not secret, so we
988 * don't credit it, but we do immediately force a reseed after so
989 * that it's used by the crng posthaste.
990 */
991void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
992{
993 add_device_randomness(unique_vm_id, len);
994 if (crng_ready()) {
995 crng_reseed(NULL);
996 pr_notice("crng reseeded due to virtual machine fork\n");
997 }
998 blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
999}
1000#if IS_MODULE(CONFIG_VMGENID)
1001EXPORT_SYMBOL_GPL(add_vmfork_randomness);
1002#endif
1003
1004int __cold register_random_vmfork_notifier(struct notifier_block *nb)
1005{
1006 return blocking_notifier_chain_register(&vmfork_chain, nb);
1007}
1008EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
1009
1010int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
1011{
1012 return blocking_notifier_chain_unregister(&vmfork_chain, nb);
1013}
1014EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
1015#endif
1016
1017struct fast_pool {
1018 unsigned long pool[4];
1019 unsigned long last;
1020 unsigned int count;
1021 struct timer_list mix;
1022};
1023
1024static void mix_interrupt_randomness(struct timer_list *work);
1025
1026static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
1027#ifdef CONFIG_64BIT
1028#define FASTMIX_PERM SIPHASH_PERMUTATION
1029 .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
1030#else
1031#define FASTMIX_PERM HSIPHASH_PERMUTATION
1032 .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
1033#endif
1034 .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
1035};
1036
1037/*
1038 * This is [Half]SipHash-1-x, starting from an empty key. Because
1039 * the key is fixed, it assumes that its inputs are non-malicious,
1040 * and therefore this has no security on its own. s represents the
1041 * four-word SipHash state, while v represents a two-word input.
1042 */
1043static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
1044{
1045 s[3] ^= v1;
1046 FASTMIX_PERM(s[0], s[1], s[2], s[3]);
1047 s[0] ^= v1;
1048 s[3] ^= v2;
1049 FASTMIX_PERM(s[0], s[1], s[2], s[3]);
1050 s[0] ^= v2;
1051}
1052
1053#ifdef CONFIG_SMP
1054/*
1055 * This function is called when the CPU has just come online, with
1056 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
1057 */
1058int __cold random_online_cpu(unsigned int cpu)
1059{
1060 /*
1061 * During CPU shutdown and before CPU onlining, add_interrupt_
1062 * randomness() may schedule mix_interrupt_randomness(), and
1063 * set the MIX_INFLIGHT flag. However, because the worker can
1064 * be scheduled on a different CPU during this period, that
1065 * flag will never be cleared. For that reason, we zero out
1066 * the flag here, which runs just after workqueues are onlined
1067 * for the CPU again. This also has the effect of setting the
1068 * irq randomness count to zero so that new accumulated irqs
1069 * are fresh.
1070 */
1071 per_cpu_ptr(&irq_randomness, cpu)->count = 0;
1072 return 0;
1073}
1074#endif
1075
1076static void mix_interrupt_randomness(struct timer_list *work)
1077{
1078 struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
1079 /*
1080 * The size of the copied stack pool is explicitly 2 longs so that we
1081 * only ever ingest half of the siphash output each time, retaining
1082 * the other half as the next "key" that carries over. The entropy is
1083 * supposed to be sufficiently dispersed between bits so on average
1084 * we don't wind up "losing" some.
1085 */
1086 unsigned long pool[2];
1087 unsigned int count;
1088
1089 /* Check to see if we're running on the wrong CPU due to hotplug. */
1090 local_irq_disable();
1091 if (fast_pool != this_cpu_ptr(&irq_randomness)) {
1092 local_irq_enable();
1093 return;
1094 }
1095
1096 /*
1097 * Copy the pool to the stack so that the mixer always has a
1098 * consistent view, before we reenable irqs again.
1099 */
1100 memcpy(pool, fast_pool->pool, sizeof(pool));
1101 count = fast_pool->count;
1102 fast_pool->count = 0;
1103 fast_pool->last = jiffies;
1104 local_irq_enable();
1105
1106 mix_pool_bytes(pool, sizeof(pool));
1107 credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
1108
1109 memzero_explicit(pool, sizeof(pool));
1110}
1111
1112void add_interrupt_randomness(int irq)
1113{
1114 enum { MIX_INFLIGHT = 1U << 31 };
1115 unsigned long entropy = random_get_entropy();
1116 struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
1117 struct pt_regs *regs = get_irq_regs();
1118 unsigned int new_count;
1119
1120 fast_mix(fast_pool->pool, entropy,
1121 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
1122 new_count = ++fast_pool->count;
1123
1124 if (new_count & MIX_INFLIGHT)
1125 return;
1126
1127 if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
1128 return;
1129
1130 fast_pool->count |= MIX_INFLIGHT;
1131 if (!timer_pending(&fast_pool->mix)) {
1132 fast_pool->mix.expires = jiffies;
1133 add_timer_on(&fast_pool->mix, raw_smp_processor_id());
1134 }
1135}
1136EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1137
1138/* There is one of these per entropy source */
1139struct timer_rand_state {
1140 unsigned long last_time;
1141 long last_delta, last_delta2;
1142};
1143
1144/*
1145 * This function adds entropy to the entropy "pool" by using timing
1146 * delays. It uses the timer_rand_state structure to make an estimate
1147 * of how many bits of entropy this call has added to the pool. The
1148 * value "num" is also added to the pool; it should somehow describe
1149 * the type of event that just happened.
1150 */
1151static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
1152{
1153 unsigned long entropy = random_get_entropy(), now = jiffies, flags;
1154 long delta, delta2, delta3;
1155 unsigned int bits;
1156
1157 /*
1158 * If we're in a hard IRQ, add_interrupt_randomness() will be called
1159 * sometime after, so mix into the fast pool.
1160 */
1161 if (in_hardirq()) {
1162 fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
1163 } else {
1164 spin_lock_irqsave(&input_pool.lock, flags);
1165 _mix_pool_bytes(&entropy, sizeof(entropy));
1166 _mix_pool_bytes(&num, sizeof(num));
1167 spin_unlock_irqrestore(&input_pool.lock, flags);
1168 }
1169
1170 if (crng_ready())
1171 return;
1172
1173 /*
1174 * Calculate number of bits of randomness we probably added.
1175 * We take into account the first, second and third-order deltas
1176 * in order to make our estimate.
1177 */
1178 delta = now - READ_ONCE(state->last_time);
1179 WRITE_ONCE(state->last_time, now);
1180
1181 delta2 = delta - READ_ONCE(state->last_delta);
1182 WRITE_ONCE(state->last_delta, delta);
1183
1184 delta3 = delta2 - READ_ONCE(state->last_delta2);
1185 WRITE_ONCE(state->last_delta2, delta2);
1186
1187 if (delta < 0)
1188 delta = -delta;
1189 if (delta2 < 0)
1190 delta2 = -delta2;
1191 if (delta3 < 0)
1192 delta3 = -delta3;
1193 if (delta > delta2)
1194 delta = delta2;
1195 if (delta > delta3)
1196 delta = delta3;
1197
1198 /*
1199 * delta is now minimum absolute delta. Round down by 1 bit
1200 * on general principles, and limit entropy estimate to 11 bits.
1201 */
1202 bits = min(fls(delta >> 1), 11);
1203
1204 /*
1205 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
1206 * will run after this, which uses a different crediting scheme of 1 bit
1207 * per every 64 interrupts. In order to let that function do accounting
1208 * close to the one in this function, we credit a full 64/64 bit per bit,
1209 * and then subtract one to account for the extra one added.
1210 */
1211 if (in_hardirq())
1212 this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
1213 else
1214 _credit_init_bits(bits);
1215}
1216
1217void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
1218{
1219 static unsigned char last_value;
1220 static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
1221
1222 /* Ignore autorepeat and the like. */
1223 if (value == last_value)
1224 return;
1225
1226 last_value = value;
1227 add_timer_randomness(&input_timer_state,
1228 (type << 4) ^ code ^ (code >> 4) ^ value);
1229}
1230EXPORT_SYMBOL_GPL(add_input_randomness);
1231
1232#ifdef CONFIG_BLOCK
1233void add_disk_randomness(struct gendisk *disk)
1234{
1235 if (!disk || !disk->random)
1236 return;
1237 /* First major is 1, so we get >= 0x200 here. */
1238 add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1239}
1240EXPORT_SYMBOL_GPL(add_disk_randomness);
1241
1242void __cold rand_initialize_disk(struct gendisk *disk)
1243{
1244 struct timer_rand_state *state;
1245
1246 /*
1247 * If kzalloc returns null, we just won't use that entropy
1248 * source.
1249 */
1250 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1251 if (state) {
1252 state->last_time = INITIAL_JIFFIES;
1253 disk->random = state;
1254 }
1255}
1256#endif
1257
1258struct entropy_timer_state {
1259 unsigned long entropy;
1260 struct timer_list timer;
1261 atomic_t samples;
1262 unsigned int samples_per_bit;
1263};
1264
1265/*
1266 * Each time the timer fires, we expect that we got an unpredictable jump in
1267 * the cycle counter. Even if the timer is running on another CPU, the timer
1268 * activity will be touching the stack of the CPU that is generating entropy.
1269 *
1270 * Note that we don't re-arm the timer in the timer itself - we are happy to be
1271 * scheduled away, since that just makes the load more complex, but we do not
1272 * want the timer to keep ticking unless the entropy loop is running.
1273 *
1274 * So the re-arming always happens in the entropy loop itself.
1275 */
1276static void __cold entropy_timer(struct timer_list *timer)
1277{
1278 struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
1279 unsigned long entropy = random_get_entropy();
1280
1281 mix_pool_bytes(&entropy, sizeof(entropy));
1282 if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
1283 credit_init_bits(1);
1284}
1285
1286/*
1287 * If we have an actual cycle counter, see if we can generate enough entropy
1288 * with timing noise.
1289 */
1290static void __cold try_to_generate_entropy(void)
1291{
1292 enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
1293 u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
1294 struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
1295 unsigned int i, num_different = 0;
1296 unsigned long last = random_get_entropy();
1297 int cpu = -1;
1298
1299 for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
1300 stack->entropy = random_get_entropy();
1301 if (stack->entropy != last)
1302 ++num_different;
1303 last = stack->entropy;
1304 }
1305 stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
1306 if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
1307 return;
1308
1309 atomic_set(&stack->samples, 0);
1310 timer_setup_on_stack(&stack->timer, entropy_timer, 0);
1311 while (!crng_ready() && !signal_pending(current)) {
1312 /*
1313 * Check !timer_pending() and then ensure that any previous callback has finished
1314 * executing by checking try_to_del_timer_sync(), before queueing the next one.
1315 */
1316 if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
1317 struct cpumask timer_cpus;
1318 unsigned int num_cpus;
1319
1320 /*
1321 * Preemption must be disabled here, both to read the current CPU number
1322 * and to avoid scheduling a timer on a dead CPU.
1323 */
1324 preempt_disable();
1325
1326 /* Only schedule callbacks on timer CPUs that are online. */
1327 cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
1328 num_cpus = cpumask_weight(&timer_cpus);
1329 /* In very bizarre case of misconfiguration, fallback to all online. */
1330 if (unlikely(num_cpus == 0)) {
1331 timer_cpus = *cpu_online_mask;
1332 num_cpus = cpumask_weight(&timer_cpus);
1333 }
1334
1335 /* Basic CPU round-robin, which avoids the current CPU. */
1336 do {
1337 cpu = cpumask_next(cpu, &timer_cpus);
1338 if (cpu >= nr_cpu_ids)
1339 cpu = cpumask_first(&timer_cpus);
1340 } while (cpu == smp_processor_id() && num_cpus > 1);
1341
1342 /* Expiring the timer at `jiffies` means it's the next tick. */
1343 stack->timer.expires = jiffies;
1344
1345 add_timer_on(&stack->timer, cpu);
1346
1347 preempt_enable();
1348 }
1349 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
1350 schedule();
1351 stack->entropy = random_get_entropy();
1352 }
1353 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
1354
1355 del_timer_sync(&stack->timer);
1356 destroy_timer_on_stack(&stack->timer);
1357}
1358
1359
1360/**********************************************************************
1361 *
1362 * Userspace reader/writer interfaces.
1363 *
1364 * getrandom(2) is the primary modern interface into the RNG and should
1365 * be used in preference to anything else.
1366 *
1367 * Reading from /dev/random has the same functionality as calling
1368 * getrandom(2) with flags=0. In earlier versions, however, it had
1369 * vastly different semantics and should therefore be avoided, to
1370 * prevent backwards compatibility issues.
1371 *
1372 * Reading from /dev/urandom has the same functionality as calling
1373 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1374 * waiting for the RNG to be ready, it should not be used.
1375 *
1376 * Writing to either /dev/random or /dev/urandom adds entropy to
1377 * the input pool but does not credit it.
1378 *
1379 * Polling on /dev/random indicates when the RNG is initialized, on
1380 * the read side, and when it wants new entropy, on the write side.
1381 *
1382 * Both /dev/random and /dev/urandom have the same set of ioctls for
1383 * adding entropy, getting the entropy count, zeroing the count, and
1384 * reseeding the crng.
1385 *
1386 **********************************************************************/
1387
1388SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
1389{
1390 struct iov_iter iter;
1391 int ret;
1392
1393 if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
1394 return -EINVAL;
1395
1396 /*
1397 * Requesting insecure and blocking randomness at the same time makes
1398 * no sense.
1399 */
1400 if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
1401 return -EINVAL;
1402
1403 if (!crng_ready() && !(flags & GRND_INSECURE)) {
1404 if (flags & GRND_NONBLOCK)
1405 return -EAGAIN;
1406 ret = wait_for_random_bytes();
1407 if (unlikely(ret))
1408 return ret;
1409 }
1410
1411 ret = import_ubuf(ITER_DEST, ubuf, len, &iter);
1412 if (unlikely(ret))
1413 return ret;
1414 return get_random_bytes_user(&iter);
1415}
1416
1417static __poll_t random_poll(struct file *file, poll_table *wait)
1418{
1419 poll_wait(file, &crng_init_wait, wait);
1420 return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
1421}
1422
1423static ssize_t write_pool_user(struct iov_iter *iter)
1424{
1425 u8 block[BLAKE2S_BLOCK_SIZE];
1426 ssize_t ret = 0;
1427 size_t copied;
1428
1429 if (unlikely(!iov_iter_count(iter)))
1430 return 0;
1431
1432 for (;;) {
1433 copied = copy_from_iter(block, sizeof(block), iter);
1434 ret += copied;
1435 mix_pool_bytes(block, copied);
1436 if (!iov_iter_count(iter) || copied != sizeof(block))
1437 break;
1438
1439 BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
1440 if (ret % PAGE_SIZE == 0) {
1441 if (signal_pending(current))
1442 break;
1443 cond_resched();
1444 }
1445 }
1446
1447 memzero_explicit(block, sizeof(block));
1448 return ret ? ret : -EFAULT;
1449}
1450
1451static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
1452{
1453 return write_pool_user(iter);
1454}
1455
1456static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1457{
1458 static int maxwarn = 10;
1459
1460 /*
1461 * Opportunistically attempt to initialize the RNG on platforms that
1462 * have fast cycle counters, but don't (for now) require it to succeed.
1463 */
1464 if (!crng_ready())
1465 try_to_generate_entropy();
1466
1467 if (!crng_ready()) {
1468 if (!ratelimit_disable && maxwarn <= 0)
1469 ++urandom_warning.missed;
1470 else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
1471 --maxwarn;
1472 pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
1473 current->comm, iov_iter_count(iter));
1474 }
1475 }
1476
1477 return get_random_bytes_user(iter);
1478}
1479
1480static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1481{
1482 int ret;
1483
1484 if (!crng_ready() &&
1485 ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
1486 (kiocb->ki_filp->f_flags & O_NONBLOCK)))
1487 return -EAGAIN;
1488
1489 ret = wait_for_random_bytes();
1490 if (ret != 0)
1491 return ret;
1492 return get_random_bytes_user(iter);
1493}
1494
1495static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1496{
1497 int __user *p = (int __user *)arg;
1498 int ent_count;
1499
1500 switch (cmd) {
1501 case RNDGETENTCNT:
1502 /* Inherently racy, no point locking. */
1503 if (put_user(input_pool.init_bits, p))
1504 return -EFAULT;
1505 return 0;
1506 case RNDADDTOENTCNT:
1507 if (!capable(CAP_SYS_ADMIN))
1508 return -EPERM;
1509 if (get_user(ent_count, p))
1510 return -EFAULT;
1511 if (ent_count < 0)
1512 return -EINVAL;
1513 credit_init_bits(ent_count);
1514 return 0;
1515 case RNDADDENTROPY: {
1516 struct iov_iter iter;
1517 ssize_t ret;
1518 int len;
1519
1520 if (!capable(CAP_SYS_ADMIN))
1521 return -EPERM;
1522 if (get_user(ent_count, p++))
1523 return -EFAULT;
1524 if (ent_count < 0)
1525 return -EINVAL;
1526 if (get_user(len, p++))
1527 return -EFAULT;
1528 ret = import_ubuf(ITER_SOURCE, p, len, &iter);
1529 if (unlikely(ret))
1530 return ret;
1531 ret = write_pool_user(&iter);
1532 if (unlikely(ret < 0))
1533 return ret;
1534 /* Since we're crediting, enforce that it was all written into the pool. */
1535 if (unlikely(ret != len))
1536 return -EFAULT;
1537 credit_init_bits(ent_count);
1538 return 0;
1539 }
1540 case RNDZAPENTCNT:
1541 case RNDCLEARPOOL:
1542 /* No longer has any effect. */
1543 if (!capable(CAP_SYS_ADMIN))
1544 return -EPERM;
1545 return 0;
1546 case RNDRESEEDCRNG:
1547 if (!capable(CAP_SYS_ADMIN))
1548 return -EPERM;
1549 if (!crng_ready())
1550 return -ENODATA;
1551 crng_reseed(NULL);
1552 return 0;
1553 default:
1554 return -EINVAL;
1555 }
1556}
1557
1558static int random_fasync(int fd, struct file *filp, int on)
1559{
1560 return fasync_helper(fd, filp, on, &fasync);
1561}
1562
1563const struct file_operations random_fops = {
1564 .read_iter = random_read_iter,
1565 .write_iter = random_write_iter,
1566 .poll = random_poll,
1567 .unlocked_ioctl = random_ioctl,
1568 .compat_ioctl = compat_ptr_ioctl,
1569 .fasync = random_fasync,
1570 .llseek = noop_llseek,
1571 .splice_read = copy_splice_read,
1572 .splice_write = iter_file_splice_write,
1573};
1574
1575const struct file_operations urandom_fops = {
1576 .read_iter = urandom_read_iter,
1577 .write_iter = random_write_iter,
1578 .unlocked_ioctl = random_ioctl,
1579 .compat_ioctl = compat_ptr_ioctl,
1580 .fasync = random_fasync,
1581 .llseek = noop_llseek,
1582 .splice_read = copy_splice_read,
1583 .splice_write = iter_file_splice_write,
1584};
1585
1586
1587/********************************************************************
1588 *
1589 * Sysctl interface.
1590 *
1591 * These are partly unused legacy knobs with dummy values to not break
1592 * userspace and partly still useful things. They are usually accessible
1593 * in /proc/sys/kernel/random/ and are as follows:
1594 *
1595 * - boot_id - a UUID representing the current boot.
1596 *
1597 * - uuid - a random UUID, different each time the file is read.
1598 *
1599 * - poolsize - the number of bits of entropy that the input pool can
1600 * hold, tied to the POOL_BITS constant.
1601 *
1602 * - entropy_avail - the number of bits of entropy currently in the
1603 * input pool. Always <= poolsize.
1604 *
1605 * - write_wakeup_threshold - the amount of entropy in the input pool
1606 * below which write polls to /dev/random will unblock, requesting
1607 * more entropy, tied to the POOL_READY_BITS constant. It is writable
1608 * to avoid breaking old userspaces, but writing to it does not
1609 * change any behavior of the RNG.
1610 *
1611 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
1612 * It is writable to avoid breaking old userspaces, but writing
1613 * to it does not change any behavior of the RNG.
1614 *
1615 ********************************************************************/
1616
1617#ifdef CONFIG_SYSCTL
1618
1619#include <linux/sysctl.h>
1620
1621static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
1622static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
1623static int sysctl_poolsize = POOL_BITS;
1624static u8 sysctl_bootid[UUID_SIZE];
1625
1626/*
1627 * This function is used to return both the bootid UUID, and random
1628 * UUID. The difference is in whether table->data is NULL; if it is,
1629 * then a new UUID is generated and returned to the user.
1630 */
1631static int proc_do_uuid(const struct ctl_table *table, int write, void *buf,
1632 size_t *lenp, loff_t *ppos)
1633{
1634 u8 tmp_uuid[UUID_SIZE], *uuid;
1635 char uuid_string[UUID_STRING_LEN + 1];
1636 struct ctl_table fake_table = {
1637 .data = uuid_string,
1638 .maxlen = UUID_STRING_LEN
1639 };
1640
1641 if (write)
1642 return -EPERM;
1643
1644 uuid = table->data;
1645 if (!uuid) {
1646 uuid = tmp_uuid;
1647 generate_random_uuid(uuid);
1648 } else {
1649 static DEFINE_SPINLOCK(bootid_spinlock);
1650
1651 spin_lock(&bootid_spinlock);
1652 if (!uuid[8])
1653 generate_random_uuid(uuid);
1654 spin_unlock(&bootid_spinlock);
1655 }
1656
1657 snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
1658 return proc_dostring(&fake_table, 0, buf, lenp, ppos);
1659}
1660
1661/* The same as proc_dointvec, but writes don't change anything. */
1662static int proc_do_rointvec(const struct ctl_table *table, int write, void *buf,
1663 size_t *lenp, loff_t *ppos)
1664{
1665 return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
1666}
1667
1668static struct ctl_table random_table[] = {
1669 {
1670 .procname = "poolsize",
1671 .data = &sysctl_poolsize,
1672 .maxlen = sizeof(int),
1673 .mode = 0444,
1674 .proc_handler = proc_dointvec,
1675 },
1676 {
1677 .procname = "entropy_avail",
1678 .data = &input_pool.init_bits,
1679 .maxlen = sizeof(int),
1680 .mode = 0444,
1681 .proc_handler = proc_dointvec,
1682 },
1683 {
1684 .procname = "write_wakeup_threshold",
1685 .data = &sysctl_random_write_wakeup_bits,
1686 .maxlen = sizeof(int),
1687 .mode = 0644,
1688 .proc_handler = proc_do_rointvec,
1689 },
1690 {
1691 .procname = "urandom_min_reseed_secs",
1692 .data = &sysctl_random_min_urandom_seed,
1693 .maxlen = sizeof(int),
1694 .mode = 0644,
1695 .proc_handler = proc_do_rointvec,
1696 },
1697 {
1698 .procname = "boot_id",
1699 .data = &sysctl_bootid,
1700 .mode = 0444,
1701 .proc_handler = proc_do_uuid,
1702 },
1703 {
1704 .procname = "uuid",
1705 .mode = 0444,
1706 .proc_handler = proc_do_uuid,
1707 },
1708};
1709
1710/*
1711 * random_init() is called before sysctl_init(),
1712 * so we cannot call register_sysctl_init() in random_init()
1713 */
1714static int __init random_sysctls_init(void)
1715{
1716 register_sysctl_init("kernel/random", random_table);
1717 return 0;
1718}
1719device_initcall(random_sysctls_init);
1720#endif
1/*
2 * random.c -- A strong random number generator
3 *
4 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
5 *
6 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
7 * rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, and the entire permission notice in its entirety,
14 * including the disclaimer of warranties.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote
19 * products derived from this software without specific prior
20 * written permission.
21 *
22 * ALTERNATIVELY, this product may be distributed under the terms of
23 * the GNU General Public License, in which case the provisions of the GPL are
24 * required INSTEAD OF the above restrictions. (This clause is
25 * necessary due to a potential bad interaction between the GPL and
26 * the restrictions contained in a BSD-style copyright.)
27 *
28 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
29 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
31 * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
32 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
34 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
36 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
38 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
39 * DAMAGE.
40 */
41
42/*
43 * (now, with legal B.S. out of the way.....)
44 *
45 * This routine gathers environmental noise from device drivers, etc.,
46 * and returns good random numbers, suitable for cryptographic use.
47 * Besides the obvious cryptographic uses, these numbers are also good
48 * for seeding TCP sequence numbers, and other places where it is
49 * desirable to have numbers which are not only random, but hard to
50 * predict by an attacker.
51 *
52 * Theory of operation
53 * ===================
54 *
55 * Computers are very predictable devices. Hence it is extremely hard
56 * to produce truly random numbers on a computer --- as opposed to
57 * pseudo-random numbers, which can easily generated by using a
58 * algorithm. Unfortunately, it is very easy for attackers to guess
59 * the sequence of pseudo-random number generators, and for some
60 * applications this is not acceptable. So instead, we must try to
61 * gather "environmental noise" from the computer's environment, which
62 * must be hard for outside attackers to observe, and use that to
63 * generate random numbers. In a Unix environment, this is best done
64 * from inside the kernel.
65 *
66 * Sources of randomness from the environment include inter-keyboard
67 * timings, inter-interrupt timings from some interrupts, and other
68 * events which are both (a) non-deterministic and (b) hard for an
69 * outside observer to measure. Randomness from these sources are
70 * added to an "entropy pool", which is mixed using a CRC-like function.
71 * This is not cryptographically strong, but it is adequate assuming
72 * the randomness is not chosen maliciously, and it is fast enough that
73 * the overhead of doing it on every interrupt is very reasonable.
74 * As random bytes are mixed into the entropy pool, the routines keep
75 * an *estimate* of how many bits of randomness have been stored into
76 * the random number generator's internal state.
77 *
78 * When random bytes are desired, they are obtained by taking the SHA
79 * hash of the contents of the "entropy pool". The SHA hash avoids
80 * exposing the internal state of the entropy pool. It is believed to
81 * be computationally infeasible to derive any useful information
82 * about the input of SHA from its output. Even if it is possible to
83 * analyze SHA in some clever way, as long as the amount of data
84 * returned from the generator is less than the inherent entropy in
85 * the pool, the output data is totally unpredictable. For this
86 * reason, the routine decreases its internal estimate of how many
87 * bits of "true randomness" are contained in the entropy pool as it
88 * outputs random numbers.
89 *
90 * If this estimate goes to zero, the routine can still generate
91 * random numbers; however, an attacker may (at least in theory) be
92 * able to infer the future output of the generator from prior
93 * outputs. This requires successful cryptanalysis of SHA, which is
94 * not believed to be feasible, but there is a remote possibility.
95 * Nonetheless, these numbers should be useful for the vast majority
96 * of purposes.
97 *
98 * Exported interfaces ---- output
99 * ===============================
100 *
101 * There are three exported interfaces; the first is one designed to
102 * be used from within the kernel:
103 *
104 * void get_random_bytes(void *buf, int nbytes);
105 *
106 * This interface will return the requested number of random bytes,
107 * and place it in the requested buffer.
108 *
109 * The two other interfaces are two character devices /dev/random and
110 * /dev/urandom. /dev/random is suitable for use when very high
111 * quality randomness is desired (for example, for key generation or
112 * one-time pads), as it will only return a maximum of the number of
113 * bits of randomness (as estimated by the random number generator)
114 * contained in the entropy pool.
115 *
116 * The /dev/urandom device does not have this limit, and will return
117 * as many bytes as are requested. As more and more random bytes are
118 * requested without giving time for the entropy pool to recharge,
119 * this will result in random numbers that are merely cryptographically
120 * strong. For many applications, however, this is acceptable.
121 *
122 * Exported interfaces ---- input
123 * ==============================
124 *
125 * The current exported interfaces for gathering environmental noise
126 * from the devices are:
127 *
128 * void add_device_randomness(const void *buf, unsigned int size);
129 * void add_input_randomness(unsigned int type, unsigned int code,
130 * unsigned int value);
131 * void add_interrupt_randomness(int irq, int irq_flags);
132 * void add_disk_randomness(struct gendisk *disk);
133 *
134 * add_device_randomness() is for adding data to the random pool that
135 * is likely to differ between two devices (or possibly even per boot).
136 * This would be things like MAC addresses or serial numbers, or the
137 * read-out of the RTC. This does *not* add any actual entropy to the
138 * pool, but it initializes the pool to different values for devices
139 * that might otherwise be identical and have very little entropy
140 * available to them (particularly common in the embedded world).
141 *
142 * add_input_randomness() uses the input layer interrupt timing, as well as
143 * the event type information from the hardware.
144 *
145 * add_interrupt_randomness() uses the interrupt timing as random
146 * inputs to the entropy pool. Using the cycle counters and the irq source
147 * as inputs, it feeds the randomness roughly once a second.
148 *
149 * add_disk_randomness() uses what amounts to the seek time of block
150 * layer request events, on a per-disk_devt basis, as input to the
151 * entropy pool. Note that high-speed solid state drives with very low
152 * seek times do not make for good sources of entropy, as their seek
153 * times are usually fairly consistent.
154 *
155 * All of these routines try to estimate how many bits of randomness a
156 * particular randomness source. They do this by keeping track of the
157 * first and second order deltas of the event timings.
158 *
159 * Ensuring unpredictability at system startup
160 * ============================================
161 *
162 * When any operating system starts up, it will go through a sequence
163 * of actions that are fairly predictable by an adversary, especially
164 * if the start-up does not involve interaction with a human operator.
165 * This reduces the actual number of bits of unpredictability in the
166 * entropy pool below the value in entropy_count. In order to
167 * counteract this effect, it helps to carry information in the
168 * entropy pool across shut-downs and start-ups. To do this, put the
169 * following lines an appropriate script which is run during the boot
170 * sequence:
171 *
172 * echo "Initializing random number generator..."
173 * random_seed=/var/run/random-seed
174 * # Carry a random seed from start-up to start-up
175 * # Load and then save the whole entropy pool
176 * if [ -f $random_seed ]; then
177 * cat $random_seed >/dev/urandom
178 * else
179 * touch $random_seed
180 * fi
181 * chmod 600 $random_seed
182 * dd if=/dev/urandom of=$random_seed count=1 bs=512
183 *
184 * and the following lines in an appropriate script which is run as
185 * the system is shutdown:
186 *
187 * # Carry a random seed from shut-down to start-up
188 * # Save the whole entropy pool
189 * echo "Saving random seed..."
190 * random_seed=/var/run/random-seed
191 * touch $random_seed
192 * chmod 600 $random_seed
193 * dd if=/dev/urandom of=$random_seed count=1 bs=512
194 *
195 * For example, on most modern systems using the System V init
196 * scripts, such code fragments would be found in
197 * /etc/rc.d/init.d/random. On older Linux systems, the correct script
198 * location might be in /etc/rcb.d/rc.local or /etc/rc.d/rc.0.
199 *
200 * Effectively, these commands cause the contents of the entropy pool
201 * to be saved at shut-down time and reloaded into the entropy pool at
202 * start-up. (The 'dd' in the addition to the bootup script is to
203 * make sure that /etc/random-seed is different for every start-up,
204 * even if the system crashes without executing rc.0.) Even with
205 * complete knowledge of the start-up activities, predicting the state
206 * of the entropy pool requires knowledge of the previous history of
207 * the system.
208 *
209 * Configuring the /dev/random driver under Linux
210 * ==============================================
211 *
212 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
213 * the /dev/mem major number (#1). So if your system does not have
214 * /dev/random and /dev/urandom created already, they can be created
215 * by using the commands:
216 *
217 * mknod /dev/random c 1 8
218 * mknod /dev/urandom c 1 9
219 *
220 * Acknowledgements:
221 * =================
222 *
223 * Ideas for constructing this random number generator were derived
224 * from Pretty Good Privacy's random number generator, and from private
225 * discussions with Phil Karn. Colin Plumb provided a faster random
226 * number generator, which speed up the mixing function of the entropy
227 * pool, taken from PGPfone. Dale Worley has also contributed many
228 * useful ideas and suggestions to improve this driver.
229 *
230 * Any flaws in the design are solely my responsibility, and should
231 * not be attributed to the Phil, Colin, or any of authors of PGP.
232 *
233 * Further background information on this topic may be obtained from
234 * RFC 1750, "Randomness Recommendations for Security", by Donald
235 * Eastlake, Steve Crocker, and Jeff Schiller.
236 */
237
238#include <linux/utsname.h>
239#include <linux/module.h>
240#include <linux/kernel.h>
241#include <linux/major.h>
242#include <linux/string.h>
243#include <linux/fcntl.h>
244#include <linux/slab.h>
245#include <linux/random.h>
246#include <linux/poll.h>
247#include <linux/init.h>
248#include <linux/fs.h>
249#include <linux/genhd.h>
250#include <linux/interrupt.h>
251#include <linux/mm.h>
252#include <linux/spinlock.h>
253#include <linux/percpu.h>
254#include <linux/cryptohash.h>
255#include <linux/fips.h>
256#include <linux/ptrace.h>
257#include <linux/kmemcheck.h>
258#include <linux/workqueue.h>
259#include <linux/irq.h>
260
261#include <asm/processor.h>
262#include <asm/uaccess.h>
263#include <asm/irq.h>
264#include <asm/irq_regs.h>
265#include <asm/io.h>
266
267#define CREATE_TRACE_POINTS
268#include <trace/events/random.h>
269
270/*
271 * Configuration information
272 */
273#define INPUT_POOL_SHIFT 12
274#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
275#define OUTPUT_POOL_SHIFT 10
276#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
277#define SEC_XFER_SIZE 512
278#define EXTRACT_SIZE 10
279
280#define DEBUG_RANDOM_BOOT 0
281
282#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
283
284/*
285 * To allow fractional bits to be tracked, the entropy_count field is
286 * denominated in units of 1/8th bits.
287 *
288 * 2*(ENTROPY_SHIFT + log2(poolbits)) must <= 31, or the multiply in
289 * credit_entropy_bits() needs to be 64 bits wide.
290 */
291#define ENTROPY_SHIFT 3
292#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
293
294/*
295 * The minimum number of bits of entropy before we wake up a read on
296 * /dev/random. Should be enough to do a significant reseed.
297 */
298static int random_read_wakeup_bits = 64;
299
300/*
301 * If the entropy count falls under this number of bits, then we
302 * should wake up processes which are selecting or polling on write
303 * access to /dev/random.
304 */
305static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
306
307/*
308 * The minimum number of seconds between urandom pool reseeding. We
309 * do this to limit the amount of entropy that can be drained from the
310 * input pool even if there are heavy demands on /dev/urandom.
311 */
312static int random_min_urandom_seed = 60;
313
314/*
315 * Originally, we used a primitive polynomial of degree .poolwords
316 * over GF(2). The taps for various sizes are defined below. They
317 * were chosen to be evenly spaced except for the last tap, which is 1
318 * to get the twisting happening as fast as possible.
319 *
320 * For the purposes of better mixing, we use the CRC-32 polynomial as
321 * well to make a (modified) twisted Generalized Feedback Shift
322 * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR
323 * generators. ACM Transactions on Modeling and Computer Simulation
324 * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted
325 * GFSR generators II. ACM Transactions on Modeling and Computer
326 * Simulation 4:254-266)
327 *
328 * Thanks to Colin Plumb for suggesting this.
329 *
330 * The mixing operation is much less sensitive than the output hash,
331 * where we use SHA-1. All that we want of mixing operation is that
332 * it be a good non-cryptographic hash; i.e. it not produce collisions
333 * when fed "random" data of the sort we expect to see. As long as
334 * the pool state differs for different inputs, we have preserved the
335 * input entropy and done a good job. The fact that an intelligent
336 * attacker can construct inputs that will produce controlled
337 * alterations to the pool's state is not important because we don't
338 * consider such inputs to contribute any randomness. The only
339 * property we need with respect to them is that the attacker can't
340 * increase his/her knowledge of the pool's state. Since all
341 * additions are reversible (knowing the final state and the input,
342 * you can reconstruct the initial state), if an attacker has any
343 * uncertainty about the initial state, he/she can only shuffle that
344 * uncertainty about, but never cause any collisions (which would
345 * decrease the uncertainty).
346 *
347 * Our mixing functions were analyzed by Lacharme, Roeck, Strubel, and
348 * Videau in their paper, "The Linux Pseudorandom Number Generator
349 * Revisited" (see: http://eprint.iacr.org/2012/251.pdf). In their
350 * paper, they point out that we are not using a true Twisted GFSR,
351 * since Matsumoto & Kurita used a trinomial feedback polynomial (that
352 * is, with only three taps, instead of the six that we are using).
353 * As a result, the resulting polynomial is neither primitive nor
354 * irreducible, and hence does not have a maximal period over
355 * GF(2**32). They suggest a slight change to the generator
356 * polynomial which improves the resulting TGFSR polynomial to be
357 * irreducible, which we have made here.
358 */
359static struct poolinfo {
360 int poolbitshift, poolwords, poolbytes, poolbits, poolfracbits;
361#define S(x) ilog2(x)+5, (x), (x)*4, (x)*32, (x) << (ENTROPY_SHIFT+5)
362 int tap1, tap2, tap3, tap4, tap5;
363} poolinfo_table[] = {
364 /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
365 /* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
366 { S(128), 104, 76, 51, 25, 1 },
367 /* was: x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 */
368 /* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
369 { S(32), 26, 19, 14, 7, 1 },
370#if 0
371 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
372 { S(2048), 1638, 1231, 819, 411, 1 },
373
374 /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
375 { S(1024), 817, 615, 412, 204, 1 },
376
377 /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
378 { S(1024), 819, 616, 410, 207, 2 },
379
380 /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
381 { S(512), 411, 308, 208, 104, 1 },
382
383 /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
384 { S(512), 409, 307, 206, 102, 2 },
385 /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
386 { S(512), 409, 309, 205, 103, 2 },
387
388 /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
389 { S(256), 205, 155, 101, 52, 1 },
390
391 /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
392 { S(128), 103, 78, 51, 27, 2 },
393
394 /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
395 { S(64), 52, 39, 26, 14, 1 },
396#endif
397};
398
399/*
400 * Static global variables
401 */
402static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
403static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
404static struct fasync_struct *fasync;
405
406/**********************************************************************
407 *
408 * OS independent entropy store. Here are the functions which handle
409 * storing entropy in an entropy pool.
410 *
411 **********************************************************************/
412
413struct entropy_store;
414struct entropy_store {
415 /* read-only data: */
416 const struct poolinfo *poolinfo;
417 __u32 *pool;
418 const char *name;
419 struct entropy_store *pull;
420 struct work_struct push_work;
421
422 /* read-write data: */
423 unsigned long last_pulled;
424 spinlock_t lock;
425 unsigned short add_ptr;
426 unsigned short input_rotate;
427 int entropy_count;
428 int entropy_total;
429 unsigned int initialized:1;
430 unsigned int limit:1;
431 unsigned int last_data_init:1;
432 __u8 last_data[EXTRACT_SIZE];
433};
434
435static void push_to_pool(struct work_struct *work);
436static __u32 input_pool_data[INPUT_POOL_WORDS];
437static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
438static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
439
440static struct entropy_store input_pool = {
441 .poolinfo = &poolinfo_table[0],
442 .name = "input",
443 .limit = 1,
444 .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
445 .pool = input_pool_data
446};
447
448static struct entropy_store blocking_pool = {
449 .poolinfo = &poolinfo_table[1],
450 .name = "blocking",
451 .limit = 1,
452 .pull = &input_pool,
453 .lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
454 .pool = blocking_pool_data,
455 .push_work = __WORK_INITIALIZER(blocking_pool.push_work,
456 push_to_pool),
457};
458
459static struct entropy_store nonblocking_pool = {
460 .poolinfo = &poolinfo_table[1],
461 .name = "nonblocking",
462 .pull = &input_pool,
463 .lock = __SPIN_LOCK_UNLOCKED(nonblocking_pool.lock),
464 .pool = nonblocking_pool_data,
465 .push_work = __WORK_INITIALIZER(nonblocking_pool.push_work,
466 push_to_pool),
467};
468
469static __u32 const twist_table[8] = {
470 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
471 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
472
473/*
474 * This function adds bytes into the entropy "pool". It does not
475 * update the entropy estimate. The caller should call
476 * credit_entropy_bits if this is appropriate.
477 *
478 * The pool is stirred with a primitive polynomial of the appropriate
479 * degree, and then twisted. We twist by three bits at a time because
480 * it's cheap to do so and helps slightly in the expected case where
481 * the entropy is concentrated in the low-order bits.
482 */
483static void _mix_pool_bytes(struct entropy_store *r, const void *in,
484 int nbytes, __u8 out[64])
485{
486 unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
487 int input_rotate;
488 int wordmask = r->poolinfo->poolwords - 1;
489 const char *bytes = in;
490 __u32 w;
491
492 tap1 = r->poolinfo->tap1;
493 tap2 = r->poolinfo->tap2;
494 tap3 = r->poolinfo->tap3;
495 tap4 = r->poolinfo->tap4;
496 tap5 = r->poolinfo->tap5;
497
498 smp_rmb();
499 input_rotate = ACCESS_ONCE(r->input_rotate);
500 i = ACCESS_ONCE(r->add_ptr);
501
502 /* mix one byte at a time to simplify size handling and churn faster */
503 while (nbytes--) {
504 w = rol32(*bytes++, input_rotate);
505 i = (i - 1) & wordmask;
506
507 /* XOR in the various taps */
508 w ^= r->pool[i];
509 w ^= r->pool[(i + tap1) & wordmask];
510 w ^= r->pool[(i + tap2) & wordmask];
511 w ^= r->pool[(i + tap3) & wordmask];
512 w ^= r->pool[(i + tap4) & wordmask];
513 w ^= r->pool[(i + tap5) & wordmask];
514
515 /* Mix the result back in with a twist */
516 r->pool[i] = (w >> 3) ^ twist_table[w & 7];
517
518 /*
519 * Normally, we add 7 bits of rotation to the pool.
520 * At the beginning of the pool, add an extra 7 bits
521 * rotation, so that successive passes spread the
522 * input bits across the pool evenly.
523 */
524 input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
525 }
526
527 ACCESS_ONCE(r->input_rotate) = input_rotate;
528 ACCESS_ONCE(r->add_ptr) = i;
529 smp_wmb();
530
531 if (out)
532 for (j = 0; j < 16; j++)
533 ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
534}
535
536static void __mix_pool_bytes(struct entropy_store *r, const void *in,
537 int nbytes, __u8 out[64])
538{
539 trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
540 _mix_pool_bytes(r, in, nbytes, out);
541}
542
543static void mix_pool_bytes(struct entropy_store *r, const void *in,
544 int nbytes, __u8 out[64])
545{
546 unsigned long flags;
547
548 trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
549 spin_lock_irqsave(&r->lock, flags);
550 _mix_pool_bytes(r, in, nbytes, out);
551 spin_unlock_irqrestore(&r->lock, flags);
552}
553
554struct fast_pool {
555 __u32 pool[4];
556 unsigned long last;
557 unsigned short count;
558 unsigned char rotate;
559 unsigned char last_timer_intr;
560};
561
562/*
563 * This is a fast mixing routine used by the interrupt randomness
564 * collector. It's hardcoded for an 128 bit pool and assumes that any
565 * locks that might be needed are taken by the caller.
566 */
567static void fast_mix(struct fast_pool *f, __u32 input[4])
568{
569 __u32 w;
570 unsigned input_rotate = f->rotate;
571
572 w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
573 f->pool[0] = (w >> 3) ^ twist_table[w & 7];
574 input_rotate = (input_rotate + 14) & 31;
575 w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
576 f->pool[1] = (w >> 3) ^ twist_table[w & 7];
577 input_rotate = (input_rotate + 7) & 31;
578 w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
579 f->pool[2] = (w >> 3) ^ twist_table[w & 7];
580 input_rotate = (input_rotate + 7) & 31;
581 w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
582 f->pool[3] = (w >> 3) ^ twist_table[w & 7];
583 input_rotate = (input_rotate + 7) & 31;
584
585 f->rotate = input_rotate;
586 f->count++;
587}
588
589/*
590 * Credit (or debit) the entropy store with n bits of entropy.
591 * Use credit_entropy_bits_safe() if the value comes from userspace
592 * or otherwise should be checked for extreme values.
593 */
594static void credit_entropy_bits(struct entropy_store *r, int nbits)
595{
596 int entropy_count, orig;
597 const int pool_size = r->poolinfo->poolfracbits;
598 int nfrac = nbits << ENTROPY_SHIFT;
599
600 if (!nbits)
601 return;
602
603retry:
604 entropy_count = orig = ACCESS_ONCE(r->entropy_count);
605 if (nfrac < 0) {
606 /* Debit */
607 entropy_count += nfrac;
608 } else {
609 /*
610 * Credit: we have to account for the possibility of
611 * overwriting already present entropy. Even in the
612 * ideal case of pure Shannon entropy, new contributions
613 * approach the full value asymptotically:
614 *
615 * entropy <- entropy + (pool_size - entropy) *
616 * (1 - exp(-add_entropy/pool_size))
617 *
618 * For add_entropy <= pool_size/2 then
619 * (1 - exp(-add_entropy/pool_size)) >=
620 * (add_entropy/pool_size)*0.7869...
621 * so we can approximate the exponential with
622 * 3/4*add_entropy/pool_size and still be on the
623 * safe side by adding at most pool_size/2 at a time.
624 *
625 * The use of pool_size-2 in the while statement is to
626 * prevent rounding artifacts from making the loop
627 * arbitrarily long; this limits the loop to log2(pool_size)*2
628 * turns no matter how large nbits is.
629 */
630 int pnfrac = nfrac;
631 const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
632 /* The +2 corresponds to the /4 in the denominator */
633
634 do {
635 unsigned int anfrac = min(pnfrac, pool_size/2);
636 unsigned int add =
637 ((pool_size - entropy_count)*anfrac*3) >> s;
638
639 entropy_count += add;
640 pnfrac -= anfrac;
641 } while (unlikely(entropy_count < pool_size-2 && pnfrac));
642 }
643
644 if (entropy_count < 0) {
645 pr_warn("random: negative entropy/overflow: pool %s count %d\n",
646 r->name, entropy_count);
647 WARN_ON(1);
648 entropy_count = 0;
649 } else if (entropy_count > pool_size)
650 entropy_count = pool_size;
651 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
652 goto retry;
653
654 r->entropy_total += nbits;
655 if (!r->initialized && r->entropy_total > 128) {
656 r->initialized = 1;
657 r->entropy_total = 0;
658 if (r == &nonblocking_pool) {
659 prandom_reseed_late();
660 pr_notice("random: %s pool is initialized\n", r->name);
661 }
662 }
663
664 trace_credit_entropy_bits(r->name, nbits,
665 entropy_count >> ENTROPY_SHIFT,
666 r->entropy_total, _RET_IP_);
667
668 if (r == &input_pool) {
669 int entropy_bits = entropy_count >> ENTROPY_SHIFT;
670
671 /* should we wake readers? */
672 if (entropy_bits >= random_read_wakeup_bits) {
673 wake_up_interruptible(&random_read_wait);
674 kill_fasync(&fasync, SIGIO, POLL_IN);
675 }
676 /* If the input pool is getting full, send some
677 * entropy to the two output pools, flipping back and
678 * forth between them, until the output pools are 75%
679 * full.
680 */
681 if (entropy_bits > random_write_wakeup_bits &&
682 r->initialized &&
683 r->entropy_total >= 2*random_read_wakeup_bits) {
684 static struct entropy_store *last = &blocking_pool;
685 struct entropy_store *other = &blocking_pool;
686
687 if (last == &blocking_pool)
688 other = &nonblocking_pool;
689 if (other->entropy_count <=
690 3 * other->poolinfo->poolfracbits / 4)
691 last = other;
692 if (last->entropy_count <=
693 3 * last->poolinfo->poolfracbits / 4) {
694 schedule_work(&last->push_work);
695 r->entropy_total = 0;
696 }
697 }
698 }
699}
700
701static void credit_entropy_bits_safe(struct entropy_store *r, int nbits)
702{
703 const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
704
705 /* Cap the value to avoid overflows */
706 nbits = min(nbits, nbits_max);
707 nbits = max(nbits, -nbits_max);
708
709 credit_entropy_bits(r, nbits);
710}
711
712/*********************************************************************
713 *
714 * Entropy input management
715 *
716 *********************************************************************/
717
718/* There is one of these per entropy source */
719struct timer_rand_state {
720 cycles_t last_time;
721 long last_delta, last_delta2;
722 unsigned dont_count_entropy:1;
723};
724
725#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };
726
727/*
728 * Add device- or boot-specific data to the input and nonblocking
729 * pools to help initialize them to unique values.
730 *
731 * None of this adds any entropy, it is meant to avoid the
732 * problem of the nonblocking pool having similar initial state
733 * across largely identical devices.
734 */
735void add_device_randomness(const void *buf, unsigned int size)
736{
737 unsigned long time = random_get_entropy() ^ jiffies;
738 unsigned long flags;
739
740 trace_add_device_randomness(size, _RET_IP_);
741 spin_lock_irqsave(&input_pool.lock, flags);
742 _mix_pool_bytes(&input_pool, buf, size, NULL);
743 _mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
744 spin_unlock_irqrestore(&input_pool.lock, flags);
745
746 spin_lock_irqsave(&nonblocking_pool.lock, flags);
747 _mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
748 _mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
749 spin_unlock_irqrestore(&nonblocking_pool.lock, flags);
750}
751EXPORT_SYMBOL(add_device_randomness);
752
753static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
754
755/*
756 * This function adds entropy to the entropy "pool" by using timing
757 * delays. It uses the timer_rand_state structure to make an estimate
758 * of how many bits of entropy this call has added to the pool.
759 *
760 * The number "num" is also added to the pool - it should somehow describe
761 * the type of event which just happened. This is currently 0-255 for
762 * keyboard scan codes, and 256 upwards for interrupts.
763 *
764 */
765static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
766{
767 struct entropy_store *r;
768 struct {
769 long jiffies;
770 unsigned cycles;
771 unsigned num;
772 } sample;
773 long delta, delta2, delta3;
774
775 preempt_disable();
776
777 sample.jiffies = jiffies;
778 sample.cycles = random_get_entropy();
779 sample.num = num;
780 r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
781 mix_pool_bytes(r, &sample, sizeof(sample), NULL);
782
783 /*
784 * Calculate number of bits of randomness we probably added.
785 * We take into account the first, second and third-order deltas
786 * in order to make our estimate.
787 */
788
789 if (!state->dont_count_entropy) {
790 delta = sample.jiffies - state->last_time;
791 state->last_time = sample.jiffies;
792
793 delta2 = delta - state->last_delta;
794 state->last_delta = delta;
795
796 delta3 = delta2 - state->last_delta2;
797 state->last_delta2 = delta2;
798
799 if (delta < 0)
800 delta = -delta;
801 if (delta2 < 0)
802 delta2 = -delta2;
803 if (delta3 < 0)
804 delta3 = -delta3;
805 if (delta > delta2)
806 delta = delta2;
807 if (delta > delta3)
808 delta = delta3;
809
810 /*
811 * delta is now minimum absolute delta.
812 * Round down by 1 bit on general principles,
813 * and limit entropy entimate to 12 bits.
814 */
815 credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
816 }
817 preempt_enable();
818}
819
820void add_input_randomness(unsigned int type, unsigned int code,
821 unsigned int value)
822{
823 static unsigned char last_value;
824
825 /* ignore autorepeat and the like */
826 if (value == last_value)
827 return;
828
829 last_value = value;
830 add_timer_randomness(&input_timer_state,
831 (type << 4) ^ code ^ (code >> 4) ^ value);
832 trace_add_input_randomness(ENTROPY_BITS(&input_pool));
833}
834EXPORT_SYMBOL_GPL(add_input_randomness);
835
836static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
837
838void add_interrupt_randomness(int irq, int irq_flags)
839{
840 struct entropy_store *r;
841 struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
842 struct pt_regs *regs = get_irq_regs();
843 unsigned long now = jiffies;
844 cycles_t cycles = random_get_entropy();
845 __u32 input[4], c_high, j_high;
846 __u64 ip;
847 unsigned long seed;
848 int credit;
849
850 c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
851 j_high = (sizeof(now) > 4) ? now >> 32 : 0;
852 input[0] = cycles ^ j_high ^ irq;
853 input[1] = now ^ c_high;
854 ip = regs ? instruction_pointer(regs) : _RET_IP_;
855 input[2] = ip;
856 input[3] = ip >> 32;
857
858 fast_mix(fast_pool, input);
859
860 if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
861 return;
862
863 fast_pool->last = now;
864
865 r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
866 __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
867
868 /*
869 * If we don't have a valid cycle counter, and we see
870 * back-to-back timer interrupts, then skip giving credit for
871 * any entropy, otherwise credit 1 bit.
872 */
873 credit = 1;
874 if (cycles == 0) {
875 if (irq_flags & __IRQF_TIMER) {
876 if (fast_pool->last_timer_intr)
877 credit = 0;
878 fast_pool->last_timer_intr = 1;
879 } else
880 fast_pool->last_timer_intr = 0;
881 }
882
883 /*
884 * If we have architectural seed generator, produce a seed and
885 * add it to the pool. For the sake of paranoia count it as
886 * 50% entropic.
887 */
888 if (arch_get_random_seed_long(&seed)) {
889 __mix_pool_bytes(r, &seed, sizeof(seed), NULL);
890 credit += sizeof(seed) * 4;
891 }
892
893 credit_entropy_bits(r, credit);
894}
895
896#ifdef CONFIG_BLOCK
897void add_disk_randomness(struct gendisk *disk)
898{
899 if (!disk || !disk->random)
900 return;
901 /* first major is 1, so we get >= 0x200 here */
902 add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
903 trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
904}
905#endif
906
907/*********************************************************************
908 *
909 * Entropy extraction routines
910 *
911 *********************************************************************/
912
913static ssize_t extract_entropy(struct entropy_store *r, void *buf,
914 size_t nbytes, int min, int rsvd);
915
916/*
917 * This utility inline function is responsible for transferring entropy
918 * from the primary pool to the secondary extraction pool. We make
919 * sure we pull enough for a 'catastrophic reseed'.
920 */
921static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
922static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
923{
924 if (r->limit == 0 && random_min_urandom_seed) {
925 unsigned long now = jiffies;
926
927 if (time_before(now,
928 r->last_pulled + random_min_urandom_seed * HZ))
929 return;
930 r->last_pulled = now;
931 }
932 if (r->pull &&
933 r->entropy_count < (nbytes << (ENTROPY_SHIFT + 3)) &&
934 r->entropy_count < r->poolinfo->poolfracbits)
935 _xfer_secondary_pool(r, nbytes);
936}
937
938static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
939{
940 __u32 tmp[OUTPUT_POOL_WORDS];
941
942 /* For /dev/random's pool, always leave two wakeups' worth */
943 int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
944 int bytes = nbytes;
945
946 /* pull at least as much as a wakeup */
947 bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
948 /* but never more than the buffer size */
949 bytes = min_t(int, bytes, sizeof(tmp));
950
951 trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
952 ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
953 bytes = extract_entropy(r->pull, tmp, bytes,
954 random_read_wakeup_bits / 8, rsvd_bytes);
955 mix_pool_bytes(r, tmp, bytes, NULL);
956 credit_entropy_bits(r, bytes*8);
957}
958
959/*
960 * Used as a workqueue function so that when the input pool is getting
961 * full, we can "spill over" some entropy to the output pools. That
962 * way the output pools can store some of the excess entropy instead
963 * of letting it go to waste.
964 */
965static void push_to_pool(struct work_struct *work)
966{
967 struct entropy_store *r = container_of(work, struct entropy_store,
968 push_work);
969 BUG_ON(!r);
970 _xfer_secondary_pool(r, random_read_wakeup_bits/8);
971 trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
972 r->pull->entropy_count >> ENTROPY_SHIFT);
973}
974
975/*
976 * This function decides how many bytes to actually take from the
977 * given pool, and also debits the entropy count accordingly.
978 */
979static size_t account(struct entropy_store *r, size_t nbytes, int min,
980 int reserved)
981{
982 int have_bytes;
983 int entropy_count, orig;
984 size_t ibytes;
985
986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
987
988 /* Can we pull enough? */
989retry:
990 entropy_count = orig = ACCESS_ONCE(r->entropy_count);
991 have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
992 ibytes = nbytes;
993 /* If limited, never pull more than available */
994 if (r->limit)
995 ibytes = min_t(size_t, ibytes, have_bytes - reserved);
996 if (ibytes < min)
997 ibytes = 0;
998 if (have_bytes >= ibytes + reserved)
999 entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
1000 else
1001 entropy_count = reserved << (ENTROPY_SHIFT + 3);
1002
1003 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
1004 goto retry;
1005
1006 trace_debit_entropy(r->name, 8 * ibytes);
1007 if (ibytes &&
1008 (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
1009 wake_up_interruptible(&random_write_wait);
1010 kill_fasync(&fasync, SIGIO, POLL_OUT);
1011 }
1012
1013 return ibytes;
1014}
1015
1016/*
1017 * This function does the actual extraction for extract_entropy and
1018 * extract_entropy_user.
1019 *
1020 * Note: we assume that .poolwords is a multiple of 16 words.
1021 */
1022static void extract_buf(struct entropy_store *r, __u8 *out)
1023{
1024 int i;
1025 union {
1026 __u32 w[5];
1027 unsigned long l[LONGS(20)];
1028 } hash;
1029 __u32 workspace[SHA_WORKSPACE_WORDS];
1030 __u8 extract[64];
1031 unsigned long flags;
1032
1033 /*
1034 * If we have an architectural hardware random number
1035 * generator, use it for SHA's initial vector
1036 */
1037 sha_init(hash.w);
1038 for (i = 0; i < LONGS(20); i++) {
1039 unsigned long v;
1040 if (!arch_get_random_long(&v))
1041 break;
1042 hash.l[i] = v;
1043 }
1044
1045 /* Generate a hash across the pool, 16 words (512 bits) at a time */
1046 spin_lock_irqsave(&r->lock, flags);
1047 for (i = 0; i < r->poolinfo->poolwords; i += 16)
1048 sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
1049
1050 /*
1051 * We mix the hash back into the pool to prevent backtracking
1052 * attacks (where the attacker knows the state of the pool
1053 * plus the current outputs, and attempts to find previous
1054 * ouputs), unless the hash function can be inverted. By
1055 * mixing at least a SHA1 worth of hash data back, we make
1056 * brute-forcing the feedback as hard as brute-forcing the
1057 * hash.
1058 */
1059 __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
1060 spin_unlock_irqrestore(&r->lock, flags);
1061
1062 /*
1063 * To avoid duplicates, we atomically extract a portion of the
1064 * pool while mixing, and hash one final time.
1065 */
1066 sha_transform(hash.w, extract, workspace);
1067 memset(extract, 0, sizeof(extract));
1068 memset(workspace, 0, sizeof(workspace));
1069
1070 /*
1071 * In case the hash function has some recognizable output
1072 * pattern, we fold it in half. Thus, we always feed back
1073 * twice as much data as we output.
1074 */
1075 hash.w[0] ^= hash.w[3];
1076 hash.w[1] ^= hash.w[4];
1077 hash.w[2] ^= rol32(hash.w[2], 16);
1078
1079 memcpy(out, &hash, EXTRACT_SIZE);
1080 memset(&hash, 0, sizeof(hash));
1081}
1082
1083/*
1084 * This function extracts randomness from the "entropy pool", and
1085 * returns it in a buffer.
1086 *
1087 * The min parameter specifies the minimum amount we can pull before
1088 * failing to avoid races that defeat catastrophic reseeding while the
1089 * reserved parameter indicates how much entropy we must leave in the
1090 * pool after each pull to avoid starving other readers.
1091 */
1092static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1093 size_t nbytes, int min, int reserved)
1094{
1095 ssize_t ret = 0, i;
1096 __u8 tmp[EXTRACT_SIZE];
1097 unsigned long flags;
1098
1099 /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
1100 if (fips_enabled) {
1101 spin_lock_irqsave(&r->lock, flags);
1102 if (!r->last_data_init) {
1103 r->last_data_init = 1;
1104 spin_unlock_irqrestore(&r->lock, flags);
1105 trace_extract_entropy(r->name, EXTRACT_SIZE,
1106 ENTROPY_BITS(r), _RET_IP_);
1107 xfer_secondary_pool(r, EXTRACT_SIZE);
1108 extract_buf(r, tmp);
1109 spin_lock_irqsave(&r->lock, flags);
1110 memcpy(r->last_data, tmp, EXTRACT_SIZE);
1111 }
1112 spin_unlock_irqrestore(&r->lock, flags);
1113 }
1114
1115 trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
1116 xfer_secondary_pool(r, nbytes);
1117 nbytes = account(r, nbytes, min, reserved);
1118
1119 while (nbytes) {
1120 extract_buf(r, tmp);
1121
1122 if (fips_enabled) {
1123 spin_lock_irqsave(&r->lock, flags);
1124 if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
1125 panic("Hardware RNG duplicated output!\n");
1126 memcpy(r->last_data, tmp, EXTRACT_SIZE);
1127 spin_unlock_irqrestore(&r->lock, flags);
1128 }
1129 i = min_t(int, nbytes, EXTRACT_SIZE);
1130 memcpy(buf, tmp, i);
1131 nbytes -= i;
1132 buf += i;
1133 ret += i;
1134 }
1135
1136 /* Wipe data just returned from memory */
1137 memset(tmp, 0, sizeof(tmp));
1138
1139 return ret;
1140}
1141
1142/*
1143 * This function extracts randomness from the "entropy pool", and
1144 * returns it in a userspace buffer.
1145 */
1146static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1147 size_t nbytes)
1148{
1149 ssize_t ret = 0, i;
1150 __u8 tmp[EXTRACT_SIZE];
1151
1152 trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
1153 xfer_secondary_pool(r, nbytes);
1154 nbytes = account(r, nbytes, 0, 0);
1155
1156 while (nbytes) {
1157 if (need_resched()) {
1158 if (signal_pending(current)) {
1159 if (ret == 0)
1160 ret = -ERESTARTSYS;
1161 break;
1162 }
1163 schedule();
1164 }
1165
1166 extract_buf(r, tmp);
1167 i = min_t(int, nbytes, EXTRACT_SIZE);
1168 if (copy_to_user(buf, tmp, i)) {
1169 ret = -EFAULT;
1170 break;
1171 }
1172
1173 nbytes -= i;
1174 buf += i;
1175 ret += i;
1176 }
1177
1178 /* Wipe data just returned from memory */
1179 memset(tmp, 0, sizeof(tmp));
1180
1181 return ret;
1182}
1183
1184/*
1185 * This function is the exported kernel interface. It returns some
1186 * number of good random numbers, suitable for key generation, seeding
1187 * TCP sequence numbers, etc. It does not rely on the hardware random
1188 * number generator. For random bytes direct from the hardware RNG
1189 * (when available), use get_random_bytes_arch().
1190 */
1191void get_random_bytes(void *buf, int nbytes)
1192{
1193#if DEBUG_RANDOM_BOOT > 0
1194 if (unlikely(nonblocking_pool.initialized == 0))
1195 printk(KERN_NOTICE "random: %pF get_random_bytes called "
1196 "with %d bits of entropy available\n",
1197 (void *) _RET_IP_,
1198 nonblocking_pool.entropy_total);
1199#endif
1200 trace_get_random_bytes(nbytes, _RET_IP_);
1201 extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
1202}
1203EXPORT_SYMBOL(get_random_bytes);
1204
1205/*
1206 * This function will use the architecture-specific hardware random
1207 * number generator if it is available. The arch-specific hw RNG will
1208 * almost certainly be faster than what we can do in software, but it
1209 * is impossible to verify that it is implemented securely (as
1210 * opposed, to, say, the AES encryption of a sequence number using a
1211 * key known by the NSA). So it's useful if we need the speed, but
1212 * only if we're willing to trust the hardware manufacturer not to
1213 * have put in a back door.
1214 */
1215void get_random_bytes_arch(void *buf, int nbytes)
1216{
1217 char *p = buf;
1218
1219 trace_get_random_bytes_arch(nbytes, _RET_IP_);
1220 while (nbytes) {
1221 unsigned long v;
1222 int chunk = min(nbytes, (int)sizeof(unsigned long));
1223
1224 if (!arch_get_random_long(&v))
1225 break;
1226
1227 memcpy(p, &v, chunk);
1228 p += chunk;
1229 nbytes -= chunk;
1230 }
1231
1232 if (nbytes)
1233 extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
1234}
1235EXPORT_SYMBOL(get_random_bytes_arch);
1236
1237
1238/*
1239 * init_std_data - initialize pool with system data
1240 *
1241 * @r: pool to initialize
1242 *
1243 * This function clears the pool's entropy count and mixes some system
1244 * data into the pool to prepare it for use. The pool is not cleared
1245 * as that can only decrease the entropy in the pool.
1246 */
1247static void init_std_data(struct entropy_store *r)
1248{
1249 int i;
1250 ktime_t now = ktime_get_real();
1251 unsigned long rv;
1252
1253 r->last_pulled = jiffies;
1254 mix_pool_bytes(r, &now, sizeof(now), NULL);
1255 for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
1256 if (!arch_get_random_seed_long(&rv) &&
1257 !arch_get_random_long(&rv))
1258 rv = random_get_entropy();
1259 mix_pool_bytes(r, &rv, sizeof(rv), NULL);
1260 }
1261 mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
1262}
1263
1264/*
1265 * Note that setup_arch() may call add_device_randomness()
1266 * long before we get here. This allows seeding of the pools
1267 * with some platform dependent data very early in the boot
1268 * process. But it limits our options here. We must use
1269 * statically allocated structures that already have all
1270 * initializations complete at compile time. We should also
1271 * take care not to overwrite the precious per platform data
1272 * we were given.
1273 */
1274static int rand_initialize(void)
1275{
1276 init_std_data(&input_pool);
1277 init_std_data(&blocking_pool);
1278 init_std_data(&nonblocking_pool);
1279 return 0;
1280}
1281early_initcall(rand_initialize);
1282
1283#ifdef CONFIG_BLOCK
1284void rand_initialize_disk(struct gendisk *disk)
1285{
1286 struct timer_rand_state *state;
1287
1288 /*
1289 * If kzalloc returns null, we just won't use that entropy
1290 * source.
1291 */
1292 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1293 if (state) {
1294 state->last_time = INITIAL_JIFFIES;
1295 disk->random = state;
1296 }
1297}
1298#endif
1299
1300/*
1301 * Attempt an emergency refill using arch_get_random_seed_long().
1302 *
1303 * As with add_interrupt_randomness() be paranoid and only
1304 * credit the output as 50% entropic.
1305 */
1306static int arch_random_refill(void)
1307{
1308 const unsigned int nlongs = 64; /* Arbitrary number */
1309 unsigned int n = 0;
1310 unsigned int i;
1311 unsigned long buf[nlongs];
1312
1313 if (!arch_has_random_seed())
1314 return 0;
1315
1316 for (i = 0; i < nlongs; i++) {
1317 if (arch_get_random_seed_long(&buf[n]))
1318 n++;
1319 }
1320
1321 if (n) {
1322 unsigned int rand_bytes = n * sizeof(unsigned long);
1323
1324 mix_pool_bytes(&input_pool, buf, rand_bytes, NULL);
1325 credit_entropy_bits(&input_pool, rand_bytes*4);
1326 }
1327
1328 return n;
1329}
1330
1331static ssize_t
1332random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1333{
1334 ssize_t n;
1335
1336 if (nbytes == 0)
1337 return 0;
1338
1339 nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
1340 while (1) {
1341 n = extract_entropy_user(&blocking_pool, buf, nbytes);
1342 if (n < 0)
1343 return n;
1344 trace_random_read(n*8, (nbytes-n)*8,
1345 ENTROPY_BITS(&blocking_pool),
1346 ENTROPY_BITS(&input_pool));
1347 if (n > 0)
1348 return n;
1349
1350 /* Pool is (near) empty. Maybe wait and retry. */
1351
1352 /* First try an emergency refill */
1353 if (arch_random_refill())
1354 continue;
1355
1356 if (file->f_flags & O_NONBLOCK)
1357 return -EAGAIN;
1358
1359 wait_event_interruptible(random_read_wait,
1360 ENTROPY_BITS(&input_pool) >=
1361 random_read_wakeup_bits);
1362 if (signal_pending(current))
1363 return -ERESTARTSYS;
1364 }
1365}
1366
1367static ssize_t
1368urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1369{
1370 int ret;
1371
1372 if (unlikely(nonblocking_pool.initialized == 0))
1373 printk_once(KERN_NOTICE "random: %s urandom read "
1374 "with %d bits of entropy available\n",
1375 current->comm, nonblocking_pool.entropy_total);
1376
1377 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
1378
1379 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
1380 ENTROPY_BITS(&input_pool));
1381 return ret;
1382}
1383
1384static unsigned int
1385random_poll(struct file *file, poll_table * wait)
1386{
1387 unsigned int mask;
1388
1389 poll_wait(file, &random_read_wait, wait);
1390 poll_wait(file, &random_write_wait, wait);
1391 mask = 0;
1392 if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
1393 mask |= POLLIN | POLLRDNORM;
1394 if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
1395 mask |= POLLOUT | POLLWRNORM;
1396 return mask;
1397}
1398
1399static int
1400write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1401{
1402 size_t bytes;
1403 __u32 buf[16];
1404 const char __user *p = buffer;
1405
1406 while (count > 0) {
1407 bytes = min(count, sizeof(buf));
1408 if (copy_from_user(&buf, p, bytes))
1409 return -EFAULT;
1410
1411 count -= bytes;
1412 p += bytes;
1413
1414 mix_pool_bytes(r, buf, bytes, NULL);
1415 cond_resched();
1416 }
1417
1418 return 0;
1419}
1420
1421static ssize_t random_write(struct file *file, const char __user *buffer,
1422 size_t count, loff_t *ppos)
1423{
1424 size_t ret;
1425
1426 ret = write_pool(&blocking_pool, buffer, count);
1427 if (ret)
1428 return ret;
1429 ret = write_pool(&nonblocking_pool, buffer, count);
1430 if (ret)
1431 return ret;
1432
1433 return (ssize_t)count;
1434}
1435
1436static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1437{
1438 int size, ent_count;
1439 int __user *p = (int __user *)arg;
1440 int retval;
1441
1442 switch (cmd) {
1443 case RNDGETENTCNT:
1444 /* inherently racy, no point locking */
1445 ent_count = ENTROPY_BITS(&input_pool);
1446 if (put_user(ent_count, p))
1447 return -EFAULT;
1448 return 0;
1449 case RNDADDTOENTCNT:
1450 if (!capable(CAP_SYS_ADMIN))
1451 return -EPERM;
1452 if (get_user(ent_count, p))
1453 return -EFAULT;
1454 credit_entropy_bits_safe(&input_pool, ent_count);
1455 return 0;
1456 case RNDADDENTROPY:
1457 if (!capable(CAP_SYS_ADMIN))
1458 return -EPERM;
1459 if (get_user(ent_count, p++))
1460 return -EFAULT;
1461 if (ent_count < 0)
1462 return -EINVAL;
1463 if (get_user(size, p++))
1464 return -EFAULT;
1465 retval = write_pool(&input_pool, (const char __user *)p,
1466 size);
1467 if (retval < 0)
1468 return retval;
1469 credit_entropy_bits_safe(&input_pool, ent_count);
1470 return 0;
1471 case RNDZAPENTCNT:
1472 case RNDCLEARPOOL:
1473 /*
1474 * Clear the entropy pool counters. We no longer clear
1475 * the entropy pool, as that's silly.
1476 */
1477 if (!capable(CAP_SYS_ADMIN))
1478 return -EPERM;
1479 input_pool.entropy_count = 0;
1480 nonblocking_pool.entropy_count = 0;
1481 blocking_pool.entropy_count = 0;
1482 return 0;
1483 default:
1484 return -EINVAL;
1485 }
1486}
1487
1488static int random_fasync(int fd, struct file *filp, int on)
1489{
1490 return fasync_helper(fd, filp, on, &fasync);
1491}
1492
1493const struct file_operations random_fops = {
1494 .read = random_read,
1495 .write = random_write,
1496 .poll = random_poll,
1497 .unlocked_ioctl = random_ioctl,
1498 .fasync = random_fasync,
1499 .llseek = noop_llseek,
1500};
1501
1502const struct file_operations urandom_fops = {
1503 .read = urandom_read,
1504 .write = random_write,
1505 .unlocked_ioctl = random_ioctl,
1506 .fasync = random_fasync,
1507 .llseek = noop_llseek,
1508};
1509
1510/***************************************************************
1511 * Random UUID interface
1512 *
1513 * Used here for a Boot ID, but can be useful for other kernel
1514 * drivers.
1515 ***************************************************************/
1516
1517/*
1518 * Generate random UUID
1519 */
1520void generate_random_uuid(unsigned char uuid_out[16])
1521{
1522 get_random_bytes(uuid_out, 16);
1523 /* Set UUID version to 4 --- truly random generation */
1524 uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
1525 /* Set the UUID variant to DCE */
1526 uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
1527}
1528EXPORT_SYMBOL(generate_random_uuid);
1529
1530/********************************************************************
1531 *
1532 * Sysctl interface
1533 *
1534 ********************************************************************/
1535
1536#ifdef CONFIG_SYSCTL
1537
1538#include <linux/sysctl.h>
1539
1540static int min_read_thresh = 8, min_write_thresh;
1541static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
1542static int max_write_thresh = INPUT_POOL_WORDS * 32;
1543static char sysctl_bootid[16];
1544
1545/*
1546 * This function is used to return both the bootid UUID, and random
1547 * UUID. The difference is in whether table->data is NULL; if it is,
1548 * then a new UUID is generated and returned to the user.
1549 *
1550 * If the user accesses this via the proc interface, the UUID will be
1551 * returned as an ASCII string in the standard UUID format; if via the
1552 * sysctl system call, as 16 bytes of binary data.
1553 */
1554static int proc_do_uuid(struct ctl_table *table, int write,
1555 void __user *buffer, size_t *lenp, loff_t *ppos)
1556{
1557 struct ctl_table fake_table;
1558 unsigned char buf[64], tmp_uuid[16], *uuid;
1559
1560 uuid = table->data;
1561 if (!uuid) {
1562 uuid = tmp_uuid;
1563 generate_random_uuid(uuid);
1564 } else {
1565 static DEFINE_SPINLOCK(bootid_spinlock);
1566
1567 spin_lock(&bootid_spinlock);
1568 if (!uuid[8])
1569 generate_random_uuid(uuid);
1570 spin_unlock(&bootid_spinlock);
1571 }
1572
1573 sprintf(buf, "%pU", uuid);
1574
1575 fake_table.data = buf;
1576 fake_table.maxlen = sizeof(buf);
1577
1578 return proc_dostring(&fake_table, write, buffer, lenp, ppos);
1579}
1580
1581/*
1582 * Return entropy available scaled to integral bits
1583 */
1584static int proc_do_entropy(ctl_table *table, int write,
1585 void __user *buffer, size_t *lenp, loff_t *ppos)
1586{
1587 ctl_table fake_table;
1588 int entropy_count;
1589
1590 entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
1591
1592 fake_table.data = &entropy_count;
1593 fake_table.maxlen = sizeof(entropy_count);
1594
1595 return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
1596}
1597
1598static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
1599extern struct ctl_table random_table[];
1600struct ctl_table random_table[] = {
1601 {
1602 .procname = "poolsize",
1603 .data = &sysctl_poolsize,
1604 .maxlen = sizeof(int),
1605 .mode = 0444,
1606 .proc_handler = proc_dointvec,
1607 },
1608 {
1609 .procname = "entropy_avail",
1610 .maxlen = sizeof(int),
1611 .mode = 0444,
1612 .proc_handler = proc_do_entropy,
1613 .data = &input_pool.entropy_count,
1614 },
1615 {
1616 .procname = "read_wakeup_threshold",
1617 .data = &random_read_wakeup_bits,
1618 .maxlen = sizeof(int),
1619 .mode = 0644,
1620 .proc_handler = proc_dointvec_minmax,
1621 .extra1 = &min_read_thresh,
1622 .extra2 = &max_read_thresh,
1623 },
1624 {
1625 .procname = "write_wakeup_threshold",
1626 .data = &random_write_wakeup_bits,
1627 .maxlen = sizeof(int),
1628 .mode = 0644,
1629 .proc_handler = proc_dointvec_minmax,
1630 .extra1 = &min_write_thresh,
1631 .extra2 = &max_write_thresh,
1632 },
1633 {
1634 .procname = "urandom_min_reseed_secs",
1635 .data = &random_min_urandom_seed,
1636 .maxlen = sizeof(int),
1637 .mode = 0644,
1638 .proc_handler = proc_dointvec,
1639 },
1640 {
1641 .procname = "boot_id",
1642 .data = &sysctl_bootid,
1643 .maxlen = 16,
1644 .mode = 0444,
1645 .proc_handler = proc_do_uuid,
1646 },
1647 {
1648 .procname = "uuid",
1649 .maxlen = 16,
1650 .mode = 0444,
1651 .proc_handler = proc_do_uuid,
1652 },
1653 { }
1654};
1655#endif /* CONFIG_SYSCTL */
1656
1657static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
1658
1659int random_int_secret_init(void)
1660{
1661 get_random_bytes(random_int_secret, sizeof(random_int_secret));
1662 return 0;
1663}
1664
1665/*
1666 * Get a random word for internal kernel use only. Similar to urandom but
1667 * with the goal of minimal entropy pool depletion. As a result, the random
1668 * value is not cryptographically secure but for several uses the cost of
1669 * depleting entropy is too high
1670 */
1671static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
1672unsigned int get_random_int(void)
1673{
1674 __u32 *hash;
1675 unsigned int ret;
1676
1677 if (arch_get_random_int(&ret))
1678 return ret;
1679
1680 hash = get_cpu_var(get_random_int_hash);
1681
1682 hash[0] += current->pid + jiffies + random_get_entropy();
1683 md5_transform(hash, random_int_secret);
1684 ret = hash[0];
1685 put_cpu_var(get_random_int_hash);
1686
1687 return ret;
1688}
1689EXPORT_SYMBOL(get_random_int);
1690
1691/*
1692 * randomize_range() returns a start address such that
1693 *
1694 * [...... <range> .....]
1695 * start end
1696 *
1697 * a <range> with size "len" starting at the return value is inside in the
1698 * area defined by [start, end], but is otherwise randomized.
1699 */
1700unsigned long
1701randomize_range(unsigned long start, unsigned long end, unsigned long len)
1702{
1703 unsigned long range = end - len - start;
1704
1705 if (end <= start + len)
1706 return 0;
1707 return PAGE_ALIGN(get_random_int() % range + start);
1708}