1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
5 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
6 *
7 * This driver produces cryptographically secure pseudorandom data. It is divided
8 * into roughly six sections, each with a section header:
9 *
10 * - Initialization and readiness waiting.
11 * - Fast key erasure RNG, the "crng".
12 * - Entropy accumulation and extraction routines.
13 * - Entropy collection routines.
14 * - Userspace reader/writer interfaces.
15 * - Sysctl interface.
16 *
17 * The high level overview is that there is one input pool, into which
18 * various pieces of data are hashed. Prior to initialization, some of that
19 * data is then "credited" as having a certain number of bits of entropy.
20 * When enough bits of entropy are available, the hash is finalized and
21 * handed as a key to a stream cipher that expands it indefinitely for
22 * various consumers. This key is periodically refreshed as the various
23 * entropy collectors, described below, add data to the input pool.
24 */
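
/*
 * A condensed, purely illustrative sketch of the flow described above
 * (hypothetical helper names, not this file's internals):
 *
 *	blake2s_update(&pool, event, len);	// mix_pool_bytes()
 *	credit += bits;				// credit_init_bits()
 *	if (credit >= POOL_READY_BITS) {
 *		blake2s_final(&pool, key);	// extract_entropy()
 *		chacha20_expand(key, out, n);	// crng output, key erased after
 *	}
 */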
25
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28 #include <linux/utsname.h>
29 #include <linux/module.h>
30 #include <linux/kernel.h>
31 #include <linux/major.h>
32 #include <linux/string.h>
33 #include <linux/fcntl.h>
34 #include <linux/slab.h>
35 #include <linux/random.h>
36 #include <linux/poll.h>
37 #include <linux/init.h>
38 #include <linux/fs.h>
39 #include <linux/blkdev.h>
40 #include <linux/interrupt.h>
41 #include <linux/mm.h>
42 #include <linux/nodemask.h>
43 #include <linux/spinlock.h>
44 #include <linux/kthread.h>
45 #include <linux/percpu.h>
46 #include <linux/ptrace.h>
47 #include <linux/workqueue.h>
48 #include <linux/irq.h>
49 #include <linux/ratelimit.h>
50 #include <linux/syscalls.h>
51 #include <linux/completion.h>
52 #include <linux/uuid.h>
53 #include <linux/uaccess.h>
54 #include <linux/suspend.h>
55 #include <linux/siphash.h>
56 #include <linux/sched/isolation.h>
57 #include <crypto/chacha.h>
58 #include <crypto/blake2s.h>
59 #include <asm/archrandom.h>
60 #include <asm/processor.h>
61 #include <asm/irq.h>
62 #include <asm/irq_regs.h>
63 #include <asm/io.h>
64
65 /*********************************************************************
66 *
67 * Initialization and readiness waiting.
68 *
69 * Much of the RNG infrastructure is devoted to various dependencies
70 * being able to wait until the RNG has collected enough entropy and
71 * is ready for safe consumption.
72 *
73 *********************************************************************/
74
75 /*
76 * crng_init is protected by base_crng->lock, and only increases
77 * its value (from empty->early->ready).
78 */
79 static enum {
80 CRNG_EMPTY = 0, /* Little to no entropy collected */
81 CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
82 CRNG_READY = 2 /* Fully initialized with POOL_READY_BITS collected */
83 } crng_init __read_mostly = CRNG_EMPTY;
84 static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
85 #define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)
86 /* Various types of waiters for crng_init->CRNG_READY transition. */
87 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
88 static struct fasync_struct *fasync;
89 static ATOMIC_NOTIFIER_HEAD(random_ready_notifier);
90
91 /* Control how we warn userspace. */
92 static struct ratelimit_state urandom_warning =
93 RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
94 static int ratelimit_disable __read_mostly =
95 IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
96 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
97 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
98
99 /*
100 * Returns whether or not the input pool has been seeded and thus guaranteed
101 * to supply cryptographically secure random numbers. This applies to: the
102 * /dev/urandom device, the get_random_bytes function, and the get_random_{u8,
103 * u16,u32,u64,long} family of functions.
104 *
105 * Returns: true if the input pool has been seeded.
106 * false if the input pool has not been seeded.
107 */
108 bool rng_is_initialized(void)
109 {
110 return crng_ready();
111 }
112 EXPORT_SYMBOL(rng_is_initialized);
113
114 static void __cold crng_set_ready(struct work_struct *work)
115 {
116 static_branch_enable(&crng_is_ready);
117 }
118
119 /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
120 static void try_to_generate_entropy(void);
121
122 /*
123 * Wait for the input pool to be seeded and thus guaranteed to supply
124 * cryptographically secure random numbers. This applies to: the /dev/urandom
125 * device, the get_random_bytes function, and the get_random_{u8,u16,u32,u64,
126 * long} family of functions. Using any of these functions without first
127 * calling this function forfeits the guarantee of security.
128 *
129 * Returns: 0 if the input pool has been seeded.
130 * -ERESTARTSYS if the function was interrupted by a signal.
131 */
132 int wait_for_random_bytes(void)
133 {
134 while (!crng_ready()) {
135 int ret;
136
137 try_to_generate_entropy();
138 ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
139 if (ret)
140 return ret > 0 ? 0 : ret;
141 }
142 return 0;
143 }
144 EXPORT_SYMBOL(wait_for_random_bytes);
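
/*
 * A minimal sketch of the intended calling pattern for in-kernel consumers;
 * the function below is a hypothetical example, not part of this file.
 */
#if 0 /* example only */
static int example_generate_key(u8 *key, size_t len)
{
	int ret = wait_for_random_bytes();

	if (ret)
		return ret; /* -ERESTARTSYS: interrupted by a signal */
	get_random_bytes(key, len);
	return 0;
}
#endif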
145
146 /*
147 * Add a callback function that will be invoked when the crng is initialised,
148 * or immediately if it already has been. Only use this if you are absolutely
149 * sure it is required. Most users should instead be able to test
150 * `rng_is_initialized()` on demand, or make use of `get_random_bytes_wait()`.
151 */
152 int __cold execute_with_initialized_rng(struct notifier_block *nb)
153 {
154 unsigned long flags;
155 int ret = 0;
156
157 spin_lock_irqsave(&random_ready_notifier.lock, flags);
158 if (crng_ready())
159 nb->notifier_call(nb, 0, NULL);
160 else
161 ret = raw_notifier_chain_register((struct raw_notifier_head *)&random_ready_notifier.head, nb);
162 spin_unlock_irqrestore(&random_ready_notifier.lock, flags);
163 return ret;
164 }
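
/*
 * A hedged sketch of using the notifier interface above; the callback and
 * struct names are hypothetical. The callback runs immediately if the crng
 * is already ready, otherwise once it becomes so.
 */
#if 0 /* example only */
static int example_rng_ready_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	pr_info("example: RNG is now seeded\n");
	return 0;
}
static struct notifier_block example_rng_nb = { .notifier_call = example_rng_ready_cb };
/* During init: execute_with_initialized_rng(&example_rng_nb); */
#endif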
165
166 #define warn_unseeded_randomness() \
167 if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
168 printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
169 __func__, (void *)_RET_IP_, crng_init)
170
171
172 /*********************************************************************
173 *
174 * Fast key erasure RNG, the "crng".
175 *
176 * These functions expand entropy from the entropy extractor into
177 * long streams for external consumption using the "fast key erasure"
178 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
179 *
180 * There are a few exported interfaces for use by other drivers:
181 *
182 * void get_random_bytes(void *buf, size_t len)
183 * u8 get_random_u8()
184 * u16 get_random_u16()
185 * u32 get_random_u32()
186 * u32 get_random_u32_below(u32 ceil)
187 * u32 get_random_u32_above(u32 floor)
188 * u32 get_random_u32_inclusive(u32 floor, u32 ceil)
189 * u64 get_random_u64()
190 * unsigned long get_random_long()
191 *
192 * These interfaces will return the requested number of random bytes
193 * into the given buffer or as a return value. This is equivalent to
194 * a read from /dev/urandom. The u8, u16, u32, u64, long family of
195 * functions may be higher performance for one-off random integers,
196 * because they do a bit of buffering and do not invoke reseeding
197 * until the buffer is emptied.
198 *
199 *********************************************************************/
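
/*
 * A short sketch of typical one-off draws using the interfaces listed above
 * (hypothetical call sites):
 */
#if 0 /* example only */
	u32 die    = get_random_u32_inclusive(1, 6);	/* uniform in [1, 6] */
	u32 index  = get_random_u32_below(nr_slots);	/* uniform in [0, nr_slots) */
	u64 cookie = get_random_u64();			/* full 64 random bits */
#endif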
200
201 enum {
202 CRNG_RESEED_START_INTERVAL = HZ,
203 CRNG_RESEED_INTERVAL = 60 * HZ
204 };
205
206 static struct {
207 u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
208 unsigned long generation;
209 spinlock_t lock;
210 } base_crng = {
211 .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
212 };
213
214 struct crng {
215 u8 key[CHACHA_KEY_SIZE];
216 unsigned long generation;
217 local_lock_t lock;
218 };
219
220 static DEFINE_PER_CPU(struct crng, crngs) = {
221 .generation = ULONG_MAX,
222 .lock = INIT_LOCAL_LOCK(crngs.lock),
223 };
224
225 /*
226 * Return the interval until the next reseeding, which is normally
227 * CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
228 * proportional to the uptime.
229 */
230 static unsigned int crng_reseed_interval(void)
231 {
232 static bool early_boot = true;
233
234 if (unlikely(READ_ONCE(early_boot))) {
235 time64_t uptime = ktime_get_seconds();
236 if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
237 WRITE_ONCE(early_boot, false);
238 else
239 return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
240 (unsigned int)uptime / 2 * HZ);
241 }
242 return CRNG_RESEED_INTERVAL;
243 }
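
/*
 * Worked example of the early-boot scaling above: at 10 seconds of uptime
 * the interval is max(CRNG_RESEED_START_INTERVAL, 10 / 2 * HZ) == 5 * HZ,
 * i.e. 5 seconds; once uptime reaches 2 * 60 == 120 seconds, early_boot
 * flips off and the interval settles at CRNG_RESEED_INTERVAL (60 seconds).
 */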
244
245 /* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
246 static void extract_entropy(void *buf, size_t len);
247
248 /* This extracts a new crng key from the input pool. */
249 static void crng_reseed(struct work_struct *work)
250 {
251 static DECLARE_DELAYED_WORK(next_reseed, crng_reseed);
252 unsigned long flags;
253 unsigned long next_gen;
254 u8 key[CHACHA_KEY_SIZE];
255
256 /* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
257 if (likely(system_unbound_wq))
258 queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
259
260 extract_entropy(key, sizeof(key));
261
262 /*
263 * We copy the new key into the base_crng, overwriting the old one,
264 * and update the generation counter. We avoid hitting ULONG_MAX,
265 * because the per-cpu crngs are initialized to ULONG_MAX, so this
266 * forces new CPUs that come online to always initialize.
267 */
268 spin_lock_irqsave(&base_crng.lock, flags);
269 memcpy(base_crng.key, key, sizeof(base_crng.key));
270 next_gen = base_crng.generation + 1;
271 if (next_gen == ULONG_MAX)
272 ++next_gen;
273 WRITE_ONCE(base_crng.generation, next_gen);
274 if (!static_branch_likely(&crng_is_ready))
275 crng_init = CRNG_READY;
276 spin_unlock_irqrestore(&base_crng.lock, flags);
277 memzero_explicit(key, sizeof(key));
278 }
279
280 /*
281 * This generates a ChaCha block using the provided key, and then
282 * immediately overwrites that key with half the block. It returns
283 * the resultant ChaCha state to the user, along with the second
284 * half of the block containing 32 bytes of random data that may
285 * be used; random_data_len may not be greater than 32.
286 *
287 * The returned ChaCha state contains within it a copy of the old
288 * key value, at index 4, so the state should always be zeroed out
289 * immediately after using in order to maintain forward secrecy.
290 * If the state cannot be erased in a timely manner, then it is
291 * safer to set the random_data parameter to &chacha_state[4] so
292 * that this function overwrites it before returning.
293 */
294 static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
295 u32 chacha_state[CHACHA_STATE_WORDS],
296 u8 *random_data, size_t random_data_len)
297 {
298 u8 first_block[CHACHA_BLOCK_SIZE];
299
300 BUG_ON(random_data_len > 32);
301
302 chacha_init_consts(chacha_state);
303 memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
304 memset(&chacha_state[12], 0, sizeof(u32) * 4);
305 chacha20_block(chacha_state, first_block);
306
307 memcpy(key, first_block, CHACHA_KEY_SIZE);
308 memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
309 memzero_explicit(first_block, sizeof(first_block));
310 }
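
/*
 * Concretely: the 64-byte ChaCha block is split as block[0..31] -> new key
 * (overwriting the old one in place) and block[32..63] -> the caller's
 * random_data, so a later compromise of the stored key reveals nothing
 * about bytes that were already handed out.
 */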
311
312 /*
313 * This function returns a ChaCha state that you may use for generating
314 * random data. It also returns up to 32 bytes on its own of random data
315 * that may be used; random_data_len may not be greater than 32.
316 */
317 static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
318 u8 *random_data, size_t random_data_len)
319 {
320 unsigned long flags;
321 struct crng *crng;
322
323 BUG_ON(random_data_len > 32);
324
325 /*
326 * For the fast path, we check whether we're ready, unlocked first, and
327 * then re-check once locked later. In the case where we're really not
328 * ready, we do fast key erasure with the base_crng directly, extracting
329 * when crng_init is CRNG_EMPTY.
330 */
331 if (!crng_ready()) {
332 bool ready;
333
334 spin_lock_irqsave(&base_crng.lock, flags);
335 ready = crng_ready();
336 if (!ready) {
337 if (crng_init == CRNG_EMPTY)
338 extract_entropy(base_crng.key, sizeof(base_crng.key));
339 crng_fast_key_erasure(base_crng.key, chacha_state,
340 random_data, random_data_len);
341 }
342 spin_unlock_irqrestore(&base_crng.lock, flags);
343 if (!ready)
344 return;
345 }
346
347 local_lock_irqsave(&crngs.lock, flags);
348 crng = raw_cpu_ptr(&crngs);
349
350 /*
351 * If our per-cpu crng is older than the base_crng, then it means
352 * somebody reseeded the base_crng. In that case, we do fast key
353 * erasure on the base_crng, and use its output as the new key
354 * for our per-cpu crng. This brings us up to date with base_crng.
355 */
356 if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
357 spin_lock(&base_crng.lock);
358 crng_fast_key_erasure(base_crng.key, chacha_state,
359 crng->key, sizeof(crng->key));
360 crng->generation = base_crng.generation;
361 spin_unlock(&base_crng.lock);
362 }
363
364 /*
365 * Finally, when we've made it this far, our per-cpu crng has an up
366 * to date key, and we can do fast key erasure with it to produce
367 * some random data and a ChaCha state for the caller. All other
368 * branches of this function are "unlikely", so most of the time we
369 * should wind up here immediately.
370 */
371 crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
372 local_unlock_irqrestore(&crngs.lock, flags);
373 }
374
375 static void _get_random_bytes(void *buf, size_t len)
376 {
377 u32 chacha_state[CHACHA_STATE_WORDS];
378 u8 tmp[CHACHA_BLOCK_SIZE];
379 size_t first_block_len;
380
381 if (!len)
382 return;
383
384 first_block_len = min_t(size_t, 32, len);
385 crng_make_state(chacha_state, buf, first_block_len);
386 len -= first_block_len;
387 buf += first_block_len;
388
389 while (len) {
390 if (len < CHACHA_BLOCK_SIZE) {
391 chacha20_block(chacha_state, tmp);
392 memcpy(buf, tmp, len);
393 memzero_explicit(tmp, sizeof(tmp));
394 break;
395 }
396
397 chacha20_block(chacha_state, buf);
398 if (unlikely(chacha_state[12] == 0))
399 ++chacha_state[13];
400 len -= CHACHA_BLOCK_SIZE;
401 buf += CHACHA_BLOCK_SIZE;
402 }
403
404 memzero_explicit(chacha_state, sizeof(chacha_state));
405 }
406
407 /*
408 * This returns random bytes in arbitrary quantities. The quality of the
409 * random bytes is as good as /dev/urandom's. In order to ensure that the
410 * randomness provided by this function is okay, the function
411 * wait_for_random_bytes() should be called and return 0 at least once
412 * at any point prior.
413 */
414 void get_random_bytes(void *buf, size_t len)
415 {
416 warn_unseeded_randomness();
417 _get_random_bytes(buf, len);
418 }
419 EXPORT_SYMBOL(get_random_bytes);
420
421 static ssize_t get_random_bytes_user(struct iov_iter *iter)
422 {
423 u32 chacha_state[CHACHA_STATE_WORDS];
424 u8 block[CHACHA_BLOCK_SIZE];
425 size_t ret = 0, copied;
426
427 if (unlikely(!iov_iter_count(iter)))
428 return 0;
429
430 /*
431 * Immediately overwrite the ChaCha key at index 4 with random
432 * bytes, in case userspace causes copy_to_iter() below to sleep
433 * forever, so that we still retain forward secrecy in that case.
434 */
435 crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
436 /*
437 * However, if we're doing a read of len <= 32, we don't need to
438 * use chacha_state after, so we can simply return those bytes to
439 * the user directly.
440 */
441 if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
442 ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
443 goto out_zero_chacha;
444 }
445
446 for (;;) {
447 chacha20_block(chacha_state, block);
448 if (unlikely(chacha_state[12] == 0))
449 ++chacha_state[13];
450
451 copied = copy_to_iter(block, sizeof(block), iter);
452 ret += copied;
453 if (!iov_iter_count(iter) || copied != sizeof(block))
454 break;
455
456 BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
457 if (ret % PAGE_SIZE == 0) {
458 if (signal_pending(current))
459 break;
460 cond_resched();
461 }
462 }
463
464 memzero_explicit(block, sizeof(block));
465 out_zero_chacha:
466 memzero_explicit(chacha_state, sizeof(chacha_state));
467 return ret ? ret : -EFAULT;
468 }
469
470 /*
471 * Batched entropy returns random integers. The quality of the random
472 * number is as good as /dev/urandom's. In order to ensure that the randomness
473 * provided by this function is okay, the function wait_for_random_bytes()
474 * should be called and return 0 at least once at any point prior.
475 */
476
477 #define DEFINE_BATCHED_ENTROPY(type) \
478 struct batch_ ##type { \
479 /* \
480 * We make this 1.5x a ChaCha block, so that we get the \
481 * remaining 32 bytes from fast key erasure, plus one full \
482 * block from the detached ChaCha state. We can increase \
483 * the size of this later if needed so long as we keep the \
484 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. \
485 */ \
486 type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
487 local_lock_t lock; \
488 unsigned long generation; \
489 unsigned int position; \
490 }; \
491 \
492 static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
493 .lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock), \
494 .position = UINT_MAX \
495 }; \
496 \
497 type get_random_ ##type(void) \
498 { \
499 type ret; \
500 unsigned long flags; \
501 struct batch_ ##type *batch; \
502 unsigned long next_gen; \
503 \
504 warn_unseeded_randomness(); \
505 \
506 if (!crng_ready()) { \
507 _get_random_bytes(&ret, sizeof(ret)); \
508 return ret; \
509 } \
510 \
511 local_lock_irqsave(&batched_entropy_ ##type.lock, flags); \
512 batch = raw_cpu_ptr(&batched_entropy_##type); \
513 \
514 next_gen = READ_ONCE(base_crng.generation); \
515 if (batch->position >= ARRAY_SIZE(batch->entropy) || \
516 next_gen != batch->generation) { \
517 _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
518 batch->position = 0; \
519 batch->generation = next_gen; \
520 } \
521 \
522 ret = batch->entropy[batch->position]; \
523 batch->entropy[batch->position] = 0; \
524 ++batch->position; \
525 local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags); \
526 return ret; \
527 } \
528 EXPORT_SYMBOL(get_random_ ##type);
529
530 DEFINE_BATCHED_ENTROPY(u8)
531 DEFINE_BATCHED_ENTROPY(u16)
532 DEFINE_BATCHED_ENTROPY(u32)
533 DEFINE_BATCHED_ENTROPY(u64)
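
/*
 * Sizing check for the batches above: with CHACHA_BLOCK_SIZE == 64, each
 * batch holds 64 * 3 / 2 == 96 bytes -- 96 u8s, 48 u16s, 24 u32s, or
 * 12 u64s -- i.e. the 32 bytes left over from fast key erasure plus one
 * full detached ChaCha block, matching the (blocks + 0.5) formula above.
 */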
534
535 u32 __get_random_u32_below(u32 ceil)
536 {
537 /*
538 * This is the slow path for variable ceil. It is still fast, most of
539 * the time, by doing traditional reciprocal multiplication and
540 * opportunistically comparing the lower half to ceil itself, before
541 * falling back to computing a larger bound, and then rejecting samples
542 * whose lower half would indicate a range indivisible by ceil. The use
543 * of `-ceil % ceil` is analogous to `2^32 % ceil`, but is computable
544 * in 32-bits.
545 */
546 u32 rand = get_random_u32();
547 u64 mult;
548
549 /*
550 * This function is technically undefined for ceil == 0, and in fact
551 * for the non-underscored constant version in the header, we build bug
552 * on that. But for the non-constant case, it's convenient to have that
553 * evaluate to being a straight call to get_random_u32(), so that
554 * get_random_u32_inclusive() can work over its whole range without
555 * undefined behavior.
556 */
557 if (unlikely(!ceil))
558 return rand;
559
560 mult = (u64)ceil * rand;
561 if (unlikely((u32)mult < ceil)) {
562 u32 bound = -ceil % ceil;
563 while (unlikely((u32)mult < bound))
564 mult = (u64)ceil * get_random_u32();
565 }
566 return mult >> 32;
567 }
568 EXPORT_SYMBOL(__get_random_u32_below);
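
/*
 * Worked example of the rejection bound above: for ceil == 3, the value
 * -ceil % ceil evaluates to (2^32 - 3) % 3 == 2^32 % 3 == 1, so only
 * samples with (u32)mult == 0 are rejected. The remaining 2^32 - 1 samples
 * divide evenly among the 3 outcomes, making mult >> 32 uniform in [0, 3).
 */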
569
570 #ifdef CONFIG_SMP
571 /*
572 * This function is called when the CPU is coming up, with entry
573 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
574 */
575 int __cold random_prepare_cpu(unsigned int cpu)
576 {
577 /*
578 * When the cpu comes back online, immediately invalidate both
579 * the per-cpu crng and all batches, so that we serve fresh
580 * randomness.
581 */
582 per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
583 per_cpu_ptr(&batched_entropy_u8, cpu)->position = UINT_MAX;
584 per_cpu_ptr(&batched_entropy_u16, cpu)->position = UINT_MAX;
585 per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
586 per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
587 return 0;
588 }
589 #endif
590
591
592 /**********************************************************************
593 *
594 * Entropy accumulation and extraction routines.
595 *
596 * Callers may add entropy via:
597 *
598 * static void mix_pool_bytes(const void *buf, size_t len)
599 *
600 * After which, if added entropy should be credited:
601 *
602 * static void credit_init_bits(size_t bits)
603 *
604 * Finally, extract entropy via:
605 *
606 * static void extract_entropy(void *buf, size_t len)
607 *
608 **********************************************************************/
609
610 enum {
611 POOL_BITS = BLAKE2S_HASH_SIZE * 8,
612 POOL_READY_BITS = POOL_BITS, /* When crng_init->CRNG_READY */
613 POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init->CRNG_EARLY */
614 };
615
616 static struct {
617 struct blake2s_state hash;
618 spinlock_t lock;
619 unsigned int init_bits;
620 } input_pool = {
621 .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
622 BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
623 BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
624 .hash.outlen = BLAKE2S_HASH_SIZE,
625 .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
626 };
627
628 static void _mix_pool_bytes(const void *buf, size_t len)
629 {
630 blake2s_update(&input_pool.hash, buf, len);
631 }
632
633 /*
634 * This function adds bytes into the input pool. It does not
635 * update the initialization bit counter; the caller should call
636 * credit_init_bits if this is appropriate.
637 */
638 static void mix_pool_bytes(const void *buf, size_t len)
639 {
640 unsigned long flags;
641
642 spin_lock_irqsave(&input_pool.lock, flags);
643 _mix_pool_bytes(buf, len);
644 spin_unlock_irqrestore(&input_pool.lock, flags);
645 }
646
647 /*
648 * This is an HKDF-like construction for using the hashed collected entropy
649 * as a PRF key, that's then expanded block-by-block.
650 */
651 static void extract_entropy(void *buf, size_t len)
652 {
653 unsigned long flags;
654 u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
655 struct {
656 unsigned long rdseed[32 / sizeof(long)];
657 size_t counter;
658 } block;
659 size_t i, longs;
660
661 for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
662 longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
663 if (longs) {
664 i += longs;
665 continue;
666 }
667 longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
668 if (longs) {
669 i += longs;
670 continue;
671 }
672 block.rdseed[i++] = random_get_entropy();
673 }
674
675 spin_lock_irqsave(&input_pool.lock, flags);
676
677 /* seed = HASHPRF(last_key, entropy_input) */
678 blake2s_final(&input_pool.hash, seed);
679
680 /* next_key = HASHPRF(seed, RDSEED || 0) */
681 block.counter = 0;
682 blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
683 blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
684
685 spin_unlock_irqrestore(&input_pool.lock, flags);
686 memzero_explicit(next_key, sizeof(next_key));
687
688 while (len) {
689 i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
690 /* output = HASHPRF(seed, RDSEED || ++counter) */
691 ++block.counter;
692 blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
693 len -= i;
694 buf += i;
695 }
696
697 memzero_explicit(seed, sizeof(seed));
698 memzero_explicit(&block, sizeof(block));
699 }
700
701 #define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
702
703 static void __cold _credit_init_bits(size_t bits)
704 {
705 static DECLARE_WORK(set_ready, crng_set_ready);
706 unsigned int new, orig, add;
707 unsigned long flags;
708
709 if (!bits)
710 return;
711
712 add = min_t(size_t, bits, POOL_BITS);
713
714 orig = READ_ONCE(input_pool.init_bits);
715 do {
716 new = min_t(unsigned int, POOL_BITS, orig + add);
717 } while (!try_cmpxchg(&input_pool.init_bits, &orig, new));
718
719 if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
720 crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
721 if (static_key_initialized && system_unbound_wq)
722 queue_work(system_unbound_wq, &set_ready);
723 atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
724 wake_up_interruptible(&crng_init_wait);
725 kill_fasync(&fasync, SIGIO, POLL_IN);
726 pr_notice("crng init done\n");
727 if (urandom_warning.missed)
728 pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
729 urandom_warning.missed);
730 } else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
731 spin_lock_irqsave(&base_crng.lock, flags);
732 /* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
733 if (crng_init == CRNG_EMPTY) {
734 extract_entropy(base_crng.key, sizeof(base_crng.key));
735 crng_init = CRNG_EARLY;
736 }
737 spin_unlock_irqrestore(&base_crng.lock, flags);
738 }
739 }
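
/*
 * Worked example of the thresholds above: POOL_BITS == 256 (BLAKE2s's
 * 32-byte digest), so POOL_READY_BITS == 256 and POOL_EARLY_BITS == 128.
 * A pool holding 100 credited bits that receives 64 more crosses 128 and
 * takes the CRNG_EARLY branch; a later credit that crosses 256 reseeds
 * the crng and transitions it to CRNG_READY.
 */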
740
741
742 /**********************************************************************
743 *
744 * Entropy collection routines.
745 *
746 * The following exported functions are used for pushing entropy into
747 * the above entropy accumulation routines:
748 *
749 * void add_device_randomness(const void *buf, size_t len);
750 * void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after);
751 * void add_bootloader_randomness(const void *buf, size_t len);
752 * void add_vmfork_randomness(const void *unique_vm_id, size_t len);
753 * void add_interrupt_randomness(int irq);
754 * void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
755 * void add_disk_randomness(struct gendisk *disk);
756 *
757 * add_device_randomness() adds data to the input pool that
758 * is likely to differ between two devices (or possibly even per boot).
759 * This would be things like MAC addresses or serial numbers, or the
760 * read-out of the RTC. This does *not* credit any actual entropy to
761 * the pool, but it initializes the pool to different values for devices
762 * that might otherwise be identical and have very little entropy
763 * available to them (particularly common in the embedded world).
764 *
765 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
766 * entropy as specified by the caller. If the entropy pool is full it will
767 * block until more entropy is needed.
768 *
769 * add_bootloader_randomness() is called by bootloader drivers, such as EFI
770 * and device tree, and credits its input depending on whether or not the
771 * command line option 'random.trust_bootloader' is set.
772 *
773 * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
774 * representing the current instance of a VM to the pool, without crediting,
775 * and then force-reseeds the crng so that it takes effect immediately.
776 *
777 * add_interrupt_randomness() uses the interrupt timing as random
778 * inputs to the entropy pool. Using the cycle counters and the irq source
779 * as inputs, it feeds the input pool roughly once a second or after 64
780 * interrupts, crediting 1 bit of entropy for whichever comes first.
781 *
782 * add_input_randomness() uses the input layer interrupt timing, as well
783 * as the event type information from the hardware.
784 *
785 * add_disk_randomness() uses what amounts to the seek time of block
786 * layer request events, on a per-disk_devt basis, as input to the
787 * entropy pool. Note that high-speed solid state drives with very low
788 * seek times do not make for good sources of entropy, as their seek
789 * times are usually fairly consistent.
790 *
791 * The last two routines try to estimate how many bits of entropy
792 * to credit. They do this by keeping track of the first and second
793 * order deltas of the event timings.
794 *
795 **********************************************************************/
796
797 static bool trust_cpu __initdata = true;
798 static bool trust_bootloader __initdata = true;
799 static int __init parse_trust_cpu(char *arg)
800 {
801 return kstrtobool(arg, &trust_cpu);
802 }
803 static int __init parse_trust_bootloader(char *arg)
804 {
805 return kstrtobool(arg, &trust_bootloader);
806 }
807 early_param("random.trust_cpu", parse_trust_cpu);
808 early_param("random.trust_bootloader", parse_trust_bootloader);
809
810 static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
811 {
812 unsigned long flags, entropy = random_get_entropy();
813
814 /*
815 * Encode a representation of how long the system has been suspended,
816 * in a way that is distinct from prior system suspends.
817 */
818 ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };
819
820 spin_lock_irqsave(&input_pool.lock, flags);
821 _mix_pool_bytes(&action, sizeof(action));
822 _mix_pool_bytes(stamps, sizeof(stamps));
823 _mix_pool_bytes(&entropy, sizeof(entropy));
824 spin_unlock_irqrestore(&input_pool.lock, flags);
825
826 if (crng_ready() && (action == PM_RESTORE_PREPARE ||
827 (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
828 !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
829 crng_reseed(NULL);
830 pr_notice("crng reseeded on system resumption\n");
831 }
832 return 0;
833 }
834
835 static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
836
837 /*
838 * This is called extremely early, before time keeping functionality is
839 * available, but arch randomness is. Interrupts are not yet enabled.
840 */
841 void __init random_init_early(const char *command_line)
842 {
843 unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
844 size_t i, longs, arch_bits;
845
846 #if defined(LATENT_ENTROPY_PLUGIN)
847 static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
848 _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
849 #endif
850
851 for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
852 longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
853 if (longs) {
854 _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
855 i += longs;
856 continue;
857 }
858 longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
859 if (longs) {
860 _mix_pool_bytes(entropy, sizeof(*entropy) * longs);
861 i += longs;
862 continue;
863 }
864 arch_bits -= sizeof(*entropy) * 8;
865 ++i;
866 }
867
868 _mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
869 _mix_pool_bytes(command_line, strlen(command_line));
870
871 /* Reseed if already seeded by earlier phases. */
872 if (crng_ready())
873 crng_reseed(NULL);
874 else if (trust_cpu)
875 _credit_init_bits(arch_bits);
876 }
877
878 /*
879 * This is called a little bit after the prior function, and now there is
880 * access to timestamp counters. Interrupts are not yet enabled.
881 */
882 void __init random_init(void)
883 {
884 unsigned long entropy = random_get_entropy();
885 ktime_t now = ktime_get_real();
886
887 _mix_pool_bytes(&now, sizeof(now));
888 _mix_pool_bytes(&entropy, sizeof(entropy));
889 add_latent_entropy();
890
891 /*
892 * If we were initialized by the cpu or bootloader before jump labels
893 * or workqueues are initialized, then we should enable the static
894 * branch here, where it's guaranteed that these have been initialized.
895 */
896 if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
897 crng_set_ready(NULL);
898
899 /* Reseed if already seeded by earlier phases. */
900 if (crng_ready())
901 crng_reseed(NULL);
902
903 WARN_ON(register_pm_notifier(&pm_notifier));
904
905 WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
906 "entropy collection will consequently suffer.");
907 }
908
909 /*
910 * Add device- or boot-specific data to the input pool to help
911 * initialize it.
912 *
913 * None of this adds any entropy; it is meant to avoid the problem of
914 * the entropy pool having similar initial state across largely
915 * identical devices.
916 */
917 void add_device_randomness(const void *buf, size_t len)
918 {
919 unsigned long entropy = random_get_entropy();
920 unsigned long flags;
921
922 spin_lock_irqsave(&input_pool.lock, flags);
923 _mix_pool_bytes(&entropy, sizeof(entropy));
924 _mix_pool_bytes(buf, len);
925 spin_unlock_irqrestore(&input_pool.lock, flags);
926 }
927 EXPORT_SYMBOL(add_device_randomness);
928
929 /*
930 * Interface for in-kernel drivers of true hardware RNGs. Those devices
931 * may produce endless random bits, so this function will sleep for
932 * some amount of time afterwards, if the sleep_after parameter is true.
933 */
934 void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after)
935 {
936 mix_pool_bytes(buf, len);
937 credit_init_bits(entropy);
938
939 /*
940 * Throttle writing to once every reseed interval, unless we're not yet
941 * initialized or no entropy is credited.
942 */
943 if (sleep_after && !kthread_should_stop() && (crng_ready() || !entropy))
944 schedule_timeout_interruptible(crng_reseed_interval());
945 }
946 EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
947
948 /*
949 * Handle random seed passed by bootloader, and credit it depending
950 * on the command line option 'random.trust_bootloader'.
951 */
952 void __init add_bootloader_randomness(const void *buf, size_t len)
953 {
954 mix_pool_bytes(buf, len);
955 if (trust_bootloader)
956 credit_init_bits(len * 8);
957 }
958
959 #if IS_ENABLED(CONFIG_VMGENID)
960 static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
961
962 /*
963 * Handle a new VM ID, which is unique but not secret, so we
964 * don't credit it, but we do immediately force a reseed after so
965 * that it's used by the crng posthaste.
966 */
967 void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
968 {
969 add_device_randomness(unique_vm_id, len);
970 if (crng_ready()) {
971 crng_reseed(NULL);
972 pr_notice("crng reseeded due to virtual machine fork\n");
973 }
974 blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
975 }
976 #if IS_MODULE(CONFIG_VMGENID)
977 EXPORT_SYMBOL_GPL(add_vmfork_randomness);
978 #endif
979
980 int __cold register_random_vmfork_notifier(struct notifier_block *nb)
981 {
982 return blocking_notifier_chain_register(&vmfork_chain, nb);
983 }
984 EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
985
986 int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
987 {
988 return blocking_notifier_chain_unregister(&vmfork_chain, nb);
989 }
990 EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
991 #endif
992
993 struct fast_pool {
994 unsigned long pool[4];
995 unsigned long last;
996 unsigned int count;
997 struct timer_list mix;
998 };
999
1000 static void mix_interrupt_randomness(struct timer_list *work);
1001
1002 static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
1003 #ifdef CONFIG_64BIT
1004 #define FASTMIX_PERM SIPHASH_PERMUTATION
1005 .pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
1006 #else
1007 #define FASTMIX_PERM HSIPHASH_PERMUTATION
1008 .pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
1009 #endif
1010 .mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
1011 };
1012
1013 /*
1014 * This is [Half]SipHash-1-x, starting from an empty key. Because
1015 * the key is fixed, it assumes that its inputs are non-malicious,
1016 * and therefore this has no security on its own. s represents the
1017 * four-word SipHash state, while v represents a two-word input.
1018 */
1019 static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
1020 {
1021 s[3] ^= v1;
1022 FASTMIX_PERM(s[0], s[1], s[2], s[3]);
1023 s[0] ^= v1;
1024 s[3] ^= v2;
1025 FASTMIX_PERM(s[0], s[1], s[2], s[3]);
1026 s[0] ^= v2;
1027 }
1028
1029 #ifdef CONFIG_SMP
1030 /*
1031 * This function is called when the CPU has just come online, with
1032 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
1033 */
1034 int __cold random_online_cpu(unsigned int cpu)
1035 {
1036 /*
1037 * During CPU shutdown and before CPU onlining, add_interrupt_
1038 * randomness() may schedule mix_interrupt_randomness(), and
1039 * set the MIX_INFLIGHT flag. However, because the worker can
1040 * be scheduled on a different CPU during this period, that
1041 * flag will never be cleared. For that reason, we zero out
1042 * the flag here, which runs just after workqueues are onlined
1043 * for the CPU again. This also has the effect of setting the
1044 * irq randomness count to zero so that new accumulated irqs
1045 * are fresh.
1046 */
1047 per_cpu_ptr(&irq_randomness, cpu)->count = 0;
1048 return 0;
1049 }
1050 #endif
1051
1052 static void mix_interrupt_randomness(struct timer_list *work)
1053 {
1054 struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
1055 /*
1056 * The size of the copied stack pool is explicitly 2 longs so that we
1057 * only ever ingest half of the siphash output each time, retaining
1058 * the other half as the next "key" that carries over. The entropy is
1059 * supposed to be sufficiently dispersed between bits so on average
1060 * we don't wind up "losing" some.
1061 */
1062 unsigned long pool[2];
1063 unsigned int count;
1064
1065 /* Check to see if we're running on the wrong CPU due to hotplug. */
1066 local_irq_disable();
1067 if (fast_pool != this_cpu_ptr(&irq_randomness)) {
1068 local_irq_enable();
1069 return;
1070 }
1071
1072 /*
1073 * Copy the pool to the stack so that the mixer always has a
1074 * consistent view, before we reenable irqs again.
1075 */
1076 memcpy(pool, fast_pool->pool, sizeof(pool));
1077 count = fast_pool->count;
1078 fast_pool->count = 0;
1079 fast_pool->last = jiffies;
1080 local_irq_enable();
1081
1082 mix_pool_bytes(pool, sizeof(pool));
1083 credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));
1084
1085 memzero_explicit(pool, sizeof(pool));
1086 }
1087
1088 void add_interrupt_randomness(int irq)
1089 {
1090 enum { MIX_INFLIGHT = 1U << 31 };
1091 unsigned long entropy = random_get_entropy();
1092 struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
1093 struct pt_regs *regs = get_irq_regs();
1094 unsigned int new_count;
1095
1096 fast_mix(fast_pool->pool, entropy,
1097 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
1098 new_count = ++fast_pool->count;
1099
1100 if (new_count & MIX_INFLIGHT)
1101 return;
1102
1103 if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
1104 return;
1105
1106 fast_pool->count |= MIX_INFLIGHT;
1107 if (!timer_pending(&fast_pool->mix)) {
1108 fast_pool->mix.expires = jiffies;
1109 add_timer_on(&fast_pool->mix, raw_smp_processor_id());
1110 }
1111 }
1112 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1113
1114 /* There is one of these per entropy source */
1115 struct timer_rand_state {
1116 unsigned long last_time;
1117 long last_delta, last_delta2;
1118 };
1119
1120 /*
1121 * This function adds entropy to the entropy "pool" by using timing
1122 * delays. It uses the timer_rand_state structure to make an estimate
1123 * of how many bits of entropy this call has added to the pool. The
1124 * value "num" is also added to the pool; it should somehow describe
1125 * the type of event that just happened.
1126 */
1127 static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
1128 {
1129 unsigned long entropy = random_get_entropy(), now = jiffies, flags;
1130 long delta, delta2, delta3;
1131 unsigned int bits;
1132
1133 /*
1134 * If we're in a hard IRQ, add_interrupt_randomness() will be called
1135 * sometime after, so mix into the fast pool.
1136 */
1137 if (in_hardirq()) {
1138 fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
1139 } else {
1140 spin_lock_irqsave(&input_pool.lock, flags);
1141 _mix_pool_bytes(&entropy, sizeof(entropy));
1142 _mix_pool_bytes(&num, sizeof(num));
1143 spin_unlock_irqrestore(&input_pool.lock, flags);
1144 }
1145
1146 if (crng_ready())
1147 return;
1148
1149 /*
1150 * Calculate number of bits of randomness we probably added.
1151 * We take into account the first, second and third-order deltas
1152 * in order to make our estimate.
1153 */
1154 delta = now - READ_ONCE(state->last_time);
1155 WRITE_ONCE(state->last_time, now);
1156
1157 delta2 = delta - READ_ONCE(state->last_delta);
1158 WRITE_ONCE(state->last_delta, delta);
1159
1160 delta3 = delta2 - READ_ONCE(state->last_delta2);
1161 WRITE_ONCE(state->last_delta2, delta2);
1162
1163 if (delta < 0)
1164 delta = -delta;
1165 if (delta2 < 0)
1166 delta2 = -delta2;
1167 if (delta3 < 0)
1168 delta3 = -delta3;
1169 if (delta > delta2)
1170 delta = delta2;
1171 if (delta > delta3)
1172 delta = delta3;
1173
1174 /*
1175 * delta is now minimum absolute delta. Round down by 1 bit
1176 * on general principles, and limit entropy estimate to 11 bits.
1177 */
1178 bits = min(fls(delta >> 1), 11);
1179
1180 /*
1181 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
1182 * will run after this, which uses a different crediting scheme of 1 bit
1183 * per every 64 interrupts. In order to let that function do accounting
1184 * close to the one in this function, we credit a full 64/64 bit per bit,
1185 * and then subtract one to account for the extra one added.
1186 */
1187 if (in_hardirq())
1188 this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
1189 else
1190 _credit_init_bits(bits);
1191 }
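
/*
 * Worked example of the estimator above: for event times (in jiffies) of
 * 100, 113, 121, the latest delta is 8 and the second-order delta is
 * 8 - 13 == -5; with a previous second-order delta of 2, the third-order
 * delta is -5 - 2 == -7. The minimum absolute value is 5, and
 * fls(5 >> 1) == 2, so 2 bits are credited (the cap being 11).
 */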
1192
1193 void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
1194 {
1195 static unsigned char last_value;
1196 static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
1197
1198 /* Ignore autorepeat and the like. */
1199 if (value == last_value)
1200 return;
1201
1202 last_value = value;
1203 add_timer_randomness(&input_timer_state,
1204 (type << 4) ^ code ^ (code >> 4) ^ value);
1205 }
1206 EXPORT_SYMBOL_GPL(add_input_randomness);
1207
1208 #ifdef CONFIG_BLOCK
1209 void add_disk_randomness(struct gendisk *disk)
1210 {
1211 if (!disk || !disk->random)
1212 return;
1213 /* First major is 1, so we get >= 0x200 here. */
1214 add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1215 }
1216 EXPORT_SYMBOL_GPL(add_disk_randomness);
1217
1218 void __cold rand_initialize_disk(struct gendisk *disk)
1219 {
1220 struct timer_rand_state *state;
1221
1222 /*
1223 * If kzalloc returns null, we just won't use that entropy
1224 * source.
1225 */
1226 state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1227 if (state) {
1228 state->last_time = INITIAL_JIFFIES;
1229 disk->random = state;
1230 }
1231 }
1232 #endif
1233
1234 struct entropy_timer_state {
1235 unsigned long entropy;
1236 struct timer_list timer;
1237 atomic_t samples;
1238 unsigned int samples_per_bit;
1239 };
1240
1241 /*
1242 * Each time the timer fires, we expect that we got an unpredictable jump in
1243 * the cycle counter. Even if the timer is running on another CPU, the timer
1244 * activity will be touching the stack of the CPU that is generating entropy.
1245 *
1246 * Note that we don't re-arm the timer in the timer itself - we are happy to be
1247 * scheduled away, since that just makes the load more complex, but we do not
1248 * want the timer to keep ticking unless the entropy loop is running.
1249 *
1250 * So the re-arming always happens in the entropy loop itself.
1251 */
1252 static void __cold entropy_timer(struct timer_list *timer)
1253 {
1254 struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
1255 unsigned long entropy = random_get_entropy();
1256
1257 mix_pool_bytes(&entropy, sizeof(entropy));
1258 if (atomic_inc_return(&state->samples) % state->samples_per_bit == 0)
1259 credit_init_bits(1);
1260 }
1261
1262 /*
1263 * If we have an actual cycle counter, see if we can generate enough entropy
1264 * with timing noise.
1265 */
1266 static void __cold try_to_generate_entropy(void)
1267 {
1268 enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 15 };
1269 u8 stack_bytes[sizeof(struct entropy_timer_state) + SMP_CACHE_BYTES - 1];
1270 struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
1271 unsigned int i, num_different = 0;
1272 unsigned long last = random_get_entropy();
1273 int cpu = -1;
1274
1275 for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
1276 stack->entropy = random_get_entropy();
1277 if (stack->entropy != last)
1278 ++num_different;
1279 last = stack->entropy;
1280 }
1281 stack->samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
1282 if (stack->samples_per_bit > MAX_SAMPLES_PER_BIT)
1283 return;
1284
1285 atomic_set(&stack->samples, 0);
1286 timer_setup_on_stack(&stack->timer, entropy_timer, 0);
1287 while (!crng_ready() && !signal_pending(current)) {
1288 /*
1289 * Check !timer_pending() and then ensure that any previous callback has finished
1290 * executing by checking try_to_del_timer_sync(), before queueing the next one.
1291 */
1292 if (!timer_pending(&stack->timer) && try_to_del_timer_sync(&stack->timer) >= 0) {
1293 struct cpumask timer_cpus;
1294 unsigned int num_cpus;
1295
1296 /*
1297 * Preemption must be disabled here, both to read the current CPU number
1298 * and to avoid scheduling a timer on a dead CPU.
1299 */
1300 preempt_disable();
1301
1302 /* Only schedule callbacks on timer CPUs that are online. */
1303 cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
1304 num_cpus = cpumask_weight(&timer_cpus);
1305 /* In the very bizarre case of misconfiguration, fall back to all online. */
1306 if (unlikely(num_cpus == 0)) {
1307 timer_cpus = *cpu_online_mask;
1308 num_cpus = cpumask_weight(&timer_cpus);
1309 }
1310
1311 /* Basic CPU round-robin, which avoids the current CPU. */
1312 do {
1313 cpu = cpumask_next(cpu, &timer_cpus);
1314 if (cpu >= nr_cpu_ids)
1315 cpu = cpumask_first(&timer_cpus);
1316 } while (cpu == smp_processor_id() && num_cpus > 1);
1317
1318 /* Expiring the timer at `jiffies` means it's the next tick. */
1319 stack->timer.expires = jiffies;
1320
1321 add_timer_on(&stack->timer, cpu);
1322
1323 preempt_enable();
1324 }
1325 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
1326 schedule();
1327 stack->entropy = random_get_entropy();
1328 }
1329 mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
1330
1331 del_timer_sync(&stack->timer);
1332 destroy_timer_on_stack(&stack->timer);
1333 }
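
/*
 * Worked example of the calibration above: if 2047 of the 8191 trial reads
 * saw the cycle counter change, samples_per_bit is
 * DIV_ROUND_UP(8192, 2048) == 4, so one bit is credited per four timer
 * samples; a counter so coarse that samples_per_bit would exceed HZ / 15
 * causes the function to give up without arming the timer.
 */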
1334
1335
1336 /**********************************************************************
1337 *
1338 * Userspace reader/writer interfaces.
1339 *
1340 * getrandom(2) is the primary modern interface into the RNG and should
1341 * be used in preference to anything else.
1342 *
1343 * Reading from /dev/random has the same functionality as calling
1344 * getrandom(2) with flags=0. In earlier versions, however, it had
1345 * vastly different semantics and should therefore be avoided, to
1346 * prevent backwards compatibility issues.
1347 *
1348 * Reading from /dev/urandom has the same functionality as calling
1349 * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1350 * waiting for the RNG to be ready, it should not be used.
1351 *
1352 * Writing to either /dev/random or /dev/urandom adds entropy to
1353 * the input pool but does not credit it.
1354 *
1355 * Polling on /dev/random indicates when the RNG is initialized, on
1356 * the read side, and when it wants new entropy, on the write side.
1357 *
1358 * Both /dev/random and /dev/urandom have the same set of ioctls for
1359 * adding entropy, getting the entropy count, zeroing the count, and
1360 * reseeding the crng.
1361 *
1362 **********************************************************************/
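
/*
 * A minimal userspace sketch of the preferred interface described above;
 * the program below is a hypothetical example, not part of this file.
 */
#if 0 /* example only (userspace) */
#include <sys/random.h>

int generate_key(unsigned char *key, size_t len)
{
	/* With flags == 0, blocks until the kernel RNG is seeded, then never again. */
	ssize_t n = getrandom(key, len, 0);
	return n == (ssize_t)len ? 0 : -1;
}
#endif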
1363
1364 SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
1365 {
1366 struct iov_iter iter;
1367 struct iovec iov;
1368 int ret;
1369
1370 if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
1371 return -EINVAL;
1372
1373 /*
1374 * Requesting insecure and blocking randomness at the same time makes
1375 * no sense.
1376 */
1377 if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
1378 return -EINVAL;
1379
1380 if (!crng_ready() && !(flags & GRND_INSECURE)) {
1381 if (flags & GRND_NONBLOCK)
1382 return -EAGAIN;
1383 ret = wait_for_random_bytes();
1384 if (unlikely(ret))
1385 return ret;
1386 }
1387
1388 ret = import_single_range(ITER_DEST, ubuf, len, &iov, &iter);
1389 if (unlikely(ret))
1390 return ret;
1391 return get_random_bytes_user(&iter);
1392 }
1393
1394 static __poll_t random_poll(struct file *file, poll_table *wait)
1395 {
1396 poll_wait(file, &crng_init_wait, wait);
1397 return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
1398 }
1399
1400 static ssize_t write_pool_user(struct iov_iter *iter)
1401 {
1402 u8 block[BLAKE2S_BLOCK_SIZE];
1403 ssize_t ret = 0;
1404 size_t copied;
1405
1406 if (unlikely(!iov_iter_count(iter)))
1407 return 0;
1408
1409 for (;;) {
1410 copied = copy_from_iter(block, sizeof(block), iter);
1411 ret += copied;
1412 mix_pool_bytes(block, copied);
1413 if (!iov_iter_count(iter) || copied != sizeof(block))
1414 break;
1415
1416 BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
1417 if (ret % PAGE_SIZE == 0) {
1418 if (signal_pending(current))
1419 break;
1420 cond_resched();
1421 }
1422 }
1423
1424 memzero_explicit(block, sizeof(block));
1425 return ret ? ret : -EFAULT;
1426 }
1427
1428 static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
1429 {
1430 return write_pool_user(iter);
1431 }
1432
1433 static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1434 {
1435 static int maxwarn = 10;
1436
1437 /*
1438 * Opportunistically attempt to initialize the RNG on platforms that
1439 * have fast cycle counters, but don't (for now) require it to succeed.
1440 */
1441 if (!crng_ready())
1442 try_to_generate_entropy();
1443
1444 if (!crng_ready()) {
1445 if (!ratelimit_disable && maxwarn <= 0)
1446 ++urandom_warning.missed;
1447 else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
1448 --maxwarn;
1449 pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
1450 current->comm, iov_iter_count(iter));
1451 }
1452 }
1453
1454 return get_random_bytes_user(iter);
1455 }
1456
1457 static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
1458 {
1459 int ret;
1460
1461 if (!crng_ready() &&
1462 ((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
1463 (kiocb->ki_filp->f_flags & O_NONBLOCK)))
1464 return -EAGAIN;
1465
1466 ret = wait_for_random_bytes();
1467 if (ret != 0)
1468 return ret;
1469 return get_random_bytes_user(iter);
1470 }
1471
1472 static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1473 {
1474 int __user *p = (int __user *)arg;
1475 int ent_count;
1476
1477 switch (cmd) {
1478 case RNDGETENTCNT:
1479 /* Inherently racy, no point locking. */
1480 if (put_user(input_pool.init_bits, p))
1481 return -EFAULT;
1482 return 0;
1483 case RNDADDTOENTCNT:
1484 if (!capable(CAP_SYS_ADMIN))
1485 return -EPERM;
1486 if (get_user(ent_count, p))
1487 return -EFAULT;
1488 if (ent_count < 0)
1489 return -EINVAL;
1490 credit_init_bits(ent_count);
1491 return 0;
1492 case RNDADDENTROPY: {
1493 struct iov_iter iter;
1494 struct iovec iov;
1495 ssize_t ret;
1496 int len;
1497
1498 if (!capable(CAP_SYS_ADMIN))
1499 return -EPERM;
1500 if (get_user(ent_count, p++))
1501 return -EFAULT;
1502 if (ent_count < 0)
1503 return -EINVAL;
1504 if (get_user(len, p++))
1505 return -EFAULT;
1506 ret = import_single_range(ITER_SOURCE, p, len, &iov, &iter);
1507 if (unlikely(ret))
1508 return ret;
1509 ret = write_pool_user(&iter);
1510 if (unlikely(ret < 0))
1511 return ret;
1512 /* Since we're crediting, enforce that it was all written into the pool. */
1513 if (unlikely(ret != len))
1514 return -EFAULT;
1515 credit_init_bits(ent_count);
1516 return 0;
1517 }
1518 case RNDZAPENTCNT:
1519 case RNDCLEARPOOL:
1520 /* No longer has any effect. */
1521 if (!capable(CAP_SYS_ADMIN))
1522 return -EPERM;
1523 return 0;
1524 case RNDRESEEDCRNG:
1525 if (!capable(CAP_SYS_ADMIN))
1526 return -EPERM;
1527 if (!crng_ready())
1528 return -ENODATA;
1529 crng_reseed(NULL);
1530 return 0;
1531 default:
1532 return -EINVAL;
1533 }
1534 }
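
/*
 * A hedged userspace sketch of RNDADDENTROPY as handled above; the struct
 * wrapper and function name below are hypothetical examples. Requires
 * CAP_SYS_ADMIN.
 */
#if 0 /* example only (userspace) */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/random.h>

int seed_pool(const void *seed, int len)
{
	struct {
		struct rand_pool_info info;
		unsigned char buf[512];
	} req = { .info = { .entropy_count = len * 8, .buf_size = len } };
	int fd, ret;

	if (len <= 0 || len > (int)sizeof(req.buf))
		return -1;
	memcpy(req.info.buf, seed, len);
	fd = open("/dev/random", O_RDWR);
	if (fd < 0)
		return -1;
	ret = ioctl(fd, RNDADDENTROPY, &req); /* mixes len bytes, credits len * 8 bits */
	close(fd);
	return ret;
}
#endif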
1535
1536 static int random_fasync(int fd, struct file *filp, int on)
1537 {
1538 return fasync_helper(fd, filp, on, &fasync);
1539 }
1540
1541 const struct file_operations random_fops = {
1542 .read_iter = random_read_iter,
1543 .write_iter = random_write_iter,
1544 .poll = random_poll,
1545 .unlocked_ioctl = random_ioctl,
1546 .compat_ioctl = compat_ptr_ioctl,
1547 .fasync = random_fasync,
1548 .llseek = noop_llseek,
1549 .splice_read = copy_splice_read,
1550 .splice_write = iter_file_splice_write,
1551 };
1552
1553 const struct file_operations urandom_fops = {
1554 .read_iter = urandom_read_iter,
1555 .write_iter = random_write_iter,
1556 .unlocked_ioctl = random_ioctl,
1557 .compat_ioctl = compat_ptr_ioctl,
1558 .fasync = random_fasync,
1559 .llseek = noop_llseek,
1560 .splice_read = copy_splice_read,
1561 .splice_write = iter_file_splice_write,
1562 };
1563
1564
1565 /********************************************************************
1566 *
1567 * Sysctl interface.
1568 *
1569 * These are partly unused legacy knobs with dummy values to not break
1570 * userspace and partly still useful things. They are usually accessible
1571 * in /proc/sys/kernel/random/ and are as follows:
1572 *
1573 * - boot_id - a UUID representing the current boot.
1574 *
1575 * - uuid - a random UUID, different each time the file is read.
1576 *
1577 * - poolsize - the number of bits of entropy that the input pool can
1578 * hold, tied to the POOL_BITS constant.
1579 *
1580 * - entropy_avail - the number of bits of entropy currently in the
1581 * input pool. Always <= poolsize.
1582 *
1583 * - write_wakeup_threshold - the amount of entropy in the input pool
1584 * below which write polls to /dev/random will unblock, requesting
1585 * more entropy, tied to the POOL_READY_BITS constant. It is writable
1586 * to avoid breaking old userspaces, but writing to it does not
1587 * change any behavior of the RNG.
1588 *
1589 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
1590 * It is writable to avoid breaking old userspaces, but writing
1591 * to it does not change any behavior of the RNG.
1592 *
1593 ********************************************************************/
1594
1595 #ifdef CONFIG_SYSCTL
1596
1597 #include <linux/sysctl.h>
1598
1599 static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
1600 static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
1601 static int sysctl_poolsize = POOL_BITS;
1602 static u8 sysctl_bootid[UUID_SIZE];
1603
1604 /*
1605 * This function is used to return both the bootid UUID and a random
1606 * UUID. The difference is in whether table->data is NULL; if it is,
1607 * then a new UUID is generated and returned to the user.
1608 */
1609 static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
1610 size_t *lenp, loff_t *ppos)
1611 {
1612 u8 tmp_uuid[UUID_SIZE], *uuid;
1613 char uuid_string[UUID_STRING_LEN + 1];
1614 struct ctl_table fake_table = {
1615 .data = uuid_string,
1616 .maxlen = UUID_STRING_LEN
1617 };
1618
1619 if (write)
1620 return -EPERM;
1621
1622 uuid = table->data;
1623 if (!uuid) {
1624 uuid = tmp_uuid;
1625 generate_random_uuid(uuid);
1626 } else {
1627 static DEFINE_SPINLOCK(bootid_spinlock);
1628
1629 spin_lock(&bootid_spinlock);
1630 if (!uuid[8])
1631 generate_random_uuid(uuid);
1632 spin_unlock(&bootid_spinlock);
1633 }
1634
1635 snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
1636 return proc_dostring(&fake_table, 0, buf, lenp, ppos);
1637 }
1638
1639 /* The same as proc_dointvec, but writes don't change anything. */
1640 static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
1641 size_t *lenp, loff_t *ppos)
1642 {
1643 return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
1644 }
1645
1646 static struct ctl_table random_table[] = {
1647 {
1648 .procname = "poolsize",
1649 .data = &sysctl_poolsize,
1650 .maxlen = sizeof(int),
1651 .mode = 0444,
1652 .proc_handler = proc_dointvec,
1653 },
1654 {
1655 .procname = "entropy_avail",
1656 .data = &input_pool.init_bits,
1657 .maxlen = sizeof(int),
1658 .mode = 0444,
1659 .proc_handler = proc_dointvec,
1660 },
1661 {
1662 .procname = "write_wakeup_threshold",
1663 .data = &sysctl_random_write_wakeup_bits,
1664 .maxlen = sizeof(int),
1665 .mode = 0644,
1666 .proc_handler = proc_do_rointvec,
1667 },
1668 {
1669 .procname = "urandom_min_reseed_secs",
1670 .data = &sysctl_random_min_urandom_seed,
1671 .maxlen = sizeof(int),
1672 .mode = 0644,
1673 .proc_handler = proc_do_rointvec,
1674 },
1675 {
1676 .procname = "boot_id",
1677 .data = &sysctl_bootid,
1678 .mode = 0444,
1679 .proc_handler = proc_do_uuid,
1680 },
1681 {
1682 .procname = "uuid",
1683 .mode = 0444,
1684 .proc_handler = proc_do_uuid,
1685 },
1686 { }
1687 };
1688
1689 /*
1690 * random_init() is called before sysctl_init(),
1691 * so we cannot call register_sysctl_init() in random_init()
1692 */
1693 static int __init random_sysctls_init(void)
1694 {
1695 register_sysctl_init("kernel/random", random_table);
1696 return 0;
1697 }
1698 device_initcall(random_sysctls_init);
1699 #endif
1700