xref: /openbmc/linux/drivers/char/random.c (revision 1e70d57e)
1 // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2 /*
3  * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
5  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved.
6  *
7  * This driver produces cryptographically secure pseudorandom data. It is divided
8  * into roughly six sections, each with a section header:
9  *
10  *   - Initialization and readiness waiting.
11  *   - Fast key erasure RNG, the "crng".
12  *   - Entropy accumulation and extraction routines.
13  *   - Entropy collection routines.
14  *   - Userspace reader/writer interfaces.
15  *   - Sysctl interface.
16  *
17  * The high level overview is that there is one input pool, into which
18  * various pieces of data are hashed. Some of that data is then "credited" as
19  * having a certain number of bits of entropy. When enough bits of entropy are
20  * available, the hash is finalized and handed as a key to a stream cipher that
21  * expands it indefinitely for various consumers. This key is periodically
22  * refreshed as the various entropy collectors, described below, add data to the
23  * input pool and credit it. There is currently no Fortuna-like scheduler
24  * involved, which can lead to malicious entropy sources causing a premature
25  * reseed, and the entropy estimates are, at best, conservative guesses.
26  */
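
/*
 * Rough data flow, as described above:
 *
 *	entropy collectors --> input_pool (BLAKE2s)
 *	  --(reseed, once enough bits are credited)--> base_crng.key
 *	  --(fast key erasure)--> per-cpu crngs (ChaCha)
 *	  --> get_random_bytes(), /dev/urandom, getrandom(2), ...
 */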
27 
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29 
30 #include <linux/utsname.h>
31 #include <linux/module.h>
32 #include <linux/kernel.h>
33 #include <linux/major.h>
34 #include <linux/string.h>
35 #include <linux/fcntl.h>
36 #include <linux/slab.h>
37 #include <linux/random.h>
38 #include <linux/poll.h>
39 #include <linux/init.h>
40 #include <linux/fs.h>
41 #include <linux/blkdev.h>
42 #include <linux/interrupt.h>
43 #include <linux/mm.h>
44 #include <linux/nodemask.h>
45 #include <linux/spinlock.h>
46 #include <linux/kthread.h>
47 #include <linux/percpu.h>
48 #include <linux/ptrace.h>
49 #include <linux/workqueue.h>
50 #include <linux/irq.h>
51 #include <linux/ratelimit.h>
52 #include <linux/syscalls.h>
53 #include <linux/completion.h>
54 #include <linux/uuid.h>
55 #include <linux/uaccess.h>
56 #include <crypto/chacha.h>
57 #include <crypto/blake2s.h>
58 #include <asm/processor.h>
59 #include <asm/irq.h>
60 #include <asm/irq_regs.h>
61 #include <asm/io.h>
62 
63 /*********************************************************************
64  *
65  * Initialization and readiness waiting.
66  *
67  * Much of the RNG infrastructure is devoted to various dependencies
68  * being able to wait until the RNG has collected enough entropy and
69  * is ready for safe consumption.
70  *
71  *********************************************************************/
72 
73 /*
74  * crng_init =  0 --> Uninitialized
75  *		1 --> Initialized
76  *		2 --> Initialized from input_pool
77  *
78  * crng_init is protected by base_crng->lock, and only increases
79  * its value (from 0->1->2).
80  */
81 static int crng_init = 0;
82 #define crng_ready() (likely(crng_init > 1))
83 /* Various types of waiters for crng_init->2 transition. */
84 static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
85 static struct fasync_struct *fasync;
86 static DEFINE_SPINLOCK(random_ready_chain_lock);
87 static RAW_NOTIFIER_HEAD(random_ready_chain);
88 
89 /* Control how we warn userspace. */
90 static struct ratelimit_state unseeded_warning =
91 	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
92 static struct ratelimit_state urandom_warning =
93 	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
94 static int ratelimit_disable __read_mostly;
95 module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
96 MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
97 
98 /*
99  * Returns whether or not the input pool has been seeded and thus guaranteed
100  * to supply cryptographically secure random numbers. This applies to: the
101  * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
102  * u64,int,long} family of functions.
103  *
104  * Returns: true if the input pool has been seeded.
105  *          false if the input pool has not been seeded.
106  */
107 bool rng_is_initialized(void)
108 {
109 	return crng_ready();
110 }
111 EXPORT_SYMBOL(rng_is_initialized);
112 
113 /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
114 static void try_to_generate_entropy(void);
115 
116 /*
117  * Wait for the input pool to be seeded and thus guaranteed to supply
118  * cryptographically secure random numbers. This applies to: the /dev/urandom
119  * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
120  * family of functions. Using any of these functions without first calling
121  * this function forfeits the guarantee of security.
122  *
123  * Returns: 0 if the input pool has been seeded.
124  *          -ERESTARTSYS if the function was interrupted by a signal.
125  */
126 int wait_for_random_bytes(void)
127 {
128 	while (!crng_ready()) {
129 		int ret;
130 
131 		try_to_generate_entropy();
132 		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
133 		if (ret)
134 			return ret > 0 ? 0 : ret;
135 	}
136 	return 0;
137 }
138 EXPORT_SYMBOL(wait_for_random_bytes);
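
/*
 * Illustrative sketch, not used anywhere in this file: a consumer that
 * must not draw output before the pool is seeded first waits, then reads.
 * Everything here except wait_for_random_bytes() and get_random_bytes()
 * is hypothetical.
 */
static int __maybe_unused example_get_seeded_key(u8 key[32])
{
	int ret = wait_for_random_bytes();

	if (ret)
		return ret; /* -ERESTARTSYS: interrupted by a signal */
	get_random_bytes(key, 32); /* now guaranteed to be secure */
	return 0;
}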
139 
140 /*
141  * Add a callback function that will be invoked when the input
142  * pool is initialised.
143  *
144  * returns: 0 if callback is successfully added
145  *	    -EALREADY if pool is already initialised (callback not called)
146  */
147 int register_random_ready_notifier(struct notifier_block *nb)
148 {
149 	unsigned long flags;
150 	int ret = -EALREADY;
151 
152 	if (crng_ready())
153 		return ret;
154 
155 	spin_lock_irqsave(&random_ready_chain_lock, flags);
156 	if (!crng_ready())
157 		ret = raw_notifier_chain_register(&random_ready_chain, nb);
158 	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
159 	return ret;
160 }
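
/*
 * Illustrative sketch of the readiness notifier (hypothetical, not part
 * of this file): the callback runs once crng_init reaches 2, and a caller
 * that gets -EALREADY back from registration knows the pool is already
 * seeded and can simply proceed.
 */
static int __maybe_unused example_rng_ready_cb(struct notifier_block *nb,
					       unsigned long action, void *data)
{
	/* Called from process_random_ready_list() under the chain lock. */
	return NOTIFY_DONE;
}

/* Registered via register_random_ready_notifier(&example_rng_ready_nb). */
static struct notifier_block example_rng_ready_nb __maybe_unused = {
	.notifier_call = example_rng_ready_cb,
};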
161 
162 /*
163  * Delete a previously registered readiness callback function.
164  */
165 int unregister_random_ready_notifier(struct notifier_block *nb)
166 {
167 	unsigned long flags;
168 	int ret;
169 
170 	spin_lock_irqsave(&random_ready_chain_lock, flags);
171 	ret = raw_notifier_chain_unregister(&random_ready_chain, nb);
172 	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
173 	return ret;
174 }
175 
176 static void process_random_ready_list(void)
177 {
178 	unsigned long flags;
179 
180 	spin_lock_irqsave(&random_ready_chain_lock, flags);
181 	raw_notifier_call_chain(&random_ready_chain, 0, NULL);
182 	spin_unlock_irqrestore(&random_ready_chain_lock, flags);
183 }
184 
185 #define warn_unseeded_randomness(previous) \
186 	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
187 
188 static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
189 {
190 #ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
191 	const bool print_once = false;
192 #else
193 	static bool print_once __read_mostly;
194 #endif
195 
196 	if (print_once || crng_ready() ||
197 	    (previous && (caller == READ_ONCE(*previous))))
198 		return;
199 	WRITE_ONCE(*previous, caller);
200 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
201 	print_once = true;
202 #endif
203 	if (__ratelimit(&unseeded_warning))
204 		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
205 				func_name, caller, crng_init);
206 }
207 
208 
209 /*********************************************************************
210  *
211  * Fast key erasure RNG, the "crng".
212  *
213  * These functions expand entropy from the entropy extractor into
214  * long streams for external consumption using the "fast key erasure"
215  * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
216  *
217  * There are a few exported interfaces for use by other drivers:
218  *
219  *	void get_random_bytes(void *buf, size_t nbytes)
220  *	u32 get_random_u32()
221  *	u64 get_random_u64()
222  *	unsigned int get_random_int()
223  *	unsigned long get_random_long()
224  *
225  * These interfaces will return the requested number of random bytes
226  * into the given buffer or as a return value. This is equivalent to
227  * a read from /dev/urandom. The u32, u64, int, and long family of
228  * functions may be higher performance for one-off random integers,
229  * because they do a bit of buffering and do not invoke reseeding
230  * until the buffer is emptied.
231  *
232  *********************************************************************/
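
/*
 * Illustrative sketch (hypothetical function): for one-off integers the
 * batched get_random_u32()/get_random_u64() calls above are the cheap
 * path, while larger buffers should go through get_random_bytes().
 */
static void __maybe_unused example_interface_choice(void)
{
	u8 nonce[16];
	u32 tag = get_random_u32();	/* served from a per-cpu batch */

	get_random_bytes(nonce, sizeof(nonce)); /* fresh stream output */
	(void)tag;
	(void)nonce;
}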
233 
234 enum {
235 	CRNG_RESEED_INTERVAL = 300 * HZ,
236 	CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
237 };
238 
239 static struct {
240 	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
241 	unsigned long birth;
242 	unsigned long generation;
243 	spinlock_t lock;
244 } base_crng = {
245 	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
246 };
247 
248 struct crng {
249 	u8 key[CHACHA_KEY_SIZE];
250 	unsigned long generation;
251 	local_lock_t lock;
252 };
253 
254 static DEFINE_PER_CPU(struct crng, crngs) = {
255 	.generation = ULONG_MAX,
256 	.lock = INIT_LOCAL_LOCK(crngs.lock),
257 };
258 
259 /* Used by crng_reseed() to extract a new seed from the input pool. */
260 static bool drain_entropy(void *buf, size_t nbytes, bool force);
261 
262 /*
263  * This extracts a new crng key from the input pool, but only if there is a
264  * sufficient amount of entropy available or force is true, in order to
265  * mitigate bruteforcing of newly added bits.
266  */
267 static void crng_reseed(bool force)
268 {
269 	unsigned long flags;
270 	unsigned long next_gen;
271 	u8 key[CHACHA_KEY_SIZE];
272 	bool finalize_init = false;
273 
274 	/* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
275 	if (!drain_entropy(key, sizeof(key), force))
276 		return;
277 
278 	/*
279 	 * We copy the new key into the base_crng, overwriting the old one,
280 	 * and update the generation counter. We avoid hitting ULONG_MAX,
281 	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
282 	 * forces new CPUs that come online to always initialize.
283 	 */
284 	spin_lock_irqsave(&base_crng.lock, flags);
285 	memcpy(base_crng.key, key, sizeof(base_crng.key));
286 	next_gen = base_crng.generation + 1;
287 	if (next_gen == ULONG_MAX)
288 		++next_gen;
289 	WRITE_ONCE(base_crng.generation, next_gen);
290 	WRITE_ONCE(base_crng.birth, jiffies);
291 	if (!crng_ready()) {
292 		crng_init = 2;
293 		finalize_init = true;
294 	}
295 	spin_unlock_irqrestore(&base_crng.lock, flags);
296 	memzero_explicit(key, sizeof(key));
297 	if (finalize_init) {
298 		process_random_ready_list();
299 		wake_up_interruptible(&crng_init_wait);
300 		kill_fasync(&fasync, SIGIO, POLL_IN);
301 		pr_notice("crng init done\n");
302 		if (unseeded_warning.missed) {
303 			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
304 				  unseeded_warning.missed);
305 			unseeded_warning.missed = 0;
306 		}
307 		if (urandom_warning.missed) {
308 			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
309 				  urandom_warning.missed);
310 			urandom_warning.missed = 0;
311 		}
312 	}
313 }
314 
315 /*
316  * This generates a ChaCha block using the provided key, and then
317  * immediately overwrites that key with half the block. It returns
318  * the resultant ChaCha state to the user, along with the second
319  * half of the block containing 32 bytes of random data that may
320  * be used; random_data_len may not be greater than 32.
321  */
322 static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
323 				  u32 chacha_state[CHACHA_STATE_WORDS],
324 				  u8 *random_data, size_t random_data_len)
325 {
326 	u8 first_block[CHACHA_BLOCK_SIZE];
327 
328 	BUG_ON(random_data_len > 32);
329 
330 	chacha_init_consts(chacha_state);
331 	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
332 	memset(&chacha_state[12], 0, sizeof(u32) * 4);
333 	chacha20_block(chacha_state, first_block);
334 
335 	memcpy(key, first_block, CHACHA_KEY_SIZE);
336 	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
337 	memzero_explicit(first_block, sizeof(first_block));
338 }
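
/*
 * Block layout of the fast key erasure step above: one 64-byte ChaCha
 * block is generated, its first half immediately replaces the key that
 * produced it, and only the second half is handed out:
 *
 *	first_block[ 0..31] --> key (the old key is erased)
 *	first_block[32..63] --> random_data (up to random_data_len bytes)
 */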
339 
340 /*
341  * Return whether the crng seed is considered to be sufficiently
342  * old that a reseeding might be attempted. This happens if the last
343  * reseeding was more than CRNG_RESEED_INTERVAL ago or, during early boot, at
344  * an interval proportional to the uptime.
345  */
346 static bool crng_has_old_seed(void)
347 {
348 	static bool early_boot = true;
349 	unsigned long interval = CRNG_RESEED_INTERVAL;
350 
351 	if (unlikely(READ_ONCE(early_boot))) {
352 		time64_t uptime = ktime_get_seconds();
353 		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
354 			WRITE_ONCE(early_boot, false);
355 		else
356 			interval = max_t(unsigned int, 5 * HZ,
357 					 (unsigned int)uptime / 2 * HZ);
358 	}
359 	return time_after(jiffies, READ_ONCE(base_crng.birth) + interval);
360 }
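
/*
 * Worked example of the early boot ramp above: at 30 seconds of uptime,
 * interval = max(5 * HZ, 30 / 2 * HZ) = 15 seconds' worth of jiffies.
 * Once uptime reaches 2 * (CRNG_RESEED_INTERVAL / HZ) = 600 seconds, the
 * fixed five-minute interval applies from then on.
 */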
361 
362 /*
363  * This function returns a ChaCha state that you may use for generating
364  * random data. It also returns up to 32 bytes on its own of random data
365  * that may be used; random_data_len may not be greater than 32.
366  */
367 static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
368 			    u8 *random_data, size_t random_data_len)
369 {
370 	unsigned long flags;
371 	struct crng *crng;
372 
373 	BUG_ON(random_data_len > 32);
374 
375 	/*
376 	 * For the fast path, we check whether we're ready, unlocked first, and
377 	 * then re-check once locked later. In the case where we're really not
378 	 * ready, we do fast key erasure with the base_crng directly, because
379 	 * this is what crng_pre_init_inject() mutates during early init.
380 	 */
381 	if (!crng_ready()) {
382 		bool ready;
383 
384 		spin_lock_irqsave(&base_crng.lock, flags);
385 		ready = crng_ready();
386 		if (!ready)
387 			crng_fast_key_erasure(base_crng.key, chacha_state,
388 					      random_data, random_data_len);
389 		spin_unlock_irqrestore(&base_crng.lock, flags);
390 		if (!ready)
391 			return;
392 	}
393 
394 	/*
395 	 * If the base_crng is old enough, we try to reseed, which in turn
396 	 * bumps the generation counter that we check below.
397 	 */
398 	if (unlikely(crng_has_old_seed()))
399 		crng_reseed(false);
400 
401 	local_lock_irqsave(&crngs.lock, flags);
402 	crng = raw_cpu_ptr(&crngs);
403 
404 	/*
405 	 * If our per-cpu crng is older than the base_crng, then it means
406 	 * somebody reseeded the base_crng. In that case, we do fast key
407 	 * erasure on the base_crng, and use its output as the new key
408 	 * for our per-cpu crng. This brings us up to date with base_crng.
409 	 */
410 	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
411 		spin_lock(&base_crng.lock);
412 		crng_fast_key_erasure(base_crng.key, chacha_state,
413 				      crng->key, sizeof(crng->key));
414 		crng->generation = base_crng.generation;
415 		spin_unlock(&base_crng.lock);
416 	}
417 
418 	/*
419 	 * Finally, when we've made it this far, our per-cpu crng has an up
420 	 * to date key, and we can do fast key erasure with it to produce
421 	 * some random data and a ChaCha state for the caller. All other
422 	 * branches of this function are "unlikely", so most of the time we
423 	 * should wind up here immediately.
424 	 */
425 	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
426 	local_unlock_irqrestore(&crngs.lock, flags);
427 }
428 
429 /*
430  * This function is for crng_init == 0 only. It loads entropy directly
431  * into the crng's key, without going through the input pool. It is,
432  * generally speaking, not very safe, but we use this only at early
433  * boot time when it's better to have something there rather than
434  * nothing.
435  *
436  * If account is set, then the crng_init_cnt counter is incremented.
437  * This shouldn't be set by functions like add_device_randomness(),
438  * where we can't trust that the buffer passed in is unpredictable
439  * (so it might not have any entropy at all).
440  */
441 static void crng_pre_init_inject(const void *input, size_t len, bool account)
442 {
443 	static int crng_init_cnt = 0;
444 	struct blake2s_state hash;
445 	unsigned long flags;
446 
447 	blake2s_init(&hash, sizeof(base_crng.key));
448 
449 	spin_lock_irqsave(&base_crng.lock, flags);
450 	if (crng_init != 0) {
451 		spin_unlock_irqrestore(&base_crng.lock, flags);
452 		return;
453 	}
454 
455 	blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
456 	blake2s_update(&hash, input, len);
457 	blake2s_final(&hash, base_crng.key);
458 
459 	if (account) {
460 		crng_init_cnt += min_t(size_t, len, CRNG_INIT_CNT_THRESH - crng_init_cnt);
461 		if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
462 			++base_crng.generation;
463 			crng_init = 1;
464 		}
465 	}
466 
467 	spin_unlock_irqrestore(&base_crng.lock, flags);
468 
469 	if (crng_init == 1)
470 		pr_notice("fast init done\n");
471 }
472 
473 static void _get_random_bytes(void *buf, size_t nbytes)
474 {
475 	u32 chacha_state[CHACHA_STATE_WORDS];
476 	u8 tmp[CHACHA_BLOCK_SIZE];
477 	size_t len;
478 
479 	if (!nbytes)
480 		return;
481 
482 	len = min_t(size_t, 32, nbytes);
483 	crng_make_state(chacha_state, buf, len);
484 	nbytes -= len;
485 	buf += len;
486 
487 	while (nbytes) {
488 		if (nbytes < CHACHA_BLOCK_SIZE) {
489 			chacha20_block(chacha_state, tmp);
490 			memcpy(buf, tmp, nbytes);
491 			memzero_explicit(tmp, sizeof(tmp));
492 			break;
493 		}
494 
495 		chacha20_block(chacha_state, buf);
496 		if (unlikely(chacha_state[12] == 0))
497 			++chacha_state[13];
498 		nbytes -= CHACHA_BLOCK_SIZE;
499 		buf += CHACHA_BLOCK_SIZE;
500 	}
501 
502 	memzero_explicit(chacha_state, sizeof(chacha_state));
503 }
504 
505 /*
506  * This function is the exported kernel interface.  It returns some
507  * number of good random numbers, suitable for key generation, seeding
508  * TCP sequence numbers, etc.  It does not rely on the hardware random
509  * number generator.  For random bytes direct from the hardware RNG
510  * (when available), use get_random_bytes_arch(). In order to ensure
511  * that the randomness provided by this function is okay, the function
512  * wait_for_random_bytes() should be called and return 0 at least once
513  * at any point prior.
514  */
515 void get_random_bytes(void *buf, size_t nbytes)
516 {
517 	static void *previous;
518 
519 	warn_unseeded_randomness(&previous);
520 	_get_random_bytes(buf, nbytes);
521 }
522 EXPORT_SYMBOL(get_random_bytes);
523 
524 static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
525 {
526 	ssize_t ret = 0;
527 	size_t len;
528 	u32 chacha_state[CHACHA_STATE_WORDS];
529 	u8 output[CHACHA_BLOCK_SIZE];
530 
531 	if (!nbytes)
532 		return 0;
533 
534 	/*
535 	 * Immediately overwrite the ChaCha key at index 4 with random
536 	 * bytes, in case userspace causes copy_to_user() below to sleep
537 	 * forever, so that we still retain forward secrecy in that case.
538 	 */
539 	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);
540 	/*
541 	 * However, if we're doing a read of len <= 32, we don't need to
542 	 * use chacha_state after, so we can simply return those bytes to
543 	 * the user directly.
544 	 */
545 	if (nbytes <= CHACHA_KEY_SIZE) {
546 		ret = copy_to_user(buf, &chacha_state[4], nbytes) ? -EFAULT : nbytes;
547 		goto out_zero_chacha;
548 	}
549 
550 	do {
551 		chacha20_block(chacha_state, output);
552 		if (unlikely(chacha_state[12] == 0))
553 			++chacha_state[13];
554 
555 		len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
556 		if (copy_to_user(buf, output, len)) {
557 			ret = -EFAULT;
558 			break;
559 		}
560 
561 		nbytes -= len;
562 		buf += len;
563 		ret += len;
564 
565 		BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0);
566 		if (!(ret % PAGE_SIZE) && nbytes) {
567 			if (signal_pending(current))
568 				break;
569 			cond_resched();
570 		}
571 	} while (nbytes);
572 
573 	memzero_explicit(output, sizeof(output));
574 out_zero_chacha:
575 	memzero_explicit(chacha_state, sizeof(chacha_state));
576 	return ret;
577 }
578 
579 /*
580  * Batched entropy returns random integers. The quality of the random
581  * number is as good as /dev/urandom's. In order to ensure that the randomness
582  * provided by this function is okay, the function wait_for_random_bytes()
583  * should be called and return 0 at least once at any point prior.
584  */
585 struct batched_entropy {
586 	union {
587 		/*
588 		 * We make this 1.5x a ChaCha block, so that we get the
589 		 * remaining 32 bytes from fast key erasure, plus one full
590 		 * block from the detached ChaCha state. We can increase
591 		 * the size of this later if needed so long as we keep the
592 		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
593 		 */
594 		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
595 		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
596 	};
597 	local_lock_t lock;
598 	unsigned long generation;
599 	unsigned int position;
600 };
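
/*
 * Concretely, with CHACHA_BLOCK_SIZE == 64 the union above is 96 bytes
 * per batch: ARRAY_SIZE(entropy_u64) == 12 and ARRAY_SIZE(entropy_u32) == 24.
 */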
601 
602 
603 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
604 	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
605 	.position = UINT_MAX
606 };
607 
608 u64 get_random_u64(void)
609 {
610 	u64 ret;
611 	unsigned long flags;
612 	struct batched_entropy *batch;
613 	static void *previous;
614 	unsigned long next_gen;
615 
616 	warn_unseeded_randomness(&previous);
617 
618 	local_lock_irqsave(&batched_entropy_u64.lock, flags);
619 	batch = raw_cpu_ptr(&batched_entropy_u64);
620 
621 	next_gen = READ_ONCE(base_crng.generation);
622 	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
623 	    next_gen != batch->generation) {
624 		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
625 		batch->position = 0;
626 		batch->generation = next_gen;
627 	}
628 
629 	ret = batch->entropy_u64[batch->position];
630 	batch->entropy_u64[batch->position] = 0;
631 	++batch->position;
632 	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
633 	return ret;
634 }
635 EXPORT_SYMBOL(get_random_u64);
636 
637 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
638 	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
639 	.position = UINT_MAX
640 };
641 
642 u32 get_random_u32(void)
643 {
644 	u32 ret;
645 	unsigned long flags;
646 	struct batched_entropy *batch;
647 	static void *previous;
648 	unsigned long next_gen;
649 
650 	warn_unseeded_randomness(&previous);
651 
652 	local_lock_irqsave(&batched_entropy_u32.lock, flags);
653 	batch = raw_cpu_ptr(&batched_entropy_u32);
654 
655 	next_gen = READ_ONCE(base_crng.generation);
656 	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
657 	    next_gen != batch->generation) {
658 		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
659 		batch->position = 0;
660 		batch->generation = next_gen;
661 	}
662 
663 	ret = batch->entropy_u32[batch->position];
664 	batch->entropy_u32[batch->position] = 0;
665 	++batch->position;
666 	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
667 	return ret;
668 }
669 EXPORT_SYMBOL(get_random_u32);
670 
671 #ifdef CONFIG_SMP
672 /*
673  * This function is called when the CPU is coming up, with entry
674  * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
675  */
676 int random_prepare_cpu(unsigned int cpu)
677 {
678 	/*
679 	 * When the cpu comes back online, immediately invalidate both
680 	 * the per-cpu crng and all batches, so that we serve fresh
681 	 * randomness.
682 	 */
683 	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
684 	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
685 	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
686 	return 0;
687 }
688 #endif
689 
690 /**
691  * randomize_page - Generate a random, page aligned address
692  * @start:	The smallest acceptable address the caller will take.
693  * @range:	The size of the area, starting at @start, within which the
694  *		random address must fall.
695  *
696  * If @start + @range would overflow, @range is capped.
697  *
698  * NOTE: Historical use of randomize_range, which this replaces, presumed that
699  * @start was already page aligned.  We now align it regardless.
700  *
701  * Return: A page aligned address within [start, start + range).  On error,
702  * @start is returned.
703  */
704 unsigned long randomize_page(unsigned long start, unsigned long range)
705 {
706 	if (!PAGE_ALIGNED(start)) {
707 		range -= PAGE_ALIGN(start) - start;
708 		start = PAGE_ALIGN(start);
709 	}
710 
711 	if (start > ULONG_MAX - range)
712 		range = ULONG_MAX - start;
713 
714 	range >>= PAGE_SHIFT;
715 
716 	if (range == 0)
717 		return start;
718 
719 	return start + (get_random_long() % range << PAGE_SHIFT);
720 }
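
/*
 * Worked example (assuming 4 KiB pages): randomize_page(0x10123, 0x5000)
 * aligns start up to 0x11000, shrinks range to 0x4123 bytes, i.e. 4 whole
 * pages, and returns one of 0x11000, 0x12000, 0x13000 or 0x14000.
 */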
721 
722 /*
723  * This function will use the architecture-specific hardware random
724  * number generator if it is available. It is not recommended for
725  * use. Use get_random_bytes() instead. It returns the number of
726  * bytes filled in.
727  */
728 size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
729 {
730 	size_t left = nbytes;
731 	u8 *p = buf;
732 
733 	while (left) {
734 		unsigned long v;
735 		size_t chunk = min_t(size_t, left, sizeof(unsigned long));
736 
737 		if (!arch_get_random_long(&v))
738 			break;
739 
740 		memcpy(p, &v, chunk);
741 		p += chunk;
742 		left -= chunk;
743 	}
744 
745 	return nbytes - left;
746 }
747 EXPORT_SYMBOL(get_random_bytes_arch);
748 
749 
750 /**********************************************************************
751  *
752  * Entropy accumulation and extraction routines.
753  *
754  * Callers may add entropy via:
755  *
756  *     static void mix_pool_bytes(const void *in, size_t nbytes)
757  *
758  * After which, if added entropy should be credited:
759  *
760  *     static void credit_entropy_bits(size_t nbits)
761  *
762  * Finally, extract entropy via these two, with the latter one
763  * setting the entropy count to zero and extracting only if there
764  * is POOL_MIN_BITS entropy credited prior or force is true:
765  *
766  *     static void extract_entropy(void *buf, size_t nbytes)
767  *     static bool drain_entropy(void *buf, size_t nbytes, bool force)
768  *
769  **********************************************************************/
770 
771 enum {
772 	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
773 	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
774 };
775 
776 /* For notifying userspace that it should write into /dev/random. */
777 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
778 
779 static struct {
780 	struct blake2s_state hash;
781 	spinlock_t lock;
782 	unsigned int entropy_count;
783 } input_pool = {
784 	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
785 		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
786 		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
787 	.hash.outlen = BLAKE2S_HASH_SIZE,
788 	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
789 };
790 
791 static void _mix_pool_bytes(const void *in, size_t nbytes)
792 {
793 	blake2s_update(&input_pool.hash, in, nbytes);
794 }
795 
796 /*
797  * This function adds bytes into the entropy "pool".  It does not
798  * update the entropy estimate.  The caller should call
799  * credit_entropy_bits if this is appropriate.
800  */
801 static void mix_pool_bytes(const void *in, size_t nbytes)
802 {
803 	unsigned long flags;
804 
805 	spin_lock_irqsave(&input_pool.lock, flags);
806 	_mix_pool_bytes(in, nbytes);
807 	spin_unlock_irqrestore(&input_pool.lock, flags);
808 }
809 
810 static void credit_entropy_bits(size_t nbits)
811 {
812 	unsigned int entropy_count, orig, add;
813 
814 	if (!nbits)
815 		return;
816 
817 	add = min_t(size_t, nbits, POOL_BITS);
818 
819 	do {
820 		orig = READ_ONCE(input_pool.entropy_count);
821 		entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
822 	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);
823 
824 	if (!crng_ready() && entropy_count >= POOL_MIN_BITS)
825 		crng_reseed(false);
826 }
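
/*
 * Worked example: with POOL_BITS == 256, crediting 128 bits to a pool
 * already holding 200 credited bits saturates the count at 256, not 328.
 */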
827 
828 /*
829  * This is an HKDF-like construction for using the hashed collected entropy
830  * as a PRF key, that's then expanded block-by-block.
831  */
832 static void extract_entropy(void *buf, size_t nbytes)
833 {
834 	unsigned long flags;
835 	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
836 	struct {
837 		unsigned long rdseed[32 / sizeof(long)];
838 		size_t counter;
839 	} block;
840 	size_t i;
841 
842 	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
843 		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
844 		    !arch_get_random_long(&block.rdseed[i]))
845 			block.rdseed[i] = random_get_entropy();
846 	}
847 
848 	spin_lock_irqsave(&input_pool.lock, flags);
849 
850 	/* seed = HASHPRF(last_key, entropy_input) */
851 	blake2s_final(&input_pool.hash, seed);
852 
853 	/* next_key = HASHPRF(seed, RDSEED || 0) */
854 	block.counter = 0;
855 	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
856 	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
857 
858 	spin_unlock_irqrestore(&input_pool.lock, flags);
859 	memzero_explicit(next_key, sizeof(next_key));
860 
861 	while (nbytes) {
862 		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
863 		/* output = HASHPRF(seed, RDSEED || ++counter) */
864 		++block.counter;
865 		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
866 		nbytes -= i;
867 		buf += i;
868 	}
869 
870 	memzero_explicit(seed, sizeof(seed));
871 	memzero_explicit(&block, sizeof(block));
872 }
873 
874 /*
875  * First we make sure we have POOL_MIN_BITS of entropy in the pool unless force
876  * is true, and then we set the entropy count to zero (but don't actually touch
877  * any data). Only then can we extract a new key with extract_entropy().
878  */
879 static bool drain_entropy(void *buf, size_t nbytes, bool force)
880 {
881 	unsigned int entropy_count;
882 	do {
883 		entropy_count = READ_ONCE(input_pool.entropy_count);
884 		if (!force && entropy_count < POOL_MIN_BITS)
885 			return false;
886 	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
887 	extract_entropy(buf, nbytes);
888 	wake_up_interruptible(&random_write_wait);
889 	kill_fasync(&fasync, SIGIO, POLL_OUT);
890 	return true;
891 }
892 
893 
894 /**********************************************************************
895  *
896  * Entropy collection routines.
897  *
898  * The following exported functions are used for pushing entropy into
899  * the above entropy accumulation routines:
900  *
901  *	void add_device_randomness(const void *buf, size_t size);
902  *	void add_input_randomness(unsigned int type, unsigned int code,
903  *	                          unsigned int value);
904  *	void add_disk_randomness(struct gendisk *disk);
905  *	void add_hwgenerator_randomness(const void *buffer, size_t count,
906  *					size_t entropy);
907  *	void add_bootloader_randomness(const void *buf, size_t size);
908  *	void add_vmfork_randomness(const void *unique_vm_id, size_t size);
909  *	void add_interrupt_randomness(int irq);
910  *
911  * add_device_randomness() adds data to the input pool that
912  * is likely to differ between two devices (or possibly even per boot).
913  * This would be things like MAC addresses or serial numbers, or the
914  * read-out of the RTC. This does *not* credit any actual entropy to
915  * the pool, but it initializes the pool to different values for devices
916  * that might otherwise be identical and have very little entropy
917  * available to them (particularly common in the embedded world).
918  *
919  * add_input_randomness() uses the input layer interrupt timing, as well
920  * as the event type information from the hardware.
921  *
922  * add_disk_randomness() uses what amounts to the seek time of block
923  * layer request events, on a per-disk_devt basis, as input to the
924  * entropy pool. Note that high-speed solid state drives with very low
925  * seek times do not make for good sources of entropy, as their seek
926  * times are usually fairly consistent.
927  *
928  * The above two routines try to estimate how many bits of entropy
929  * to credit. They do this by keeping track of the first, second, and
930  * third-order deltas of the event timings.
931  *
932  * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
933  * entropy as specified by the caller. If the entropy pool is full it will
934  * block until more entropy is needed.
935  *
936  * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
937  * add_device_randomness(), depending on whether or not the configuration
938  * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
939  *
940  * add_vmfork_randomness() adds a unique (but not necessarily secret) ID
941  * representing the current instance of a VM to the pool, without crediting,
942  * and then force-reseeds the crng so that it takes effect immediately.
943  *
944  * add_interrupt_randomness() uses the interrupt timing as random
945  * inputs to the entropy pool. Using the cycle counters and the irq source
946  * as inputs, it feeds the input pool roughly once a second or after 64
947  * interrupts, crediting 1 bit of entropy for whichever comes first.
948  *
949  **********************************************************************/
950 
951 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
952 static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
953 static int __init parse_trust_cpu(char *arg)
954 {
955 	return kstrtobool(arg, &trust_cpu);
956 }
957 static int __init parse_trust_bootloader(char *arg)
958 {
959 	return kstrtobool(arg, &trust_bootloader);
960 }
961 early_param("random.trust_cpu", parse_trust_cpu);
962 early_param("random.trust_bootloader", parse_trust_bootloader);
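
/*
 * Example (hypothetical boot command line): "random.trust_cpu=off
 * random.trust_bootloader=on" overrides the Kconfig defaults above.
 */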
963 
964 /*
965  * The first collection of entropy occurs at system boot while interrupts
966  * are still turned off. Here we push in RDSEED, a timestamp, and utsname().
967  * Depending on the above configuration knob, RDSEED may be considered
968  * sufficient for initialization. Note that much earlier setup may already
969  * have pushed entropy into the input pool by the time we get here.
970  */
971 int __init rand_initialize(void)
972 {
973 	size_t i;
974 	ktime_t now = ktime_get_real();
975 	bool arch_init = true;
976 	unsigned long rv;
977 
978 #if defined(LATENT_ENTROPY_PLUGIN)
979 	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
980 	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
981 #endif
982 
983 	for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
984 		if (!arch_get_random_seed_long_early(&rv) &&
985 		    !arch_get_random_long_early(&rv)) {
986 			rv = random_get_entropy();
987 			arch_init = false;
988 		}
989 		_mix_pool_bytes(&rv, sizeof(rv));
990 	}
991 	_mix_pool_bytes(&now, sizeof(now));
992 	_mix_pool_bytes(utsname(), sizeof(*(utsname())));
993 
994 	extract_entropy(base_crng.key, sizeof(base_crng.key));
995 	++base_crng.generation;
996 
997 	if (arch_init && trust_cpu && !crng_ready()) {
998 		crng_init = 2;
999 		pr_notice("crng init done (trusting CPU's manufacturer)\n");
1000 	}
1001 
1002 	if (ratelimit_disable) {
1003 		urandom_warning.interval = 0;
1004 		unseeded_warning.interval = 0;
1005 	}
1006 	return 0;
1007 }
1008 
1009 /*
1010  * Add device- or boot-specific data to the input pool to help
1011  * initialize it.
1012  *
1013  * None of this adds any entropy; it is meant to avoid the problem of
1014  * the entropy pool having similar initial state across largely
1015  * identical devices.
1016  */
1017 void add_device_randomness(const void *buf, size_t size)
1018 {
1019 	cycles_t cycles = random_get_entropy();
1020 	unsigned long flags, now = jiffies;
1021 
1022 	if (crng_init == 0 && size)
1023 		crng_pre_init_inject(buf, size, false);
1024 
1025 	spin_lock_irqsave(&input_pool.lock, flags);
1026 	_mix_pool_bytes(&cycles, sizeof(cycles));
1027 	_mix_pool_bytes(&now, sizeof(now));
1028 	_mix_pool_bytes(buf, size);
1029 	spin_unlock_irqrestore(&input_pool.lock, flags);
1030 }
1031 EXPORT_SYMBOL(add_device_randomness);
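
/*
 * Illustrative sketch (hypothetical driver code): per-device data such as
 * a MAC address is fed in uncredited at probe time, purely to
 * differentiate otherwise-identical devices.
 */
static void __maybe_unused example_probe_seed(const u8 *mac_addr)
{
	add_device_randomness(mac_addr, 6 /* ETH_ALEN */);
}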
1032 
1033 /* There is one of these per entropy source */
1034 struct timer_rand_state {
1035 	unsigned long last_time;
1036 	long last_delta, last_delta2;
1037 };
1038 
1039 /*
1040  * This function adds entropy to the entropy "pool" by using timing
1041  * delays.  It uses the timer_rand_state structure to make an estimate
1042  * of how many bits of entropy this call has added to the pool.
1043  *
1044  * The number "num" is also added to the pool - it should somehow describe
1045  * the type of event which just happened.  This is currently 0-255 for
1046  * keyboard scan codes, and 256 upwards for interrupts.
1047  */
1048 static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
1049 {
1050 	cycles_t cycles = random_get_entropy();
1051 	unsigned long flags, now = jiffies;
1052 	long delta, delta2, delta3;
1053 
1054 	spin_lock_irqsave(&input_pool.lock, flags);
1055 	_mix_pool_bytes(&cycles, sizeof(cycles));
1056 	_mix_pool_bytes(&now, sizeof(now));
1057 	_mix_pool_bytes(&num, sizeof(num));
1058 	spin_unlock_irqrestore(&input_pool.lock, flags);
1059 
1060 	/*
1061 	 * Calculate number of bits of randomness we probably added.
1062 	 * We take into account the first, second and third-order deltas
1063 	 * in order to make our estimate.
1064 	 */
1065 	delta = now - READ_ONCE(state->last_time);
1066 	WRITE_ONCE(state->last_time, now);
1067 
1068 	delta2 = delta - READ_ONCE(state->last_delta);
1069 	WRITE_ONCE(state->last_delta, delta);
1070 
1071 	delta3 = delta2 - READ_ONCE(state->last_delta2);
1072 	WRITE_ONCE(state->last_delta2, delta2);
1073 
1074 	if (delta < 0)
1075 		delta = -delta;
1076 	if (delta2 < 0)
1077 		delta2 = -delta2;
1078 	if (delta3 < 0)
1079 		delta3 = -delta3;
1080 	if (delta > delta2)
1081 		delta = delta2;
1082 	if (delta > delta3)
1083 		delta = delta3;
1084 
1085 	/*
1086 	 * delta is now minimum absolute delta.
1087 	 * Round down by 1 bit on general principles,
1088 	 * and limit the entropy estimate to 12 bits (at most 11 bits are credited).
1089 	 */
1090 	credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
1091 }
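
/*
 * Worked example of the estimate above: if the minimum absolute delta
 * works out to 100 jiffies, fls(100 >> 1) == fls(50) == 6, so 6 bits of
 * entropy are credited for the event.
 */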
1092 
1093 void add_input_randomness(unsigned int type, unsigned int code,
1094 			  unsigned int value)
1095 {
1096 	static unsigned char last_value;
1097 	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };
1098 
1099 	/* Ignore autorepeat and the like. */
1100 	if (value == last_value)
1101 		return;
1102 
1103 	last_value = value;
1104 	add_timer_randomness(&input_timer_state,
1105 			     (type << 4) ^ code ^ (code >> 4) ^ value);
1106 }
1107 EXPORT_SYMBOL_GPL(add_input_randomness);
1108 
1109 #ifdef CONFIG_BLOCK
1110 void add_disk_randomness(struct gendisk *disk)
1111 {
1112 	if (!disk || !disk->random)
1113 		return;
1114 	/* First major is 1, so we get >= 0x200 here. */
1115 	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
1116 }
1117 EXPORT_SYMBOL_GPL(add_disk_randomness);
1118 
1119 void rand_initialize_disk(struct gendisk *disk)
1120 {
1121 	struct timer_rand_state *state;
1122 
1123 	/*
1124 	 * If kzalloc returns null, we just won't use that entropy
1125 	 * source.
1126 	 */
1127 	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
1128 	if (state) {
1129 		state->last_time = INITIAL_JIFFIES;
1130 		disk->random = state;
1131 	}
1132 }
1133 #endif
1134 
1135 /*
1136  * Interface for in-kernel drivers of true hardware RNGs.
1137  * Those devices may produce endless random bits and will be throttled
1138  * when our pool is full.
1139  */
1140 void add_hwgenerator_randomness(const void *buffer, size_t count,
1141 				size_t entropy)
1142 {
1143 	if (unlikely(crng_init == 0 && entropy < POOL_MIN_BITS)) {
1144 		crng_pre_init_inject(buffer, count, true);
1145 		mix_pool_bytes(buffer, count);
1146 		return;
1147 	}
1148 
1149 	/*
1150 	 * Throttle writing if we're above the trickle threshold.
1151 	 * We'll be woken up again once below POOL_MIN_BITS, when
1152 	 * the calling thread is about to terminate, or once
1153 	 * CRNG_RESEED_INTERVAL has elapsed.
1154 	 */
1155 	wait_event_interruptible_timeout(random_write_wait,
1156 			!system_wq || kthread_should_stop() ||
1157 			input_pool.entropy_count < POOL_MIN_BITS,
1158 			CRNG_RESEED_INTERVAL);
1159 	mix_pool_bytes(buffer, count);
1160 	credit_entropy_bits(entropy);
1161 }
1162 EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
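
/*
 * Illustrative sketch (hypothetical driver code, with a made-up
 * half-credit policy): a hardware RNG driver feeding 32 fresh bytes while
 * asking for only 128 bits of credit. This may sleep while the pool is
 * above POOL_MIN_BITS.
 */
static void __maybe_unused example_feed_hwrng(const u8 buf[32])
{
	add_hwgenerator_randomness(buf, 32, 128);
}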
1163 
1164 /*
1165  * Handle a random seed passed in by the bootloader.
1166  * If the seed is trustworthy, it is treated like input from a hardware RNG.
1167  * Otherwise, it is treated as device data.
1168  * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
1169  */
1170 void add_bootloader_randomness(const void *buf, size_t size)
1171 {
1172 	if (trust_bootloader)
1173 		add_hwgenerator_randomness(buf, size, size * 8);
1174 	else
1175 		add_device_randomness(buf, size);
1176 }
1177 EXPORT_SYMBOL_GPL(add_bootloader_randomness);
1178 
1179 #if IS_ENABLED(CONFIG_VMGENID)
1180 static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
1181 
1182 /*
1183  * Handle a new VM ID, which is unique but not secret, so we
1184  * don't credit it; we do, however, immediately force a reseed
1185  * afterward so that the crng picks it up posthaste.
1186  */
1187 void add_vmfork_randomness(const void *unique_vm_id, size_t size)
1188 {
1189 	add_device_randomness(unique_vm_id, size);
1190 	if (crng_ready()) {
1191 		crng_reseed(true);
1192 		pr_notice("crng reseeded due to virtual machine fork\n");
1193 	}
1194 	blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
1195 }
1196 #if IS_MODULE(CONFIG_VMGENID)
1197 EXPORT_SYMBOL_GPL(add_vmfork_randomness);
1198 #endif
1199 
1200 int register_random_vmfork_notifier(struct notifier_block *nb)
1201 {
1202 	return blocking_notifier_chain_register(&vmfork_chain, nb);
1203 }
1204 EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
1205 
1206 int unregister_random_vmfork_notifier(struct notifier_block *nb)
1207 {
1208 	return blocking_notifier_chain_unregister(&vmfork_chain, nb);
1209 }
1210 EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
1211 #endif
1212 
1213 struct fast_pool {
1214 	struct work_struct mix;
1215 	unsigned long pool[4];
1216 	unsigned long last;
1217 	unsigned int count;
1218 	u16 reg_idx;
1219 };
1220 
1221 static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
1222 #ifdef CONFIG_64BIT
1223 	/* SipHash constants */
1224 	.pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL,
1225 		  0x6c7967656e657261UL, 0x7465646279746573UL }
1226 #else
1227 	/* HalfSipHash constants */
1228 	.pool = { 0, 0, 0x6c796765U, 0x74656462U }
1229 #endif
1230 };
1231 
1232 /*
1233  * This is [Half]SipHash-1-x, starting from an empty key. Because
1234  * the key is fixed, it assumes that its inputs are non-malicious,
1235  * and therefore this has no security on its own. s represents the
1236  * 128 or 256-bit SipHash state, while v represents a 128-bit input.
1237  */
1238 static void fast_mix(unsigned long s[4], const unsigned long *v)
1239 {
1240 	size_t i;
1241 
1242 	for (i = 0; i < 16 / sizeof(long); ++i) {
1243 		s[3] ^= v[i];
1244 #ifdef CONFIG_64BIT
1245 		s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32);
1246 		s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2];
1247 		s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0];
1248 		s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32);
1249 #else
1250 		s[0] += s[1]; s[1] = rol32(s[1],  5); s[1] ^= s[0]; s[0] = rol32(s[0], 16);
1251 		s[2] += s[3]; s[3] = rol32(s[3],  8); s[3] ^= s[2];
1252 		s[0] += s[3]; s[3] = rol32(s[3],  7); s[3] ^= s[0];
1253 		s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16);
1254 #endif
1255 		s[0] ^= v[i];
1256 	}
1257 }
1258 
1259 #ifdef CONFIG_SMP
1260 /*
1261  * This function is called when the CPU has just come online, with
1262  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
1263  */
1264 int random_online_cpu(unsigned int cpu)
1265 {
1266 	/*
1267 	 * During CPU shutdown and before CPU onlining, add_interrupt_
1268 	 * randomness() may schedule mix_interrupt_randomness(), and
1269 	 * set the MIX_INFLIGHT flag. However, because the worker can
1270 	 * be scheduled on a different CPU during this period, that
1271 	 * flag will never be cleared. For that reason, we zero out
1272 	 * the flag here, which runs just after workqueues are onlined
1273 	 * for the CPU again. This also has the effect of setting the
1274 	 * irq randomness count to zero so that new accumulated irqs
1275 	 * are fresh.
1276 	 */
1277 	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
1278 	return 0;
1279 }
1280 #endif
1281 
1282 static unsigned long get_reg(struct fast_pool *f, struct pt_regs *regs)
1283 {
1284 	unsigned long *ptr = (unsigned long *)regs;
1285 	unsigned int idx;
1286 
1287 	if (regs == NULL)
1288 		return 0;
1289 	idx = READ_ONCE(f->reg_idx);
1290 	if (idx >= sizeof(struct pt_regs) / sizeof(unsigned long))
1291 		idx = 0;
1292 	ptr += idx++;
1293 	WRITE_ONCE(f->reg_idx, idx);
1294 	return *ptr;
1295 }
1296 
1297 static void mix_interrupt_randomness(struct work_struct *work)
1298 {
1299 	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
1300 	/*
1301 	 * The size of the copied stack pool is explicitly 16 bytes so that we
1302 	 * tax mix_pool_bytes()'s compression function the same amount on all
1303 	 * platforms. This means on 64-bit we copy half the pool into this,
1304 	 * while on 32-bit we copy all of it. The entropy is supposed to be
1305 	 * sufficiently dispersed between bits that in the sponge-like
1306 	 * half case, on average we don't wind up "losing" some.
1307 	 */
1308 	u8 pool[16];
1309 
1310 	/* Check to see if we're running on the wrong CPU due to hotplug. */
1311 	local_irq_disable();
1312 	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
1313 		local_irq_enable();
1314 		return;
1315 	}
1316 
1317 	/*
1318 	 * Copy the pool to the stack so that the mixer always has a
1319 	 * consistent view, before we reenable irqs again.
1320 	 */
1321 	memcpy(pool, fast_pool->pool, sizeof(pool));
1322 	fast_pool->count = 0;
1323 	fast_pool->last = jiffies;
1324 	local_irq_enable();
1325 
1326 	if (unlikely(crng_init == 0)) {
1327 		crng_pre_init_inject(pool, sizeof(pool), true);
1328 		mix_pool_bytes(pool, sizeof(pool));
1329 	} else {
1330 		mix_pool_bytes(pool, sizeof(pool));
1331 		credit_entropy_bits(1);
1332 	}
1333 
1334 	memzero_explicit(pool, sizeof(pool));
1335 }
1336 
1337 void add_interrupt_randomness(int irq)
1338 {
1339 	enum { MIX_INFLIGHT = 1U << 31 };
1340 	cycles_t cycles = random_get_entropy();
1341 	unsigned long now = jiffies;
1342 	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
1343 	struct pt_regs *regs = get_irq_regs();
1344 	unsigned int new_count;
1345 	union {
1346 		u32 u32[4];
1347 		u64 u64[2];
1348 		unsigned long longs[16 / sizeof(long)];
1349 	} irq_data;
1350 
1351 	if (cycles == 0)
1352 		cycles = get_reg(fast_pool, regs);
1353 
1354 	if (sizeof(cycles) == 8)
1355 		irq_data.u64[0] = cycles ^ rol64(now, 32) ^ irq;
1356 	else {
1357 		irq_data.u32[0] = cycles ^ irq;
1358 		irq_data.u32[1] = now;
1359 	}
1360 
1361 	if (sizeof(unsigned long) == 8)
1362 		irq_data.u64[1] = regs ? instruction_pointer(regs) : _RET_IP_;
1363 	else {
1364 		irq_data.u32[2] = regs ? instruction_pointer(regs) : _RET_IP_;
1365 		irq_data.u32[3] = get_reg(fast_pool, regs);
1366 	}
1367 
1368 	fast_mix(fast_pool->pool, irq_data.longs);
1369 	new_count = ++fast_pool->count;
1370 
1371 	if (new_count & MIX_INFLIGHT)
1372 		return;
1373 
1374 	if (new_count < 64 && (!time_after(now, fast_pool->last + HZ) ||
1375 			       unlikely(crng_init == 0)))
1376 		return;
1377 
1378 	if (unlikely(!fast_pool->mix.func))
1379 		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
1380 	fast_pool->count |= MIX_INFLIGHT;
1381 	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
1382 }
1383 EXPORT_SYMBOL_GPL(add_interrupt_randomness);
1384 
1385 /*
1386  * Each time the timer fires, we expect that we got an unpredictable
1387  * jump in the cycle counter. Even if the timer is running on another
1388  * CPU, the timer activity will be touching the stack of the CPU that is
1389  * generating entropy.
1390  *
1391  * Note that we don't re-arm the timer in the timer itself - we are
1392  * happy to be scheduled away, since that just makes the load more
1393  * complex, but we do not want the timer to keep ticking unless the
1394  * entropy loop is running.
1395  *
1396  * So the re-arming always happens in the entropy loop itself.
1397  */
1398 static void entropy_timer(struct timer_list *t)
1399 {
1400 	credit_entropy_bits(1);
1401 }
1402 
1403 /*
1404  * If we have an actual cycle counter, see if we can
1405  * generate enough entropy with timing noise
1406  */
1407 static void try_to_generate_entropy(void)
1408 {
1409 	struct {
1410 		cycles_t cycles;
1411 		struct timer_list timer;
1412 	} stack;
1413 
1414 	stack.cycles = random_get_entropy();
1415 
1416 	/* Slow counter - or none. Don't even bother */
1417 	if (stack.cycles == random_get_entropy())
1418 		return;
1419 
1420 	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
1421 	while (!crng_ready() && !signal_pending(current)) {
1422 		if (!timer_pending(&stack.timer))
1423 			mod_timer(&stack.timer, jiffies + 1);
1424 		mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
1425 		schedule();
1426 		stack.cycles = random_get_entropy();
1427 	}
1428 
1429 	del_timer_sync(&stack.timer);
1430 	destroy_timer_on_stack(&stack.timer);
1431 	mix_pool_bytes(&stack.cycles, sizeof(stack.cycles));
1432 }
1433 
1434 
1435 /**********************************************************************
1436  *
1437  * Userspace reader/writer interfaces.
1438  *
1439  * getrandom(2) is the primary modern interface into the RNG and should
1440  * be used in preference to anything else.
1441  *
1442  * Reading from /dev/random has the same functionality as calling
1443  * getrandom(2) with flags=0. In earlier versions, however, it had
1444  * vastly different semantics and should therefore be avoided, to
1445  * prevent backwards compatibility issues.
1446  *
1447  * Reading from /dev/urandom has the same functionality as calling
1448  * getrandom(2) with flags=GRND_INSECURE. Because it does not block
1449  * waiting for the RNG to be ready, it should not be used.
1450  *
1451  * Writing to either /dev/random or /dev/urandom adds entropy to
1452  * the input pool but does not credit it.
1453  *
1454  * Polling on /dev/random indicates when the RNG is initialized, on
1455  * the read side, and when it wants new entropy, on the write side.
1456  *
1457  * Both /dev/random and /dev/urandom have the same set of ioctls for
1458  * adding entropy, getting the entropy count, zeroing the count, and
1459  * reseeding the crng.
1460  *
1461  **********************************************************************/
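
/*
 * Userspace view of the above (illustrative; handle_error() is a made-up
 * placeholder): the common pattern is simply
 *
 *	char buf[32];
 *	if (getrandom(buf, sizeof(buf), 0) != sizeof(buf))
 *		handle_error();
 *
 * which blocks until the RNG is initialized and never again thereafter.
 */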
1462 
1463 SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
1464 		flags)
1465 {
1466 	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
1467 		return -EINVAL;
1468 
1469 	/*
1470 	 * Requesting insecure and blocking randomness at the same time makes
1471 	 * no sense.
1472 	 */
1473 	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
1474 		return -EINVAL;
1475 
1476 	if (count > INT_MAX)
1477 		count = INT_MAX;
1478 
1479 	if (!(flags & GRND_INSECURE) && !crng_ready()) {
1480 		int ret;
1481 
1482 		if (flags & GRND_NONBLOCK)
1483 			return -EAGAIN;
1484 		ret = wait_for_random_bytes();
1485 		if (unlikely(ret))
1486 			return ret;
1487 	}
1488 	return get_random_bytes_user(buf, count);
1489 }
1490 
1491 static __poll_t random_poll(struct file *file, poll_table *wait)
1492 {
1493 	__poll_t mask;
1494 
1495 	poll_wait(file, &crng_init_wait, wait);
1496 	poll_wait(file, &random_write_wait, wait);
1497 	mask = 0;
1498 	if (crng_ready())
1499 		mask |= EPOLLIN | EPOLLRDNORM;
1500 	if (input_pool.entropy_count < POOL_MIN_BITS)
1501 		mask |= EPOLLOUT | EPOLLWRNORM;
1502 	return mask;
1503 }
1504 
1505 static int write_pool(const char __user *ubuf, size_t count)
1506 {
1507 	size_t len;
1508 	int ret = 0;
1509 	u8 block[BLAKE2S_BLOCK_SIZE];
1510 
1511 	while (count) {
1512 		len = min(count, sizeof(block));
1513 		if (copy_from_user(block, ubuf, len)) {
1514 			ret = -EFAULT;
1515 			goto out;
1516 		}
1517 		count -= len;
1518 		ubuf += len;
1519 		mix_pool_bytes(block, len);
1520 		cond_resched();
1521 	}
1522 
1523 out:
1524 	memzero_explicit(block, sizeof(block));
1525 	return ret;
1526 }
1527 
1528 static ssize_t random_write(struct file *file, const char __user *buffer,
1529 			    size_t count, loff_t *ppos)
1530 {
1531 	int ret;
1532 
1533 	ret = write_pool(buffer, count);
1534 	if (ret)
1535 		return ret;
1536 
1537 	return (ssize_t)count;
1538 }
1539 
1540 static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
1541 			    loff_t *ppos)
1542 {
1543 	static int maxwarn = 10;
1544 
1545 	/*
1546 	 * Opportunistically attempt to initialize the RNG on platforms that
1547 	 * have fast cycle counters, but don't (for now) require it to succeed.
1548 	 */
1549 	if (!crng_ready())
1550 		try_to_generate_entropy();
1551 
1552 	if (!crng_ready() && maxwarn > 0) {
1553 		maxwarn--;
1554 		if (__ratelimit(&urandom_warning))
1555 			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
1556 				  current->comm, nbytes);
1557 	}
1558 
1559 	return get_random_bytes_user(buf, nbytes);
1560 }
1561 
1562 static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
1563 			   loff_t *ppos)
1564 {
1565 	int ret;
1566 
1567 	ret = wait_for_random_bytes();
1568 	if (ret != 0)
1569 		return ret;
1570 	return get_random_bytes_user(buf, nbytes);
1571 }
1572 
1573 static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
1574 {
1575 	int size, ent_count;
1576 	int __user *p = (int __user *)arg;
1577 	int retval;
1578 
1579 	switch (cmd) {
1580 	case RNDGETENTCNT:
1581 		/* Inherently racy, no point locking. */
1582 		if (put_user(input_pool.entropy_count, p))
1583 			return -EFAULT;
1584 		return 0;
1585 	case RNDADDTOENTCNT:
1586 		if (!capable(CAP_SYS_ADMIN))
1587 			return -EPERM;
1588 		if (get_user(ent_count, p))
1589 			return -EFAULT;
1590 		if (ent_count < 0)
1591 			return -EINVAL;
1592 		credit_entropy_bits(ent_count);
1593 		return 0;
1594 	case RNDADDENTROPY:
1595 		if (!capable(CAP_SYS_ADMIN))
1596 			return -EPERM;
1597 		if (get_user(ent_count, p++))
1598 			return -EFAULT;
1599 		if (ent_count < 0)
1600 			return -EINVAL;
1601 		if (get_user(size, p++))
1602 			return -EFAULT;
1603 		retval = write_pool((const char __user *)p, size);
1604 		if (retval < 0)
1605 			return retval;
1606 		credit_entropy_bits(ent_count);
1607 		return 0;
1608 	case RNDZAPENTCNT:
1609 	case RNDCLEARPOOL:
1610 		/*
1611 		 * Clear the entropy pool counters. We no longer clear
1612 		 * the entropy pool, as that's silly.
1613 		 */
1614 		if (!capable(CAP_SYS_ADMIN))
1615 			return -EPERM;
1616 		if (xchg(&input_pool.entropy_count, 0) >= POOL_MIN_BITS) {
1617 			wake_up_interruptible(&random_write_wait);
1618 			kill_fasync(&fasync, SIGIO, POLL_OUT);
1619 		}
1620 		return 0;
1621 	case RNDRESEEDCRNG:
1622 		if (!capable(CAP_SYS_ADMIN))
1623 			return -EPERM;
1624 		if (!crng_ready())
1625 			return -ENODATA;
1626 		crng_reseed(false);
1627 		return 0;
1628 	default:
1629 		return -EINVAL;
1630 	}
1631 }
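
/*
 * Userspace note on RNDADDENTROPY (illustrative): the argument is a
 * struct rand_pool_info from the uapi <linux/random.h>, i.e. an entropy
 * count in bits, a byte count, and the bytes themselves, matching the two
 * get_user() calls and the write_pool() step above.
 */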
1632 
1633 static int random_fasync(int fd, struct file *filp, int on)
1634 {
1635 	return fasync_helper(fd, filp, on, &fasync);
1636 }
1637 
1638 const struct file_operations random_fops = {
1639 	.read = random_read,
1640 	.write = random_write,
1641 	.poll = random_poll,
1642 	.unlocked_ioctl = random_ioctl,
1643 	.compat_ioctl = compat_ptr_ioctl,
1644 	.fasync = random_fasync,
1645 	.llseek = noop_llseek,
1646 };
1647 
1648 const struct file_operations urandom_fops = {
1649 	.read = urandom_read,
1650 	.write = random_write,
1651 	.unlocked_ioctl = random_ioctl,
1652 	.compat_ioctl = compat_ptr_ioctl,
1653 	.fasync = random_fasync,
1654 	.llseek = noop_llseek,
1655 };
1656 
1657 
1658 /********************************************************************
1659  *
1660  * Sysctl interface.
1661  *
1662  * These are partly unused legacy knobs with dummy values to not break
1663  * userspace and partly still useful things. They are usually accessible
1664  * in /proc/sys/kernel/random/ and are as follows:
1665  *
1666  * - boot_id - a UUID representing the current boot.
1667  *
1668  * - uuid - a random UUID, different each time the file is read.
1669  *
1670  * - poolsize - the number of bits of entropy that the input pool can
1671  *   hold, tied to the POOL_BITS constant.
1672  *
1673  * - entropy_avail - the number of bits of entropy currently in the
1674  *   input pool. Always <= poolsize.
1675  *
1676  * - write_wakeup_threshold - the amount of entropy in the input pool
1677  *   below which write polls to /dev/random will unblock, requesting
1678  *   more entropy, tied to the POOL_MIN_BITS constant. It is writable
1679  *   to avoid breaking old userspaces, but writing to it does not
1680  *   change any behavior of the RNG.
1681  *
1682  * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
1683  *   It is writable to avoid breaking old userspaces, but writing
1684  *   to it does not change any behavior of the RNG.
1685  *
1686  ********************************************************************/
1687 
1688 #ifdef CONFIG_SYSCTL
1689 
1690 #include <linux/sysctl.h>
1691 
1692 static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
1693 static int sysctl_random_write_wakeup_bits = POOL_MIN_BITS;
1694 static int sysctl_poolsize = POOL_BITS;
1695 static u8 sysctl_bootid[UUID_SIZE];
1696 
1697 /*
1698  * This function is used to return both the boot-id UUID and a random
1699  * UUID. The difference is in whether table->data is NULL; if it is,
1700  * then a new UUID is generated and returned to the user.
1701  */
1702 static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
1703 			size_t *lenp, loff_t *ppos)
1704 {
1705 	u8 tmp_uuid[UUID_SIZE], *uuid;
1706 	char uuid_string[UUID_STRING_LEN + 1];
1707 	struct ctl_table fake_table = {
1708 		.data = uuid_string,
1709 		.maxlen = UUID_STRING_LEN
1710 	};
1711 
1712 	if (write)
1713 		return -EPERM;
1714 
1715 	uuid = table->data;
1716 	if (!uuid) {
1717 		uuid = tmp_uuid;
1718 		generate_random_uuid(uuid);
1719 	} else {
1720 		static DEFINE_SPINLOCK(bootid_spinlock);
1721 
1722 		spin_lock(&bootid_spinlock);
1723 		if (!uuid[8])
1724 			generate_random_uuid(uuid);
1725 		spin_unlock(&bootid_spinlock);
1726 	}
1727 
1728 	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
1729 	return proc_dostring(&fake_table, 0, buffer, lenp, ppos);
1730 }
1731 
1732 /* The same as proc_dointvec, but writes don't change anything. */
1733 static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer,
1734 			    size_t *lenp, loff_t *ppos)
1735 {
1736 	return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos);
1737 }
1738 
1739 static struct ctl_table random_table[] = {
1740 	{
1741 		.procname	= "poolsize",
1742 		.data		= &sysctl_poolsize,
1743 		.maxlen		= sizeof(int),
1744 		.mode		= 0444,
1745 		.proc_handler	= proc_dointvec,
1746 	},
1747 	{
1748 		.procname	= "entropy_avail",
1749 		.data		= &input_pool.entropy_count,
1750 		.maxlen		= sizeof(int),
1751 		.mode		= 0444,
1752 		.proc_handler	= proc_dointvec,
1753 	},
1754 	{
1755 		.procname	= "write_wakeup_threshold",
1756 		.data		= &sysctl_random_write_wakeup_bits,
1757 		.maxlen		= sizeof(int),
1758 		.mode		= 0644,
1759 		.proc_handler	= proc_do_rointvec,
1760 	},
1761 	{
1762 		.procname	= "urandom_min_reseed_secs",
1763 		.data		= &sysctl_random_min_urandom_seed,
1764 		.maxlen		= sizeof(int),
1765 		.mode		= 0644,
1766 		.proc_handler	= proc_do_rointvec,
1767 	},
1768 	{
1769 		.procname	= "boot_id",
1770 		.data		= &sysctl_bootid,
1771 		.mode		= 0444,
1772 		.proc_handler	= proc_do_uuid,
1773 	},
1774 	{
1775 		.procname	= "uuid",
1776 		.mode		= 0444,
1777 		.proc_handler	= proc_do_uuid,
1778 	},
1779 	{ }
1780 };
1781 
1782 /*
1783  * rand_initialize() is called before sysctl_init(),
1784  * so we cannot call register_sysctl_init() in rand_initialize()
1785  */
1786 static int __init random_sysctls_init(void)
1787 {
1788 	register_sysctl_init("kernel/random", random_table);
1789 	return 0;
1790 }
1791 device_initcall(random_sysctls_init);
1792 #endif
1793