xref: /openbmc/linux/drivers/char/hw_random/core.c (revision 93696d8f)
1 /*
2  * hw_random/core.c: HWRNG core API
3  *
4  * Copyright 2006 Michael Buesch <m@bues.ch>
5  * Copyright 2005 (c) MontaVista Software, Inc.
6  *
7  * Please read Documentation/admin-guide/hw_random.rst for details on use.
8  *
9  * This software may be used and distributed according to the terms
10  * of the GNU General Public License, incorporated herein by reference.
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/fs.h>
17 #include <linux/hw_random.h>
18 #include <linux/kernel.h>
19 #include <linux/kthread.h>
20 #include <linux/miscdevice.h>
21 #include <linux/module.h>
22 #include <linux/random.h>
23 #include <linux/sched.h>
24 #include <linux/sched/signal.h>
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/uaccess.h>
28 
29 #define RNG_MODULE_NAME		"hw_random"
30 
31 #define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
32 
33 static struct hwrng *current_rng;
34 /* the current rng has been explicitly chosen by user via sysfs */
35 static int cur_rng_set_by_user;
36 static struct task_struct *hwrng_fill;
37 /* list of registered rngs */
38 static LIST_HEAD(rng_list);
39 /* Protects rng_list and current_rng */
40 static DEFINE_MUTEX(rng_mutex);
41 /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
42 static DEFINE_MUTEX(reading_mutex);
43 static int data_avail;
44 static u8 *rng_buffer, *rng_fillbuf;
45 static unsigned short current_quality;
46 static unsigned short default_quality = 1024; /* default to maximum */
47 
48 module_param(current_quality, ushort, 0644);
49 MODULE_PARM_DESC(current_quality,
50 		 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
51 module_param(default_quality, ushort, 0644);
52 MODULE_PARM_DESC(default_quality,
53 		 "default maximum entropy content of hwrng per 1024 bits of input");
54 
55 static void drop_current_rng(void);
56 static int hwrng_init(struct hwrng *rng);
57 static int hwrng_fillfn(void *unused);
58 
59 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
60 			       int wait);
61 
/* Size of the shared read buffers: at least 32 bytes and one cache line. */
static size_t rng_buffer_size(void)
{
	return RNG_BUFFER_SIZE;
}
66 
67 static void add_early_randomness(struct hwrng *rng)
68 {
69 	int bytes_read;
70 
71 	mutex_lock(&reading_mutex);
72 	bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
73 	mutex_unlock(&reading_mutex);
74 	if (bytes_read > 0) {
75 		size_t entropy = bytes_read * 8 * rng->quality / 1024;
76 		add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
77 	}
78 }
79 
80 static inline void cleanup_rng(struct kref *kref)
81 {
82 	struct hwrng *rng = container_of(kref, struct hwrng, ref);
83 
84 	if (rng->cleanup)
85 		rng->cleanup(rng);
86 
87 	complete(&rng->cleanup_done);
88 }
89 
/*
 * Make @rng the device backing /dev/hwrng and the fill thread.
 *
 * Caller must hold rng_mutex.  On success the previous current RNG (if
 * any) drops its reference, and the background fill thread is started
 * the first time a current RNG is installed.  Returns 0 or the negative
 * errno from hwrng_init().
 */
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* take/initialize a reference on the new device first ... */
	err = hwrng_init(rng);
	if (err)
		return err;

	/* ... only then release the old one and switch over */
	drop_current_rng();
	current_rng = rng;

	/* if necessary, start hwrng thread */
	if (!hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			/* not fatal: /dev/hwrng still works without the thread */
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}

	return 0;
}
114 
115 static void drop_current_rng(void)
116 {
117 	BUG_ON(!mutex_is_locked(&rng_mutex));
118 	if (!current_rng)
119 		return;
120 
121 	/* decrease last reference for triggering the cleanup */
122 	kref_put(&current_rng->ref, cleanup_rng);
123 	current_rng = NULL;
124 }
125 
126 /* Returns ERR_PTR(), NULL or refcounted hwrng */
127 static struct hwrng *get_current_rng_nolock(void)
128 {
129 	if (current_rng)
130 		kref_get(&current_rng->ref);
131 
132 	return current_rng;
133 }
134 
135 static struct hwrng *get_current_rng(void)
136 {
137 	struct hwrng *rng;
138 
139 	if (mutex_lock_interruptible(&rng_mutex))
140 		return ERR_PTR(-ERESTARTSYS);
141 
142 	rng = get_current_rng_nolock();
143 
144 	mutex_unlock(&rng_mutex);
145 	return rng;
146 }
147 
148 static void put_rng(struct hwrng *rng)
149 {
150 	/*
151 	 * Hold rng_mutex here so we serialize in case they set_current_rng
152 	 * on rng again immediately.
153 	 */
154 	mutex_lock(&rng_mutex);
155 	if (rng)
156 		kref_put(&rng->ref, cleanup_rng);
157 	mutex_unlock(&rng_mutex);
158 }
159 
/*
 * Initialize (or re-acquire) @rng before making it current.
 *
 * If the device still holds live references from a previous tenure as
 * current RNG, just take another reference and skip the driver init
 * hook; otherwise run ->init() (if provided) and reset the refcount
 * and cleanup state.  Returns 0 on success or the driver's negative
 * errno.
 */
static int hwrng_init(struct hwrng *rng)
{
	/* still alive from a previous selection? reuse it as-is */
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret =  rng->init(rng);
		if (ret)
			return ret;
	}

	/* fresh (re)use: refcount back to 1, cleanup not yet completed */
	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	current_quality = rng->quality; /* obsolete */

	return 0;
}
181 
182 static int rng_dev_open(struct inode *inode, struct file *filp)
183 {
184 	/* enforce read-only access to this chrdev */
185 	if ((filp->f_mode & FMODE_READ) == 0)
186 		return -EINVAL;
187 	if (filp->f_mode & FMODE_WRITE)
188 		return -EINVAL;
189 	return 0;
190 }
191 
192 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
193 			int wait) {
194 	int present;
195 
196 	BUG_ON(!mutex_is_locked(&reading_mutex));
197 	if (rng->read)
198 		return rng->read(rng, (void *)buffer, size, wait);
199 
200 	if (rng->data_present)
201 		present = rng->data_present(rng, wait);
202 	else
203 		present = 1;
204 
205 	if (present)
206 		return rng->data_read(rng, (u32 *)buffer);
207 
208 	return 0;
209 }
210 
/*
 * read() handler for /dev/hwrng.
 *
 * Copies random bytes to userspace through a fixed on-stack bounce
 * buffer so that copy_to_user() page faults can never happen while
 * reading_mutex is held.  Honours O_NONBLOCK, restarts on signals, and
 * returns the number of bytes copied or a negative errno.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	u8 buffer[RNG_BUFFER_SIZE];
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		/* re-resolve the current RNG each pass; it may change */
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		/* refill the shared buffer only once it is fully drained */
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			} else if (bytes_read == 0 &&
				   (filp->f_flags & O_NONBLOCK)) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}

			data_avail = bytes_read;
		}

		len = data_avail;
		if (len) {
			if (len > size)
				len = size;

			/* consume from the tail of the shared buffer */
			data_avail -= len;

			/* stage into the on-stack buffer; the user copy
			 * happens after reading_mutex is dropped */
			memcpy(buffer, rng_buffer + data_avail, len);
		}
		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (len) {
			if (copy_to_user(buf + ret, buffer, len)) {
				err = -EFAULT;
				goto out;
			}

			size -= len;
			ret += len;
		}


		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* don't leave random bytes lying on the kernel stack */
	memzero_explicit(buffer, sizeof(buffer));
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
292 
/* File operations for the read-only /dev/hwrng character device. */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};
299 
300 static const struct attribute_group *rng_dev_groups[];
301 
/* Misc character device exposing the current RNG as /dev/hwrng. */
static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
309 
/*
 * Select the registered RNG with the highest quality as current (or
 * drop the current one entirely when the list is empty).  Clears the
 * user-choice flag on success.  Caller must hold rng_mutex.  Returns 0
 * or a negative errno from set_current_rng().
 */
static int enable_best_rng(void)
{
	struct hwrng *rng, *new_rng = NULL;
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* no rng to use? */
	if (list_empty(&rng_list)) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		return 0;
	}

	/* use the rng which offers the best quality */
	list_for_each_entry(rng, &rng_list, list) {
		if (!new_rng || rng->quality > new_rng->quality)
			new_rng = rng;
	}

	/* avoid re-initializing the device that is already current */
	ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
	if (!ret)
		cur_rng_set_by_user = 0;

	return ret;
}
336 
337 static ssize_t rng_current_store(struct device *dev,
338 				 struct device_attribute *attr,
339 				 const char *buf, size_t len)
340 {
341 	int err;
342 	struct hwrng *rng, *old_rng, *new_rng;
343 
344 	err = mutex_lock_interruptible(&rng_mutex);
345 	if (err)
346 		return -ERESTARTSYS;
347 
348 	old_rng = current_rng;
349 	if (sysfs_streq(buf, "")) {
350 		err = enable_best_rng();
351 	} else {
352 		list_for_each_entry(rng, &rng_list, list) {
353 			if (sysfs_streq(rng->name, buf)) {
354 				err = set_current_rng(rng);
355 				if (!err)
356 					cur_rng_set_by_user = 1;
357 				break;
358 			}
359 		}
360 	}
361 	new_rng = get_current_rng_nolock();
362 	mutex_unlock(&rng_mutex);
363 
364 	if (new_rng) {
365 		if (new_rng != old_rng)
366 			add_early_randomness(new_rng);
367 		put_rng(new_rng);
368 	}
369 
370 	return err ? : len;
371 }
372 
373 static ssize_t rng_current_show(struct device *dev,
374 				struct device_attribute *attr,
375 				char *buf)
376 {
377 	ssize_t ret;
378 	struct hwrng *rng;
379 
380 	rng = get_current_rng();
381 	if (IS_ERR(rng))
382 		return PTR_ERR(rng);
383 
384 	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
385 	put_rng(rng);
386 
387 	return ret;
388 }
389 
390 static ssize_t rng_available_show(struct device *dev,
391 				  struct device_attribute *attr,
392 				  char *buf)
393 {
394 	int err;
395 	struct hwrng *rng;
396 
397 	err = mutex_lock_interruptible(&rng_mutex);
398 	if (err)
399 		return -ERESTARTSYS;
400 	buf[0] = '\0';
401 	list_for_each_entry(rng, &rng_list, list) {
402 		strlcat(buf, rng->name, PAGE_SIZE);
403 		strlcat(buf, " ", PAGE_SIZE);
404 	}
405 	strlcat(buf, "\n", PAGE_SIZE);
406 	mutex_unlock(&rng_mutex);
407 
408 	return strlen(buf);
409 }
410 
/* sysfs show: 1 if the current RNG was pinned by the user, else 0. */
static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
417 
418 static ssize_t rng_quality_show(struct device *dev,
419 				struct device_attribute *attr,
420 				char *buf)
421 {
422 	ssize_t ret;
423 	struct hwrng *rng;
424 
425 	rng = get_current_rng();
426 	if (IS_ERR(rng))
427 		return PTR_ERR(rng);
428 
429 	if (!rng) /* no need to put_rng */
430 		return -ENODEV;
431 
432 	ret = sysfs_emit(buf, "%hu\n", rng->quality);
433 	put_rng(rng);
434 
435 	return ret;
436 }
437 
/*
 * sysfs store for rng_quality: override the current RNG's entropy
 * estimate (0..1024 bits of entropy per 1024 bits of input) and re-run
 * best-RNG selection, since the new value may change which device wins.
 */
static ssize_t rng_quality_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	u16 quality;
	int ret = -EINVAL;

	/* reject writes too short to hold a number */
	if (len < 2)
		return -EINVAL;

	ret = mutex_lock_interruptible(&rng_mutex);
	if (ret)
		return -ERESTARTSYS;

	ret = kstrtou16(buf, 0, &quality);
	if (ret || quality > 1024) {
		ret = -EINVAL;
		goto out;
	}

	if (!current_rng) {
		ret = -ENODEV;
		goto out;
	}

	current_rng->quality = quality;
	current_quality = quality; /* obsolete */

	/* the best available RNG may have changed */
	ret = enable_best_rng();

out:
	mutex_unlock(&rng_mutex);
	return ret ? ret : len;
}
473 
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

/* sysfs attributes attached to the hwrng misc device */
static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	&dev_attr_rng_quality.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
488 
/* Remove the /dev/hwrng misc device (module exit only). */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
493 
/* Create the /dev/hwrng misc device.  Returns 0 or a negative errno. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
498 
/*
 * Background kthread feeding the kernel entropy pool from the current
 * RNG.  Reads buffer-sized chunks, credits entropy scaled by the
 * device's quality, and carries sub-bit remainders (in 1/1024-bit
 * units) into the next iteration.  Exits when no RNG is available.
 */
static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		/* pick up changes made via the deprecated module param */
		if (current_quality != rng->quality)
			rng->quality = current_quality; /* obsolete */
		quality = rng->quality;
		mutex_unlock(&reading_mutex);

		/* back off after a failed/empty read instead of spinning */
		if (rc <= 0)
			hwrng_msleep(rng, 10000);

		put_rng(rng);

		if (rc <= 0)
			continue;

		/* If we cannot credit at least one bit of entropy,
		 * keep track of the remainder for the next iteration
		 */
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10, true);
	}
	hwrng_fill = NULL;
	return 0;
}
541 
/**
 * hwrng_register - register a hardware RNG with the core
 * @rng: device to add; must have a unique name and either a ->read or
 *       a legacy ->data_read hook
 *
 * Adds @rng to the candidate list, clamps its quality into 0..1024
 * (further bounded by the default_quality module parameter), and makes
 * it current when no RNG is set or it beats the current one's quality
 * and the user has not pinned a choice.  Seeds some early randomness
 * when that can be done without an un-run ->init.  Returns 0 on
 * success or a negative errno.
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;
	bool is_new_current = false;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	/* start "clean": cleanup_done completed, dying not yet signalled */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	init_completion(&rng->dying);

	/* Adjust quality field to always have a proper value */
	rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
		/* to use current_rng in add_early_randomness() we need
		 * to take a ref
		 */
		is_new_current = true;
		kref_get(&rng->ref);
	}
	mutex_unlock(&rng_mutex);
	if (is_new_current || !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet by set_current_rng(); so only use the
		 * randomness from devices that don't need an init callback
		 */
		add_early_randomness(rng);
	}
	if (is_new_current)
		put_rng(rng);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
604 
/**
 * hwrng_unregister - remove a hardware RNG from the core
 * @rng: device being removed
 *
 * Drops @rng from the list, aborts any hwrng_msleep()/hwrng_yield()
 * waits via rng->dying, and, if it was current, falls back to the best
 * remaining RNG (stopping the fill thread when none is left).  Blocks
 * until the device's cleanup has finished, so the caller may free
 * @rng on return.
 */
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	complete_all(&rng->dying);	/* wake sleepers in hwrng_msleep() */
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			/* fallback failed: run with no current RNG at all */
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		/* nothing left to read from */
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	/* wait for the last reference to drop and ->cleanup to finish */
	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
640 
/* devres destructor: unregister the managed hwrng on device teardown. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
645 
/* devres match: true when the resource wraps the hwrng passed in @data. */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
655 
656 int devm_hwrng_register(struct device *dev, struct hwrng *rng)
657 {
658 	struct hwrng **ptr;
659 	int error;
660 
661 	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
662 	if (!ptr)
663 		return -ENOMEM;
664 
665 	error = hwrng_register(rng);
666 	if (error) {
667 		devres_free(ptr);
668 		return error;
669 	}
670 
671 	*ptr = rng;
672 	devres_add(dev, ptr);
673 	return 0;
674 }
675 EXPORT_SYMBOL_GPL(devm_hwrng_register);
676 
/* Manually release a devm-registered hwrng before device teardown. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
682 
683 long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
684 {
685 	unsigned long timeout = msecs_to_jiffies(msecs) + 1;
686 
687 	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
688 }
689 EXPORT_SYMBOL_GPL(hwrng_msleep);
690 
/* Yield for one jiffy, aborting early if the RNG is dying or a signal arrives. */
long hwrng_yield(struct hwrng *rng)
{
	return wait_for_completion_interruptible_timeout(&rng->dying, 1);
}
EXPORT_SYMBOL_GPL(hwrng_yield);
696 
697 static int __init hwrng_modinit(void)
698 {
699 	int ret;
700 
701 	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
702 	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
703 	if (!rng_buffer)
704 		return -ENOMEM;
705 
706 	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
707 	if (!rng_fillbuf) {
708 		kfree(rng_buffer);
709 		return -ENOMEM;
710 	}
711 
712 	ret = register_miscdev();
713 	if (ret) {
714 		kfree(rng_fillbuf);
715 		kfree(rng_buffer);
716 	}
717 
718 	return ret;
719 }
720 
/*
 * Module exit: free the shared buffers and remove the misc device.
 * Every provider must already have called hwrng_unregister().
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);	/* a provider failed to unregister */
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
731 
732 fs_initcall(hwrng_modinit); /* depends on misc_register() */
733 module_exit(hwrng_modexit);
734 
735 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
736 MODULE_LICENSE("GPL");
737