/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per 1024 bits of input");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

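/* Size of the data buffers: one cache line, but never less than 32 bytes. */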
static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

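/*
 * Mix a small, non-blocking sample from @rng into the entropy pool.
 * add_device_randomness() credits no entropy, so an as-yet-untrusted
 * source cannot inflate the pool's entropy estimate here.
 */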
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 0);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
}

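/* kref release callback: run the driver's cleanup hook and wake waiters. */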
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

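/* Install @rng as the current source.  Caller must hold rng_mutex. */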
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

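/* Drop the core's reference to current_rng; cleanup runs on the last put. */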
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* drop the last reference to trigger the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize against a concurrent
	 * set_current_rng() on this rng.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

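/*
 * Take a reference on @rng, running the driver's ->init hook for the
 * first user, then start or stop khwrngd to match the resulting
 * entropy quality estimate.
 */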
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

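/*
 * Fetch up to @size bytes from @rng.  The modern ->read hook is
 * preferred; otherwise fall back to the legacy ->data_present and
 * ->data_read pair, which yields at most 4 bytes per call.  Caller
 * must hold reading_mutex.
 */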
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

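/*
 * read() handler for /dev/hwrng: refill rng_buffer from the current
 * rng as needed and copy it out to userspace, honoring O_NONBLOCK and
 * pending signals.
 */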
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};

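/*
 * Switch to the best available rng (the list head, since rng_list is
 * sorted by descending quality), or drop the current rng if the list
 * is empty.  Caller must hold rng_mutex.
 */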
static int enable_best_rng(void)
{
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* rng_list is sorted by quality, use the best (=first) one */
	if (!list_empty(&rng_list)) {
		struct hwrng *new_rng;

		new_rng = list_entry(rng_list.next, struct hwrng, list);
		ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
		if (!ret)
			cur_rng_set_by_user = 0;
	} else {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		ret = 0;
	}

	return ret;
}

static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	/* report -ENODEV if the requested rng name is not registered */
	err = -ENODEV;
	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else {
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				cur_rng_set_by_user = 1;
				err = set_current_rng(rng);
				break;
			}
		}
	}

	mutex_unlock(&rng_mutex);

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static ssize_t hwrng_attr_selected_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}

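/*
 * Typical sysfs usage from a shell (a sketch; the rng name shown is
 * illustrative, not guaranteed to exist on a given system):
 *
 *	cat /sys/class/misc/hw_random/rng_available
 *	echo -n virtio_rng.0 > /sys/class/misc/hw_random/rng_current
 *	echo "" > /sys/class/misc/hw_random/rng_current	# revert to auto
 */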
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
static DEVICE_ATTR(rng_selected, S_IRUGO,
		   hwrng_attr_selected_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

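/*
 * khwrngd: kernel thread that keeps feeding data from the current rng
 * into the input pool, crediting entropy according to current_quality.
 */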
static int hwrng_fillfn(void *unused)
{
	long rc;

	set_freezable();

	while (!kthread_freezable_should_stop(NULL)) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
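		/*
		 * quality is entropy per 1024 bits of input, so credit
		 * rc * 8 * current_quality / 1024 bits for rc bytes.
		 */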
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}

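/*
 * Register a new rng source.  A minimal driver-side sketch (all names
 * below are hypothetical, not part of this file):
 *
 *	static int my_read(struct hwrng *rng, void *buf, size_t max, bool wait)
 *	{
 *		// fill buf with up to max bytes; return bytes written,
 *		// 0 if none available (and !wait), or a -errno
 *	}
 *
 *	static struct hwrng my_rng = {
 *		.name	 = "my-rng",
 *		.read	 = my_read,
 *		.quality = 1000,	// claimed entropy per 1024 bits
 *	};
 *
 *	err = hwrng_register(&my_rng);
 */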
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;
	struct list_head *rng_list_ptr;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

	old_rng = current_rng;
	err = 0;
	if (!old_rng ||
	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
		/*
		 * Make the new rng current if there is none yet, or if
		 * it provides better entropy quality and the current
		 * one was not explicitly chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

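/*
 * Remove @rng from the list.  If it was the current source, fail over
 * to the next best one, then wait until all readers have dropped their
 * references.
 */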
void hwrng_unregister(struct hwrng *rng)
{
	int err;

	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else {
		mutex_unlock(&rng_mutex);
	}

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

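/*
 * Managed hwrng_register(): the rng is unregistered automatically when
 * @dev is released, so the driver may omit the explicit unregister.
 */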
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

static int __init hwrng_modinit(void)
{
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");