/*
	Added support for the AMD Geode LX RNG
	(c) Copyright 2004-2005 Advanced Micro Devices, Inc.

	derived from

	Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG)
	(c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com>

	derived from

	Hardware driver for the AMD 768 Random Number Generator (RNG)
	(c) Copyright 2001 Red Hat Inc <alan@redhat.com>

	derived from

	Hardware driver for Intel i810 Random Number Generator (RNG)
	Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
	Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>

	Added generic RNG API
	Copyright 2006 Michael Buesch <m@bues.ch>
	Copyright 2005 (c) MontaVista Software, Inc.

	Please read Documentation/hw_random.txt for details on use.

	----------------------------------------------------------
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

 */

#include <linux/device.h>
#include <linux/hw_random.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/miscdevice.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"
#define PFX			RNG_MODULE_NAME ": "
#define RNG_MISCDEV_MINOR	183 /* official */

static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

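/*
 * Size of the shared scratch buffers (rng_buffer, rng_fillbuf): at least
 * 32 bytes and at least one cache line.
 */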
static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

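/*
 * Pull a few bytes from @rng and mix them into the kernel's device
 * randomness pool.  Called from hwrng_init() and, for devices without an
 * init callback, directly at registration time.
 */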
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
	memset(rng_buffer, 0, size);
}

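/* kref release callback: run the rng's cleanup hook and wake up waiters */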
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

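/*
 * Initialize @rng and make it the source backing /dev/hwrng and khwrngd.
 * Caller must hold rng_mutex.
 */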
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* drop our reference; if it was the last one, cleanup_rng() runs */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize against someone immediately
	 * calling set_current_rng() on this rng again.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

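/*
 * Grab a reference on an already-initialized rng, or run its init callback
 * and start with a fresh reference.  Also feeds some early randomness into
 * the system and starts/stops the khwrngd thread based on the rng's quality.
 */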
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

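/*
 * Read up to @size bytes from @rng into @buffer.  Prefers the ->read
 * callback; otherwise falls back to the legacy ->data_present/->data_read
 * pair.  Caller must hold reading_mutex.
 */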
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

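/*
 * read() handler for /dev/hwrng: refill rng_buffer from the current rng and
 * copy the data out to userspace until the request is satisfied, an error
 * occurs, or the caller is interrupted.
 */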
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	memset(rng_buffer, 0, rng_buffer_size());
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= RNG_MISCDEV_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};

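/* sysfs "rng_current" store: switch to the rng whose name was written */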
static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			err = 0;
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

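/*
 * Body of the khwrngd kernel thread: keep reading from the current rng and
 * feed the data into the input pool, crediting entropy according to
 * current_quality (estimated entropy per 1024 bits of data).
 */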
static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
		memset(rng_fillbuf, 0, rng_buffer_size());
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}

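/*
 * hwrng_register - register a new hardware random number generator
 *
 * The rng must have a unique name and provide either a ->read or a
 * ->data_read callback.  The first rng registered becomes the current one;
 * later registrations only join rng_list until selected via sysfs or until
 * the current rng is unregistered.
 */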
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (rng->name == NULL ||
	    (rng->data_read == NULL && rng->read == NULL))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
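/*
 * Illustrative sketch of how a driver is expected to use this API; it is
 * not taken from this file and the foo_* names are made up:
 *
 *	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max,
 *				bool wait)
 *	{
 *		... fill buf with up to max bytes of hardware randomness
 *		    and return the number of bytes written, or -errno ...
 *	}
 *
 *	static struct hwrng foo_rng = {
 *		.name	= "foo",
 *		.read	= foo_rng_read,
 *	};
 *
 *	err = hwrng_register(&foo_rng);
 *	...
 *	hwrng_unregister(&foo_rng);
 */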

/*
 * hwrng_unregister - remove a hardware rng from the framework
 *
 * If the rng being removed is the current one, the most recently registered
 * remaining rng (if any) takes over.  Waits until all users have dropped
 * their references and the rng's cleanup has run before returning.
 */
void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

/* devres release callback: unregister the rng when its device goes away */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

/* devres match callback: find the devres entry that holds this rng */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

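/*
 * Device-managed version of hwrng_register(): the rng is unregistered
 * automatically when @dev is unbound.
 */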
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

static int __init hwrng_modinit(void)
{
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");