/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality = 1024; /* default to maximum */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default maximum entropy content of hwrng per 1024 bits of input");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static int hwrng_fillfn(void *unused);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
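
/*
 * Read a small amount of data from a freshly selected rng and feed it
 * to the input pool, crediting entropy according to the device's
 * per-1024-bit quality estimate.
 */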
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0) {
		size_t entropy = bytes_read * 8 * rng->quality / 1024;

		add_hwgenerator_randomness(rng_fillbuf, bytes_read, entropy, false);
	}
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	/* if necessary, start hwrng thread */
	if (!hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng_nolock(void)
{
	if (current_rng)
		kref_get(&current_rng->ref);

	return current_rng;
}

static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = get_current_rng_nolock();

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
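
/*
 * Take a reference on an already-initialized rng, or run its ->init
 * hook and set up the refcount.  Also clamps the quality estimate to
 * at most 1024 (i.e. one bit of entropy per bit of input).
 */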
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
	current_quality = rng->quality; /* obsolete */

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
						  rng_buffer_size(),
						  !(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
					 len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
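
/*
 * With rng_mutex held, switch to the registered rng that advertises
 * the highest quality.  Clears cur_rng_set_by_user since the choice
 * is now automatic; if the list is empty, the current rng is dropped.
 */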
static int enable_best_rng(void)
{
	struct hwrng *rng, *new_rng = NULL;
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* no rng to use? */
	if (list_empty(&rng_list)) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		return 0;
	}

	/* use the rng which offers the best quality */
	list_for_each_entry(rng, &rng_list, list) {
		if (!new_rng || rng->quality > new_rng->quality)
			new_rng = rng;
	}

	ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
	if (!ret)
		cur_rng_set_by_user = 0;

	return ret;
}

static ssize_t rng_current_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	int err;
	struct hwrng *rng, *old_rng, *new_rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	old_rng = current_rng;
	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else {
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				err = set_current_rng(rng);
				if (!err)
					cur_rng_set_by_user = 1;
				break;
			}
		}
	}
	new_rng = get_current_rng_nolock();
	mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (new_rng != old_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	return err ? : len;
}

static ssize_t rng_current_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t rng_available_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}

static ssize_t rng_quality_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	if (!rng) /* no need to put_rng */
		return -ENODEV;

	ret = sysfs_emit(buf, "%hu\n", rng->quality);
	put_rng(rng);

	return ret;
}

static ssize_t rng_quality_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	u16 quality;
	int ret = -EINVAL;

	if (len < 2)
		return -EINVAL;

	ret = mutex_lock_interruptible(&rng_mutex);
	if (ret)
		return -ERESTARTSYS;

	ret = kstrtou16(buf, 0, &quality);
	if (ret || quality > 1024) {
		ret = -EINVAL;
		goto out;
	}

	if (!current_rng) {
		ret = -ENODEV;
		goto out;
	}

	current_rng->quality = quality;
	current_quality = quality; /* obsolete */

	/* the best available RNG may have changed */
	ret = enable_best_rng();

out:
	mutex_unlock(&rng_mutex);
	return ret ? ret : len;
}
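
/*
 * sysfs attributes of the hw_random misc device: rng_current and
 * rng_quality are writable, rng_available and rng_selected read-only.
 */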
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	&dev_attr_rng_quality.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		if (current_quality != rng->quality)
			rng->quality = current_quality; /* obsolete */
		quality = rng->quality;
		mutex_unlock(&reading_mutex);

		if (rc <= 0)
			hwrng_msleep(rng, 10000);

		put_rng(rng);

		if (rc <= 0)
			continue;

		/* If we cannot credit at least one bit of entropy,
		 * keep track of the remainder for the next iteration
		 */
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10, true);
	}
	hwrng_fill = NULL;
	return 0;
}

int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;
	bool is_new_current = false;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	init_completion(&rng->dying);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
		/* to use current_rng in add_early_randomness() we need
		 * to take a ref
		 */
		is_new_current = true;
		kref_get(&rng->ref);
	}
	mutex_unlock(&rng_mutex);
	if (is_new_current || !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet by set_current_rng(); so only use the
		 * randomness from devices that don't need an init callback.
		 */
		add_early_randomness(rng);
	}
	if (is_new_current)
		put_rng(rng);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
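
/*
 * Illustrative sketch (not part of this file): a minimal backend
 * registers with the core roughly like this, where my_dev_read() and
 * my_hw_fill() are hypothetical driver helpers filling @buf with up
 * to @max bytes of hardware randomness:
 *
 *	static int my_dev_read(struct hwrng *rng, void *buf, size_t max,
 *			       bool wait)
 *	{
 *		return my_hw_fill(buf, max, wait); // bytes read or -errno
 *	}
 *
 *	static struct hwrng my_rng = {
 *		.name	 = "my-rng",
 *		.read	 = my_dev_read,
 *		.quality = 1024,
 *	};
 *
 *	err = hwrng_register(&my_rng);
 */

/*
 * Remove an rng from the list.  If it was the current source, fall
 * back to the best remaining one, then wait for any pending cleanup
 * to complete before returning so the caller may free the device.
 */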
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	complete_all(&rng->dying);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng) {
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);

long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}
EXPORT_SYMBOL_GPL(hwrng_msleep);

static int __init hwrng_modinit(void)
{
	int ret;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");