/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME "hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");
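
/*
 * Note: despite the "per mill" wording above, quality values are clamped
 * to 1024 in hwrng_init() and used as a fraction of 1024 when crediting
 * entropy in hwrng_fillfn() (rc * current_quality * 8 >> 10), i.e. they
 * express expected bits of entropy per 1024 bits of input data.
 */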

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
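
/*
 * Reference lifecycle, as implemented above: current_rng holds one kref on
 * the active device; readers take an extra reference via get_current_rng()
 * and release it with put_rng().  When the last reference is dropped,
 * cleanup_rng() runs the device's ->cleanup hook and completes
 * rng->cleanup_done, which hwrng_unregister() waits on below.
 */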

static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
					 len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
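
/*
 * Pick the best available backend: since rng_list is kept sorted by
 * descending quality, that is simply the head of the list.  On success the
 * selection is treated as automatic (cur_rng_set_by_user is cleared); with
 * an empty list this returns -ENODEV.
 */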
static int enable_best_rng(void)
{
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* rng_list is sorted by quality, use the best (=first) one */
	if (!list_empty(&rng_list)) {
		struct hwrng *new_rng;

		new_rng = list_entry(rng_list.next, struct hwrng, list);
		ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
		if (!ret)
			cur_rng_set_by_user = 0;
	}

	return ret;
}

static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err = -ENODEV;
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return -ERESTARTSYS;

	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else {
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				cur_rng_set_by_user = 1;
				err = set_current_rng(rng);
				break;
			}
		}
	}

	mutex_unlock(&rng_mutex);

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static ssize_t hwrng_attr_selected_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
static DEVICE_ATTR(rng_selected, S_IRUGO,
		   hwrng_attr_selected_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}
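
/*
 * hwrng_register - register a hardware RNG backend with the core.
 *
 * Illustrative sketch only (the "foo" names are hypothetical; the exact
 * callback prototypes live in <linux/hw_random.h>): a backend needs a name
 * and either a ->read or a ->data_read callback, plus an optional quality
 * estimate used for the list sorting and khwrngd crediting seen in this
 * file:
 *
 *	static struct hwrng foo_rng = {
 *		.name    = "foo",
 *		.read    = foo_rng_read,   (fills a buffer, returns bytes)
 *		.quality = 1000,           (entropy per 1024 bits of data)
 *	};
 *
 *	err = hwrng_register(&foo_rng);    (or devm_hwrng_register(dev, ...))
 *
 * Returns 0 on success, -EINVAL for a malformed struct hwrng, -EEXIST if
 * the name is already registered, or the error from set_current_rng().
 */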
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;
	struct list_head *rng_list_ptr;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

	old_rng = current_rng;
	err = 0;
	if (!old_rng ||
	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
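
/*
 * hwrng_unregister - remove a backend registered with hwrng_register().
 *
 * Drops the device from rng_list, switches to the best remaining backend
 * if this one was current, stops the khwrngd thread when the list becomes
 * empty, and finally waits on cleanup_done so the device is no longer in
 * use once this returns.
 */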
502 */ 503 add_early_randomness(rng); 504 } 505 506 out_unlock: 507 mutex_unlock(&rng_mutex); 508 out: 509 return err; 510 } 511 EXPORT_SYMBOL_GPL(hwrng_register); 512 513 void hwrng_unregister(struct hwrng *rng) 514 { 515 mutex_lock(&rng_mutex); 516 517 list_del(&rng->list); 518 if (current_rng == rng) 519 enable_best_rng(); 520 521 if (list_empty(&rng_list)) { 522 mutex_unlock(&rng_mutex); 523 if (hwrng_fill) 524 kthread_stop(hwrng_fill); 525 } else 526 mutex_unlock(&rng_mutex); 527 528 wait_for_completion(&rng->cleanup_done); 529 } 530 EXPORT_SYMBOL_GPL(hwrng_unregister); 531 532 static void devm_hwrng_release(struct device *dev, void *res) 533 { 534 hwrng_unregister(*(struct hwrng **)res); 535 } 536 537 static int devm_hwrng_match(struct device *dev, void *res, void *data) 538 { 539 struct hwrng **r = res; 540 541 if (WARN_ON(!r || !*r)) 542 return 0; 543 544 return *r == data; 545 } 546 547 int devm_hwrng_register(struct device *dev, struct hwrng *rng) 548 { 549 struct hwrng **ptr; 550 int error; 551 552 ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL); 553 if (!ptr) 554 return -ENOMEM; 555 556 error = hwrng_register(rng); 557 if (error) { 558 devres_free(ptr); 559 return error; 560 } 561 562 *ptr = rng; 563 devres_add(dev, ptr); 564 return 0; 565 } 566 EXPORT_SYMBOL_GPL(devm_hwrng_register); 567 568 void devm_hwrng_unregister(struct device *dev, struct hwrng *rng) 569 { 570 devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng); 571 } 572 EXPORT_SYMBOL_GPL(devm_hwrng_unregister); 573 574 static int __init hwrng_modinit(void) 575 { 576 int ret = -ENOMEM; 577 578 /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ 579 rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); 580 if (!rng_buffer) 581 return -ENOMEM; 582 583 rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); 584 if (!rng_fillbuf) { 585 kfree(rng_buffer); 586 return -ENOMEM; 587 } 588 589 ret = register_miscdev(); 590 if (ret) { 591 kfree(rng_fillbuf); 592 kfree(rng_buffer); 593 } 594 595 return ret; 596 } 597 598 static void __exit hwrng_modexit(void) 599 { 600 mutex_lock(&rng_mutex); 601 BUG_ON(current_rng); 602 kfree(rng_buffer); 603 kfree(rng_fillbuf); 604 mutex_unlock(&rng_mutex); 605 606 unregister_miscdev(); 607 } 608 609 module_init(hwrng_modinit); 610 module_exit(hwrng_modexit); 611 612 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); 613 MODULE_LICENSE("GPL"); 614