/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}
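/*
 * Illustrative values for the sizing rule above: the shared read buffers
 * are one cache line, but never smaller than 32 bytes, e.g.
 *
 *	SMP_CACHE_BYTES == 16  ->  rng_buffer_size() == 32
 *	SMP_CACHE_BYTES == 64  ->  rng_buffer_size() == 64
 */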
static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}

static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);
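	/*
	 * Note: "? :" below is the GNU C elvis extension (a ? : b is
	 * a ? a : b), so a non-zero driver-declared quality wins and the
	 * default_quality module parameter is only a fallback.  Quality is
	 * nominally "per mill"; in practice hwrng_fillfn() credits
	 * rc * current_quality * 8 >> 10 bits of entropy for rc bytes read,
	 * i.e. bits of entropy per 1024 bits of data, hence the clamp to
	 * 1024 below.
	 */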
	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
					 len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};

static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			err = 0;
			cur_rng_set_by_user = 1;
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}
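/*
 * Illustrative sysfs interaction with the rng_current/rng_available
 * attributes (the path assumes the standard misc class layout; the
 * example output is hypothetical):
 *
 *	$ cat /sys/class/misc/hw_random/rng_available
 *	virtio_rng timeriomem_rng
 *	$ echo -n virtio_rng > /sys/class/misc/hw_random/rng_current
 */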
static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static ssize_t hwrng_attr_selected_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
static DEVICE_ATTR(rng_selected, S_IRUGO,
		   hwrng_attr_selected_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}
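/*
 * Hypothetical driver-side sketch of the registration API implemented
 * below (the foo_* names are illustrative, not from this file).  A driver
 * supplies at least a name and one of ->read or ->data_read:
 *
 *	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max,
 *				bool wait)
 *	{
 *		... fill buf with up to max bytes, return the count ...
 *	}
 *
 *	static struct hwrng foo_rng = {
 *		.name    = "foo",
 *		.read    = foo_rng_read,
 *		.quality = 700,	 (claims ~700 bits of entropy per 1024 bits)
 *	};
 *
 *	err = hwrng_register(&foo_rng);
 */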
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;
	struct list_head *rng_list_ptr;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

	old_rng = current_rng;
	err = 0;
	if (!old_rng ||
	    (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		/* rng_list is sorted by quality, use the best (=first) one */
		if (!list_empty(&rng_list)) {
			struct hwrng *new_rng;

			new_rng = list_entry(rng_list.next, struct hwrng, list);
			set_current_rng(new_rng);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
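/*
 * Hypothetical probe() sketch for the devres variants above; the rng is
 * unregistered automatically when the device is unbound (all names here
 * are illustrative, reusing foo_rng_read from the earlier sketch):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv = devm_kzalloc(&pdev->dev,
 *						     sizeof(*priv),
 *						     GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *		priv->rng.name = "foo";
 *		priv->rng.read = foo_rng_read;
 *		return devm_hwrng_register(&pdev->dev, &priv->rng);
 *	}
 */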
static int __init hwrng_modinit(void)
{
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
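/*
 * Userspace consumes the currently selected rng through the misc chardev
 * registered above (nodename "hwrng"), e.g.:
 *
 *	$ dd if=/dev/hwrng bs=64 count=1 2>/dev/null | xxd
 */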