/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/hw_random.txt for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define RNG_MODULE_NAME		"hw_random"

static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per mill");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per mill");

static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
}

static void add_early_randomness(struct hwrng *rng)
{
	int bytes_read;
	size_t size = min_t(size_t, 16, rng_buffer_size());

	mutex_lock(&reading_mutex);
	bytes_read = rng_get_data(rng, rng_buffer, size, 1);
	mutex_unlock(&reading_mutex);
	if (bytes_read > 0)
		add_device_randomness(rng_buffer, bytes_read);
}

static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}

static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}

static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}

/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = current_rng;
	if (rng)
		kref_get(&rng->ref);

	mutex_unlock(&rng_mutex);
	return rng;
}

static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
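
/*
 * Typical caller pattern for get_current_rng()/put_rng() (a minimal
 * sketch; error labels and the reading_mutex locking around the actual
 * read are elided here, see rng_dev_read() below for the full version):
 *
 *	struct hwrng *rng = get_current_rng();
 *
 *	if (IS_ERR(rng))
 *		return PTR_ERR(rng);	// interrupted waiting for rng_mutex
 *	if (!rng)
 *		return -ENODEV;		// no RNG is currently selected
 *	// ... pull bytes via rng_get_data() under reading_mutex ...
 *	put_rng(rng);			// last ref dropped -> cleanup_rng()
 */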

static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	add_early_randomness(rng);

	current_quality = rng->quality ? : default_quality;
	if (current_quality > 1024)
		current_quality = 1024;

	if (current_quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (current_quality > 0 && !hwrng_fill)
		start_khwrngd();

	return 0;
}

static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (present)
		return rng->data_read(rng, (u32 *)buffer);

	return 0;
}

static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
					 len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
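
/*
 * Userspace view of the read path above (a hedged sketch, not part of
 * this module): the chrdev appears as /dev/hwrng, must be opened
 * read-only (see rng_dev_open()), and O_NONBLOCK turns "no data yet"
 * into -EAGAIN instead of blocking inside rng_get_data():
 *
 *	int fd = open("/dev/hwrng", O_RDONLY);
 *	unsigned char buf[32];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// short reads are possible
 */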

static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};

static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};

static ssize_t hwrng_attr_current_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	err = -ENODEV;
	list_for_each_entry(rng, &rng_list, list) {
		if (sysfs_streq(rng->name, buf)) {
			err = 0;
			if (rng != current_rng)
				err = set_current_rng(rng);
			break;
		}
	}
	mutex_unlock(&rng_mutex);

	return err ? : len;
}

static ssize_t hwrng_attr_current_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}

static ssize_t hwrng_attr_available_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}

static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);

static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}

static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}

static int hwrng_fillfn(void *unused)
{
	long rc;

	while (!kthread_should_stop()) {
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		mutex_unlock(&reading_mutex);
		put_rng(rng);
		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}
		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   rc * current_quality * 8 >> 10);
	}
	hwrng_fill = NULL;
	return 0;
}
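
/*
 * Entropy credit arithmetic used above: current_quality is an estimate
 * of entropy in 1/1024ths per output bit (capped at 1024 in
 * hwrng_init()), so a buffer of rc bytes is credited with
 * rc * 8 * current_quality / 1024 bits. For example, with
 * current_quality = 700 and rc = 32:
 *
 *	32 * 700 * 8 >> 10 == 175 bits credited to the input pool
 */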

static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed");
		hwrng_fill = NULL;
	}
}

int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *old_rng, *tmp;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);
	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	old_rng = current_rng;
	err = 0;
	if (!old_rng) {
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	if (old_rng && !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system. If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet; so only use the randomness from devices
		 * that don't need an init callback.
		 */
		add_early_randomness(rng);
	}

out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);

void hwrng_unregister(struct hwrng *rng)
{
	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	if (current_rng == rng) {
		drop_current_rng();
		if (!list_empty(&rng_list)) {
			struct hwrng *tail;

			tail = list_entry(rng_list.prev, struct hwrng, list);

			set_current_rng(tail);
		}
	}

	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);

static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);

void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
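
/*
 * Driver-side usage of the registration API above (a minimal sketch;
 * the "foo" names are hypothetical). A driver fills in struct hwrng
 * with a name and either ->read or the older ->data_present/->data_read
 * pair, then registers it; the devm_ variant unregisters automatically
 * when the owning device is detached:
 *
 *	static int foo_rng_read(struct hwrng *rng, void *buf, size_t max,
 *				bool wait)
 *	{
 *		// fill buf with up to max bytes, return bytes produced
 *	}
 *
 *	static struct hwrng foo_rng = {
 *		.name	 = "foo",
 *		.read	 = foo_rng_read,
 *		.quality = 700,	// per-1024 entropy estimate, see hwrng_init()
 *	};
 *
 *	// in foo_probe():
 *	return devm_hwrng_register(dev, &foo_rng);
 */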

static int __init hwrng_modinit(void)
{
	int ret = -ENOMEM;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = register_miscdev();
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}

static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}

module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
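
/*
 * Runtime selection of the active backend through the sysfs attributes
 * defined above (a shell sketch; the backend names are hypothetical,
 * and the misc device registers under /sys/class/misc/hw_random):
 *
 *	$ cat /sys/class/misc/hw_random/rng_available
 *	foo bar
 *	$ echo bar > /sys/class/misc/hw_random/rng_current
 */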