1 /* 2 Added support for the AMD Geode LX RNG 3 (c) Copyright 2004-2005 Advanced Micro Devices, Inc. 4 5 derived from 6 7 Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) 8 (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com> 9 10 derived from 11 12 Hardware driver for the AMD 768 Random Number Generator (RNG) 13 (c) Copyright 2001 Red Hat Inc <alan@redhat.com> 14 15 derived from 16 17 Hardware driver for Intel i810 Random Number Generator (RNG) 18 Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com> 19 Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com> 20 21 Added generic RNG API 22 Copyright 2006 Michael Buesch <m@bues.ch> 23 Copyright 2005 (c) MontaVista Software, Inc. 24 25 Please read Documentation/hw_random.txt for details on use. 26 27 ---------------------------------------------------------- 28 This software may be used and distributed according to the terms 29 of the GNU General Public License, incorporated herein by reference. 30 31 */ 32 33 34 #include <linux/device.h> 35 #include <linux/hw_random.h> 36 #include <linux/module.h> 37 #include <linux/kernel.h> 38 #include <linux/fs.h> 39 #include <linux/sched.h> 40 #include <linux/miscdevice.h> 41 #include <linux/kthread.h> 42 #include <linux/delay.h> 43 #include <linux/slab.h> 44 #include <linux/random.h> 45 #include <linux/err.h> 46 #include <asm/uaccess.h> 47 48 49 #define RNG_MODULE_NAME "hw_random" 50 #define PFX RNG_MODULE_NAME ": " 51 #define RNG_MISCDEV_MINOR 183 /* official */ 52 53 54 static struct hwrng *current_rng; 55 static struct task_struct *hwrng_fill; 56 static LIST_HEAD(rng_list); 57 /* Protects rng_list and current_rng */ 58 static DEFINE_MUTEX(rng_mutex); 59 /* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */ 60 static DEFINE_MUTEX(reading_mutex); 61 static int data_avail; 62 static u8 *rng_buffer, *rng_fillbuf; 63 static unsigned short current_quality; 64 static unsigned short default_quality; /* = 0; default to "off" */ 65 66 module_param(current_quality, ushort, 0644); 67 MODULE_PARM_DESC(current_quality, 68 "current hwrng entropy estimation per mill"); 69 module_param(default_quality, ushort, 0644); 70 MODULE_PARM_DESC(default_quality, 71 "default entropy content of hwrng per mill"); 72 73 static void drop_current_rng(void); 74 static int hwrng_init(struct hwrng *rng); 75 static void start_khwrngd(void); 76 77 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, 78 int wait); 79 80 static size_t rng_buffer_size(void) 81 { 82 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; 83 } 84 85 static void add_early_randomness(struct hwrng *rng) 86 { 87 unsigned char bytes[16]; 88 int bytes_read; 89 90 mutex_lock(&reading_mutex); 91 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); 92 mutex_unlock(&reading_mutex); 93 if (bytes_read > 0) 94 add_device_randomness(bytes, bytes_read); 95 } 96 97 static inline void cleanup_rng(struct kref *kref) 98 { 99 struct hwrng *rng = container_of(kref, struct hwrng, ref); 100 101 if (rng->cleanup) 102 rng->cleanup(rng); 103 104 complete(&rng->cleanup_done); 105 } 106 107 static int set_current_rng(struct hwrng *rng) 108 { 109 int err; 110 111 BUG_ON(!mutex_is_locked(&rng_mutex)); 112 113 err = hwrng_init(rng); 114 if (err) 115 return err; 116 117 drop_current_rng(); 118 current_rng = rng; 119 120 return 0; 121 } 122 123 static void drop_current_rng(void) 124 { 125 BUG_ON(!mutex_is_locked(&rng_mutex)); 126 if (!current_rng) 127 return; 128 129 /* decrease last reference for triggering the cleanup */ 130 kref_put(¤t_rng->ref, cleanup_rng); 131 current_rng = NULL; 132 } 133 134 /* Returns ERR_PTR(), NULL or refcounted hwrng */ 135 static struct hwrng *get_current_rng(void) 136 { 137 struct hwrng *rng; 138 139 if (mutex_lock_interruptible(&rng_mutex)) 140 return ERR_PTR(-ERESTARTSYS); 141 142 rng = current_rng; 143 if (rng) 144 kref_get(&rng->ref); 145 146 mutex_unlock(&rng_mutex); 147 return rng; 148 } 149 150 static void put_rng(struct hwrng *rng) 151 { 152 /* 153 * Hold rng_mutex here so we serialize in case they set_current_rng 154 * on rng again immediately. 155 */ 156 mutex_lock(&rng_mutex); 157 if (rng) 158 kref_put(&rng->ref, cleanup_rng); 159 mutex_unlock(&rng_mutex); 160 } 161 162 static int hwrng_init(struct hwrng *rng) 163 { 164 if (kref_get_unless_zero(&rng->ref)) 165 goto skip_init; 166 167 if (rng->init) { 168 int ret; 169 170 ret = rng->init(rng); 171 if (ret) 172 return ret; 173 } 174 175 kref_init(&rng->ref); 176 reinit_completion(&rng->cleanup_done); 177 178 skip_init: 179 add_early_randomness(rng); 180 181 current_quality = rng->quality ? : default_quality; 182 if (current_quality > 1024) 183 current_quality = 1024; 184 185 if (current_quality == 0 && hwrng_fill) 186 kthread_stop(hwrng_fill); 187 if (current_quality > 0 && !hwrng_fill) 188 start_khwrngd(); 189 190 return 0; 191 } 192 193 static int rng_dev_open(struct inode *inode, struct file *filp) 194 { 195 /* enforce read-only access to this chrdev */ 196 if ((filp->f_mode & FMODE_READ) == 0) 197 return -EINVAL; 198 if (filp->f_mode & FMODE_WRITE) 199 return -EINVAL; 200 return 0; 201 } 202 203 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size, 204 int wait) { 205 int present; 206 207 BUG_ON(!mutex_is_locked(&reading_mutex)); 208 if (rng->read) 209 return rng->read(rng, (void *)buffer, size, wait); 210 211 if (rng->data_present) 212 present = rng->data_present(rng, wait); 213 else 214 present = 1; 215 216 if (present) 217 return rng->data_read(rng, (u32 *)buffer); 218 219 return 0; 220 } 221 222 static ssize_t rng_dev_read(struct file *filp, char __user *buf, 223 size_t size, loff_t *offp) 224 { 225 ssize_t ret = 0; 226 int err = 0; 227 int bytes_read, len; 228 struct hwrng *rng; 229 230 while (size) { 231 rng = get_current_rng(); 232 if (IS_ERR(rng)) { 233 err = PTR_ERR(rng); 234 goto out; 235 } 236 if (!rng) { 237 err = -ENODEV; 238 goto out; 239 } 240 241 mutex_lock(&reading_mutex); 242 if (!data_avail) { 243 bytes_read = rng_get_data(rng, rng_buffer, 244 rng_buffer_size(), 245 !(filp->f_flags & O_NONBLOCK)); 246 if (bytes_read < 0) { 247 err = bytes_read; 248 goto out_unlock_reading; 249 } 250 data_avail = bytes_read; 251 } 252 253 if (!data_avail) { 254 if (filp->f_flags & O_NONBLOCK) { 255 err = -EAGAIN; 256 goto out_unlock_reading; 257 } 258 } else { 259 len = data_avail; 260 if (len > size) 261 len = size; 262 263 data_avail -= len; 264 265 if (copy_to_user(buf + ret, rng_buffer + data_avail, 266 len)) { 267 err = -EFAULT; 268 goto out_unlock_reading; 269 } 270 271 size -= len; 272 ret += len; 273 } 274 275 mutex_unlock(&reading_mutex); 276 put_rng(rng); 277 278 if (need_resched()) 279 schedule_timeout_interruptible(1); 280 281 if (signal_pending(current)) { 282 err = -ERESTARTSYS; 283 goto out; 284 } 285 } 286 out: 287 return ret ? : err; 288 289 out_unlock_reading: 290 mutex_unlock(&reading_mutex); 291 put_rng(rng); 292 goto out; 293 } 294 295 296 static const struct file_operations rng_chrdev_ops = { 297 .owner = THIS_MODULE, 298 .open = rng_dev_open, 299 .read = rng_dev_read, 300 .llseek = noop_llseek, 301 }; 302 303 static const struct attribute_group *rng_dev_groups[]; 304 305 static struct miscdevice rng_miscdev = { 306 .minor = RNG_MISCDEV_MINOR, 307 .name = RNG_MODULE_NAME, 308 .nodename = "hwrng", 309 .fops = &rng_chrdev_ops, 310 .groups = rng_dev_groups, 311 }; 312 313 314 static ssize_t hwrng_attr_current_store(struct device *dev, 315 struct device_attribute *attr, 316 const char *buf, size_t len) 317 { 318 int err; 319 struct hwrng *rng; 320 321 err = mutex_lock_interruptible(&rng_mutex); 322 if (err) 323 return -ERESTARTSYS; 324 err = -ENODEV; 325 list_for_each_entry(rng, &rng_list, list) { 326 if (strcmp(rng->name, buf) == 0) { 327 err = 0; 328 if (rng != current_rng) 329 err = set_current_rng(rng); 330 break; 331 } 332 } 333 mutex_unlock(&rng_mutex); 334 335 return err ? : len; 336 } 337 338 static ssize_t hwrng_attr_current_show(struct device *dev, 339 struct device_attribute *attr, 340 char *buf) 341 { 342 ssize_t ret; 343 struct hwrng *rng; 344 345 rng = get_current_rng(); 346 if (IS_ERR(rng)) 347 return PTR_ERR(rng); 348 349 ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none"); 350 put_rng(rng); 351 352 return ret; 353 } 354 355 static ssize_t hwrng_attr_available_show(struct device *dev, 356 struct device_attribute *attr, 357 char *buf) 358 { 359 int err; 360 struct hwrng *rng; 361 362 err = mutex_lock_interruptible(&rng_mutex); 363 if (err) 364 return -ERESTARTSYS; 365 buf[0] = '\0'; 366 list_for_each_entry(rng, &rng_list, list) { 367 strlcat(buf, rng->name, PAGE_SIZE); 368 strlcat(buf, " ", PAGE_SIZE); 369 } 370 strlcat(buf, "\n", PAGE_SIZE); 371 mutex_unlock(&rng_mutex); 372 373 return strlen(buf); 374 } 375 376 static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR, 377 hwrng_attr_current_show, 378 hwrng_attr_current_store); 379 static DEVICE_ATTR(rng_available, S_IRUGO, 380 hwrng_attr_available_show, 381 NULL); 382 383 static struct attribute *rng_dev_attrs[] = { 384 &dev_attr_rng_current.attr, 385 &dev_attr_rng_available.attr, 386 NULL 387 }; 388 389 ATTRIBUTE_GROUPS(rng_dev); 390 391 static void __exit unregister_miscdev(void) 392 { 393 misc_deregister(&rng_miscdev); 394 } 395 396 static int __init register_miscdev(void) 397 { 398 return misc_register(&rng_miscdev); 399 } 400 401 static int hwrng_fillfn(void *unused) 402 { 403 long rc; 404 405 while (!kthread_should_stop()) { 406 struct hwrng *rng; 407 408 rng = get_current_rng(); 409 if (IS_ERR(rng) || !rng) 410 break; 411 mutex_lock(&reading_mutex); 412 rc = rng_get_data(rng, rng_fillbuf, 413 rng_buffer_size(), 1); 414 mutex_unlock(&reading_mutex); 415 put_rng(rng); 416 if (rc <= 0) { 417 pr_warn("hwrng: no data available\n"); 418 msleep_interruptible(10000); 419 continue; 420 } 421 /* Outside lock, sure, but y'know: randomness. */ 422 add_hwgenerator_randomness((void *)rng_fillbuf, rc, 423 rc * current_quality * 8 >> 10); 424 } 425 hwrng_fill = NULL; 426 return 0; 427 } 428 429 static void start_khwrngd(void) 430 { 431 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); 432 if (hwrng_fill == ERR_PTR(-ENOMEM)) { 433 pr_err("hwrng_fill thread creation failed"); 434 hwrng_fill = NULL; 435 } 436 } 437 438 int hwrng_register(struct hwrng *rng) 439 { 440 int err = -EINVAL; 441 struct hwrng *old_rng, *tmp; 442 443 if (rng->name == NULL || 444 (rng->data_read == NULL && rng->read == NULL)) 445 goto out; 446 447 mutex_lock(&rng_mutex); 448 449 /* kmalloc makes this safe for virt_to_page() in virtio_rng.c */ 450 err = -ENOMEM; 451 if (!rng_buffer) { 452 rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL); 453 if (!rng_buffer) 454 goto out_unlock; 455 } 456 if (!rng_fillbuf) { 457 rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL); 458 if (!rng_fillbuf) { 459 kfree(rng_buffer); 460 goto out_unlock; 461 } 462 } 463 464 /* Must not register two RNGs with the same name. */ 465 err = -EEXIST; 466 list_for_each_entry(tmp, &rng_list, list) { 467 if (strcmp(tmp->name, rng->name) == 0) 468 goto out_unlock; 469 } 470 471 init_completion(&rng->cleanup_done); 472 complete(&rng->cleanup_done); 473 474 old_rng = current_rng; 475 err = 0; 476 if (!old_rng) { 477 err = set_current_rng(rng); 478 if (err) 479 goto out_unlock; 480 } 481 list_add_tail(&rng->list, &rng_list); 482 483 if (old_rng && !rng->init) { 484 /* 485 * Use a new device's input to add some randomness to 486 * the system. If this rng device isn't going to be 487 * used right away, its init function hasn't been 488 * called yet; so only use the randomness from devices 489 * that don't need an init callback. 490 */ 491 add_early_randomness(rng); 492 } 493 494 out_unlock: 495 mutex_unlock(&rng_mutex); 496 out: 497 return err; 498 } 499 EXPORT_SYMBOL_GPL(hwrng_register); 500 501 void hwrng_unregister(struct hwrng *rng) 502 { 503 mutex_lock(&rng_mutex); 504 505 list_del(&rng->list); 506 if (current_rng == rng) { 507 drop_current_rng(); 508 if (!list_empty(&rng_list)) { 509 struct hwrng *tail; 510 511 tail = list_entry(rng_list.prev, struct hwrng, list); 512 513 set_current_rng(tail); 514 } 515 } 516 517 if (list_empty(&rng_list)) { 518 mutex_unlock(&rng_mutex); 519 if (hwrng_fill) 520 kthread_stop(hwrng_fill); 521 } else 522 mutex_unlock(&rng_mutex); 523 524 wait_for_completion(&rng->cleanup_done); 525 } 526 EXPORT_SYMBOL_GPL(hwrng_unregister); 527 528 static void devm_hwrng_release(struct device *dev, void *res) 529 { 530 hwrng_unregister(*(struct hwrng **)res); 531 } 532 533 static int devm_hwrng_match(struct device *dev, void *res, void *data) 534 { 535 struct hwrng **r = res; 536 537 if (WARN_ON(!r || !*r)) 538 return 0; 539 540 return *r == data; 541 } 542 543 int devm_hwrng_register(struct device *dev, struct hwrng *rng) 544 { 545 struct hwrng **ptr; 546 int error; 547 548 ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL); 549 if (!ptr) 550 return -ENOMEM; 551 552 error = hwrng_register(rng); 553 if (error) { 554 devres_free(ptr); 555 return error; 556 } 557 558 *ptr = rng; 559 devres_add(dev, ptr); 560 return 0; 561 } 562 EXPORT_SYMBOL_GPL(devm_hwrng_register); 563 564 void devm_hwrng_unregister(struct device *dev, struct hwrng *rng) 565 { 566 devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng); 567 } 568 EXPORT_SYMBOL_GPL(devm_hwrng_unregister); 569 570 static int __init hwrng_modinit(void) 571 { 572 return register_miscdev(); 573 } 574 575 static void __exit hwrng_modexit(void) 576 { 577 mutex_lock(&rng_mutex); 578 BUG_ON(current_rng); 579 kfree(rng_buffer); 580 kfree(rng_fillbuf); 581 mutex_unlock(&rng_mutex); 582 583 unregister_miscdev(); 584 } 585 586 module_init(hwrng_modinit); 587 module_exit(hwrng_modexit); 588 589 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver"); 590 MODULE_LICENSE("GPL"); 591