/*
 *  zcrypt 2.1.0
 *
 *  Copyright IBM Corp. 2001, 2012
 *  Author(s): Robert Burroughs
 *	       Eric Rossman (edrossma@us.ibm.com)
 *	       Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				  Ralph Wuerthner <rwuerthn@de.ibm.com>
 *  MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#include "zcrypt_debug.h"
#include "zcrypt_api.h"

#include "zcrypt_msgtype6.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

static DEFINE_SPINLOCK(zcrypt_device_lock);
static LIST_HEAD(zcrypt_device_list);
static int zcrypt_device_count;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static int zcrypt_rng_device_add(void);
static void zcrypt_rng_device_remove(void);

static DEFINE_SPINLOCK(zcrypt_ops_list_lock);
static LIST_HEAD(zcrypt_ops_list);

static debug_info_t *zcrypt_dbf_common;
static debug_info_t *zcrypt_dbf_devices;
static struct dentry *debugfs_root;

/*
 * Device attributes common for all crypto devices.
 */
static ssize_t zcrypt_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string);
}

static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL);

static ssize_t zcrypt_online_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online);
}

static ssize_t zcrypt_online_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct zcrypt_device *zdev = to_ap_dev(dev)->private;
	int online;

	if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
		return -EINVAL;
	zdev->online = online;
	ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dman", zdev->ap_dev->qid,
		       zdev->online);
	if (!online)
		ap_flush_queue(zdev->ap_dev);
	return count;
}

static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store);

static struct attribute *zcrypt_device_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_online.attr,
	NULL,
};

static struct attribute_group zcrypt_device_attr_group = {
	.attrs = zcrypt_device_attrs,
};

/**
 * zcrypt_process_rescan(): Process a rescan of the transport layer.
 *
 * Returns 1 if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		ap_bus_force_rescan();
		/* atomic_inc_return() both bumps and reports the count. */
		ZCRYPT_DBF_COMMON(DBF_INFO, "rescan%07d",
				  atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

/**
 * __zcrypt_increase_preference(): Increase preference of a crypto device.
 * @zdev: Pointer to the crypto device.
 *
 * Move the device towards the head of the device list.
 * Needs to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_increase_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating <=
		    (zdev->request_count + 1) * zdev->speed_rating &&
		    tmp->speed_rating != 0)
			break;
	}
	if (l == zdev->list.prev)
		return;
	/* Move zdev behind l */
	list_move(&zdev->list, l);
}
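/*
 * The helper above and its counterpart below keep the device list sorted
 * by the weighted load (request_count + 1) * speed_rating, smallest value
 * first, so a request path walking from the head probes the least-loaded
 * fast device first. For example, an idle device with speed_rating 10
 * weighs (0 + 1) * 10 = 10 and therefore sorts ahead of an equally fast
 * device with two requests in flight, which weighs (2 + 1) * 10 = 30.
 */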
/**
 * __zcrypt_decrease_preference(): Decrease preference of a crypto device.
 * @zdev: Pointer to a crypto device.
 *
 * Move the device towards the tail of the device list.
 * Needs to be called while holding the zcrypt device list lock.
 * Note: cards with speed_rating of 0 are kept at the end of the list.
 */
static void __zcrypt_decrease_preference(struct zcrypt_device *zdev)
{
	struct zcrypt_device *tmp;
	struct list_head *l;

	if (zdev->speed_rating == 0)
		return;
	for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) {
		tmp = list_entry(l, struct zcrypt_device, list);
		if ((tmp->request_count + 1) * tmp->speed_rating >
		    (zdev->request_count + 1) * zdev->speed_rating ||
		    tmp->speed_rating == 0)
			break;
	}
	if (l == zdev->list.next)
		return;
	/* Move zdev before l */
	list_move_tail(&zdev->list, l);
}

static void zcrypt_device_release(struct kref *kref)
{
	struct zcrypt_device *zdev =
		container_of(kref, struct zcrypt_device, refcount);
	zcrypt_device_free(zdev);
}

void zcrypt_device_get(struct zcrypt_device *zdev)
{
	kref_get(&zdev->refcount);
}
EXPORT_SYMBOL(zcrypt_device_get);

int zcrypt_device_put(struct zcrypt_device *zdev)
{
	return kref_put(&zdev->refcount, zcrypt_device_release);
}
EXPORT_SYMBOL(zcrypt_device_put);

struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size)
{
	struct zcrypt_device *zdev;

	zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL);
	if (!zdev)
		return NULL;
	zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL);
	if (!zdev->reply.message)
		goto out_free;
	zdev->reply.length = max_response_size;
	spin_lock_init(&zdev->lock);
	INIT_LIST_HEAD(&zdev->list);
	zdev->dbf_area = zcrypt_dbf_devices;
	return zdev;

out_free:
	kfree(zdev);
	return NULL;
}
EXPORT_SYMBOL(zcrypt_device_alloc);

void zcrypt_device_free(struct zcrypt_device *zdev)
{
	kfree(zdev->reply.message);
	kfree(zdev);
}
EXPORT_SYMBOL(zcrypt_device_free);

/**
 * zcrypt_device_register() - Register a crypto device.
 * @zdev: Pointer to a crypto device
 *
 * Register a crypto device. Returns 0 if successful.
 */
int zcrypt_device_register(struct zcrypt_device *zdev)
{
	int rc;

	if (!zdev->ops)
		return -ENODEV;
	rc = sysfs_create_group(&zdev->ap_dev->device.kobj,
				&zcrypt_device_attr_group);
	if (rc)
		goto out;
	get_device(&zdev->ap_dev->device);
	kref_init(&zdev->refcount);
	spin_lock_bh(&zcrypt_device_lock);
	zdev->online = 1;	/* New devices are online by default. */
	ZCRYPT_DBF_DEV(DBF_INFO, zdev, "dev%04xo%dreg", zdev->ap_dev->qid,
		       zdev->online);
	list_add_tail(&zdev->list, &zcrypt_device_list);
	__zcrypt_increase_preference(zdev);
	zcrypt_device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	if (zdev->ops->rng) {
		rc = zcrypt_rng_device_add();
		if (rc)
			goto out_unregister;
	}
	return 0;

out_unregister:
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
out:
	return rc;
}
EXPORT_SYMBOL(zcrypt_device_register);
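/*
 * Device lifetime sketch (assumed typical usage by a backing device
 * driver): zcrypt_device_alloc(), fill in ->ops, ->ap_dev and the
 * rating/size fields, then zcrypt_device_register(); on removal,
 * zcrypt_device_unregister() drops the initial kref taken by kref_init()
 * above, so the device is freed once the last in-flight request releases
 * its zcrypt_device_get() reference.
 */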
/**
 * zcrypt_device_unregister(): Unregister a crypto device.
 * @zdev: Pointer to crypto device
 *
 * Unregister a crypto device.
 */
void zcrypt_device_unregister(struct zcrypt_device *zdev)
{
	if (zdev->ops->rng)
		zcrypt_rng_device_remove();
	spin_lock_bh(&zcrypt_device_lock);
	zcrypt_device_count--;
	list_del_init(&zdev->list);
	spin_unlock_bh(&zcrypt_device_lock);
	sysfs_remove_group(&zdev->ap_dev->device.kobj,
			   &zcrypt_device_attr_group);
	put_device(&zdev->ap_dev->device);
	zcrypt_device_put(zdev);
}
EXPORT_SYMBOL(zcrypt_device_unregister);

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	if (zops->owner) {
		spin_lock_bh(&zcrypt_ops_list_lock);
		list_add_tail(&zops->list, &zcrypt_ops_list);
		spin_unlock_bh(&zcrypt_ops_list_lock);
	}
}
EXPORT_SYMBOL(zcrypt_msgtype_register);

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	spin_lock_bh(&zcrypt_ops_list_lock);
	list_del_init(&zops->list);
	spin_unlock_bh(&zcrypt_ops_list_lock);
}
EXPORT_SYMBOL(zcrypt_msgtype_unregister);

static inline
struct zcrypt_ops *__ops_lookup(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;
	int found = 0;

	spin_lock_bh(&zcrypt_ops_list_lock);
	list_for_each_entry(zops, &zcrypt_ops_list, list) {
		if ((zops->variant == variant) &&
		    (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
			found = 1;
			break;
		}
	}
	if (!found || !try_module_get(zops->owner))
		zops = NULL;

	spin_unlock_bh(&zcrypt_ops_list_lock);

	return zops;
}

struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops = NULL;

	zops = __ops_lookup(name, variant);
	if (!zops) {
		request_module("%s", name);
		zops = __ops_lookup(name, variant);
	}
	return zops;
}
EXPORT_SYMBOL(zcrypt_msgtype_request);

void zcrypt_msgtype_release(struct zcrypt_ops *zops)
{
	if (zops)
		module_put(zops->owner);
}
EXPORT_SYMBOL(zcrypt_msgtype_release);

/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

/*
 * zcrypt ioctls.
 */
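/*
 * All request paths below share the same device selection pattern: walk
 * the preference-ordered device list under zcrypt_device_lock, skip
 * devices that are offline or lack the required op, pin the chosen device
 * (zcrypt_device_get(), get_device(), try_module_get()), bump its
 * request_count and re-sort it towards the tail, and drop the lock while
 * the msgtype op runs. Afterwards the lock is retaken and every step is
 * undone in reverse order.
 */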
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_device *zdev;
	int rc;

	if (mex->outputdatalength < mex->inputdatalength)
		return -EINVAL;
	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case.
	 */
	mex->outputdatalength = mex->inputdatalength;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo ||
		    zdev->min_mod_size > mex->inputdatalength ||
		    zdev->max_mod_size < mex->inputdatalength)
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo(zdev, mex);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
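/*
 * Worked example for the short_crt handling below, assuming the field
 * sizes documented for struct ica_rsa_modexpo_crt in zcrypt.h: cards
 * flagged short_crt cannot handle np_prime, bp_key or u_mult_inv values
 * with more than 128 significant bytes. For inputdatalength 256 those
 * key parts are 256 / 2 + 8 = 136 bytes long, so their first
 * 256 / 2 - 120 = 8 bytes must be zero for such a card to be usable;
 * the z1/z2/z3 probe reads exactly those bytes once and then restarts
 * the device walk, because copying from user space may sleep and the
 * device list may have changed meanwhile.
 */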
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_device *zdev;
	unsigned long long z1, z2, z3;
	int rc, copied;

	if (crt->outputdatalength < crt->inputdatalength ||
	    (crt->inputdatalength & 1))
		return -EINVAL;
	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case.
	 */
	crt->outputdatalength = crt->inputdatalength;

	copied = 0;
restart:
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online ||
		    !zdev->ops->rsa_modexpo_crt ||
		    zdev->min_mod_size > crt->inputdatalength ||
		    zdev->max_mod_size < crt->inputdatalength)
			continue;
		if (zdev->short_crt && crt->inputdatalength > 240) {
			/*
			 * Check inputdata for leading zeros for cards
			 * that can't handle np_prime, bp_key, or
			 * u_mult_inv > 128 bytes.
			 */
			if (copied == 0) {
				unsigned int len;

				spin_unlock_bh(&zcrypt_device_lock);
				/* len is max 256 / 2 - 120 = 8
				 * For bigger device just assume len of leading
				 * 0s is 8 as stated in the requirements for
				 * ica_rsa_modexpo_crt struct in zcrypt.h.
				 */
				if (crt->inputdatalength <= 256)
					len = crt->inputdatalength / 2 - 120;
				else
					len = 8;
				if (len > sizeof(z1))
					return -EFAULT;
				z1 = z2 = z3 = 0;
				if (copy_from_user(&z1, crt->np_prime, len) ||
				    copy_from_user(&z2, crt->bp_key, len) ||
				    copy_from_user(&z3, crt->u_mult_inv, len))
					return -EFAULT;
				copied = 1;
				/*
				 * We have to restart device lookup -
				 * the device list may have changed by now.
				 */
				goto restart;
			}
			if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL)
				/* The device can't handle this request. */
				continue;
		}
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rsa_modexpo_crt(zdev, crt);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}

static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_device *zdev;
	int rc;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online || !zdev->ops->send_cprb ||
		    (zdev->ops->variant == MSGTYPE06_VARIANT_EP11) ||
		    (xcRB->user_defined != AUTOSELECT &&
		     AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined))
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->send_cprb(zdev, xcRB);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}

struct ep11_target_dev_list {
	unsigned short		targets_num;
	struct ep11_target_dev	*targets;
};

static bool is_desired_ep11dev(unsigned int dev_qid,
			       struct ep11_target_dev_list dev_list)
{
	int n;

	for (n = 0; n < dev_list.targets_num; n++, dev_list.targets++) {
		if ((AP_QID_DEVICE(dev_qid) == dev_list.targets->ap_id) &&
		    (AP_QID_QUEUE(dev_qid) == dev_list.targets->dom_id)) {
			return true;
		}
	}
	return false;
}

static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_device *zdev;
	bool autoselect = false;
	int rc;
	struct ep11_target_dev_list ep11_dev_list = {
		.targets_num	= 0x00,
		.targets	= NULL,
	};

	ep11_dev_list.targets_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	if (ep11_dev_list.targets_num == 0)
		autoselect = true;
	else {
		ep11_dev_list.targets = kcalloc((unsigned short)
						xcrb->targets_num,
						sizeof(struct ep11_target_dev),
						GFP_KERNEL);
		if (!ep11_dev_list.targets)
			return -ENOMEM;

		if (copy_from_user(ep11_dev_list.targets,
				   (struct ep11_target_dev __force __user *)
				   xcrb->targets, xcrb->targets_num *
				   sizeof(struct ep11_target_dev))) {
			/* Don't leak the target list on a failed copy. */
			kfree(ep11_dev_list.targets);
			return -EFAULT;
		}
	}

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		/* check if device is eligible */
		if (!zdev->online ||
		    zdev->ops->variant != MSGTYPE06_VARIANT_EP11)
			continue;

		/* check if device is selected as valid target */
		if (!is_desired_ep11dev(zdev->ap_dev->qid, ep11_dev_list) &&
		    !autoselect)
			continue;

		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->send_ep11_cprb(zdev, xcrb);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else {
			rc = -EAGAIN;
		}
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		kfree(ep11_dev_list.targets);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	kfree(ep11_dev_list.targets);
	return -ENODEV;
}
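/**
 * zcrypt_rng(): Fetch random bytes for the hwrng glue.
 * @buffer: target buffer (the caller below passes a full zeroed page).
 *
 * A non-negative return value is the number of random bytes placed in
 * the buffer; zcrypt_rng_data_read() at the bottom of this file divides
 * it by sizeof(u32) to hand the buffer out word by word.
 */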
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_device *zdev;
	int rc;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		if (!zdev->online || !zdev->ops->rng)
			continue;
		zcrypt_device_get(zdev);
		get_device(&zdev->ap_dev->device);
		zdev->request_count++;
		__zcrypt_decrease_preference(zdev);
		if (try_module_get(zdev->ap_dev->drv->driver.owner)) {
			spin_unlock_bh(&zcrypt_device_lock);
			rc = zdev->ops->rng(zdev, buffer);
			spin_lock_bh(&zcrypt_device_lock);
			module_put(zdev->ap_dev->drv->driver.owner);
		} else
			rc = -EAGAIN;
		zdev->request_count--;
		__zcrypt_increase_preference(zdev);
		put_device(&zdev->ap_dev->device);
		zcrypt_device_put(zdev);
		spin_unlock_bh(&zcrypt_device_lock);
		return rc;
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return -ENODEV;
}
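/*
 * Status helpers backing the legacy ioctls and the /proc interface. Each
 * fills one slot per card index (AP_DEVICES entries); the status mask
 * holds 0 for "no device", the device's user_space_type (1-8, see the
 * legend in zcrypt_proc_show()) when online, or 0x0d when offline.
 */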
static void zcrypt_status_mask(char status[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(status, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		status[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->online ? zdev->user_space_type : 0x0d;
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(qdepth, 0, sizeof(char) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->pendingq_count +
			zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
{
	struct zcrypt_device *zdev;

	memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] =
			zdev->ap_dev->total_request_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_device *zdev;
	int pendingq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		pendingq_count += zdev->ap_dev->pendingq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_device *zdev;
	int requestq_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list) {
		spin_lock(&zdev->ap_dev->lock);
		requestq_count += zdev->ap_dev->requestq_count;
		spin_unlock(&zdev->ap_dev->lock);
	}
	spin_unlock_bh(&zcrypt_device_lock);
	return requestq_count;
}

static int zcrypt_count_type(int type)
{
	struct zcrypt_device *zdev;
	int device_count = 0;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (zdev->user_space_type == type)
			device_count++;
	spin_unlock_bh(&zcrypt_device_lock);
	return device_count;
}
/**
 * zcrypt_ica_status(): Old, deprecated combi status call.
 *
 * Old, deprecated combi status call.
 */
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
{
	struct ica_z90_status *pstat;
	int ret;

	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
	if (!pstat)
		return -ENOMEM;
	pstat->totalcount = zcrypt_device_count;
	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
	pstat->requestqWaitCount = zcrypt_requestq_count();
	pstat->pendingqWaitCount = zcrypt_pendingq_count();
	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
	pstat->cryptoDomain = ap_domain_index;
	zcrypt_status_mask(pstat->status);
	zcrypt_qdepth_mask(pstat->qdepth);
	ret = 0;
	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
		ret = -EFAULT;
	kfree(pstat);
	return ret;
}
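/*
 * Retry protocol shared by the request ioctls below: -EAGAIN from the
 * request sprayer means the selected device went away mid-request, so
 * the call is simply repeated. If the retry loop ends in -ENODEV and a
 * transport layer rescan was requested (zcrypt_rescan_req), the AP bus
 * is rescanned once and the request is retried a final time before the
 * error is returned.
 */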
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc)
			return rc;
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status);
		if (copy_to_user((char __user *) arg, status,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth);
		if (copy_to_user((char __user *) arg, qdepth,
				 sizeof(char) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		int reqcnt[AP_DEVICES];

		zcrypt_perdev_reqcnt(reqcnt);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls. Don't add another device count ioctl,
	 * you can count them yourself in the user space with the
	 * output of the Z90STAT_STATUS_MASK ioctl.
	 */
	case ICAZ90STATUS:
		return zcrypt_ica_status(filp, arg);
	case Z90STAT_TOTALCOUNT:
		return put_user(zcrypt_device_count, (int __user *) arg);
	case Z90STAT_PCICACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICA),
				(int __user *) arg);
	case Z90STAT_PCICCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCICC),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL2COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
				(int __user *) arg);
	case Z90STAT_PCIXCCMCL3COUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_PCIXCCCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
				zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
				(int __user *) arg);
	case Z90STAT_CEX2CCOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
				(int __user *) arg);
	case Z90STAT_CEX2ACOUNT:
		return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
				(int __user *) arg);
	default:
		/* unknown ioctl number */
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}
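/*
 * The same widening pattern repeats in the remaining compat handlers:
 * copy in the 32-bit layout, convert each compat_uptr_t into a 64-bit
 * user pointer with compat_ptr(), run the regular 64-bit request path,
 * and copy only the output fields back to user space.
 */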
struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __attribute__((packed));

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

/*
 * Deprecated /proc entry support.
 */
static struct proc_dir_entry *zcrypt_entry;

static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		seq_printf(m, "%01x", (unsigned int) addr[i]);
	seq_putc(m, ' ');
}

static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
{
	int inl, c, cx;

	seq_printf(m, " ");
	inl = 0;
	for (c = 0; c < (len / 16); c++) {
		sprintcl(m, addr + inl, 16);
		inl += 16;
	}
	cx = len % 16;
	if (cx) {
		sprintcl(m, addr + inl, cx);
		inl += cx;
	}
	seq_putc(m, '\n');
}

static void sprinthx(unsigned char *title, struct seq_file *m,
		     unsigned char *addr, unsigned int len)
{
	int inl, r, rx;

	seq_printf(m, "\n%s\n", title);
	inl = 0;
	for (r = 0; r < (len / 64); r++) {
		sprintrw(m, addr + inl, 64);
		inl += 64;
	}
	rx = len % 64;
	if (rx) {
		sprintrw(m, addr + inl, rx);
		inl += rx;
	}
	seq_putc(m, '\n');
}

static void sprinthx4(unsigned char *title, struct seq_file *m,
		      unsigned int *array, unsigned int len)
{
	int r;

	seq_printf(m, "\n%s\n", title);
	for (r = 0; r < len; r++) {
		if ((r % 8) == 0)
			seq_printf(m, " ");
		seq_printf(m, "%08X ", array[r]);
		if ((r % 8) == 7)
			seq_putc(m, '\n');
	}
	seq_putc(m, '\n');
}
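/*
 * /proc/driver/z90crypt layout: a version banner and the global counters,
 * followed by three AP_DEVICES-wide maps (device type, queue depth,
 * per-device request count) rendered by the sprint helpers above. The
 * write handler below parses this same layout back in.
 */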
static int zcrypt_proc_show(struct seq_file *m, void *v)
{
	char workarea[sizeof(int) * AP_DEVICES];

	seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
		   ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
	seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
	seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
	seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
	seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
	seq_printf(m, "PCIXCC MCL2 count: %d\n",
		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
	seq_printf(m, "PCIXCC MCL3 count: %d\n",
		   zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
	seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
	seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
	seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
	seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
	seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
	seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
	seq_printf(m, "Total open handles: %d\n\n",
		   atomic_read(&zcrypt_open_count));
	zcrypt_status_mask(workarea);
	sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
		 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
		 m, workarea, AP_DEVICES);
	zcrypt_qdepth_mask(workarea);
	sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
	zcrypt_perdev_reqcnt((int *) workarea);
	sprinthx4("Per-device successfully completed request counts",
		  m, (unsigned int *) workarea, AP_DEVICES);
	return 0;
}

static int zcrypt_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, zcrypt_proc_show, NULL);
}

static void zcrypt_disable_card(int index)
{
	struct zcrypt_device *zdev;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
			zdev->online = 0;
			ap_flush_queue(zdev->ap_dev);
			break;
		}
	spin_unlock_bh(&zcrypt_device_lock);
}

static void zcrypt_enable_card(int index)
{
	struct zcrypt_device *zdev;

	spin_lock_bh(&zcrypt_device_lock);
	list_for_each_entry(zdev, &zcrypt_device_list, list)
		if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) {
			zdev->online = 1;
			break;
		}
	spin_unlock_bh(&zcrypt_device_lock);
}

static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t *pos)
{
	unsigned char *lbuf, *ptr;
	size_t local_count;
	int j;

	if (count <= 0)
		return 0;

#define LBUFSIZE 1200UL
	lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
	if (!lbuf)
		return 0;

	local_count = min(LBUFSIZE - 1, count);
	if (copy_from_user(lbuf, buffer, local_count) != 0) {
		kfree(lbuf);
		return -EFAULT;
	}
	lbuf[local_count] = '\0';

	ptr = strstr(lbuf, "Online devices");
	if (!ptr)
		goto out;
	ptr = strstr(ptr, "\n");
	if (!ptr)
		goto out;
	ptr++;

	if (strstr(ptr, "Waiting work element counts") == NULL)
		goto out;

	for (j = 0; j < 64 && *ptr; ptr++) {
		/*
		 * '0' for no device, '1' for PCICA, '2' for PCICC,
		 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
		 * '5' for CEX2C, '6' for CEX2A,
		 * '7' for CEX3C and '8' for CEX3A
		 */
		if (*ptr >= '0' && *ptr <= '8')
			j++;
		else if (*ptr == 'd' || *ptr == 'D')
			zcrypt_disable_card(j++);
		else if (*ptr == 'e' || *ptr == 'E')
			zcrypt_enable_card(j++);
		else if (*ptr != ' ' && *ptr != '\t')
			break;
	}
out:
	kfree(lbuf);
	return count;
}

static const struct file_operations zcrypt_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= zcrypt_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= zcrypt_proc_write,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
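/*
 * hwrng glue: random bytes are fetched from the adapters a page at a
 * time into zcrypt_rng_buffer and handed to the hwrng core one u32 per
 * call; zcrypt_rng_buffer_index counts down the words still available.
 */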
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof *data;
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof *data;
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
};

static int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

static void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

int __init zcrypt_debug_init(void)
{
	debugfs_root = debugfs_create_dir("zcrypt", NULL);

	zcrypt_dbf_common = debug_register("zcrypt_common", 1, 1, 16);
	debug_register_view(zcrypt_dbf_common, &debug_hex_ascii_view);
	debug_set_level(zcrypt_dbf_common, DBF_ERR);

	zcrypt_dbf_devices = debug_register("zcrypt_devices", 1, 1, 16);
	debug_register_view(zcrypt_dbf_devices, &debug_hex_ascii_view);
	debug_set_level(zcrypt_dbf_devices, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debugfs_remove(debugfs_root);
	if (zcrypt_dbf_common)
		debug_unregister(zcrypt_dbf_common);
	if (zcrypt_dbf_devices)
		debug_unregister(zcrypt_dbf_devices);
}

/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	atomic_set(&zcrypt_rescan_req, 0);

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	/* Set up the proc file system */
	zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
				   &zcrypt_proc_fops);
	if (!zcrypt_entry) {
		rc = -ENOMEM;
		goto out_misc;
	}

	return 0;

out_misc:
	misc_deregister(&zcrypt_misc_device);
out:
	return rc;
}

/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void zcrypt_api_exit(void)
{
	remove_proc_entry("driver/z90crypt", NULL);
	misc_deregister(&zcrypt_misc_device);
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);