1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright IBM Corp. 2001, 2018 4 * Author(s): Robert Burroughs 5 * Eric Rossman (edrossma@us.ibm.com) 6 * Cornelia Huck <cornelia.huck@de.ibm.com> 7 * 8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Ralph Wuerthner <rwuerthn@de.ibm.com> 11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 12 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com> 13 */ 14 15 #include <linux/module.h> 16 #include <linux/init.h> 17 #include <linux/interrupt.h> 18 #include <linux/miscdevice.h> 19 #include <linux/fs.h> 20 #include <linux/compat.h> 21 #include <linux/slab.h> 22 #include <linux/atomic.h> 23 #include <linux/uaccess.h> 24 #include <linux/hw_random.h> 25 #include <linux/debugfs.h> 26 #include <linux/cdev.h> 27 #include <linux/ctype.h> 28 #include <linux/capability.h> 29 #include <asm/debug.h> 30 31 #define CREATE_TRACE_POINTS 32 #include <asm/trace/zcrypt.h> 33 34 #include "zcrypt_api.h" 35 #include "zcrypt_debug.h" 36 37 #include "zcrypt_msgtype6.h" 38 #include "zcrypt_msgtype50.h" 39 #include "zcrypt_ccamisc.h" 40 #include "zcrypt_ep11misc.h" 41 42 /* 43 * Module description. 44 */ 45 MODULE_AUTHOR("IBM Corporation"); 46 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ 47 "Copyright IBM Corp. 2001, 2012"); 48 MODULE_LICENSE("GPL"); 49 50 /* 51 * zcrypt tracepoint functions 52 */ 53 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); 54 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); 55 56 DEFINE_SPINLOCK(zcrypt_list_lock); 57 LIST_HEAD(zcrypt_card_list); 58 59 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 60 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 61 62 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 63 EXPORT_SYMBOL(zcrypt_rescan_req); 64 65 static LIST_HEAD(zcrypt_ops_list); 66 67 /* Zcrypt related debug feature stuff. */ 68 debug_info_t *zcrypt_dbf_info; 69 70 /* 71 * Process a rescan of the transport layer. 72 * 73 * Returns 1, if the rescan has been processed, otherwise 0. 74 */ 75 static inline int zcrypt_process_rescan(void) 76 { 77 if (atomic_read(&zcrypt_rescan_req)) { 78 atomic_set(&zcrypt_rescan_req, 0); 79 atomic_inc(&zcrypt_rescan_count); 80 ap_bus_force_rescan(); 81 ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__, 82 atomic_inc_return(&zcrypt_rescan_count)); 83 return 1; 84 } 85 return 0; 86 } 87 88 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 89 { 90 list_add_tail(&zops->list, &zcrypt_ops_list); 91 } 92 93 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 94 { 95 list_del_init(&zops->list); 96 } 97 98 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) 99 { 100 struct zcrypt_ops *zops; 101 102 list_for_each_entry(zops, &zcrypt_ops_list, list) 103 if (zops->variant == variant && 104 (!strncmp(zops->name, name, sizeof(zops->name)))) 105 return zops; 106 return NULL; 107 } 108 EXPORT_SYMBOL(zcrypt_msgtype); 109 110 /* 111 * Multi device nodes extension functions. 112 */ 113 114 struct zcdn_device; 115 116 static struct class *zcrypt_class; 117 static dev_t zcrypt_devt; 118 static struct cdev zcrypt_cdev; 119 120 struct zcdn_device { 121 struct device device; 122 struct ap_perms perms; 123 }; 124 125 #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device) 126 127 #define ZCDN_MAX_NAME 32 128 129 static int zcdn_create(const char *name); 130 static int zcdn_destroy(const char *name); 131 132 /* 133 * Find zcdn device by name. 134 * Returns reference to the zcdn device which needs to be released 135 * with put_device() after use. 136 */ 137 static inline struct zcdn_device *find_zcdndev_by_name(const char *name) 138 { 139 struct device *dev = class_find_device_by_name(zcrypt_class, name); 140 141 return dev ? to_zcdn_dev(dev) : NULL; 142 } 143 144 /* 145 * Find zcdn device by devt value. 146 * Returns reference to the zcdn device which needs to be released 147 * with put_device() after use. 148 */ 149 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt) 150 { 151 struct device *dev = class_find_device_by_devt(zcrypt_class, devt); 152 153 return dev ? to_zcdn_dev(dev) : NULL; 154 } 155 156 static ssize_t ioctlmask_show(struct device *dev, 157 struct device_attribute *attr, 158 char *buf) 159 { 160 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 161 int i, n; 162 163 if (mutex_lock_interruptible(&ap_perms_mutex)) 164 return -ERESTARTSYS; 165 166 n = sysfs_emit(buf, "0x"); 167 for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++) 168 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]); 169 n += sysfs_emit_at(buf, n, "\n"); 170 171 mutex_unlock(&ap_perms_mutex); 172 173 return n; 174 } 175 176 static ssize_t ioctlmask_store(struct device *dev, 177 struct device_attribute *attr, 178 const char *buf, size_t count) 179 { 180 int rc; 181 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 182 183 rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm, 184 AP_IOCTLS, &ap_perms_mutex); 185 if (rc) 186 return rc; 187 188 return count; 189 } 190 191 static DEVICE_ATTR_RW(ioctlmask); 192 193 static ssize_t apmask_show(struct device *dev, 194 struct device_attribute *attr, 195 char *buf) 196 { 197 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 198 int i, n; 199 200 if (mutex_lock_interruptible(&ap_perms_mutex)) 201 return -ERESTARTSYS; 202 203 n = sysfs_emit(buf, "0x"); 204 for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++) 205 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]); 206 n += sysfs_emit_at(buf, n, "\n"); 207 208 mutex_unlock(&ap_perms_mutex); 209 210 return n; 211 } 212 213 static ssize_t apmask_store(struct device *dev, 214 struct device_attribute *attr, 215 const char *buf, size_t count) 216 { 217 int rc; 218 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 219 220 rc = ap_parse_mask_str(buf, zcdndev->perms.apm, 221 AP_DEVICES, &ap_perms_mutex); 222 if (rc) 223 return rc; 224 225 return count; 226 } 227 228 static DEVICE_ATTR_RW(apmask); 229 230 static ssize_t aqmask_show(struct device *dev, 231 struct device_attribute *attr, 232 char *buf) 233 { 234 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 235 int i, n; 236 237 if (mutex_lock_interruptible(&ap_perms_mutex)) 238 return -ERESTARTSYS; 239 240 n = sysfs_emit(buf, "0x"); 241 for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++) 242 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]); 243 n += sysfs_emit_at(buf, n, "\n"); 244 245 mutex_unlock(&ap_perms_mutex); 246 247 return n; 248 } 249 250 static ssize_t aqmask_store(struct device *dev, 251 struct device_attribute *attr, 252 const char *buf, size_t count) 253 { 254 int rc; 255 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 256 257 rc = ap_parse_mask_str(buf, zcdndev->perms.aqm, 258 AP_DOMAINS, &ap_perms_mutex); 259 if (rc) 260 return rc; 261 262 return count; 263 } 264 265 static DEVICE_ATTR_RW(aqmask); 266 267 static ssize_t admask_show(struct device *dev, 268 struct device_attribute *attr, 269 char *buf) 270 { 271 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 272 int i, n; 273 274 if (mutex_lock_interruptible(&ap_perms_mutex)) 275 return -ERESTARTSYS; 276 277 n = sysfs_emit(buf, "0x"); 278 for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++) 279 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]); 280 n += sysfs_emit_at(buf, n, "\n"); 281 282 mutex_unlock(&ap_perms_mutex); 283 284 return n; 285 } 286 287 static ssize_t admask_store(struct device *dev, 288 struct device_attribute *attr, 289 const char *buf, size_t count) 290 { 291 int rc; 292 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 293 294 rc = ap_parse_mask_str(buf, zcdndev->perms.adm, 295 AP_DOMAINS, &ap_perms_mutex); 296 if (rc) 297 return rc; 298 299 return count; 300 } 301 302 static DEVICE_ATTR_RW(admask); 303 304 static struct attribute *zcdn_dev_attrs[] = { 305 &dev_attr_ioctlmask.attr, 306 &dev_attr_apmask.attr, 307 &dev_attr_aqmask.attr, 308 &dev_attr_admask.attr, 309 NULL 310 }; 311 312 static struct attribute_group zcdn_dev_attr_group = { 313 .attrs = zcdn_dev_attrs 314 }; 315 316 static const struct attribute_group *zcdn_dev_attr_groups[] = { 317 &zcdn_dev_attr_group, 318 NULL 319 }; 320 321 static ssize_t zcdn_create_store(const struct class *class, 322 const struct class_attribute *attr, 323 const char *buf, size_t count) 324 { 325 int rc; 326 char name[ZCDN_MAX_NAME]; 327 328 strscpy(name, skip_spaces(buf), sizeof(name)); 329 330 rc = zcdn_create(strim(name)); 331 332 return rc ? rc : count; 333 } 334 335 static const struct class_attribute class_attr_zcdn_create = 336 __ATTR(create, 0600, NULL, zcdn_create_store); 337 338 static ssize_t zcdn_destroy_store(const struct class *class, 339 const struct class_attribute *attr, 340 const char *buf, size_t count) 341 { 342 int rc; 343 char name[ZCDN_MAX_NAME]; 344 345 strscpy(name, skip_spaces(buf), sizeof(name)); 346 347 rc = zcdn_destroy(strim(name)); 348 349 return rc ? rc : count; 350 } 351 352 static const struct class_attribute class_attr_zcdn_destroy = 353 __ATTR(destroy, 0600, NULL, zcdn_destroy_store); 354 355 static void zcdn_device_release(struct device *dev) 356 { 357 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 358 359 ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n", 360 __func__, MAJOR(dev->devt), MINOR(dev->devt)); 361 362 kfree(zcdndev); 363 } 364 365 static int zcdn_create(const char *name) 366 { 367 dev_t devt; 368 int i, rc = 0; 369 char nodename[ZCDN_MAX_NAME]; 370 struct zcdn_device *zcdndev; 371 372 if (mutex_lock_interruptible(&ap_perms_mutex)) 373 return -ERESTARTSYS; 374 375 /* check if device node with this name already exists */ 376 if (name[0]) { 377 zcdndev = find_zcdndev_by_name(name); 378 if (zcdndev) { 379 put_device(&zcdndev->device); 380 rc = -EEXIST; 381 goto unlockout; 382 } 383 } 384 385 /* find an unused minor number */ 386 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 387 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 388 zcdndev = find_zcdndev_by_devt(devt); 389 if (zcdndev) 390 put_device(&zcdndev->device); 391 else 392 break; 393 } 394 if (i == ZCRYPT_MAX_MINOR_NODES) { 395 rc = -ENOSPC; 396 goto unlockout; 397 } 398 399 /* alloc and prepare a new zcdn device */ 400 zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL); 401 if (!zcdndev) { 402 rc = -ENOMEM; 403 goto unlockout; 404 } 405 zcdndev->device.release = zcdn_device_release; 406 zcdndev->device.class = zcrypt_class; 407 zcdndev->device.devt = devt; 408 zcdndev->device.groups = zcdn_dev_attr_groups; 409 if (name[0]) 410 strncpy(nodename, name, sizeof(nodename)); 411 else 412 snprintf(nodename, sizeof(nodename), 413 ZCRYPT_NAME "_%d", (int)MINOR(devt)); 414 nodename[sizeof(nodename) - 1] = '\0'; 415 if (dev_set_name(&zcdndev->device, nodename)) { 416 rc = -EINVAL; 417 goto unlockout; 418 } 419 rc = device_register(&zcdndev->device); 420 if (rc) { 421 put_device(&zcdndev->device); 422 goto unlockout; 423 } 424 425 ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n", 426 __func__, MAJOR(devt), MINOR(devt)); 427 428 unlockout: 429 mutex_unlock(&ap_perms_mutex); 430 return rc; 431 } 432 433 static int zcdn_destroy(const char *name) 434 { 435 int rc = 0; 436 struct zcdn_device *zcdndev; 437 438 if (mutex_lock_interruptible(&ap_perms_mutex)) 439 return -ERESTARTSYS; 440 441 /* try to find this zcdn device */ 442 zcdndev = find_zcdndev_by_name(name); 443 if (!zcdndev) { 444 rc = -ENOENT; 445 goto unlockout; 446 } 447 448 /* 449 * The zcdn device is not hard destroyed. It is subject to 450 * reference counting and thus just needs to be unregistered. 451 */ 452 put_device(&zcdndev->device); 453 device_unregister(&zcdndev->device); 454 455 unlockout: 456 mutex_unlock(&ap_perms_mutex); 457 return rc; 458 } 459 460 static void zcdn_destroy_all(void) 461 { 462 int i; 463 dev_t devt; 464 struct zcdn_device *zcdndev; 465 466 mutex_lock(&ap_perms_mutex); 467 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 468 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 469 zcdndev = find_zcdndev_by_devt(devt); 470 if (zcdndev) { 471 put_device(&zcdndev->device); 472 device_unregister(&zcdndev->device); 473 } 474 } 475 mutex_unlock(&ap_perms_mutex); 476 } 477 478 /* 479 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 480 * 481 * This function is not supported beyond zcrypt 1.3.1. 482 */ 483 static ssize_t zcrypt_read(struct file *filp, char __user *buf, 484 size_t count, loff_t *f_pos) 485 { 486 return -EPERM; 487 } 488 489 /* 490 * zcrypt_write(): Not allowed. 491 * 492 * Write is not allowed 493 */ 494 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 495 size_t count, loff_t *f_pos) 496 { 497 return -EPERM; 498 } 499 500 /* 501 * zcrypt_open(): Count number of users. 502 * 503 * Device open function to count number of users. 504 */ 505 static int zcrypt_open(struct inode *inode, struct file *filp) 506 { 507 struct ap_perms *perms = &ap_perms; 508 509 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 510 struct zcdn_device *zcdndev; 511 512 if (mutex_lock_interruptible(&ap_perms_mutex)) 513 return -ERESTARTSYS; 514 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 515 /* find returns a reference, no get_device() needed */ 516 mutex_unlock(&ap_perms_mutex); 517 if (zcdndev) 518 perms = &zcdndev->perms; 519 } 520 filp->private_data = (void *)perms; 521 522 atomic_inc(&zcrypt_open_count); 523 return stream_open(inode, filp); 524 } 525 526 /* 527 * zcrypt_release(): Count number of users. 528 * 529 * Device close function to count number of users. 530 */ 531 static int zcrypt_release(struct inode *inode, struct file *filp) 532 { 533 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 534 struct zcdn_device *zcdndev; 535 536 mutex_lock(&ap_perms_mutex); 537 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 538 mutex_unlock(&ap_perms_mutex); 539 if (zcdndev) { 540 /* 2 puts here: one for find, one for open */ 541 put_device(&zcdndev->device); 542 put_device(&zcdndev->device); 543 } 544 } 545 546 atomic_dec(&zcrypt_open_count); 547 return 0; 548 } 549 550 static inline int zcrypt_check_ioctl(struct ap_perms *perms, 551 unsigned int cmd) 552 { 553 int rc = -EPERM; 554 int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT; 555 556 if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) { 557 if (test_bit_inv(ioctlnr, perms->ioctlm)) 558 rc = 0; 559 } 560 561 if (rc) 562 ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n", 563 __func__, ioctlnr, rc); 564 565 return rc; 566 } 567 568 static inline bool zcrypt_check_card(struct ap_perms *perms, int card) 569 { 570 return test_bit_inv(card, perms->apm) ? true : false; 571 } 572 573 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue) 574 { 575 return test_bit_inv(queue, perms->aqm) ? true : false; 576 } 577 578 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 579 struct zcrypt_queue *zq, 580 struct module **pmod, 581 unsigned int weight) 582 { 583 if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner)) 584 return NULL; 585 zcrypt_queue_get(zq); 586 get_device(&zq->queue->ap_dev.device); 587 atomic_add(weight, &zc->load); 588 atomic_add(weight, &zq->load); 589 zq->request_count++; 590 *pmod = zq->queue->ap_dev.device.driver->owner; 591 return zq; 592 } 593 594 static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 595 struct zcrypt_queue *zq, 596 struct module *mod, 597 unsigned int weight) 598 { 599 zq->request_count--; 600 atomic_sub(weight, &zc->load); 601 atomic_sub(weight, &zq->load); 602 put_device(&zq->queue->ap_dev.device); 603 zcrypt_queue_put(zq); 604 module_put(mod); 605 } 606 607 static inline bool zcrypt_card_compare(struct zcrypt_card *zc, 608 struct zcrypt_card *pref_zc, 609 unsigned int weight, 610 unsigned int pref_weight) 611 { 612 if (!pref_zc) 613 return true; 614 weight += atomic_read(&zc->load); 615 pref_weight += atomic_read(&pref_zc->load); 616 if (weight == pref_weight) 617 return atomic64_read(&zc->card->total_request_count) < 618 atomic64_read(&pref_zc->card->total_request_count); 619 return weight < pref_weight; 620 } 621 622 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, 623 struct zcrypt_queue *pref_zq, 624 unsigned int weight, 625 unsigned int pref_weight) 626 { 627 if (!pref_zq) 628 return true; 629 weight += atomic_read(&zq->load); 630 pref_weight += atomic_read(&pref_zq->load); 631 if (weight == pref_weight) 632 return zq->queue->total_request_count < 633 pref_zq->queue->total_request_count; 634 return weight < pref_weight; 635 } 636 637 /* 638 * zcrypt ioctls. 639 */ 640 static long zcrypt_rsa_modexpo(struct ap_perms *perms, 641 struct zcrypt_track *tr, 642 struct ica_rsa_modexpo *mex) 643 { 644 struct zcrypt_card *zc, *pref_zc; 645 struct zcrypt_queue *zq, *pref_zq; 646 struct ap_message ap_msg; 647 unsigned int wgt = 0, pref_wgt = 0; 648 unsigned int func_code; 649 int cpen, qpen, qid = 0, rc = -ENODEV; 650 struct module *mod; 651 652 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 653 654 ap_init_message(&ap_msg); 655 656 if (mex->outputdatalength < mex->inputdatalength) { 657 func_code = 0; 658 rc = -EINVAL; 659 goto out; 660 } 661 662 /* 663 * As long as outputdatalength is big enough, we can set the 664 * outputdatalength equal to the inputdatalength, since that is the 665 * number of bytes we will copy in any case 666 */ 667 mex->outputdatalength = mex->inputdatalength; 668 669 rc = get_rsa_modex_fc(mex, &func_code); 670 if (rc) 671 goto out; 672 673 pref_zc = NULL; 674 pref_zq = NULL; 675 spin_lock(&zcrypt_list_lock); 676 for_each_zcrypt_card(zc) { 677 /* Check for usable accelerator or CCA card */ 678 if (!zc->online || !zc->card->config || zc->card->chkstop || 679 !(zc->card->functions & 0x18000000)) 680 continue; 681 /* Check for size limits */ 682 if (zc->min_mod_size > mex->inputdatalength || 683 zc->max_mod_size < mex->inputdatalength) 684 continue; 685 /* check if device node has admission for this card */ 686 if (!zcrypt_check_card(perms, zc->card->id)) 687 continue; 688 /* get weight index of the card device */ 689 wgt = zc->speed_rating[func_code]; 690 /* penalty if this msg was previously sent via this card */ 691 cpen = (tr && tr->again_counter && tr->last_qid && 692 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 693 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 694 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 695 continue; 696 for_each_zcrypt_queue(zq, zc) { 697 /* check if device is usable and eligible */ 698 if (!zq->online || !zq->ops->rsa_modexpo || 699 !zq->queue->config || zq->queue->chkstop) 700 continue; 701 /* check if device node has admission for this queue */ 702 if (!zcrypt_check_queue(perms, 703 AP_QID_QUEUE(zq->queue->qid))) 704 continue; 705 /* penalty if the msg was previously sent at this qid */ 706 qpen = (tr && tr->again_counter && tr->last_qid && 707 tr->last_qid == zq->queue->qid) ? 708 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 709 if (!zcrypt_queue_compare(zq, pref_zq, 710 wgt + cpen + qpen, pref_wgt)) 711 continue; 712 pref_zc = zc; 713 pref_zq = zq; 714 pref_wgt = wgt + cpen + qpen; 715 } 716 } 717 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 718 spin_unlock(&zcrypt_list_lock); 719 720 if (!pref_zq) { 721 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", 722 __func__); 723 rc = -ENODEV; 724 goto out; 725 } 726 727 qid = pref_zq->queue->qid; 728 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg); 729 730 spin_lock(&zcrypt_list_lock); 731 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 732 spin_unlock(&zcrypt_list_lock); 733 734 out: 735 ap_release_message(&ap_msg); 736 if (tr) { 737 tr->last_rc = rc; 738 tr->last_qid = qid; 739 } 740 trace_s390_zcrypt_rep(mex, func_code, rc, 741 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 742 return rc; 743 } 744 745 static long zcrypt_rsa_crt(struct ap_perms *perms, 746 struct zcrypt_track *tr, 747 struct ica_rsa_modexpo_crt *crt) 748 { 749 struct zcrypt_card *zc, *pref_zc; 750 struct zcrypt_queue *zq, *pref_zq; 751 struct ap_message ap_msg; 752 unsigned int wgt = 0, pref_wgt = 0; 753 unsigned int func_code; 754 int cpen, qpen, qid = 0, rc = -ENODEV; 755 struct module *mod; 756 757 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 758 759 ap_init_message(&ap_msg); 760 761 if (crt->outputdatalength < crt->inputdatalength) { 762 func_code = 0; 763 rc = -EINVAL; 764 goto out; 765 } 766 767 /* 768 * As long as outputdatalength is big enough, we can set the 769 * outputdatalength equal to the inputdatalength, since that is the 770 * number of bytes we will copy in any case 771 */ 772 crt->outputdatalength = crt->inputdatalength; 773 774 rc = get_rsa_crt_fc(crt, &func_code); 775 if (rc) 776 goto out; 777 778 pref_zc = NULL; 779 pref_zq = NULL; 780 spin_lock(&zcrypt_list_lock); 781 for_each_zcrypt_card(zc) { 782 /* Check for usable accelerator or CCA card */ 783 if (!zc->online || !zc->card->config || zc->card->chkstop || 784 !(zc->card->functions & 0x18000000)) 785 continue; 786 /* Check for size limits */ 787 if (zc->min_mod_size > crt->inputdatalength || 788 zc->max_mod_size < crt->inputdatalength) 789 continue; 790 /* check if device node has admission for this card */ 791 if (!zcrypt_check_card(perms, zc->card->id)) 792 continue; 793 /* get weight index of the card device */ 794 wgt = zc->speed_rating[func_code]; 795 /* penalty if this msg was previously sent via this card */ 796 cpen = (tr && tr->again_counter && tr->last_qid && 797 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 798 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 799 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 800 continue; 801 for_each_zcrypt_queue(zq, zc) { 802 /* check if device is usable and eligible */ 803 if (!zq->online || !zq->ops->rsa_modexpo_crt || 804 !zq->queue->config || zq->queue->chkstop) 805 continue; 806 /* check if device node has admission for this queue */ 807 if (!zcrypt_check_queue(perms, 808 AP_QID_QUEUE(zq->queue->qid))) 809 continue; 810 /* penalty if the msg was previously sent at this qid */ 811 qpen = (tr && tr->again_counter && tr->last_qid && 812 tr->last_qid == zq->queue->qid) ? 813 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 814 if (!zcrypt_queue_compare(zq, pref_zq, 815 wgt + cpen + qpen, pref_wgt)) 816 continue; 817 pref_zc = zc; 818 pref_zq = zq; 819 pref_wgt = wgt + cpen + qpen; 820 } 821 } 822 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 823 spin_unlock(&zcrypt_list_lock); 824 825 if (!pref_zq) { 826 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", 827 __func__); 828 rc = -ENODEV; 829 goto out; 830 } 831 832 qid = pref_zq->queue->qid; 833 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg); 834 835 spin_lock(&zcrypt_list_lock); 836 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 837 spin_unlock(&zcrypt_list_lock); 838 839 out: 840 ap_release_message(&ap_msg); 841 if (tr) { 842 tr->last_rc = rc; 843 tr->last_qid = qid; 844 } 845 trace_s390_zcrypt_rep(crt, func_code, rc, 846 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 847 return rc; 848 } 849 850 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, 851 struct zcrypt_track *tr, 852 struct ica_xcRB *xcrb) 853 { 854 struct zcrypt_card *zc, *pref_zc; 855 struct zcrypt_queue *zq, *pref_zq; 856 struct ap_message ap_msg; 857 unsigned int wgt = 0, pref_wgt = 0; 858 unsigned int func_code; 859 unsigned short *domain, tdom; 860 int cpen, qpen, qid = 0, rc = -ENODEV; 861 struct module *mod; 862 863 trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); 864 865 xcrb->status = 0; 866 ap_init_message(&ap_msg); 867 868 rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 869 if (rc) 870 goto out; 871 872 tdom = *domain; 873 if (perms != &ap_perms && tdom < AP_DOMAINS) { 874 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { 875 if (!test_bit_inv(tdom, perms->adm)) { 876 rc = -ENODEV; 877 goto out; 878 } 879 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { 880 rc = -EOPNOTSUPP; 881 goto out; 882 } 883 } 884 /* 885 * If a valid target domain is set and this domain is NOT a usage 886 * domain but a control only domain, autoselect target domain. 887 */ 888 if (tdom < AP_DOMAINS && 889 !ap_test_config_usage_domain(tdom) && 890 ap_test_config_ctrl_domain(tdom)) 891 tdom = AUTOSEL_DOM; 892 893 pref_zc = NULL; 894 pref_zq = NULL; 895 spin_lock(&zcrypt_list_lock); 896 for_each_zcrypt_card(zc) { 897 /* Check for usable CCA card */ 898 if (!zc->online || !zc->card->config || zc->card->chkstop || 899 !(zc->card->functions & 0x10000000)) 900 continue; 901 /* Check for user selected CCA card */ 902 if (xcrb->user_defined != AUTOSELECT && 903 xcrb->user_defined != zc->card->id) 904 continue; 905 /* check if request size exceeds card max msg size */ 906 if (ap_msg.len > zc->card->maxmsgsize) 907 continue; 908 /* check if device node has admission for this card */ 909 if (!zcrypt_check_card(perms, zc->card->id)) 910 continue; 911 /* get weight index of the card device */ 912 wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 913 /* penalty if this msg was previously sent via this card */ 914 cpen = (tr && tr->again_counter && tr->last_qid && 915 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 916 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 917 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 918 continue; 919 for_each_zcrypt_queue(zq, zc) { 920 /* check for device usable and eligible */ 921 if (!zq->online || !zq->ops->send_cprb || 922 !zq->queue->config || zq->queue->chkstop || 923 (tdom != AUTOSEL_DOM && 924 tdom != AP_QID_QUEUE(zq->queue->qid))) 925 continue; 926 /* check if device node has admission for this queue */ 927 if (!zcrypt_check_queue(perms, 928 AP_QID_QUEUE(zq->queue->qid))) 929 continue; 930 /* penalty if the msg was previously sent at this qid */ 931 qpen = (tr && tr->again_counter && tr->last_qid && 932 tr->last_qid == zq->queue->qid) ? 933 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 934 if (!zcrypt_queue_compare(zq, pref_zq, 935 wgt + cpen + qpen, pref_wgt)) 936 continue; 937 pref_zc = zc; 938 pref_zq = zq; 939 pref_wgt = wgt + cpen + qpen; 940 } 941 } 942 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 943 spin_unlock(&zcrypt_list_lock); 944 945 if (!pref_zq) { 946 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", 947 __func__, xcrb->user_defined, *domain); 948 rc = -ENODEV; 949 goto out; 950 } 951 952 /* in case of auto select, provide the correct domain */ 953 qid = pref_zq->queue->qid; 954 if (*domain == AUTOSEL_DOM) 955 *domain = AP_QID_QUEUE(qid); 956 957 rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg); 958 959 spin_lock(&zcrypt_list_lock); 960 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 961 spin_unlock(&zcrypt_list_lock); 962 963 out: 964 ap_release_message(&ap_msg); 965 if (tr) { 966 tr->last_rc = rc; 967 tr->last_qid = qid; 968 } 969 trace_s390_zcrypt_rep(xcrb, func_code, rc, 970 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 971 return rc; 972 } 973 974 long zcrypt_send_cprb(struct ica_xcRB *xcrb) 975 { 976 return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb); 977 } 978 EXPORT_SYMBOL(zcrypt_send_cprb); 979 980 static bool is_desired_ep11_card(unsigned int dev_id, 981 unsigned short target_num, 982 struct ep11_target_dev *targets) 983 { 984 while (target_num-- > 0) { 985 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP) 986 return true; 987 targets++; 988 } 989 return false; 990 } 991 992 static bool is_desired_ep11_queue(unsigned int dev_qid, 993 unsigned short target_num, 994 struct ep11_target_dev *targets) 995 { 996 int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid); 997 998 while (target_num-- > 0) { 999 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) && 1000 (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM)) 1001 return true; 1002 targets++; 1003 } 1004 return false; 1005 } 1006 1007 static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, 1008 struct zcrypt_track *tr, 1009 struct ep11_urb *xcrb) 1010 { 1011 struct zcrypt_card *zc, *pref_zc; 1012 struct zcrypt_queue *zq, *pref_zq; 1013 struct ep11_target_dev *targets; 1014 unsigned short target_num; 1015 unsigned int wgt = 0, pref_wgt = 0; 1016 unsigned int func_code, domain; 1017 struct ap_message ap_msg; 1018 int cpen, qpen, qid = 0, rc = -ENODEV; 1019 struct module *mod; 1020 1021 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 1022 1023 ap_init_message(&ap_msg); 1024 1025 target_num = (unsigned short)xcrb->targets_num; 1026 1027 /* empty list indicates autoselect (all available targets) */ 1028 targets = NULL; 1029 if (target_num != 0) { 1030 struct ep11_target_dev __user *uptr; 1031 1032 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 1033 if (!targets) { 1034 func_code = 0; 1035 rc = -ENOMEM; 1036 goto out; 1037 } 1038 1039 uptr = (struct ep11_target_dev __force __user *)xcrb->targets; 1040 if (z_copy_from_user(userspace, targets, uptr, 1041 target_num * sizeof(*targets))) { 1042 func_code = 0; 1043 rc = -EFAULT; 1044 goto out_free; 1045 } 1046 } 1047 1048 rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); 1049 if (rc) 1050 goto out_free; 1051 1052 if (perms != &ap_perms && domain < AUTOSEL_DOM) { 1053 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { 1054 if (!test_bit_inv(domain, perms->adm)) { 1055 rc = -ENODEV; 1056 goto out_free; 1057 } 1058 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { 1059 rc = -EOPNOTSUPP; 1060 goto out_free; 1061 } 1062 } 1063 1064 pref_zc = NULL; 1065 pref_zq = NULL; 1066 spin_lock(&zcrypt_list_lock); 1067 for_each_zcrypt_card(zc) { 1068 /* Check for usable EP11 card */ 1069 if (!zc->online || !zc->card->config || zc->card->chkstop || 1070 !(zc->card->functions & 0x04000000)) 1071 continue; 1072 /* Check for user selected EP11 card */ 1073 if (targets && 1074 !is_desired_ep11_card(zc->card->id, target_num, targets)) 1075 continue; 1076 /* check if request size exceeds card max msg size */ 1077 if (ap_msg.len > zc->card->maxmsgsize) 1078 continue; 1079 /* check if device node has admission for this card */ 1080 if (!zcrypt_check_card(perms, zc->card->id)) 1081 continue; 1082 /* get weight index of the card device */ 1083 wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 1084 /* penalty if this msg was previously sent via this card */ 1085 cpen = (tr && tr->again_counter && tr->last_qid && 1086 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 1087 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 1088 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 1089 continue; 1090 for_each_zcrypt_queue(zq, zc) { 1091 /* check if device is usable and eligible */ 1092 if (!zq->online || !zq->ops->send_ep11_cprb || 1093 !zq->queue->config || zq->queue->chkstop || 1094 (targets && 1095 !is_desired_ep11_queue(zq->queue->qid, 1096 target_num, targets))) 1097 continue; 1098 /* check if device node has admission for this queue */ 1099 if (!zcrypt_check_queue(perms, 1100 AP_QID_QUEUE(zq->queue->qid))) 1101 continue; 1102 /* penalty if the msg was previously sent at this qid */ 1103 qpen = (tr && tr->again_counter && tr->last_qid && 1104 tr->last_qid == zq->queue->qid) ? 1105 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 1106 if (!zcrypt_queue_compare(zq, pref_zq, 1107 wgt + cpen + qpen, pref_wgt)) 1108 continue; 1109 pref_zc = zc; 1110 pref_zq = zq; 1111 pref_wgt = wgt + cpen + qpen; 1112 } 1113 } 1114 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 1115 spin_unlock(&zcrypt_list_lock); 1116 1117 if (!pref_zq) { 1118 if (targets && target_num == 1) { 1119 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n", 1120 __func__, (int)targets->ap_id, 1121 (int)targets->dom_id); 1122 } else if (targets) { 1123 ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n", 1124 __func__, (int)target_num); 1125 } else { 1126 ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n", 1127 __func__); 1128 } 1129 rc = -ENODEV; 1130 goto out_free; 1131 } 1132 1133 qid = pref_zq->queue->qid; 1134 rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg); 1135 1136 spin_lock(&zcrypt_list_lock); 1137 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 1138 spin_unlock(&zcrypt_list_lock); 1139 1140 out_free: 1141 kfree(targets); 1142 out: 1143 ap_release_message(&ap_msg); 1144 if (tr) { 1145 tr->last_rc = rc; 1146 tr->last_qid = qid; 1147 } 1148 trace_s390_zcrypt_rep(xcrb, func_code, rc, 1149 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1150 return rc; 1151 } 1152 1153 long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 1154 { 1155 return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb); 1156 } 1157 EXPORT_SYMBOL(zcrypt_send_ep11_cprb); 1158 1159 static long zcrypt_rng(char *buffer) 1160 { 1161 struct zcrypt_card *zc, *pref_zc; 1162 struct zcrypt_queue *zq, *pref_zq; 1163 unsigned int wgt = 0, pref_wgt = 0; 1164 unsigned int func_code; 1165 struct ap_message ap_msg; 1166 unsigned int domain; 1167 int qid = 0, rc = -ENODEV; 1168 struct module *mod; 1169 1170 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 1171 1172 ap_init_message(&ap_msg); 1173 rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain); 1174 if (rc) 1175 goto out; 1176 1177 pref_zc = NULL; 1178 pref_zq = NULL; 1179 spin_lock(&zcrypt_list_lock); 1180 for_each_zcrypt_card(zc) { 1181 /* Check for usable CCA card */ 1182 if (!zc->online || !zc->card->config || zc->card->chkstop || 1183 !(zc->card->functions & 0x10000000)) 1184 continue; 1185 /* get weight index of the card device */ 1186 wgt = zc->speed_rating[func_code]; 1187 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt)) 1188 continue; 1189 for_each_zcrypt_queue(zq, zc) { 1190 /* check if device is usable and eligible */ 1191 if (!zq->online || !zq->ops->rng || 1192 !zq->queue->config || zq->queue->chkstop) 1193 continue; 1194 if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt)) 1195 continue; 1196 pref_zc = zc; 1197 pref_zq = zq; 1198 pref_wgt = wgt; 1199 } 1200 } 1201 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 1202 spin_unlock(&zcrypt_list_lock); 1203 1204 if (!pref_zq) { 1205 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n", 1206 __func__); 1207 rc = -ENODEV; 1208 goto out; 1209 } 1210 1211 qid = pref_zq->queue->qid; 1212 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 1213 1214 spin_lock(&zcrypt_list_lock); 1215 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 1216 spin_unlock(&zcrypt_list_lock); 1217 1218 out: 1219 ap_release_message(&ap_msg); 1220 trace_s390_zcrypt_rep(buffer, func_code, rc, 1221 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1222 return rc; 1223 } 1224 1225 static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) 1226 { 1227 struct zcrypt_card *zc; 1228 struct zcrypt_queue *zq; 1229 struct zcrypt_device_status *stat; 1230 int card, queue; 1231 1232 memset(devstatus, 0, MAX_ZDEV_ENTRIES 1233 * sizeof(struct zcrypt_device_status)); 1234 1235 spin_lock(&zcrypt_list_lock); 1236 for_each_zcrypt_card(zc) { 1237 for_each_zcrypt_queue(zq, zc) { 1238 card = AP_QID_CARD(zq->queue->qid); 1239 if (card >= MAX_ZDEV_CARDIDS) 1240 continue; 1241 queue = AP_QID_QUEUE(zq->queue->qid); 1242 stat = &devstatus[card * AP_DOMAINS + queue]; 1243 stat->hwtype = zc->card->ap_dev.device_type; 1244 stat->functions = zc->card->functions >> 26; 1245 stat->qid = zq->queue->qid; 1246 stat->online = zq->online ? 0x01 : 0x00; 1247 } 1248 } 1249 spin_unlock(&zcrypt_list_lock); 1250 } 1251 1252 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) 1253 { 1254 struct zcrypt_card *zc; 1255 struct zcrypt_queue *zq; 1256 struct zcrypt_device_status_ext *stat; 1257 int card, queue; 1258 1259 memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT 1260 * sizeof(struct zcrypt_device_status_ext)); 1261 1262 spin_lock(&zcrypt_list_lock); 1263 for_each_zcrypt_card(zc) { 1264 for_each_zcrypt_queue(zq, zc) { 1265 card = AP_QID_CARD(zq->queue->qid); 1266 queue = AP_QID_QUEUE(zq->queue->qid); 1267 stat = &devstatus[card * AP_DOMAINS + queue]; 1268 stat->hwtype = zc->card->ap_dev.device_type; 1269 stat->functions = zc->card->functions >> 26; 1270 stat->qid = zq->queue->qid; 1271 stat->online = zq->online ? 0x01 : 0x00; 1272 } 1273 } 1274 spin_unlock(&zcrypt_list_lock); 1275 } 1276 EXPORT_SYMBOL(zcrypt_device_status_mask_ext); 1277 1278 int zcrypt_device_status_ext(int card, int queue, 1279 struct zcrypt_device_status_ext *devstat) 1280 { 1281 struct zcrypt_card *zc; 1282 struct zcrypt_queue *zq; 1283 1284 memset(devstat, 0, sizeof(*devstat)); 1285 1286 spin_lock(&zcrypt_list_lock); 1287 for_each_zcrypt_card(zc) { 1288 for_each_zcrypt_queue(zq, zc) { 1289 if (card == AP_QID_CARD(zq->queue->qid) && 1290 queue == AP_QID_QUEUE(zq->queue->qid)) { 1291 devstat->hwtype = zc->card->ap_dev.device_type; 1292 devstat->functions = zc->card->functions >> 26; 1293 devstat->qid = zq->queue->qid; 1294 devstat->online = zq->online ? 0x01 : 0x00; 1295 spin_unlock(&zcrypt_list_lock); 1296 return 0; 1297 } 1298 } 1299 } 1300 spin_unlock(&zcrypt_list_lock); 1301 1302 return -ENODEV; 1303 } 1304 EXPORT_SYMBOL(zcrypt_device_status_ext); 1305 1306 static void zcrypt_status_mask(char status[], size_t max_adapters) 1307 { 1308 struct zcrypt_card *zc; 1309 struct zcrypt_queue *zq; 1310 int card; 1311 1312 memset(status, 0, max_adapters); 1313 spin_lock(&zcrypt_list_lock); 1314 for_each_zcrypt_card(zc) { 1315 for_each_zcrypt_queue(zq, zc) { 1316 card = AP_QID_CARD(zq->queue->qid); 1317 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1318 card >= max_adapters) 1319 continue; 1320 status[card] = zc->online ? zc->user_space_type : 0x0d; 1321 } 1322 } 1323 spin_unlock(&zcrypt_list_lock); 1324 } 1325 1326 static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) 1327 { 1328 struct zcrypt_card *zc; 1329 struct zcrypt_queue *zq; 1330 int card; 1331 1332 memset(qdepth, 0, max_adapters); 1333 spin_lock(&zcrypt_list_lock); 1334 local_bh_disable(); 1335 for_each_zcrypt_card(zc) { 1336 for_each_zcrypt_queue(zq, zc) { 1337 card = AP_QID_CARD(zq->queue->qid); 1338 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1339 card >= max_adapters) 1340 continue; 1341 spin_lock(&zq->queue->lock); 1342 qdepth[card] = 1343 zq->queue->pendingq_count + 1344 zq->queue->requestq_count; 1345 spin_unlock(&zq->queue->lock); 1346 } 1347 } 1348 local_bh_enable(); 1349 spin_unlock(&zcrypt_list_lock); 1350 } 1351 1352 static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters) 1353 { 1354 struct zcrypt_card *zc; 1355 struct zcrypt_queue *zq; 1356 int card; 1357 u64 cnt; 1358 1359 memset(reqcnt, 0, sizeof(int) * max_adapters); 1360 spin_lock(&zcrypt_list_lock); 1361 local_bh_disable(); 1362 for_each_zcrypt_card(zc) { 1363 for_each_zcrypt_queue(zq, zc) { 1364 card = AP_QID_CARD(zq->queue->qid); 1365 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index || 1366 card >= max_adapters) 1367 continue; 1368 spin_lock(&zq->queue->lock); 1369 cnt = zq->queue->total_request_count; 1370 spin_unlock(&zq->queue->lock); 1371 reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX; 1372 } 1373 } 1374 local_bh_enable(); 1375 spin_unlock(&zcrypt_list_lock); 1376 } 1377 1378 static int zcrypt_pendingq_count(void) 1379 { 1380 struct zcrypt_card *zc; 1381 struct zcrypt_queue *zq; 1382 int pendingq_count; 1383 1384 pendingq_count = 0; 1385 spin_lock(&zcrypt_list_lock); 1386 local_bh_disable(); 1387 for_each_zcrypt_card(zc) { 1388 for_each_zcrypt_queue(zq, zc) { 1389 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1390 continue; 1391 spin_lock(&zq->queue->lock); 1392 pendingq_count += zq->queue->pendingq_count; 1393 spin_unlock(&zq->queue->lock); 1394 } 1395 } 1396 local_bh_enable(); 1397 spin_unlock(&zcrypt_list_lock); 1398 return pendingq_count; 1399 } 1400 1401 static int zcrypt_requestq_count(void) 1402 { 1403 struct zcrypt_card *zc; 1404 struct zcrypt_queue *zq; 1405 int requestq_count; 1406 1407 requestq_count = 0; 1408 spin_lock(&zcrypt_list_lock); 1409 local_bh_disable(); 1410 for_each_zcrypt_card(zc) { 1411 for_each_zcrypt_queue(zq, zc) { 1412 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1413 continue; 1414 spin_lock(&zq->queue->lock); 1415 requestq_count += zq->queue->requestq_count; 1416 spin_unlock(&zq->queue->lock); 1417 } 1418 } 1419 local_bh_enable(); 1420 spin_unlock(&zcrypt_list_lock); 1421 return requestq_count; 1422 } 1423 1424 static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg) 1425 { 1426 int rc; 1427 struct zcrypt_track tr; 1428 struct ica_rsa_modexpo mex; 1429 struct ica_rsa_modexpo __user *umex = (void __user *)arg; 1430 1431 memset(&tr, 0, sizeof(tr)); 1432 if (copy_from_user(&mex, umex, sizeof(mex))) 1433 return -EFAULT; 1434 1435 do { 1436 rc = zcrypt_rsa_modexpo(perms, &tr, &mex); 1437 if (rc == -EAGAIN) 1438 tr.again_counter++; 1439 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1440 /* on failure: retry once again after a requested rescan */ 1441 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1442 do { 1443 rc = zcrypt_rsa_modexpo(perms, &tr, &mex); 1444 if (rc == -EAGAIN) 1445 tr.again_counter++; 1446 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1447 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1448 rc = -EIO; 1449 if (rc) { 1450 ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc); 1451 return rc; 1452 } 1453 return put_user(mex.outputdatalength, &umex->outputdatalength); 1454 } 1455 1456 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) 1457 { 1458 int rc; 1459 struct zcrypt_track tr; 1460 struct ica_rsa_modexpo_crt crt; 1461 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg; 1462 1463 memset(&tr, 0, sizeof(tr)); 1464 if (copy_from_user(&crt, ucrt, sizeof(crt))) 1465 return -EFAULT; 1466 1467 do { 1468 rc = zcrypt_rsa_crt(perms, &tr, &crt); 1469 if (rc == -EAGAIN) 1470 tr.again_counter++; 1471 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1472 /* on failure: retry once again after a requested rescan */ 1473 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1474 do { 1475 rc = zcrypt_rsa_crt(perms, &tr, &crt); 1476 if (rc == -EAGAIN) 1477 tr.again_counter++; 1478 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1479 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1480 rc = -EIO; 1481 if (rc) { 1482 ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc); 1483 return rc; 1484 } 1485 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 1486 } 1487 1488 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) 1489 { 1490 int rc; 1491 struct ica_xcRB xcrb; 1492 struct zcrypt_track tr; 1493 struct ica_xcRB __user *uxcrb = (void __user *)arg; 1494 1495 memset(&tr, 0, sizeof(tr)); 1496 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1497 return -EFAULT; 1498 1499 do { 1500 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1501 if (rc == -EAGAIN) 1502 tr.again_counter++; 1503 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1504 /* on failure: retry once again after a requested rescan */ 1505 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1506 do { 1507 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1508 if (rc == -EAGAIN) 1509 tr.again_counter++; 1510 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1511 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1512 rc = -EIO; 1513 if (rc) 1514 ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n", 1515 rc, xcrb.status); 1516 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 1517 return -EFAULT; 1518 return rc; 1519 } 1520 1521 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) 1522 { 1523 int rc; 1524 struct ep11_urb xcrb; 1525 struct zcrypt_track tr; 1526 struct ep11_urb __user *uxcrb = (void __user *)arg; 1527 1528 memset(&tr, 0, sizeof(tr)); 1529 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1530 return -EFAULT; 1531 1532 do { 1533 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1534 if (rc == -EAGAIN) 1535 tr.again_counter++; 1536 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1537 /* on failure: retry once again after a requested rescan */ 1538 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1539 do { 1540 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1541 if (rc == -EAGAIN) 1542 tr.again_counter++; 1543 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1544 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1545 rc = -EIO; 1546 if (rc) 1547 ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc); 1548 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 1549 return -EFAULT; 1550 return rc; 1551 } 1552 1553 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 1554 unsigned long arg) 1555 { 1556 int rc; 1557 struct ap_perms *perms = 1558 (struct ap_perms *)filp->private_data; 1559 1560 rc = zcrypt_check_ioctl(perms, cmd); 1561 if (rc) 1562 return rc; 1563 1564 switch (cmd) { 1565 case ICARSAMODEXPO: 1566 return icarsamodexpo_ioctl(perms, arg); 1567 case ICARSACRT: 1568 return icarsacrt_ioctl(perms, arg); 1569 case ZSECSENDCPRB: 1570 return zsecsendcprb_ioctl(perms, arg); 1571 case ZSENDEP11CPRB: 1572 return zsendep11cprb_ioctl(perms, arg); 1573 case ZCRYPT_DEVICE_STATUS: { 1574 struct zcrypt_device_status_ext *device_status; 1575 size_t total_size = MAX_ZDEV_ENTRIES_EXT 1576 * sizeof(struct zcrypt_device_status_ext); 1577 1578 device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT, 1579 sizeof(struct zcrypt_device_status_ext), 1580 GFP_KERNEL); 1581 if (!device_status) 1582 return -ENOMEM; 1583 zcrypt_device_status_mask_ext(device_status); 1584 if (copy_to_user((char __user *)arg, device_status, 1585 total_size)) 1586 rc = -EFAULT; 1587 kvfree(device_status); 1588 return rc; 1589 } 1590 case ZCRYPT_STATUS_MASK: { 1591 char status[AP_DEVICES]; 1592 1593 zcrypt_status_mask(status, AP_DEVICES); 1594 if (copy_to_user((char __user *)arg, status, sizeof(status))) 1595 return -EFAULT; 1596 return 0; 1597 } 1598 case ZCRYPT_QDEPTH_MASK: { 1599 char qdepth[AP_DEVICES]; 1600 1601 zcrypt_qdepth_mask(qdepth, AP_DEVICES); 1602 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth))) 1603 return -EFAULT; 1604 return 0; 1605 } 1606 case ZCRYPT_PERDEV_REQCNT: { 1607 u32 *reqcnt; 1608 1609 reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL); 1610 if (!reqcnt) 1611 return -ENOMEM; 1612 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); 1613 if (copy_to_user((int __user *)arg, reqcnt, 1614 sizeof(u32) * AP_DEVICES)) 1615 rc = -EFAULT; 1616 kfree(reqcnt); 1617 return rc; 1618 } 1619 case Z90STAT_REQUESTQ_COUNT: 1620 return put_user(zcrypt_requestq_count(), (int __user *)arg); 1621 case Z90STAT_PENDINGQ_COUNT: 1622 return put_user(zcrypt_pendingq_count(), (int __user *)arg); 1623 case Z90STAT_TOTALOPEN_COUNT: 1624 return put_user(atomic_read(&zcrypt_open_count), 1625 (int __user *)arg); 1626 case Z90STAT_DOMAIN_INDEX: 1627 return put_user(ap_domain_index, (int __user *)arg); 1628 /* 1629 * Deprecated ioctls 1630 */ 1631 case ZDEVICESTATUS: { 1632 /* the old ioctl supports only 64 adapters */ 1633 struct zcrypt_device_status *device_status; 1634 size_t total_size = MAX_ZDEV_ENTRIES 1635 * sizeof(struct zcrypt_device_status); 1636 1637 device_status = kzalloc(total_size, GFP_KERNEL); 1638 if (!device_status) 1639 return -ENOMEM; 1640 zcrypt_device_status_mask(device_status); 1641 if (copy_to_user((char __user *)arg, device_status, 1642 total_size)) 1643 rc = -EFAULT; 1644 kfree(device_status); 1645 return rc; 1646 } 1647 case Z90STAT_STATUS_MASK: { 1648 /* the old ioctl supports only 64 adapters */ 1649 char status[MAX_ZDEV_CARDIDS]; 1650 1651 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS); 1652 if (copy_to_user((char __user *)arg, status, sizeof(status))) 1653 return -EFAULT; 1654 return 0; 1655 } 1656 case Z90STAT_QDEPTH_MASK: { 1657 /* the old ioctl supports only 64 adapters */ 1658 char qdepth[MAX_ZDEV_CARDIDS]; 1659 1660 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS); 1661 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth))) 1662 return -EFAULT; 1663 return 0; 1664 } 1665 case Z90STAT_PERDEV_REQCNT: { 1666 /* the old ioctl supports only 64 adapters */ 1667 u32 reqcnt[MAX_ZDEV_CARDIDS]; 1668 1669 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); 1670 if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt))) 1671 return -EFAULT; 1672 return 0; 1673 } 1674 /* unknown ioctl number */ 1675 default: 1676 ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd); 1677 return -ENOIOCTLCMD; 1678 } 1679 } 1680 1681 #ifdef CONFIG_COMPAT 1682 /* 1683 * ioctl32 conversion routines 1684 */ 1685 struct compat_ica_rsa_modexpo { 1686 compat_uptr_t inputdata; 1687 unsigned int inputdatalength; 1688 compat_uptr_t outputdata; 1689 unsigned int outputdatalength; 1690 compat_uptr_t b_key; 1691 compat_uptr_t n_modulus; 1692 }; 1693 1694 static long trans_modexpo32(struct ap_perms *perms, struct file *filp, 1695 unsigned int cmd, unsigned long arg) 1696 { 1697 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 1698 struct compat_ica_rsa_modexpo mex32; 1699 struct ica_rsa_modexpo mex64; 1700 struct zcrypt_track tr; 1701 long rc; 1702 1703 memset(&tr, 0, sizeof(tr)); 1704 if (copy_from_user(&mex32, umex32, sizeof(mex32))) 1705 return -EFAULT; 1706 mex64.inputdata = compat_ptr(mex32.inputdata); 1707 mex64.inputdatalength = mex32.inputdatalength; 1708 mex64.outputdata = compat_ptr(mex32.outputdata); 1709 mex64.outputdatalength = mex32.outputdatalength; 1710 mex64.b_key = compat_ptr(mex32.b_key); 1711 mex64.n_modulus = compat_ptr(mex32.n_modulus); 1712 do { 1713 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); 1714 if (rc == -EAGAIN) 1715 tr.again_counter++; 1716 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1717 /* on failure: retry once again after a requested rescan */ 1718 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1719 do { 1720 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); 1721 if (rc == -EAGAIN) 1722 tr.again_counter++; 1723 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1724 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1725 rc = -EIO; 1726 if (rc) 1727 return rc; 1728 return put_user(mex64.outputdatalength, 1729 &umex32->outputdatalength); 1730 } 1731 1732 struct compat_ica_rsa_modexpo_crt { 1733 compat_uptr_t inputdata; 1734 unsigned int inputdatalength; 1735 compat_uptr_t outputdata; 1736 unsigned int outputdatalength; 1737 compat_uptr_t bp_key; 1738 compat_uptr_t bq_key; 1739 compat_uptr_t np_prime; 1740 compat_uptr_t nq_prime; 1741 compat_uptr_t u_mult_inv; 1742 }; 1743 1744 static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, 1745 unsigned int cmd, unsigned long arg) 1746 { 1747 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 1748 struct compat_ica_rsa_modexpo_crt crt32; 1749 struct ica_rsa_modexpo_crt crt64; 1750 struct zcrypt_track tr; 1751 long rc; 1752 1753 memset(&tr, 0, sizeof(tr)); 1754 if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) 1755 return -EFAULT; 1756 crt64.inputdata = compat_ptr(crt32.inputdata); 1757 crt64.inputdatalength = crt32.inputdatalength; 1758 crt64.outputdata = compat_ptr(crt32.outputdata); 1759 crt64.outputdatalength = crt32.outputdatalength; 1760 crt64.bp_key = compat_ptr(crt32.bp_key); 1761 crt64.bq_key = compat_ptr(crt32.bq_key); 1762 crt64.np_prime = compat_ptr(crt32.np_prime); 1763 crt64.nq_prime = compat_ptr(crt32.nq_prime); 1764 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 1765 do { 1766 rc = zcrypt_rsa_crt(perms, &tr, &crt64); 1767 if (rc == -EAGAIN) 1768 tr.again_counter++; 1769 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1770 /* on failure: retry once again after a requested rescan */ 1771 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1772 do { 1773 rc = zcrypt_rsa_crt(perms, &tr, &crt64); 1774 if (rc == -EAGAIN) 1775 tr.again_counter++; 1776 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1777 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1778 rc = -EIO; 1779 if (rc) 1780 return rc; 1781 return put_user(crt64.outputdatalength, 1782 &ucrt32->outputdatalength); 1783 } 1784 1785 struct compat_ica_xcrb { 1786 unsigned short agent_ID; 1787 unsigned int user_defined; 1788 unsigned short request_ID; 1789 unsigned int request_control_blk_length; 1790 unsigned char padding1[16 - sizeof(compat_uptr_t)]; 1791 compat_uptr_t request_control_blk_addr; 1792 unsigned int request_data_length; 1793 char padding2[16 - sizeof(compat_uptr_t)]; 1794 compat_uptr_t request_data_address; 1795 unsigned int reply_control_blk_length; 1796 char padding3[16 - sizeof(compat_uptr_t)]; 1797 compat_uptr_t reply_control_blk_addr; 1798 unsigned int reply_data_length; 1799 char padding4[16 - sizeof(compat_uptr_t)]; 1800 compat_uptr_t reply_data_addr; 1801 unsigned short priority_window; 1802 unsigned int status; 1803 } __packed; 1804 1805 static long trans_xcrb32(struct ap_perms *perms, struct file *filp, 1806 unsigned int cmd, unsigned long arg) 1807 { 1808 struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); 1809 struct compat_ica_xcrb xcrb32; 1810 struct zcrypt_track tr; 1811 struct ica_xcRB xcrb64; 1812 long rc; 1813 1814 memset(&tr, 0, sizeof(tr)); 1815 if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32))) 1816 return -EFAULT; 1817 xcrb64.agent_ID = xcrb32.agent_ID; 1818 xcrb64.user_defined = xcrb32.user_defined; 1819 xcrb64.request_ID = xcrb32.request_ID; 1820 xcrb64.request_control_blk_length = 1821 xcrb32.request_control_blk_length; 1822 xcrb64.request_control_blk_addr = 1823 compat_ptr(xcrb32.request_control_blk_addr); 1824 xcrb64.request_data_length = 1825 xcrb32.request_data_length; 1826 xcrb64.request_data_address = 1827 compat_ptr(xcrb32.request_data_address); 1828 xcrb64.reply_control_blk_length = 1829 xcrb32.reply_control_blk_length; 1830 xcrb64.reply_control_blk_addr = 1831 compat_ptr(xcrb32.reply_control_blk_addr); 1832 xcrb64.reply_data_length = xcrb32.reply_data_length; 1833 xcrb64.reply_data_addr = 1834 compat_ptr(xcrb32.reply_data_addr); 1835 xcrb64.priority_window = xcrb32.priority_window; 1836 xcrb64.status = xcrb32.status; 1837 do { 1838 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); 1839 if (rc == -EAGAIN) 1840 tr.again_counter++; 1841 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1842 /* on failure: retry once again after a requested rescan */ 1843 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1844 do { 1845 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); 1846 if (rc == -EAGAIN) 1847 tr.again_counter++; 1848 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1849 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1850 rc = -EIO; 1851 xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length; 1852 xcrb32.reply_data_length = xcrb64.reply_data_length; 1853 xcrb32.status = xcrb64.status; 1854 if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32))) 1855 return -EFAULT; 1856 return rc; 1857 } 1858 1859 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1860 unsigned long arg) 1861 { 1862 int rc; 1863 struct ap_perms *perms = 1864 (struct ap_perms *)filp->private_data; 1865 1866 rc = zcrypt_check_ioctl(perms, cmd); 1867 if (rc) 1868 return rc; 1869 1870 if (cmd == ICARSAMODEXPO) 1871 return trans_modexpo32(perms, filp, cmd, arg); 1872 if (cmd == ICARSACRT) 1873 return trans_modexpo_crt32(perms, filp, cmd, arg); 1874 if (cmd == ZSECSENDCPRB) 1875 return trans_xcrb32(perms, filp, cmd, arg); 1876 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1877 } 1878 #endif 1879 1880 /* 1881 * Misc device file operations. 1882 */ 1883 static const struct file_operations zcrypt_fops = { 1884 .owner = THIS_MODULE, 1885 .read = zcrypt_read, 1886 .write = zcrypt_write, 1887 .unlocked_ioctl = zcrypt_unlocked_ioctl, 1888 #ifdef CONFIG_COMPAT 1889 .compat_ioctl = zcrypt_compat_ioctl, 1890 #endif 1891 .open = zcrypt_open, 1892 .release = zcrypt_release, 1893 .llseek = no_llseek, 1894 }; 1895 1896 /* 1897 * Misc device. 1898 */ 1899 static struct miscdevice zcrypt_misc_device = { 1900 .minor = MISC_DYNAMIC_MINOR, 1901 .name = "z90crypt", 1902 .fops = &zcrypt_fops, 1903 }; 1904 1905 static int zcrypt_rng_device_count; 1906 static u32 *zcrypt_rng_buffer; 1907 static int zcrypt_rng_buffer_index; 1908 static DEFINE_MUTEX(zcrypt_rng_mutex); 1909 1910 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) 1911 { 1912 int rc; 1913 1914 /* 1915 * We don't need locking here because the RNG API guarantees serialized 1916 * read method calls. 1917 */ 1918 if (zcrypt_rng_buffer_index == 0) { 1919 rc = zcrypt_rng((char *)zcrypt_rng_buffer); 1920 /* on failure: retry once again after a requested rescan */ 1921 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1922 rc = zcrypt_rng((char *)zcrypt_rng_buffer); 1923 if (rc < 0) 1924 return -EIO; 1925 zcrypt_rng_buffer_index = rc / sizeof(*data); 1926 } 1927 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; 1928 return sizeof(*data); 1929 } 1930 1931 static struct hwrng zcrypt_rng_dev = { 1932 .name = "zcrypt", 1933 .data_read = zcrypt_rng_data_read, 1934 .quality = 990, 1935 }; 1936 1937 int zcrypt_rng_device_add(void) 1938 { 1939 int rc = 0; 1940 1941 mutex_lock(&zcrypt_rng_mutex); 1942 if (zcrypt_rng_device_count == 0) { 1943 zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL); 1944 if (!zcrypt_rng_buffer) { 1945 rc = -ENOMEM; 1946 goto out; 1947 } 1948 zcrypt_rng_buffer_index = 0; 1949 rc = hwrng_register(&zcrypt_rng_dev); 1950 if (rc) 1951 goto out_free; 1952 zcrypt_rng_device_count = 1; 1953 } else { 1954 zcrypt_rng_device_count++; 1955 } 1956 mutex_unlock(&zcrypt_rng_mutex); 1957 return 0; 1958 1959 out_free: 1960 free_page((unsigned long)zcrypt_rng_buffer); 1961 out: 1962 mutex_unlock(&zcrypt_rng_mutex); 1963 return rc; 1964 } 1965 1966 void zcrypt_rng_device_remove(void) 1967 { 1968 mutex_lock(&zcrypt_rng_mutex); 1969 zcrypt_rng_device_count--; 1970 if (zcrypt_rng_device_count == 0) { 1971 hwrng_unregister(&zcrypt_rng_dev); 1972 free_page((unsigned long)zcrypt_rng_buffer); 1973 } 1974 mutex_unlock(&zcrypt_rng_mutex); 1975 } 1976 1977 /* 1978 * Wait until the zcrypt api is operational. 1979 * The AP bus scan and the binding of ap devices to device drivers is 1980 * an asynchronous job. This function waits until these initial jobs 1981 * are done and so the zcrypt api should be ready to serve crypto 1982 * requests - if there are resources available. The function uses an 1983 * internal timeout of 60s. The very first caller will either wait for 1984 * ap bus bindings complete or the timeout happens. This state will be 1985 * remembered for further callers which will only be blocked until a 1986 * decision is made (timeout or bindings complete). 1987 * On timeout -ETIME is returned, on success the return value is 0. 1988 */ 1989 int zcrypt_wait_api_operational(void) 1990 { 1991 static DEFINE_MUTEX(zcrypt_wait_api_lock); 1992 static int zcrypt_wait_api_state; 1993 int rc; 1994 1995 rc = mutex_lock_interruptible(&zcrypt_wait_api_lock); 1996 if (rc) 1997 return rc; 1998 1999 switch (zcrypt_wait_api_state) { 2000 case 0: 2001 /* initial state, invoke wait for the ap bus complete */ 2002 rc = ap_wait_init_apqn_bindings_complete( 2003 msecs_to_jiffies(60 * 1000)); 2004 switch (rc) { 2005 case 0: 2006 /* ap bus bindings are complete */ 2007 zcrypt_wait_api_state = 1; 2008 break; 2009 case -EINTR: 2010 /* interrupted, go back to caller */ 2011 break; 2012 case -ETIME: 2013 /* timeout */ 2014 ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n", 2015 __func__); 2016 zcrypt_wait_api_state = -ETIME; 2017 break; 2018 default: 2019 /* other failure */ 2020 ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n", 2021 __func__, rc); 2022 break; 2023 } 2024 break; 2025 case 1: 2026 /* a previous caller already found ap bus bindings complete */ 2027 rc = 0; 2028 break; 2029 default: 2030 /* a previous caller had timeout or other failure */ 2031 rc = zcrypt_wait_api_state; 2032 break; 2033 } 2034 2035 mutex_unlock(&zcrypt_wait_api_lock); 2036 2037 return rc; 2038 } 2039 EXPORT_SYMBOL(zcrypt_wait_api_operational); 2040 2041 int __init zcrypt_debug_init(void) 2042 { 2043 zcrypt_dbf_info = debug_register("zcrypt", 2, 1, 2044 DBF_MAX_SPRINTF_ARGS * sizeof(long)); 2045 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 2046 debug_set_level(zcrypt_dbf_info, DBF_ERR); 2047 2048 return 0; 2049 } 2050 2051 void zcrypt_debug_exit(void) 2052 { 2053 debug_unregister(zcrypt_dbf_info); 2054 } 2055 2056 static int __init zcdn_init(void) 2057 { 2058 int rc; 2059 2060 /* create a new class 'zcrypt' */ 2061 zcrypt_class = class_create(ZCRYPT_NAME); 2062 if (IS_ERR(zcrypt_class)) { 2063 rc = PTR_ERR(zcrypt_class); 2064 goto out_class_create_failed; 2065 } 2066 zcrypt_class->dev_release = zcdn_device_release; 2067 2068 /* alloc device minor range */ 2069 rc = alloc_chrdev_region(&zcrypt_devt, 2070 0, ZCRYPT_MAX_MINOR_NODES, 2071 ZCRYPT_NAME); 2072 if (rc) 2073 goto out_alloc_chrdev_failed; 2074 2075 cdev_init(&zcrypt_cdev, &zcrypt_fops); 2076 zcrypt_cdev.owner = THIS_MODULE; 2077 rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2078 if (rc) 2079 goto out_cdev_add_failed; 2080 2081 /* need some class specific sysfs attributes */ 2082 rc = class_create_file(zcrypt_class, &class_attr_zcdn_create); 2083 if (rc) 2084 goto out_class_create_file_1_failed; 2085 rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy); 2086 if (rc) 2087 goto out_class_create_file_2_failed; 2088 2089 return 0; 2090 2091 out_class_create_file_2_failed: 2092 class_remove_file(zcrypt_class, &class_attr_zcdn_create); 2093 out_class_create_file_1_failed: 2094 cdev_del(&zcrypt_cdev); 2095 out_cdev_add_failed: 2096 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2097 out_alloc_chrdev_failed: 2098 class_destroy(zcrypt_class); 2099 out_class_create_failed: 2100 return rc; 2101 } 2102 2103 static void zcdn_exit(void) 2104 { 2105 class_remove_file(zcrypt_class, &class_attr_zcdn_create); 2106 class_remove_file(zcrypt_class, &class_attr_zcdn_destroy); 2107 zcdn_destroy_all(); 2108 cdev_del(&zcrypt_cdev); 2109 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2110 class_destroy(zcrypt_class); 2111 } 2112 2113 /* 2114 * zcrypt_api_init(): Module initialization. 2115 * 2116 * The module initialization code. 2117 */ 2118 int __init zcrypt_api_init(void) 2119 { 2120 int rc; 2121 2122 rc = zcrypt_debug_init(); 2123 if (rc) 2124 goto out; 2125 2126 rc = zcdn_init(); 2127 if (rc) 2128 goto out; 2129 2130 /* Register the request sprayer. */ 2131 rc = misc_register(&zcrypt_misc_device); 2132 if (rc < 0) 2133 goto out_misc_register_failed; 2134 2135 zcrypt_msgtype6_init(); 2136 zcrypt_msgtype50_init(); 2137 2138 return 0; 2139 2140 out_misc_register_failed: 2141 zcdn_exit(); 2142 zcrypt_debug_exit(); 2143 out: 2144 return rc; 2145 } 2146 2147 /* 2148 * zcrypt_api_exit(): Module termination. 2149 * 2150 * The module termination code. 2151 */ 2152 void __exit zcrypt_api_exit(void) 2153 { 2154 zcdn_exit(); 2155 misc_deregister(&zcrypt_misc_device); 2156 zcrypt_msgtype6_exit(); 2157 zcrypt_msgtype50_exit(); 2158 zcrypt_ccamisc_exit(); 2159 zcrypt_ep11misc_exit(); 2160 zcrypt_debug_exit(); 2161 } 2162 2163 module_init(zcrypt_api_init); 2164 module_exit(zcrypt_api_exit); 2165