// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright IBM Corp. 2001, 2018
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/*
 * Process a rescan of the transport layer.
 *
 * Returns 1, if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		atomic_inc(&zcrypt_rescan_count);
		ap_bus_force_rescan();
		ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
				atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if (zops->variant == variant &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);

/*
 * Multi device nodes extension functions.
 */

struct zcdn_device;

static struct class *zcrypt_class;
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);
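
/*
 * Illustrative usage sketch (not part of the driver): assuming
 * ZCRYPT_NAME expands to "zcrypt", additional device nodes can be
 * created and destroyed at runtime through the class attributes
 * defined below, e.g. from a shell:
 *
 *	echo my_node > /sys/class/zcrypt/create
 *	echo my_node > /sys/class/zcrypt/destroy
 *
 * Each node appears as a char device (e.g. /dev/my_node) with its own
 * ioctlmask, apmask, aqmask and admask attributes restricting which
 * ioctls, cards and (usage/control) domains it may use.
 */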

/*
 * Find zcdn device by name.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev = class_find_device_by_name(zcrypt_class, name);

	return dev ? to_zcdn_dev(dev) : NULL;
}

/*
 * Find zcdn device by devt value.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev = class_find_device_by_devt(zcrypt_class, devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}

static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);

static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);

static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);
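
/*
 * Note (illustrative): the mask attributes here are parsed by
 * ap_parse_mask_str() and thus should follow the same conventions as
 * the AP bus apmask/aqmask attributes: an absolute mask written as a
 * hex string (MSB-0 bit numbering, so the most significant bit of the
 * mask corresponds to card/domain/ioctl nr 0), or, assuming
 * ap_parse_mask_str() matches the AP bus syntax, a relative
 * '+bitnr'/'-bitnr' list. The exact syntax is defined by
 * ap_parse_mask_str() in the AP bus code.
 */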

static ssize_t admask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}

static ssize_t admask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(admask);

static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	&dev_attr_admask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};

static ssize_t zcdn_create_store(const struct class *class,
				 const struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);

static ssize_t zcdn_destroy_store(const struct class *class,
				  const struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);

static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
			__func__, MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}

static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		rc = dev_set_name(&zcdndev->device, "%s", name);
	else
		rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
	if (rc) {
		kfree(zcdndev);
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
			__func__, MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_perms_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_perms_mutex);
}

/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/*
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_perms_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
	filp->private_data = (void *)perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}

/*
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_perms_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
				__func__, ioctlnr, rc);

	return rc;
}
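
/*
 * Note (illustrative): like the other permission bitmaps, ioctlm is
 * tested with test_bit_inv(), i.e. s390 MSB-0 bit numbering: ioctl nr
 * n maps to bit n counted from the most significant bit of each
 * bitmap word. A mask whose printed hex representation starts with
 * 'f' digits therefore enables the lowest ioctl nrs first.
 */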

static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}

static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}

static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
		return NULL;
	zcrypt_card_get(zc);
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.device.driver->owner;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	zcrypt_card_put(zc);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return true;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) <
			atomic64_read(&pref_zc->card->total_request_count);
	return weight < pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return true;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count <
			pref_zq->queue->total_request_count;
	return weight < pref_weight;
}
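
/*
 * Scheduling sketch (summary of the request paths below): each request
 * type walks all cards and queues under zcrypt_list_lock and keeps the
 * (card, queue) pair with the lowest effective load:
 *
 *	wgt        - speed rating of the card for the function code
 *	cpen, qpen - penalties applied on retry (tr->again_counter set)
 *		     so a retried msg prefers another card/queue
 *	winner     - minimal load + wgt + cpen + qpen per the compare
 *		     helpers above
 *
 * zcrypt_pick_queue() then takes module, card and queue references and
 * accounts the weight, which zcrypt_drop_queue() releases again after
 * the msgtype code has processed the request.
 */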

/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	ap_init_message(&ap_msg);

	if (mex->outputdatalength < mex->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo ||
			    !zq->queue->config || zq->queue->chkstop)
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
			       __func__);
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
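
/*
 * Userspace usage sketch for the ioctl served by the function above
 * (illustrative only; field semantics as defined by struct
 * ica_rsa_modexpo in the zcrypt uapi header, error handling omitted):
 *
 *	struct ica_rsa_modexpo mex = {
 *		.inputdata	  = msg,	// inputdatalength bytes
 *		.inputdatalength  = modlen,
 *		.outputdata	  = result,	// >= inputdatalength bytes
 *		.outputdatalength = modlen,
 *		.b_key		  = exponent,
 *		.n_modulus	  = modulus,
 *	};
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	int rc = ioctl(fd, ICARSAMODEXPO, &mex);
 */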

static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	ap_init_message(&ap_msg);

	if (crt->outputdatalength < crt->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
			    !zq->queue->config || zq->queue->chkstop)
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
			       __func__);
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	unsigned short *domain, tdom;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);

	xcrb->status = 0;
	ap_init_message(&ap_msg);

	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	tdom = *domain;
	if (perms != &ap_perms && tdom < AP_DOMAINS) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(tdom, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, autoselect target domain.
	 */
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom))
		tdom = AUTOSEL_DOM;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcrb->user_defined != AUTOSELECT &&
		    xcrb->user_defined != zc->card->id)
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check for device usable and eligible */
			if (!zq->online || !zq->ops->send_cprb ||
			    !zq->queue->config || zq->queue->chkstop ||
			    (tdom != AUTOSEL_DOM &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
			       __func__, xcrb->user_defined, *domain);
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == AUTOSEL_DOM)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
	return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
}
EXPORT_SYMBOL(zcrypt_send_cprb);
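
/*
 * Note: zcrypt_send_cprb() is the kernel-internal entry point (used
 * e.g. by the CCA misc code). With userspace == false the buffers
 * referenced by the xcRB are kernel addresses and the
 * z_copy_from_user()/z_copy_to_user() helpers in the msgtype code fall
 * back to plain memcpy(); the default ap_perms set applies, and
 * tr == NULL disables the retry tracking.
 */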

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);

	while (target_num-- > 0) {
		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
			return true;
		targets++;
	}
	return false;
}

static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code, domain;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short)xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			func_code = 0;
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
		if (z_copy_from_user(userspace, targets, uptr,
				     target_num * sizeof(*targets))) {
			func_code = 0;
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out_free;

	if (perms != &ap_perms && domain < AUTOSEL_DOM) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out_free;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out_free;
		}
	}

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable EP11 card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !zq->queue->config || zq->queue->chkstop ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
				       __func__, (int)targets->ap_id,
				       (int)targets->dom_id);
		} else if (targets) {
			ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
				       __func__, (int)target_num);
		} else {
			ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
				       __func__);
		}
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
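
/*
 * Example (illustrative): an EP11 request can be pinned to a single
 * APQN or left to autoselection via the target list in the urb; field
 * names as defined by struct ep11_urb / struct ep11_target_dev in the
 * zcrypt uapi header:
 *
 *	struct ep11_target_dev tgt = {
 *		.ap_id	= 2,	// card 2, or AUTOSEL_AP for any card
 *		.dom_id	= 17,	// domain 17, or AUTOSEL_DOM for any domain
 *	};
 *	urb.targets_num = 1;
 *	urb.targets = (u64)(unsigned long)&tgt;
 *
 * An empty list (targets_num == 0) addresses all available targets.
 */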

static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !zq->queue->config || zq->queue->chkstop)
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
			       __func__);
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
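
/*
 * Note on the layout filled in above and below: the status arrays are
 * indexed by APQN, i.e. the entry for card c, domain q sits at
 * devstatus[c * AP_DOMAINS + q]. The old (non-ext) variant only covers
 * cards 0..MAX_ZDEV_CARDIDS-1, the ext variant all
 * MAX_ZDEV_CARDIDS_EXT cards.
 */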

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);

int zcrypt_device_status_ext(int card, int queue,
			     struct zcrypt_device_status_ext *devstat)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	memset(devstat, 0, sizeof(*devstat));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (card == AP_QID_CARD(zq->queue->qid) &&
			    queue == AP_QID_QUEUE(zq->queue->qid)) {
				devstat->hwtype = zc->card->ap_dev.device_type;
				devstat->functions = zc->card->functions >> 26;
				devstat->qid = zq->queue->qid;
				devstat->online = zq->online ? 0x01 : 0x00;
				spin_unlock(&zcrypt_list_lock);
				return 0;
			}
		}
	}
	spin_unlock(&zcrypt_list_lock);

	return -ENODEV;
}
EXPORT_SYMBOL(zcrypt_device_status_ext);

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}
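
/*
 * Common retry pattern of the ioctl wrappers below (sketch): a request
 * is repeated while the transport layer reports -EAGAIN, at most
 * TRACK_AGAIN_MAX times, with tr.again_counter feeding the card/queue
 * penalties in the selection loops above so that retries spread to
 * other devices. If the first round ends with -ENODEV, one AP bus
 * rescan may be triggered and the loop runs once more. A request still
 * returning -EAGAIN after all attempts fails with -EIO.
 */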
TRACK_AGAIN_MAX); 1446 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1447 rc = -EIO; 1448 if (rc) { 1449 ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc); 1450 return rc; 1451 } 1452 return put_user(mex.outputdatalength, &umex->outputdatalength); 1453 } 1454 1455 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) 1456 { 1457 int rc; 1458 struct zcrypt_track tr; 1459 struct ica_rsa_modexpo_crt crt; 1460 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg; 1461 1462 memset(&tr, 0, sizeof(tr)); 1463 if (copy_from_user(&crt, ucrt, sizeof(crt))) 1464 return -EFAULT; 1465 1466 do { 1467 rc = zcrypt_rsa_crt(perms, &tr, &crt); 1468 if (rc == -EAGAIN) 1469 tr.again_counter++; 1470 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1471 /* on failure: retry once again after a requested rescan */ 1472 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1473 do { 1474 rc = zcrypt_rsa_crt(perms, &tr, &crt); 1475 if (rc == -EAGAIN) 1476 tr.again_counter++; 1477 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1478 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1479 rc = -EIO; 1480 if (rc) { 1481 ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc); 1482 return rc; 1483 } 1484 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 1485 } 1486 1487 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) 1488 { 1489 int rc; 1490 struct ica_xcRB xcrb; 1491 struct zcrypt_track tr; 1492 struct ica_xcRB __user *uxcrb = (void __user *)arg; 1493 1494 memset(&tr, 0, sizeof(tr)); 1495 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1496 return -EFAULT; 1497 1498 do { 1499 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1500 if (rc == -EAGAIN) 1501 tr.again_counter++; 1502 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1503 /* on failure: retry once again after a requested rescan */ 1504 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1505 do { 1506 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); 1507 if (rc == -EAGAIN) 1508 tr.again_counter++; 1509 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1510 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1511 rc = -EIO; 1512 if (rc) 1513 ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n", 1514 rc, xcrb.status); 1515 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 1516 return -EFAULT; 1517 return rc; 1518 } 1519 1520 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) 1521 { 1522 int rc; 1523 struct ep11_urb xcrb; 1524 struct zcrypt_track tr; 1525 struct ep11_urb __user *uxcrb = (void __user *)arg; 1526 1527 memset(&tr, 0, sizeof(tr)); 1528 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1529 return -EFAULT; 1530 1531 do { 1532 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1533 if (rc == -EAGAIN) 1534 tr.again_counter++; 1535 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1536 /* on failure: retry once again after a requested rescan */ 1537 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1538 do { 1539 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1540 if (rc == -EAGAIN) 1541 tr.again_counter++; 1542 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1543 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1544 rc = -EIO; 1545 if (rc) 1546 ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc); 1547 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 1548 return -EFAULT; 1549 return rc; 1550 } 1551 1552 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kvmalloc_array(MAX_ZDEV_ENTRIES_EXT,
					       sizeof(struct zcrypt_device_status_ext),
					       GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
		if (rc == -EAGAIN)
			tr.again_counter++;
	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
			if (rc == -EAGAIN)
				tr.again_counter++;
		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt64);
		if (rc == -EAGAIN)
			tr.again_counter++;
	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt64);
			if (rc == -EAGAIN)
				tr.again_counter++;
		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcrb {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __packed;

static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
	struct compat_ica_xcrb xcrb32;
	struct zcrypt_track tr;
	struct ica_xcRB xcrb64;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
		return -EFAULT;
	xcrb64.agent_ID = xcrb32.agent_ID;
	xcrb64.user_defined = xcrb32.user_defined;
	xcrb64.request_ID = xcrb32.request_ID;
	xcrb64.request_control_blk_length =
		xcrb32.request_control_blk_length;
	xcrb64.request_control_blk_addr =
		compat_ptr(xcrb32.request_control_blk_addr);
	xcrb64.request_data_length =
		xcrb32.request_data_length;
	xcrb64.request_data_address =
		compat_ptr(xcrb32.request_data_address);
	xcrb64.reply_control_blk_length =
		xcrb32.reply_control_blk_length;
	xcrb64.reply_control_blk_addr =
		compat_ptr(xcrb32.reply_control_blk_addr);
	xcrb64.reply_data_length = xcrb32.reply_data_length;
	xcrb64.reply_data_addr =
		compat_ptr(xcrb32.reply_data_addr);
	xcrb64.priority_window = xcrb32.priority_window;
	xcrb64.status = xcrb32.status;
	do {
		rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
		if (rc == -EAGAIN)
			tr.again_counter++;
	} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
			if (rc == -EAGAIN)
				tr.again_counter++;
		} while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
	xcrb32.reply_data_length = xcrb64.reply_data_length;
	xcrb32.status = xcrb64.status;
	if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
		return -EFAULT;
	return rc;
}
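
/*
 * Note (illustrative): the padding arrays in struct compat_ica_xcrb
 * are sized 16 - sizeof(compat_uptr_t) so that each padding + pointer
 * pair occupies 16 bytes, mirroring the corresponding padding in
 * struct ica_xcRB; trans_xcrb32() above then only needs to widen the
 * pointers and copy the scalar fields.
 */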

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(perms, filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(perms, filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcrb32(perms, filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

/*
 * Wait until the zcrypt api is operational.
 * The AP bus scan and the binding of ap devices to device drivers is
 * an asynchronous job. This function waits until these initial jobs
 * are done and so the zcrypt api should be ready to serve crypto
 * requests - if there are resources available. The function uses an
 * internal timeout of 60s. The very first caller will either wait for
 * ap bus bindings complete or the timeout happens. This state will be
 * remembered for further callers which will only be blocked until a
 * decision is made (timeout or bindings complete).
 * On timeout -ETIME is returned, on success the return value is 0.
 */
int zcrypt_wait_api_operational(void)
{
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, invoke wait for the ap bus complete */
		rc = ap_wait_init_apqn_bindings_complete(
			msecs_to_jiffies(60 * 1000));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
				       __func__, rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	zcrypt_class = class_create(ZCRYPT_NAME);
	if (IS_ERR(zcrypt_class)) {
		rc = PTR_ERR(zcrypt_class);
		goto out_class_create_failed;
	}
	zcrypt_class->dev_release = zcdn_device_release;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_destroy(zcrypt_class);
out_class_create_failed:
	return rc;
}

static void zcdn_exit(void)
{
	class_remove_file(zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_destroy(zcrypt_class);
}

/*
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	rc = zcdn_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
	zcdn_exit();
	zcrypt_debug_exit();
out:
	return rc;
}

/*
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	zcdn_exit();
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_ccamisc_exit();
	zcrypt_ep11misc_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);