// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright IBM Corp. 2001, 2018
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/*
 * Process a rescan of the transport layer.
 *
 * Returns 1 if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		atomic_inc(&zcrypt_rescan_count);
		ap_bus_force_rescan();
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_read(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
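
/*
 * Illustrative sketch (not code from this file): a message type module
 * announces its ops to the api layer at module init, and the api looks
 * the ops up by name and variant. Everything beyond the .name and
 * .variant fields used by the lookup above is an assumption here; see
 * the msgtype modules for the real zcrypt_ops definitions:
 *
 *	static struct zcrypt_ops example_ops = {
 *		.name	 = "EXAMPLE",
 *		.variant = 0,
 *	};
 *
 *	zcrypt_msgtype_register(&example_ops);
 *	...
 *	zops = zcrypt_msgtype("EXAMPLE", 0);
 */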

/*
 * Multi device nodes extension functions.
 */

#ifdef CONFIG_ZCRYPT_MULTIDEVNODES

struct zcdn_device;

static struct class *zcrypt_class;
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32

static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);

/* helper function, matches the name for find_zcdndev_by_name() */
static int __match_zcdn_name(struct device *dev, const void *data)
{
	return strcmp(dev_name(dev), (const char *)data) == 0;
}

/* helper function, matches the devt value for find_zcdndev_by_devt() */
static int __match_zcdn_devt(struct device *dev, const void *data)
{
	return dev->devt == *((dev_t *) data);
}

/*
 * Find zcdn device by name.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev =
		class_find_device(zcrypt_class, NULL,
				  (void *) name,
				  __match_zcdn_name);

	return dev ? to_zcdn_dev(dev) : NULL;
}

/*
 * Find zcdn device by devt value.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev =
		class_find_device(zcrypt_class, NULL,
				  (void *) &devt,
				  __match_zcdn_devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}

static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	int i, rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	buf[0] = '0';
	buf[1] = 'x';
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		snprintf(buf + 2 + 2 * i * sizeof(long),
			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
			 "%016lx", zcdndev->perms.ioctlm[i]);
	buf[2 + 2 * i * sizeof(long)] = '\n';
	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
	rc = 2 + 2 * i * sizeof(long) + 1;

	mutex_unlock(&ap_perms_mutex);

	return rc;
}

static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);
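
/*
 * Illustrative sketch: the mask attributes read back as a "0x..." hex
 * string and are updated via ap_parse_mask_str(). Assuming the AP bus
 * mask syntax (an absolute "0x..." value or relative "+"/"-" bit lists),
 * a node's usable domains could e.g. be narrowed from user space with:
 *
 *	echo +0-15 > /sys/class/zcrypt/my_node/aqmask
 */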

static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	int i, rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	buf[0] = '0';
	buf[1] = 'x';
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		snprintf(buf + 2 + 2 * i * sizeof(long),
			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
			 "%016lx", zcdndev->perms.apm[i]);
	buf[2 + 2 * i * sizeof(long)] = '\n';
	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
	rc = 2 + 2 * i * sizeof(long) + 1;

	mutex_unlock(&ap_perms_mutex);

	return rc;
}

static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);

static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	int i, rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	buf[0] = '0';
	buf[1] = 'x';
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		snprintf(buf + 2 + 2 * i * sizeof(long),
			 PAGE_SIZE - 2 - 2 * i * sizeof(long),
			 "%016lx", zcdndev->perms.aqm[i]);
	buf[2 + 2 * i * sizeof(long)] = '\n';
	buf[2 + 2 * i * sizeof(long) + 1] = '\0';
	rc = 2 + 2 * i * sizeof(long) + 1;

	mutex_unlock(&ap_perms_mutex);

	return rc;
}

static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);

static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};
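
/*
 * Illustrative sketch, assuming the class is named "zcrypt" (ZCRYPT_NAME):
 * the class attributes defined below let user space create and remove
 * additional zcrypt device nodes by name:
 *
 *	echo my_node > /sys/class/zcrypt/create
 *	echo my_node > /sys/class/zcrypt/destroy
 */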

static ssize_t zcdn_create_store(struct class *class,
				 struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strncpy(name, skip_spaces(buf), sizeof(name));
	name[sizeof(name) - 1] = '\0';

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);

static ssize_t zcdn_destroy_store(struct class *class,
				  struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strncpy(name, skip_spaces(buf), sizeof(name));
	name[sizeof(name) - 1] = '\0';

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);

static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n",
		   MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}

static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	char nodename[ZCDN_MAX_NAME];
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		strncpy(nodename, name, sizeof(nodename));
	else
		snprintf(nodename, sizeof(nodename),
			 ZCRYPT_NAME "_%d", (int) MINOR(devt));
	nodename[sizeof(nodename) - 1] = '\0';
	/* use a "%s" format to avoid treating a user-given name as format */
	if (dev_set_name(&zcdndev->device, "%s", nodename)) {
		kfree(zcdndev);
		rc = -EINVAL;
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n",
		   MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}

static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_perms_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_perms_mutex);
}

#endif

/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_perms_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
#endif
	filp->private_data = (void *) perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_perms_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}
#endif

	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF(DBF_WARN,
			   "ioctl check failed: ioctlnr=0x%04x rc=%d\n",
			   ioctlnr, rc);

	return rc;
}
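
/*
 * Illustrative example for the check above: the ioctl number is the
 * _IOC_NR() part of the command word and indexes the per-open ioctl
 * bitmask. A file opened through a zcdn node whose ioctlmask has the
 * bit for e.g. ICARSAMODEXPO cleared gets -EPERM for that ioctl; files
 * opened through the default misc device use the global ap_perms.
 */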

static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}

static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}

/*
 * Take references on the chosen queue and account the request weight
 * to the card and queue load counters.
 */
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.drv->driver.owner;
	return zq;
}

/* Undo the accounting and references taken by zcrypt_pick_queue(). */
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic_read(&zc->card->total_request_count) >
			atomic_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
			pref_zq->queue->total_request_count;
	return weight > pref_weight;
}
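
/*
 * Illustrative example of the selection logic built on the two compare
 * helpers above: a candidate is skipped when its weight plus current
 * load exceeds that of the preferred device. With two otherwise idle
 * cards rated 10 and 12, requests go to the card rated 10 until queued
 * work makes 10 + load exceed 12; on equal sums the device with the
 * smaller total request count is preferred.
 */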

/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
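
/*
 * Illustrative user space sketch (not driver code) for the modexpo path
 * above, using the uapi field names also visible in the compat struct
 * further down:
 *
 *	struct ica_rsa_modexpo mex = {
 *		.inputdata	  = input,
 *		.inputdatalength  = len,
 *		.outputdata	  = output,
 *		.outputdatalength = len,
 *		.b_key		  = exponent,
 *		.n_modulus	  = modulus,
 *	};
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	int rc = ioctl(fd, ICARSAMODEXPO, &mex);
 */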

static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long _zcrypt_send_cprb(struct ap_perms *perms,
			      struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain, tdom;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	xcRB->status = 0;
	ap_init_message(&ap_msg);
	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, use the default domain as target.
	 * Note tdom is unsigned short and thus never negative, so only the
	 * upper bound needs checking here.
	 */
	tdom = *domain;
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom) &&
	    ap_domain_index >= 0)
		tdom = ap_domain_index;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    (tdom != (unsigned short) AUTOSELECT &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	return _zcrypt_send_cprb(&ap_perms, xcRB);
}
EXPORT_SYMBOL(zcrypt_send_cprb);

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}
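
/*
 * Illustrative sketch: an EP11 request may carry an explicit target
 * list; an empty list (targets_num == 0) means autoselect among all
 * eligible EP11 queues. Assuming the uapi layout where ep11_urb.targets
 * holds the user space address of the list, adapter 2 / domain 9 only
 * would be addressed as:
 *
 *	struct ep11_target_dev tgt = { .ap_id = 2, .dom_id = 9 };
 *
 *	urb.targets_num = 1;
 *	urb.targets = (uint64_t)(unsigned long) &tgt;
 */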

static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
				  struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			func_code = 0;
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			func_code = 0;
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
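
/*
 * Illustrative example: both status arrays above are indexed by card and
 * domain, so the entry for AP queue 05.0011 (card 0x05, domain 0x11) is
 * devstatus[0x05 * AP_DOMAINS + 0x11].
 */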

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			reqcnt[card] = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}
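
/*
 * Note on the ioctl handler below: each request is retried as long as
 * the lower layer reports -EAGAIN, and after -ENODEV one more retry
 * round is attempted if a transport layer rescan was pending (see
 * zcrypt_process_rescan() above). The same pattern recurs in the
 * compat handlers further down.
 */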

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *) filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(perms, &mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(perms, &mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(perms, &crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(perms, &crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = _zcrypt_send_cprb(perms, &xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = _zcrypt_send_cprb(perms, &xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSECSENDCPRB rc=%d status=0x%x\n",
				   rc, xcRB.status);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(perms, &xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(perms, &xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		int *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		/* copy the whole array, not just sizeof() the pointer */
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		int reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(perms, &mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(perms, &mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}
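
/*
 * Note on the compat handling: 32-bit user pointers arrive as
 * compat_uptr_t and are widened with compat_ptr() before the request is
 * handed to the common code. In struct compat_ica_xcRB below, the
 * padding arrays of 16 - sizeof(compat_uptr_t) bytes keep the field
 * offsets identical for 31- and 64-bit callers.
 */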

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(perms, &crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(perms, &crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __packed;

static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = _zcrypt_send_cprb(perms, &xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = _zcrypt_send_cprb(perms, &xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *) filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(perms, filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(perms, filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(perms, filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
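
/*
 * Illustrative sketch: with the hwrng_seed module parameter set to 0
 * (e.g. zcrypt.hwrng_seed=0 on the kernel command line, assuming the
 * module is named zcrypt), the hwrng device above registers with
 * quality 0 and the hwrng core will not use it for automatic seeding
 * of the kernel entropy pool.
 */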

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

#ifdef CONFIG_ZCRYPT_MULTIDEVNODES

static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	zcrypt_class = class_create(THIS_MODULE, ZCRYPT_NAME);
	if (IS_ERR(zcrypt_class)) {
		rc = PTR_ERR(zcrypt_class);
		goto out_class_create_failed;
	}
	zcrypt_class->dev_release = zcdn_device_release;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_destroy(zcrypt_class);
out_class_create_failed:
	return rc;
}

static void zcdn_exit(void)
{
	class_remove_file(zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_destroy(zcrypt_class);
}

#endif

/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
	rc = zcdn_init();
	if (rc)
		goto out_zcdn_init_failed;
#endif

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
	zcdn_exit();
out_zcdn_init_failed:
#endif
	zcrypt_debug_exit();
out:
	return rc;
}

/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
	zcdn_exit();
#endif
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);