1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * zcrypt 2.1.0 4 * 5 * Copyright IBM Corp. 2001, 2012 6 * Author(s): Robert Burroughs 7 * Eric Rossman (edrossma@us.ibm.com) 8 * Cornelia Huck <cornelia.huck@de.ibm.com> 9 * 10 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 11 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 12 * Ralph Wuerthner <rwuerthn@de.ibm.com> 13 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 14 */ 15 16 #include <linux/module.h> 17 #include <linux/init.h> 18 #include <linux/interrupt.h> 19 #include <linux/miscdevice.h> 20 #include <linux/fs.h> 21 #include <linux/proc_fs.h> 22 #include <linux/seq_file.h> 23 #include <linux/compat.h> 24 #include <linux/slab.h> 25 #include <linux/atomic.h> 26 #include <linux/uaccess.h> 27 #include <linux/hw_random.h> 28 #include <linux/debugfs.h> 29 #include <asm/debug.h> 30 31 #define CREATE_TRACE_POINTS 32 #include <asm/trace/zcrypt.h> 33 34 #include "zcrypt_api.h" 35 #include "zcrypt_debug.h" 36 37 #include "zcrypt_msgtype6.h" 38 #include "zcrypt_msgtype50.h" 39 40 /* 41 * Module description. 42 */ 43 MODULE_AUTHOR("IBM Corporation"); 44 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ 45 "Copyright IBM Corp. 2001, 2012"); 46 MODULE_LICENSE("GPL"); 47 48 /* 49 * zcrypt tracepoint functions 50 */ 51 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); 52 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); 53 54 static int zcrypt_hwrng_seed = 1; 55 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); 56 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); 57 58 DEFINE_SPINLOCK(zcrypt_list_lock); 59 LIST_HEAD(zcrypt_card_list); 60 int zcrypt_device_count; 61 62 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 63 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 64 65 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 66 EXPORT_SYMBOL(zcrypt_rescan_req); 67 68 static LIST_HEAD(zcrypt_ops_list); 69 70 /* Zcrypt related debug feature stuff. */ 71 debug_info_t *zcrypt_dbf_info; 72 73 /** 74 * Process a rescan of the transport layer. 75 * 76 * Returns 1, if the rescan has been processed, otherwise 0. 77 */ 78 static inline int zcrypt_process_rescan(void) 79 { 80 if (atomic_read(&zcrypt_rescan_req)) { 81 atomic_set(&zcrypt_rescan_req, 0); 82 atomic_inc(&zcrypt_rescan_count); 83 ap_bus_force_rescan(); 84 ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n", 85 atomic_inc_return(&zcrypt_rescan_count)); 86 return 1; 87 } 88 return 0; 89 } 90 91 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 92 { 93 list_add_tail(&zops->list, &zcrypt_ops_list); 94 } 95 96 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 97 { 98 list_del_init(&zops->list); 99 } 100 101 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) 102 { 103 struct zcrypt_ops *zops; 104 105 list_for_each_entry(zops, &zcrypt_ops_list, list) 106 if ((zops->variant == variant) && 107 (!strncmp(zops->name, name, sizeof(zops->name)))) 108 return zops; 109 return NULL; 110 } 111 EXPORT_SYMBOL(zcrypt_msgtype); 112 113 /** 114 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 115 * 116 * This function is not supported beyond zcrypt 1.3.1. 117 */ 118 static ssize_t zcrypt_read(struct file *filp, char __user *buf, 119 size_t count, loff_t *f_pos) 120 { 121 return -EPERM; 122 } 123 124 /** 125 * zcrypt_write(): Not allowed. 126 * 127 * Write is is not allowed 128 */ 129 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 130 size_t count, loff_t *f_pos) 131 { 132 return -EPERM; 133 } 134 135 /** 136 * zcrypt_open(): Count number of users. 137 * 138 * Device open function to count number of users. 139 */ 140 static int zcrypt_open(struct inode *inode, struct file *filp) 141 { 142 atomic_inc(&zcrypt_open_count); 143 return nonseekable_open(inode, filp); 144 } 145 146 /** 147 * zcrypt_release(): Count number of users. 148 * 149 * Device close function to count number of users. 150 */ 151 static int zcrypt_release(struct inode *inode, struct file *filp) 152 { 153 atomic_dec(&zcrypt_open_count); 154 return 0; 155 } 156 157 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 158 struct zcrypt_queue *zq, 159 unsigned int weight) 160 { 161 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 162 return NULL; 163 zcrypt_queue_get(zq); 164 get_device(&zq->queue->ap_dev.device); 165 atomic_add(weight, &zc->load); 166 atomic_add(weight, &zq->load); 167 zq->request_count++; 168 return zq; 169 } 170 171 static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 172 struct zcrypt_queue *zq, 173 unsigned int weight) 174 { 175 struct module *mod = zq->queue->ap_dev.drv->driver.owner; 176 177 zq->request_count--; 178 atomic_sub(weight, &zc->load); 179 atomic_sub(weight, &zq->load); 180 put_device(&zq->queue->ap_dev.device); 181 zcrypt_queue_put(zq); 182 module_put(mod); 183 } 184 185 static inline bool zcrypt_card_compare(struct zcrypt_card *zc, 186 struct zcrypt_card *pref_zc, 187 unsigned weight, unsigned pref_weight) 188 { 189 if (!pref_zc) 190 return false; 191 weight += atomic_read(&zc->load); 192 pref_weight += atomic_read(&pref_zc->load); 193 if (weight == pref_weight) 194 return atomic_read(&zc->card->total_request_count) > 195 atomic_read(&pref_zc->card->total_request_count); 196 return weight > pref_weight; 197 } 198 199 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, 200 struct zcrypt_queue *pref_zq, 201 unsigned weight, unsigned pref_weight) 202 { 203 if (!pref_zq) 204 return false; 205 weight += atomic_read(&zq->load); 206 pref_weight += atomic_read(&pref_zq->load); 207 if (weight == pref_weight) 208 return zq->queue->total_request_count > 209 pref_zq->queue->total_request_count; 210 return weight > pref_weight; 211 } 212 213 /* 214 * zcrypt ioctls. 215 */ 216 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 217 { 218 struct zcrypt_card *zc, *pref_zc; 219 struct zcrypt_queue *zq, *pref_zq; 220 unsigned int weight, pref_weight; 221 unsigned int func_code; 222 int qid = 0, rc = -ENODEV; 223 224 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 225 226 if (mex->outputdatalength < mex->inputdatalength) { 227 rc = -EINVAL; 228 goto out; 229 } 230 231 /* 232 * As long as outputdatalength is big enough, we can set the 233 * outputdatalength equal to the inputdatalength, since that is the 234 * number of bytes we will copy in any case 235 */ 236 mex->outputdatalength = mex->inputdatalength; 237 238 rc = get_rsa_modex_fc(mex, &func_code); 239 if (rc) 240 goto out; 241 242 pref_zc = NULL; 243 pref_zq = NULL; 244 spin_lock(&zcrypt_list_lock); 245 for_each_zcrypt_card(zc) { 246 /* Check for online accelarator and CCA cards */ 247 if (!zc->online || !(zc->card->functions & 0x18000000)) 248 continue; 249 /* Check for size limits */ 250 if (zc->min_mod_size > mex->inputdatalength || 251 zc->max_mod_size < mex->inputdatalength) 252 continue; 253 /* get weight index of the card device */ 254 weight = zc->speed_rating[func_code]; 255 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 256 continue; 257 for_each_zcrypt_queue(zq, zc) { 258 /* check if device is online and eligible */ 259 if (!zq->online || !zq->ops->rsa_modexpo) 260 continue; 261 if (zcrypt_queue_compare(zq, pref_zq, 262 weight, pref_weight)) 263 continue; 264 pref_zc = zc; 265 pref_zq = zq; 266 pref_weight = weight; 267 } 268 } 269 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 270 spin_unlock(&zcrypt_list_lock); 271 272 if (!pref_zq) { 273 rc = -ENODEV; 274 goto out; 275 } 276 277 qid = pref_zq->queue->qid; 278 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 279 280 spin_lock(&zcrypt_list_lock); 281 zcrypt_drop_queue(pref_zc, pref_zq, weight); 282 spin_unlock(&zcrypt_list_lock); 283 284 out: 285 trace_s390_zcrypt_rep(mex, func_code, rc, 286 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 287 return rc; 288 } 289 290 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 291 { 292 struct zcrypt_card *zc, *pref_zc; 293 struct zcrypt_queue *zq, *pref_zq; 294 unsigned int weight, pref_weight; 295 unsigned int func_code; 296 int qid = 0, rc = -ENODEV; 297 298 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 299 300 if (crt->outputdatalength < crt->inputdatalength) { 301 rc = -EINVAL; 302 goto out; 303 } 304 305 /* 306 * As long as outputdatalength is big enough, we can set the 307 * outputdatalength equal to the inputdatalength, since that is the 308 * number of bytes we will copy in any case 309 */ 310 crt->outputdatalength = crt->inputdatalength; 311 312 rc = get_rsa_crt_fc(crt, &func_code); 313 if (rc) 314 goto out; 315 316 pref_zc = NULL; 317 pref_zq = NULL; 318 spin_lock(&zcrypt_list_lock); 319 for_each_zcrypt_card(zc) { 320 /* Check for online accelarator and CCA cards */ 321 if (!zc->online || !(zc->card->functions & 0x18000000)) 322 continue; 323 /* Check for size limits */ 324 if (zc->min_mod_size > crt->inputdatalength || 325 zc->max_mod_size < crt->inputdatalength) 326 continue; 327 /* get weight index of the card device */ 328 weight = zc->speed_rating[func_code]; 329 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 330 continue; 331 for_each_zcrypt_queue(zq, zc) { 332 /* check if device is online and eligible */ 333 if (!zq->online || !zq->ops->rsa_modexpo_crt) 334 continue; 335 if (zcrypt_queue_compare(zq, pref_zq, 336 weight, pref_weight)) 337 continue; 338 pref_zc = zc; 339 pref_zq = zq; 340 pref_weight = weight; 341 } 342 } 343 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 344 spin_unlock(&zcrypt_list_lock); 345 346 if (!pref_zq) { 347 rc = -ENODEV; 348 goto out; 349 } 350 351 qid = pref_zq->queue->qid; 352 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 353 354 spin_lock(&zcrypt_list_lock); 355 zcrypt_drop_queue(pref_zc, pref_zq, weight); 356 spin_unlock(&zcrypt_list_lock); 357 358 out: 359 trace_s390_zcrypt_rep(crt, func_code, rc, 360 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 361 return rc; 362 } 363 364 long zcrypt_send_cprb(struct ica_xcRB *xcRB) 365 { 366 struct zcrypt_card *zc, *pref_zc; 367 struct zcrypt_queue *zq, *pref_zq; 368 struct ap_message ap_msg; 369 unsigned int weight, pref_weight; 370 unsigned int func_code; 371 unsigned short *domain; 372 int qid = 0, rc = -ENODEV; 373 374 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 375 376 rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); 377 if (rc) 378 goto out; 379 380 pref_zc = NULL; 381 pref_zq = NULL; 382 spin_lock(&zcrypt_list_lock); 383 for_each_zcrypt_card(zc) { 384 /* Check for online CCA cards */ 385 if (!zc->online || !(zc->card->functions & 0x10000000)) 386 continue; 387 /* Check for user selected CCA card */ 388 if (xcRB->user_defined != AUTOSELECT && 389 xcRB->user_defined != zc->card->id) 390 continue; 391 /* get weight index of the card device */ 392 weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 393 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 394 continue; 395 for_each_zcrypt_queue(zq, zc) { 396 /* check if device is online and eligible */ 397 if (!zq->online || 398 !zq->ops->send_cprb || 399 ((*domain != (unsigned short) AUTOSELECT) && 400 (*domain != AP_QID_QUEUE(zq->queue->qid)))) 401 continue; 402 if (zcrypt_queue_compare(zq, pref_zq, 403 weight, pref_weight)) 404 continue; 405 pref_zc = zc; 406 pref_zq = zq; 407 pref_weight = weight; 408 } 409 } 410 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 411 spin_unlock(&zcrypt_list_lock); 412 413 if (!pref_zq) { 414 rc = -ENODEV; 415 goto out; 416 } 417 418 /* in case of auto select, provide the correct domain */ 419 qid = pref_zq->queue->qid; 420 if (*domain == (unsigned short) AUTOSELECT) 421 *domain = AP_QID_QUEUE(qid); 422 423 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 424 425 spin_lock(&zcrypt_list_lock); 426 zcrypt_drop_queue(pref_zc, pref_zq, weight); 427 spin_unlock(&zcrypt_list_lock); 428 429 out: 430 trace_s390_zcrypt_rep(xcRB, func_code, rc, 431 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 432 return rc; 433 } 434 EXPORT_SYMBOL(zcrypt_send_cprb); 435 436 static bool is_desired_ep11_card(unsigned int dev_id, 437 unsigned short target_num, 438 struct ep11_target_dev *targets) 439 { 440 while (target_num-- > 0) { 441 if (dev_id == targets->ap_id) 442 return true; 443 targets++; 444 } 445 return false; 446 } 447 448 static bool is_desired_ep11_queue(unsigned int dev_qid, 449 unsigned short target_num, 450 struct ep11_target_dev *targets) 451 { 452 while (target_num-- > 0) { 453 if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) 454 return true; 455 targets++; 456 } 457 return false; 458 } 459 460 static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 461 { 462 struct zcrypt_card *zc, *pref_zc; 463 struct zcrypt_queue *zq, *pref_zq; 464 struct ep11_target_dev *targets; 465 unsigned short target_num; 466 unsigned int weight, pref_weight; 467 unsigned int func_code; 468 struct ap_message ap_msg; 469 int qid = 0, rc = -ENODEV; 470 471 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 472 473 target_num = (unsigned short) xcrb->targets_num; 474 475 /* empty list indicates autoselect (all available targets) */ 476 targets = NULL; 477 if (target_num != 0) { 478 struct ep11_target_dev __user *uptr; 479 480 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 481 if (!targets) { 482 rc = -ENOMEM; 483 goto out; 484 } 485 486 uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 487 if (copy_from_user(targets, uptr, 488 target_num * sizeof(*targets))) { 489 rc = -EFAULT; 490 goto out; 491 } 492 } 493 494 rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); 495 if (rc) 496 goto out_free; 497 498 pref_zc = NULL; 499 pref_zq = NULL; 500 spin_lock(&zcrypt_list_lock); 501 for_each_zcrypt_card(zc) { 502 /* Check for online EP11 cards */ 503 if (!zc->online || !(zc->card->functions & 0x04000000)) 504 continue; 505 /* Check for user selected EP11 card */ 506 if (targets && 507 !is_desired_ep11_card(zc->card->id, target_num, targets)) 508 continue; 509 /* get weight index of the card device */ 510 weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 511 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 512 continue; 513 for_each_zcrypt_queue(zq, zc) { 514 /* check if device is online and eligible */ 515 if (!zq->online || 516 !zq->ops->send_ep11_cprb || 517 (targets && 518 !is_desired_ep11_queue(zq->queue->qid, 519 target_num, targets))) 520 continue; 521 if (zcrypt_queue_compare(zq, pref_zq, 522 weight, pref_weight)) 523 continue; 524 pref_zc = zc; 525 pref_zq = zq; 526 pref_weight = weight; 527 } 528 } 529 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 530 spin_unlock(&zcrypt_list_lock); 531 532 if (!pref_zq) { 533 rc = -ENODEV; 534 goto out_free; 535 } 536 537 qid = pref_zq->queue->qid; 538 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 539 540 spin_lock(&zcrypt_list_lock); 541 zcrypt_drop_queue(pref_zc, pref_zq, weight); 542 spin_unlock(&zcrypt_list_lock); 543 544 out_free: 545 kfree(targets); 546 out: 547 trace_s390_zcrypt_rep(xcrb, func_code, rc, 548 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 549 return rc; 550 } 551 552 static long zcrypt_rng(char *buffer) 553 { 554 struct zcrypt_card *zc, *pref_zc; 555 struct zcrypt_queue *zq, *pref_zq; 556 unsigned int weight, pref_weight; 557 unsigned int func_code; 558 struct ap_message ap_msg; 559 unsigned int domain; 560 int qid = 0, rc = -ENODEV; 561 562 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 563 564 rc = get_rng_fc(&ap_msg, &func_code, &domain); 565 if (rc) 566 goto out; 567 568 pref_zc = NULL; 569 pref_zq = NULL; 570 spin_lock(&zcrypt_list_lock); 571 for_each_zcrypt_card(zc) { 572 /* Check for online CCA cards */ 573 if (!zc->online || !(zc->card->functions & 0x10000000)) 574 continue; 575 /* get weight index of the card device */ 576 weight = zc->speed_rating[func_code]; 577 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 578 continue; 579 for_each_zcrypt_queue(zq, zc) { 580 /* check if device is online and eligible */ 581 if (!zq->online || !zq->ops->rng) 582 continue; 583 if (zcrypt_queue_compare(zq, pref_zq, 584 weight, pref_weight)) 585 continue; 586 pref_zc = zc; 587 pref_zq = zq; 588 pref_weight = weight; 589 } 590 } 591 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 592 spin_unlock(&zcrypt_list_lock); 593 594 if (!pref_zq) 595 return -ENODEV; 596 597 qid = pref_zq->queue->qid; 598 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 599 600 spin_lock(&zcrypt_list_lock); 601 zcrypt_drop_queue(pref_zc, pref_zq, weight); 602 spin_unlock(&zcrypt_list_lock); 603 604 out: 605 trace_s390_zcrypt_rep(buffer, func_code, rc, 606 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 607 return rc; 608 } 609 610 void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) 611 { 612 struct zcrypt_card *zc; 613 struct zcrypt_queue *zq; 614 struct zcrypt_device_status *stat; 615 616 memset(matrix, 0, sizeof(*matrix)); 617 spin_lock(&zcrypt_list_lock); 618 for_each_zcrypt_card(zc) { 619 for_each_zcrypt_queue(zq, zc) { 620 stat = matrix->device; 621 stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS; 622 stat += AP_QID_QUEUE(zq->queue->qid); 623 stat->hwtype = zc->card->ap_dev.device_type; 624 stat->functions = zc->card->functions >> 26; 625 stat->qid = zq->queue->qid; 626 stat->online = zq->online ? 0x01 : 0x00; 627 } 628 } 629 spin_unlock(&zcrypt_list_lock); 630 } 631 EXPORT_SYMBOL(zcrypt_device_status_mask); 632 633 static void zcrypt_status_mask(char status[AP_DEVICES]) 634 { 635 struct zcrypt_card *zc; 636 struct zcrypt_queue *zq; 637 638 memset(status, 0, sizeof(char) * AP_DEVICES); 639 spin_lock(&zcrypt_list_lock); 640 for_each_zcrypt_card(zc) { 641 for_each_zcrypt_queue(zq, zc) { 642 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 643 continue; 644 status[AP_QID_CARD(zq->queue->qid)] = 645 zc->online ? zc->user_space_type : 0x0d; 646 } 647 } 648 spin_unlock(&zcrypt_list_lock); 649 } 650 651 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) 652 { 653 struct zcrypt_card *zc; 654 struct zcrypt_queue *zq; 655 656 memset(qdepth, 0, sizeof(char) * AP_DEVICES); 657 spin_lock(&zcrypt_list_lock); 658 local_bh_disable(); 659 for_each_zcrypt_card(zc) { 660 for_each_zcrypt_queue(zq, zc) { 661 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 662 continue; 663 spin_lock(&zq->queue->lock); 664 qdepth[AP_QID_CARD(zq->queue->qid)] = 665 zq->queue->pendingq_count + 666 zq->queue->requestq_count; 667 spin_unlock(&zq->queue->lock); 668 } 669 } 670 local_bh_enable(); 671 spin_unlock(&zcrypt_list_lock); 672 } 673 674 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) 675 { 676 struct zcrypt_card *zc; 677 struct zcrypt_queue *zq; 678 679 memset(reqcnt, 0, sizeof(int) * AP_DEVICES); 680 spin_lock(&zcrypt_list_lock); 681 local_bh_disable(); 682 for_each_zcrypt_card(zc) { 683 for_each_zcrypt_queue(zq, zc) { 684 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 685 continue; 686 spin_lock(&zq->queue->lock); 687 reqcnt[AP_QID_CARD(zq->queue->qid)] = 688 zq->queue->total_request_count; 689 spin_unlock(&zq->queue->lock); 690 } 691 } 692 local_bh_enable(); 693 spin_unlock(&zcrypt_list_lock); 694 } 695 696 static int zcrypt_pendingq_count(void) 697 { 698 struct zcrypt_card *zc; 699 struct zcrypt_queue *zq; 700 int pendingq_count; 701 702 pendingq_count = 0; 703 spin_lock(&zcrypt_list_lock); 704 local_bh_disable(); 705 for_each_zcrypt_card(zc) { 706 for_each_zcrypt_queue(zq, zc) { 707 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 708 continue; 709 spin_lock(&zq->queue->lock); 710 pendingq_count += zq->queue->pendingq_count; 711 spin_unlock(&zq->queue->lock); 712 } 713 } 714 local_bh_enable(); 715 spin_unlock(&zcrypt_list_lock); 716 return pendingq_count; 717 } 718 719 static int zcrypt_requestq_count(void) 720 { 721 struct zcrypt_card *zc; 722 struct zcrypt_queue *zq; 723 int requestq_count; 724 725 requestq_count = 0; 726 spin_lock(&zcrypt_list_lock); 727 local_bh_disable(); 728 for_each_zcrypt_card(zc) { 729 for_each_zcrypt_queue(zq, zc) { 730 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 731 continue; 732 spin_lock(&zq->queue->lock); 733 requestq_count += zq->queue->requestq_count; 734 spin_unlock(&zq->queue->lock); 735 } 736 } 737 local_bh_enable(); 738 spin_unlock(&zcrypt_list_lock); 739 return requestq_count; 740 } 741 742 static int zcrypt_count_type(int type) 743 { 744 struct zcrypt_card *zc; 745 struct zcrypt_queue *zq; 746 int device_count; 747 748 device_count = 0; 749 spin_lock(&zcrypt_list_lock); 750 for_each_zcrypt_card(zc) { 751 if (zc->card->id != type) 752 continue; 753 for_each_zcrypt_queue(zq, zc) { 754 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 755 continue; 756 device_count++; 757 } 758 } 759 spin_unlock(&zcrypt_list_lock); 760 return device_count; 761 } 762 763 /** 764 * zcrypt_ica_status(): Old, depracted combi status call. 765 * 766 * Old, deprecated combi status call. 767 */ 768 static long zcrypt_ica_status(struct file *filp, unsigned long arg) 769 { 770 struct ica_z90_status *pstat; 771 int ret; 772 773 pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); 774 if (!pstat) 775 return -ENOMEM; 776 pstat->totalcount = zcrypt_device_count; 777 pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); 778 pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); 779 pstat->requestqWaitCount = zcrypt_requestq_count(); 780 pstat->pendingqWaitCount = zcrypt_pendingq_count(); 781 pstat->totalOpenCount = atomic_read(&zcrypt_open_count); 782 pstat->cryptoDomain = ap_domain_index; 783 zcrypt_status_mask(pstat->status); 784 zcrypt_qdepth_mask(pstat->qdepth); 785 ret = 0; 786 if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) 787 ret = -EFAULT; 788 kfree(pstat); 789 return ret; 790 } 791 792 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 793 unsigned long arg) 794 { 795 int rc; 796 797 switch (cmd) { 798 case ICARSAMODEXPO: { 799 struct ica_rsa_modexpo __user *umex = (void __user *) arg; 800 struct ica_rsa_modexpo mex; 801 if (copy_from_user(&mex, umex, sizeof(mex))) 802 return -EFAULT; 803 do { 804 rc = zcrypt_rsa_modexpo(&mex); 805 } while (rc == -EAGAIN); 806 /* on failure: retry once again after a requested rescan */ 807 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 808 do { 809 rc = zcrypt_rsa_modexpo(&mex); 810 } while (rc == -EAGAIN); 811 if (rc) { 812 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc); 813 return rc; 814 } 815 return put_user(mex.outputdatalength, &umex->outputdatalength); 816 } 817 case ICARSACRT: { 818 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; 819 struct ica_rsa_modexpo_crt crt; 820 if (copy_from_user(&crt, ucrt, sizeof(crt))) 821 return -EFAULT; 822 do { 823 rc = zcrypt_rsa_crt(&crt); 824 } while (rc == -EAGAIN); 825 /* on failure: retry once again after a requested rescan */ 826 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 827 do { 828 rc = zcrypt_rsa_crt(&crt); 829 } while (rc == -EAGAIN); 830 if (rc) { 831 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc); 832 return rc; 833 } 834 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 835 } 836 case ZSECSENDCPRB: { 837 struct ica_xcRB __user *uxcRB = (void __user *) arg; 838 struct ica_xcRB xcRB; 839 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 840 return -EFAULT; 841 do { 842 rc = zcrypt_send_cprb(&xcRB); 843 } while (rc == -EAGAIN); 844 /* on failure: retry once again after a requested rescan */ 845 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 846 do { 847 rc = zcrypt_send_cprb(&xcRB); 848 } while (rc == -EAGAIN); 849 if (rc) 850 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc); 851 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 852 return -EFAULT; 853 return rc; 854 } 855 case ZSENDEP11CPRB: { 856 struct ep11_urb __user *uxcrb = (void __user *)arg; 857 struct ep11_urb xcrb; 858 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 859 return -EFAULT; 860 do { 861 rc = zcrypt_send_ep11_cprb(&xcrb); 862 } while (rc == -EAGAIN); 863 /* on failure: retry once again after a requested rescan */ 864 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 865 do { 866 rc = zcrypt_send_ep11_cprb(&xcrb); 867 } while (rc == -EAGAIN); 868 if (rc) 869 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc); 870 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 871 return -EFAULT; 872 return rc; 873 } 874 case ZDEVICESTATUS: { 875 struct zcrypt_device_matrix *device_status; 876 877 device_status = kzalloc(sizeof(struct zcrypt_device_matrix), 878 GFP_KERNEL); 879 if (!device_status) 880 return -ENOMEM; 881 882 zcrypt_device_status_mask(device_status); 883 884 if (copy_to_user((char __user *) arg, device_status, 885 sizeof(struct zcrypt_device_matrix))) { 886 kfree(device_status); 887 return -EFAULT; 888 } 889 890 kfree(device_status); 891 return 0; 892 } 893 case Z90STAT_STATUS_MASK: { 894 char status[AP_DEVICES]; 895 zcrypt_status_mask(status); 896 if (copy_to_user((char __user *) arg, status, 897 sizeof(char) * AP_DEVICES)) 898 return -EFAULT; 899 return 0; 900 } 901 case Z90STAT_QDEPTH_MASK: { 902 char qdepth[AP_DEVICES]; 903 zcrypt_qdepth_mask(qdepth); 904 if (copy_to_user((char __user *) arg, qdepth, 905 sizeof(char) * AP_DEVICES)) 906 return -EFAULT; 907 return 0; 908 } 909 case Z90STAT_PERDEV_REQCNT: { 910 int reqcnt[AP_DEVICES]; 911 zcrypt_perdev_reqcnt(reqcnt); 912 if (copy_to_user((int __user *) arg, reqcnt, 913 sizeof(int) * AP_DEVICES)) 914 return -EFAULT; 915 return 0; 916 } 917 case Z90STAT_REQUESTQ_COUNT: 918 return put_user(zcrypt_requestq_count(), (int __user *) arg); 919 case Z90STAT_PENDINGQ_COUNT: 920 return put_user(zcrypt_pendingq_count(), (int __user *) arg); 921 case Z90STAT_TOTALOPEN_COUNT: 922 return put_user(atomic_read(&zcrypt_open_count), 923 (int __user *) arg); 924 case Z90STAT_DOMAIN_INDEX: 925 return put_user(ap_domain_index, (int __user *) arg); 926 /* 927 * Deprecated ioctls. Don't add another device count ioctl, 928 * you can count them yourself in the user space with the 929 * output of the Z90STAT_STATUS_MASK ioctl. 930 */ 931 case ICAZ90STATUS: 932 return zcrypt_ica_status(filp, arg); 933 case Z90STAT_TOTALCOUNT: 934 return put_user(zcrypt_device_count, (int __user *) arg); 935 case Z90STAT_PCICACOUNT: 936 return put_user(zcrypt_count_type(ZCRYPT_PCICA), 937 (int __user *) arg); 938 case Z90STAT_PCICCCOUNT: 939 return put_user(zcrypt_count_type(ZCRYPT_PCICC), 940 (int __user *) arg); 941 case Z90STAT_PCIXCCMCL2COUNT: 942 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), 943 (int __user *) arg); 944 case Z90STAT_PCIXCCMCL3COUNT: 945 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 946 (int __user *) arg); 947 case Z90STAT_PCIXCCCOUNT: 948 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + 949 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 950 (int __user *) arg); 951 case Z90STAT_CEX2CCOUNT: 952 return put_user(zcrypt_count_type(ZCRYPT_CEX2C), 953 (int __user *) arg); 954 case Z90STAT_CEX2ACOUNT: 955 return put_user(zcrypt_count_type(ZCRYPT_CEX2A), 956 (int __user *) arg); 957 default: 958 /* unknown ioctl number */ 959 return -ENOIOCTLCMD; 960 } 961 } 962 963 #ifdef CONFIG_COMPAT 964 /* 965 * ioctl32 conversion routines 966 */ 967 struct compat_ica_rsa_modexpo { 968 compat_uptr_t inputdata; 969 unsigned int inputdatalength; 970 compat_uptr_t outputdata; 971 unsigned int outputdatalength; 972 compat_uptr_t b_key; 973 compat_uptr_t n_modulus; 974 }; 975 976 static long trans_modexpo32(struct file *filp, unsigned int cmd, 977 unsigned long arg) 978 { 979 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 980 struct compat_ica_rsa_modexpo mex32; 981 struct ica_rsa_modexpo mex64; 982 long rc; 983 984 if (copy_from_user(&mex32, umex32, sizeof(mex32))) 985 return -EFAULT; 986 mex64.inputdata = compat_ptr(mex32.inputdata); 987 mex64.inputdatalength = mex32.inputdatalength; 988 mex64.outputdata = compat_ptr(mex32.outputdata); 989 mex64.outputdatalength = mex32.outputdatalength; 990 mex64.b_key = compat_ptr(mex32.b_key); 991 mex64.n_modulus = compat_ptr(mex32.n_modulus); 992 do { 993 rc = zcrypt_rsa_modexpo(&mex64); 994 } while (rc == -EAGAIN); 995 /* on failure: retry once again after a requested rescan */ 996 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 997 do { 998 rc = zcrypt_rsa_modexpo(&mex64); 999 } while (rc == -EAGAIN); 1000 if (rc) 1001 return rc; 1002 return put_user(mex64.outputdatalength, 1003 &umex32->outputdatalength); 1004 } 1005 1006 struct compat_ica_rsa_modexpo_crt { 1007 compat_uptr_t inputdata; 1008 unsigned int inputdatalength; 1009 compat_uptr_t outputdata; 1010 unsigned int outputdatalength; 1011 compat_uptr_t bp_key; 1012 compat_uptr_t bq_key; 1013 compat_uptr_t np_prime; 1014 compat_uptr_t nq_prime; 1015 compat_uptr_t u_mult_inv; 1016 }; 1017 1018 static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, 1019 unsigned long arg) 1020 { 1021 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 1022 struct compat_ica_rsa_modexpo_crt crt32; 1023 struct ica_rsa_modexpo_crt crt64; 1024 long rc; 1025 1026 if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) 1027 return -EFAULT; 1028 crt64.inputdata = compat_ptr(crt32.inputdata); 1029 crt64.inputdatalength = crt32.inputdatalength; 1030 crt64.outputdata= compat_ptr(crt32.outputdata); 1031 crt64.outputdatalength = crt32.outputdatalength; 1032 crt64.bp_key = compat_ptr(crt32.bp_key); 1033 crt64.bq_key = compat_ptr(crt32.bq_key); 1034 crt64.np_prime = compat_ptr(crt32.np_prime); 1035 crt64.nq_prime = compat_ptr(crt32.nq_prime); 1036 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 1037 do { 1038 rc = zcrypt_rsa_crt(&crt64); 1039 } while (rc == -EAGAIN); 1040 /* on failure: retry once again after a requested rescan */ 1041 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1042 do { 1043 rc = zcrypt_rsa_crt(&crt64); 1044 } while (rc == -EAGAIN); 1045 if (rc) 1046 return rc; 1047 return put_user(crt64.outputdatalength, 1048 &ucrt32->outputdatalength); 1049 } 1050 1051 struct compat_ica_xcRB { 1052 unsigned short agent_ID; 1053 unsigned int user_defined; 1054 unsigned short request_ID; 1055 unsigned int request_control_blk_length; 1056 unsigned char padding1[16 - sizeof (compat_uptr_t)]; 1057 compat_uptr_t request_control_blk_addr; 1058 unsigned int request_data_length; 1059 char padding2[16 - sizeof (compat_uptr_t)]; 1060 compat_uptr_t request_data_address; 1061 unsigned int reply_control_blk_length; 1062 char padding3[16 - sizeof (compat_uptr_t)]; 1063 compat_uptr_t reply_control_blk_addr; 1064 unsigned int reply_data_length; 1065 char padding4[16 - sizeof (compat_uptr_t)]; 1066 compat_uptr_t reply_data_addr; 1067 unsigned short priority_window; 1068 unsigned int status; 1069 } __attribute__((packed)); 1070 1071 static long trans_xcRB32(struct file *filp, unsigned int cmd, 1072 unsigned long arg) 1073 { 1074 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 1075 struct compat_ica_xcRB xcRB32; 1076 struct ica_xcRB xcRB64; 1077 long rc; 1078 1079 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) 1080 return -EFAULT; 1081 xcRB64.agent_ID = xcRB32.agent_ID; 1082 xcRB64.user_defined = xcRB32.user_defined; 1083 xcRB64.request_ID = xcRB32.request_ID; 1084 xcRB64.request_control_blk_length = 1085 xcRB32.request_control_blk_length; 1086 xcRB64.request_control_blk_addr = 1087 compat_ptr(xcRB32.request_control_blk_addr); 1088 xcRB64.request_data_length = 1089 xcRB32.request_data_length; 1090 xcRB64.request_data_address = 1091 compat_ptr(xcRB32.request_data_address); 1092 xcRB64.reply_control_blk_length = 1093 xcRB32.reply_control_blk_length; 1094 xcRB64.reply_control_blk_addr = 1095 compat_ptr(xcRB32.reply_control_blk_addr); 1096 xcRB64.reply_data_length = xcRB32.reply_data_length; 1097 xcRB64.reply_data_addr = 1098 compat_ptr(xcRB32.reply_data_addr); 1099 xcRB64.priority_window = xcRB32.priority_window; 1100 xcRB64.status = xcRB32.status; 1101 do { 1102 rc = zcrypt_send_cprb(&xcRB64); 1103 } while (rc == -EAGAIN); 1104 /* on failure: retry once again after a requested rescan */ 1105 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1106 do { 1107 rc = zcrypt_send_cprb(&xcRB64); 1108 } while (rc == -EAGAIN); 1109 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1110 xcRB32.reply_data_length = xcRB64.reply_data_length; 1111 xcRB32.status = xcRB64.status; 1112 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) 1113 return -EFAULT; 1114 return rc; 1115 } 1116 1117 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1118 unsigned long arg) 1119 { 1120 if (cmd == ICARSAMODEXPO) 1121 return trans_modexpo32(filp, cmd, arg); 1122 if (cmd == ICARSACRT) 1123 return trans_modexpo_crt32(filp, cmd, arg); 1124 if (cmd == ZSECSENDCPRB) 1125 return trans_xcRB32(filp, cmd, arg); 1126 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1127 } 1128 #endif 1129 1130 /* 1131 * Misc device file operations. 1132 */ 1133 static const struct file_operations zcrypt_fops = { 1134 .owner = THIS_MODULE, 1135 .read = zcrypt_read, 1136 .write = zcrypt_write, 1137 .unlocked_ioctl = zcrypt_unlocked_ioctl, 1138 #ifdef CONFIG_COMPAT 1139 .compat_ioctl = zcrypt_compat_ioctl, 1140 #endif 1141 .open = zcrypt_open, 1142 .release = zcrypt_release, 1143 .llseek = no_llseek, 1144 }; 1145 1146 /* 1147 * Misc device. 1148 */ 1149 static struct miscdevice zcrypt_misc_device = { 1150 .minor = MISC_DYNAMIC_MINOR, 1151 .name = "z90crypt", 1152 .fops = &zcrypt_fops, 1153 }; 1154 1155 /* 1156 * Deprecated /proc entry support. 1157 */ 1158 static struct proc_dir_entry *zcrypt_entry; 1159 1160 static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len) 1161 { 1162 int i; 1163 1164 for (i = 0; i < len; i++) 1165 seq_printf(m, "%01x", (unsigned int) addr[i]); 1166 seq_putc(m, ' '); 1167 } 1168 1169 static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len) 1170 { 1171 int inl, c, cx; 1172 1173 seq_printf(m, " "); 1174 inl = 0; 1175 for (c = 0; c < (len / 16); c++) { 1176 sprintcl(m, addr+inl, 16); 1177 inl += 16; 1178 } 1179 cx = len%16; 1180 if (cx) { 1181 sprintcl(m, addr+inl, cx); 1182 inl += cx; 1183 } 1184 seq_putc(m, '\n'); 1185 } 1186 1187 static void sprinthx(unsigned char *title, struct seq_file *m, 1188 unsigned char *addr, unsigned int len) 1189 { 1190 int inl, r, rx; 1191 1192 seq_printf(m, "\n%s\n", title); 1193 inl = 0; 1194 for (r = 0; r < (len / 64); r++) { 1195 sprintrw(m, addr+inl, 64); 1196 inl += 64; 1197 } 1198 rx = len % 64; 1199 if (rx) { 1200 sprintrw(m, addr+inl, rx); 1201 inl += rx; 1202 } 1203 seq_putc(m, '\n'); 1204 } 1205 1206 static void sprinthx4(unsigned char *title, struct seq_file *m, 1207 unsigned int *array, unsigned int len) 1208 { 1209 seq_printf(m, "\n%s\n", title); 1210 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false); 1211 seq_putc(m, '\n'); 1212 } 1213 1214 static int zcrypt_proc_show(struct seq_file *m, void *v) 1215 { 1216 char workarea[sizeof(int) * AP_DEVICES]; 1217 1218 seq_printf(m, "\nzcrypt version: %d.%d.%d\n", 1219 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); 1220 seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index); 1221 seq_printf(m, "Total device count: %d\n", zcrypt_device_count); 1222 seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA)); 1223 seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC)); 1224 seq_printf(m, "PCIXCC MCL2 count: %d\n", 1225 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); 1226 seq_printf(m, "PCIXCC MCL3 count: %d\n", 1227 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); 1228 seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C)); 1229 seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A)); 1230 seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C)); 1231 seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A)); 1232 seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count()); 1233 seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count()); 1234 seq_printf(m, "Total open handles: %d\n\n", 1235 atomic_read(&zcrypt_open_count)); 1236 zcrypt_status_mask(workarea); 1237 sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " 1238 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", 1239 m, workarea, AP_DEVICES); 1240 zcrypt_qdepth_mask(workarea); 1241 sprinthx("Waiting work element counts", m, workarea, AP_DEVICES); 1242 zcrypt_perdev_reqcnt((int *) workarea); 1243 sprinthx4("Per-device successfully completed request counts", 1244 m, (unsigned int *) workarea, AP_DEVICES); 1245 return 0; 1246 } 1247 1248 static int zcrypt_proc_open(struct inode *inode, struct file *file) 1249 { 1250 return single_open(file, zcrypt_proc_show, NULL); 1251 } 1252 1253 static void zcrypt_disable_card(int index) 1254 { 1255 struct zcrypt_card *zc; 1256 struct zcrypt_queue *zq; 1257 1258 spin_lock(&zcrypt_list_lock); 1259 for_each_zcrypt_card(zc) { 1260 for_each_zcrypt_queue(zq, zc) { 1261 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1262 continue; 1263 zq->online = 0; 1264 ap_flush_queue(zq->queue); 1265 } 1266 } 1267 spin_unlock(&zcrypt_list_lock); 1268 } 1269 1270 static void zcrypt_enable_card(int index) 1271 { 1272 struct zcrypt_card *zc; 1273 struct zcrypt_queue *zq; 1274 1275 spin_lock(&zcrypt_list_lock); 1276 for_each_zcrypt_card(zc) { 1277 for_each_zcrypt_queue(zq, zc) { 1278 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1279 continue; 1280 zq->online = 1; 1281 ap_flush_queue(zq->queue); 1282 } 1283 } 1284 spin_unlock(&zcrypt_list_lock); 1285 } 1286 1287 static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, 1288 size_t count, loff_t *pos) 1289 { 1290 unsigned char *lbuf, *ptr; 1291 size_t local_count; 1292 int j; 1293 1294 if (count <= 0) 1295 return 0; 1296 1297 #define LBUFSIZE 1200UL 1298 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 1299 if (!lbuf) 1300 return 0; 1301 1302 local_count = min(LBUFSIZE - 1, count); 1303 if (copy_from_user(lbuf, buffer, local_count) != 0) { 1304 kfree(lbuf); 1305 return -EFAULT; 1306 } 1307 lbuf[local_count] = '\0'; 1308 1309 ptr = strstr(lbuf, "Online devices"); 1310 if (!ptr) 1311 goto out; 1312 ptr = strstr(ptr, "\n"); 1313 if (!ptr) 1314 goto out; 1315 ptr++; 1316 1317 if (strstr(ptr, "Waiting work element counts") == NULL) 1318 goto out; 1319 1320 for (j = 0; j < 64 && *ptr; ptr++) { 1321 /* 1322 * '0' for no device, '1' for PCICA, '2' for PCICC, 1323 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, 1324 * '5' for CEX2C and '6' for CEX2A' 1325 * '7' for CEX3C and '8' for CEX3A 1326 */ 1327 if (*ptr >= '0' && *ptr <= '8') 1328 j++; 1329 else if (*ptr == 'd' || *ptr == 'D') 1330 zcrypt_disable_card(j++); 1331 else if (*ptr == 'e' || *ptr == 'E') 1332 zcrypt_enable_card(j++); 1333 else if (*ptr != ' ' && *ptr != '\t') 1334 break; 1335 } 1336 out: 1337 kfree(lbuf); 1338 return count; 1339 } 1340 1341 static const struct file_operations zcrypt_proc_fops = { 1342 .owner = THIS_MODULE, 1343 .open = zcrypt_proc_open, 1344 .read = seq_read, 1345 .llseek = seq_lseek, 1346 .release = single_release, 1347 .write = zcrypt_proc_write, 1348 }; 1349 1350 static int zcrypt_rng_device_count; 1351 static u32 *zcrypt_rng_buffer; 1352 static int zcrypt_rng_buffer_index; 1353 static DEFINE_MUTEX(zcrypt_rng_mutex); 1354 1355 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) 1356 { 1357 int rc; 1358 1359 /* 1360 * We don't need locking here because the RNG API guarantees serialized 1361 * read method calls. 1362 */ 1363 if (zcrypt_rng_buffer_index == 0) { 1364 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1365 /* on failure: retry once again after a requested rescan */ 1366 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1367 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1368 if (rc < 0) 1369 return -EIO; 1370 zcrypt_rng_buffer_index = rc / sizeof *data; 1371 } 1372 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; 1373 return sizeof *data; 1374 } 1375 1376 static struct hwrng zcrypt_rng_dev = { 1377 .name = "zcrypt", 1378 .data_read = zcrypt_rng_data_read, 1379 .quality = 990, 1380 }; 1381 1382 int zcrypt_rng_device_add(void) 1383 { 1384 int rc = 0; 1385 1386 mutex_lock(&zcrypt_rng_mutex); 1387 if (zcrypt_rng_device_count == 0) { 1388 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); 1389 if (!zcrypt_rng_buffer) { 1390 rc = -ENOMEM; 1391 goto out; 1392 } 1393 zcrypt_rng_buffer_index = 0; 1394 if (!zcrypt_hwrng_seed) 1395 zcrypt_rng_dev.quality = 0; 1396 rc = hwrng_register(&zcrypt_rng_dev); 1397 if (rc) 1398 goto out_free; 1399 zcrypt_rng_device_count = 1; 1400 } else 1401 zcrypt_rng_device_count++; 1402 mutex_unlock(&zcrypt_rng_mutex); 1403 return 0; 1404 1405 out_free: 1406 free_page((unsigned long) zcrypt_rng_buffer); 1407 out: 1408 mutex_unlock(&zcrypt_rng_mutex); 1409 return rc; 1410 } 1411 1412 void zcrypt_rng_device_remove(void) 1413 { 1414 mutex_lock(&zcrypt_rng_mutex); 1415 zcrypt_rng_device_count--; 1416 if (zcrypt_rng_device_count == 0) { 1417 hwrng_unregister(&zcrypt_rng_dev); 1418 free_page((unsigned long) zcrypt_rng_buffer); 1419 } 1420 mutex_unlock(&zcrypt_rng_mutex); 1421 } 1422 1423 int __init zcrypt_debug_init(void) 1424 { 1425 zcrypt_dbf_info = debug_register("zcrypt", 1, 1, 1426 DBF_MAX_SPRINTF_ARGS * sizeof(long)); 1427 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 1428 debug_set_level(zcrypt_dbf_info, DBF_ERR); 1429 1430 return 0; 1431 } 1432 1433 void zcrypt_debug_exit(void) 1434 { 1435 debug_unregister(zcrypt_dbf_info); 1436 } 1437 1438 /** 1439 * zcrypt_api_init(): Module initialization. 1440 * 1441 * The module initialization code. 1442 */ 1443 int __init zcrypt_api_init(void) 1444 { 1445 int rc; 1446 1447 rc = zcrypt_debug_init(); 1448 if (rc) 1449 goto out; 1450 1451 atomic_set(&zcrypt_rescan_req, 0); 1452 1453 /* Register the request sprayer. */ 1454 rc = misc_register(&zcrypt_misc_device); 1455 if (rc < 0) 1456 goto out; 1457 1458 /* Set up the proc file system */ 1459 zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, 1460 &zcrypt_proc_fops); 1461 if (!zcrypt_entry) { 1462 rc = -ENOMEM; 1463 goto out_misc; 1464 } 1465 1466 zcrypt_msgtype6_init(); 1467 zcrypt_msgtype50_init(); 1468 return 0; 1469 1470 out_misc: 1471 misc_deregister(&zcrypt_misc_device); 1472 out: 1473 return rc; 1474 } 1475 1476 /** 1477 * zcrypt_api_exit(): Module termination. 1478 * 1479 * The module termination code. 1480 */ 1481 void __exit zcrypt_api_exit(void) 1482 { 1483 remove_proc_entry("driver/z90crypt", NULL); 1484 misc_deregister(&zcrypt_misc_device); 1485 zcrypt_msgtype6_exit(); 1486 zcrypt_msgtype50_exit(); 1487 zcrypt_debug_exit(); 1488 } 1489 1490 module_init(zcrypt_api_init); 1491 module_exit(zcrypt_api_exit); 1492