1 /* 2 * zcrypt 2.1.0 3 * 4 * Copyright IBM Corp. 2001, 2012 5 * Author(s): Robert Burroughs 6 * Eric Rossman (edrossma@us.ibm.com) 7 * Cornelia Huck <cornelia.huck@de.ibm.com> 8 * 9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 * Ralph Wuerthner <rwuerthn@de.ibm.com> 12 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2, or (at your option) 17 * any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program; if not, write to the Free Software 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 */ 28 29 #include <linux/module.h> 30 #include <linux/init.h> 31 #include <linux/interrupt.h> 32 #include <linux/miscdevice.h> 33 #include <linux/fs.h> 34 #include <linux/proc_fs.h> 35 #include <linux/seq_file.h> 36 #include <linux/compat.h> 37 #include <linux/slab.h> 38 #include <linux/atomic.h> 39 #include <linux/uaccess.h> 40 #include <linux/hw_random.h> 41 #include <linux/debugfs.h> 42 #include <asm/debug.h> 43 44 #define CREATE_TRACE_POINTS 45 #include <asm/trace/zcrypt.h> 46 47 #include "zcrypt_api.h" 48 #include "zcrypt_debug.h" 49 50 #include "zcrypt_msgtype6.h" 51 #include "zcrypt_msgtype50.h" 52 53 /* 54 * Module description. 55 */ 56 MODULE_AUTHOR("IBM Corporation"); 57 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ 58 "Copyright IBM Corp. 2001, 2012"); 59 MODULE_LICENSE("GPL"); 60 61 /* 62 * zcrypt tracepoint functions 63 */ 64 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); 65 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); 66 67 static int zcrypt_hwrng_seed = 1; 68 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); 69 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); 70 71 DEFINE_SPINLOCK(zcrypt_list_lock); 72 LIST_HEAD(zcrypt_card_list); 73 int zcrypt_device_count; 74 75 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 76 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 77 78 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 79 EXPORT_SYMBOL(zcrypt_rescan_req); 80 81 static LIST_HEAD(zcrypt_ops_list); 82 83 /* Zcrypt related debug feature stuff. */ 84 debug_info_t *zcrypt_dbf_info; 85 86 /** 87 * Process a rescan of the transport layer. 88 * 89 * Returns 1, if the rescan has been processed, otherwise 0. 90 */ 91 static inline int zcrypt_process_rescan(void) 92 { 93 if (atomic_read(&zcrypt_rescan_req)) { 94 atomic_set(&zcrypt_rescan_req, 0); 95 atomic_inc(&zcrypt_rescan_count); 96 ap_bus_force_rescan(); 97 ZCRYPT_DBF(DBF_INFO, "rescan count=%07d", 98 atomic_inc_return(&zcrypt_rescan_count)); 99 return 1; 100 } 101 return 0; 102 } 103 104 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 105 { 106 list_add_tail(&zops->list, &zcrypt_ops_list); 107 } 108 109 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 110 { 111 list_del_init(&zops->list); 112 } 113 114 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) 115 { 116 struct zcrypt_ops *zops; 117 118 list_for_each_entry(zops, &zcrypt_ops_list, list) 119 if ((zops->variant == variant) && 120 (!strncmp(zops->name, name, sizeof(zops->name)))) 121 return zops; 122 return NULL; 123 } 124 EXPORT_SYMBOL(zcrypt_msgtype); 125 126 /** 127 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 128 * 129 * This function is not supported beyond zcrypt 1.3.1. 130 */ 131 static ssize_t zcrypt_read(struct file *filp, char __user *buf, 132 size_t count, loff_t *f_pos) 133 { 134 return -EPERM; 135 } 136 137 /** 138 * zcrypt_write(): Not allowed. 139 * 140 * Write is is not allowed 141 */ 142 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 143 size_t count, loff_t *f_pos) 144 { 145 return -EPERM; 146 } 147 148 /** 149 * zcrypt_open(): Count number of users. 150 * 151 * Device open function to count number of users. 152 */ 153 static int zcrypt_open(struct inode *inode, struct file *filp) 154 { 155 atomic_inc(&zcrypt_open_count); 156 return nonseekable_open(inode, filp); 157 } 158 159 /** 160 * zcrypt_release(): Count number of users. 161 * 162 * Device close function to count number of users. 163 */ 164 static int zcrypt_release(struct inode *inode, struct file *filp) 165 { 166 atomic_dec(&zcrypt_open_count); 167 return 0; 168 } 169 170 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 171 struct zcrypt_queue *zq, 172 unsigned int weight) 173 { 174 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 175 return NULL; 176 zcrypt_queue_get(zq); 177 get_device(&zq->queue->ap_dev.device); 178 atomic_add(weight, &zc->load); 179 atomic_add(weight, &zq->load); 180 zq->request_count++; 181 return zq; 182 } 183 184 static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 185 struct zcrypt_queue *zq, 186 unsigned int weight) 187 { 188 struct module *mod = zq->queue->ap_dev.drv->driver.owner; 189 190 zq->request_count--; 191 atomic_sub(weight, &zc->load); 192 atomic_sub(weight, &zq->load); 193 put_device(&zq->queue->ap_dev.device); 194 zcrypt_queue_put(zq); 195 module_put(mod); 196 } 197 198 static inline bool zcrypt_card_compare(struct zcrypt_card *zc, 199 struct zcrypt_card *pref_zc, 200 unsigned weight, unsigned pref_weight) 201 { 202 if (!pref_zc) 203 return false; 204 weight += atomic_read(&zc->load); 205 pref_weight += atomic_read(&pref_zc->load); 206 if (weight == pref_weight) 207 return atomic_read(&zc->card->total_request_count) > 208 atomic_read(&pref_zc->card->total_request_count); 209 return weight > pref_weight; 210 } 211 212 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, 213 struct zcrypt_queue *pref_zq, 214 unsigned weight, unsigned pref_weight) 215 { 216 if (!pref_zq) 217 return false; 218 weight += atomic_read(&zq->load); 219 pref_weight += atomic_read(&pref_zq->load); 220 if (weight == pref_weight) 221 return &zq->queue->total_request_count > 222 &pref_zq->queue->total_request_count; 223 return weight > pref_weight; 224 } 225 226 /* 227 * zcrypt ioctls. 228 */ 229 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 230 { 231 struct zcrypt_card *zc, *pref_zc; 232 struct zcrypt_queue *zq, *pref_zq; 233 unsigned int weight, pref_weight; 234 unsigned int func_code; 235 int qid = 0, rc = -ENODEV; 236 237 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 238 239 if (mex->outputdatalength < mex->inputdatalength) { 240 rc = -EINVAL; 241 goto out; 242 } 243 244 /* 245 * As long as outputdatalength is big enough, we can set the 246 * outputdatalength equal to the inputdatalength, since that is the 247 * number of bytes we will copy in any case 248 */ 249 mex->outputdatalength = mex->inputdatalength; 250 251 rc = get_rsa_modex_fc(mex, &func_code); 252 if (rc) 253 goto out; 254 255 pref_zc = NULL; 256 pref_zq = NULL; 257 spin_lock(&zcrypt_list_lock); 258 for_each_zcrypt_card(zc) { 259 /* Check for online accelarator and CCA cards */ 260 if (!zc->online || !(zc->card->functions & 0x18000000)) 261 continue; 262 /* Check for size limits */ 263 if (zc->min_mod_size > mex->inputdatalength || 264 zc->max_mod_size < mex->inputdatalength) 265 continue; 266 /* get weight index of the card device */ 267 weight = zc->speed_rating[func_code]; 268 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 269 continue; 270 for_each_zcrypt_queue(zq, zc) { 271 /* check if device is online and eligible */ 272 if (!zq->online || !zq->ops->rsa_modexpo) 273 continue; 274 if (zcrypt_queue_compare(zq, pref_zq, 275 weight, pref_weight)) 276 continue; 277 pref_zc = zc; 278 pref_zq = zq; 279 pref_weight = weight; 280 } 281 } 282 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 283 spin_unlock(&zcrypt_list_lock); 284 285 if (!pref_zq) { 286 rc = -ENODEV; 287 goto out; 288 } 289 290 qid = pref_zq->queue->qid; 291 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 292 293 spin_lock(&zcrypt_list_lock); 294 zcrypt_drop_queue(pref_zc, pref_zq, weight); 295 spin_unlock(&zcrypt_list_lock); 296 297 out: 298 trace_s390_zcrypt_rep(mex, func_code, rc, 299 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 300 return rc; 301 } 302 303 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 304 { 305 struct zcrypt_card *zc, *pref_zc; 306 struct zcrypt_queue *zq, *pref_zq; 307 unsigned int weight, pref_weight; 308 unsigned int func_code; 309 int qid = 0, rc = -ENODEV; 310 311 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 312 313 if (crt->outputdatalength < crt->inputdatalength) { 314 rc = -EINVAL; 315 goto out; 316 } 317 318 /* 319 * As long as outputdatalength is big enough, we can set the 320 * outputdatalength equal to the inputdatalength, since that is the 321 * number of bytes we will copy in any case 322 */ 323 crt->outputdatalength = crt->inputdatalength; 324 325 rc = get_rsa_crt_fc(crt, &func_code); 326 if (rc) 327 goto out; 328 329 pref_zc = NULL; 330 pref_zq = NULL; 331 spin_lock(&zcrypt_list_lock); 332 for_each_zcrypt_card(zc) { 333 /* Check for online accelarator and CCA cards */ 334 if (!zc->online || !(zc->card->functions & 0x18000000)) 335 continue; 336 /* Check for size limits */ 337 if (zc->min_mod_size > crt->inputdatalength || 338 zc->max_mod_size < crt->inputdatalength) 339 continue; 340 /* get weight index of the card device */ 341 weight = zc->speed_rating[func_code]; 342 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 343 continue; 344 for_each_zcrypt_queue(zq, zc) { 345 /* check if device is online and eligible */ 346 if (!zq->online || !zq->ops->rsa_modexpo_crt) 347 continue; 348 if (zcrypt_queue_compare(zq, pref_zq, 349 weight, pref_weight)) 350 continue; 351 pref_zc = zc; 352 pref_zq = zq; 353 pref_weight = weight; 354 } 355 } 356 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 357 spin_unlock(&zcrypt_list_lock); 358 359 if (!pref_zq) { 360 rc = -ENODEV; 361 goto out; 362 } 363 364 qid = pref_zq->queue->qid; 365 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 366 367 spin_lock(&zcrypt_list_lock); 368 zcrypt_drop_queue(pref_zc, pref_zq, weight); 369 spin_unlock(&zcrypt_list_lock); 370 371 out: 372 trace_s390_zcrypt_rep(crt, func_code, rc, 373 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 374 return rc; 375 } 376 377 long zcrypt_send_cprb(struct ica_xcRB *xcRB) 378 { 379 struct zcrypt_card *zc, *pref_zc; 380 struct zcrypt_queue *zq, *pref_zq; 381 struct ap_message ap_msg; 382 unsigned int weight, pref_weight; 383 unsigned int func_code; 384 unsigned short *domain; 385 int qid = 0, rc = -ENODEV; 386 387 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 388 389 rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); 390 if (rc) 391 goto out; 392 393 pref_zc = NULL; 394 pref_zq = NULL; 395 spin_lock(&zcrypt_list_lock); 396 for_each_zcrypt_card(zc) { 397 /* Check for online CCA cards */ 398 if (!zc->online || !(zc->card->functions & 0x10000000)) 399 continue; 400 /* Check for user selected CCA card */ 401 if (xcRB->user_defined != AUTOSELECT && 402 xcRB->user_defined != zc->card->id) 403 continue; 404 /* get weight index of the card device */ 405 weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 406 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 407 continue; 408 for_each_zcrypt_queue(zq, zc) { 409 /* check if device is online and eligible */ 410 if (!zq->online || 411 !zq->ops->send_cprb || 412 ((*domain != (unsigned short) AUTOSELECT) && 413 (*domain != AP_QID_QUEUE(zq->queue->qid)))) 414 continue; 415 if (zcrypt_queue_compare(zq, pref_zq, 416 weight, pref_weight)) 417 continue; 418 pref_zc = zc; 419 pref_zq = zq; 420 pref_weight = weight; 421 } 422 } 423 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 424 spin_unlock(&zcrypt_list_lock); 425 426 if (!pref_zq) { 427 rc = -ENODEV; 428 goto out; 429 } 430 431 /* in case of auto select, provide the correct domain */ 432 qid = pref_zq->queue->qid; 433 if (*domain == (unsigned short) AUTOSELECT) 434 *domain = AP_QID_QUEUE(qid); 435 436 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 437 438 spin_lock(&zcrypt_list_lock); 439 zcrypt_drop_queue(pref_zc, pref_zq, weight); 440 spin_unlock(&zcrypt_list_lock); 441 442 out: 443 trace_s390_zcrypt_rep(xcRB, func_code, rc, 444 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 445 return rc; 446 } 447 EXPORT_SYMBOL(zcrypt_send_cprb); 448 449 static bool is_desired_ep11_card(unsigned int dev_id, 450 unsigned short target_num, 451 struct ep11_target_dev *targets) 452 { 453 while (target_num-- > 0) { 454 if (dev_id == targets->ap_id) 455 return true; 456 targets++; 457 } 458 return false; 459 } 460 461 static bool is_desired_ep11_queue(unsigned int dev_qid, 462 unsigned short target_num, 463 struct ep11_target_dev *targets) 464 { 465 while (target_num-- > 0) { 466 if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) 467 return true; 468 targets++; 469 } 470 return false; 471 } 472 473 static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 474 { 475 struct zcrypt_card *zc, *pref_zc; 476 struct zcrypt_queue *zq, *pref_zq; 477 struct ep11_target_dev *targets; 478 unsigned short target_num; 479 unsigned int weight, pref_weight; 480 unsigned int func_code; 481 struct ap_message ap_msg; 482 int qid = 0, rc = -ENODEV; 483 484 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 485 486 target_num = (unsigned short) xcrb->targets_num; 487 488 /* empty list indicates autoselect (all available targets) */ 489 targets = NULL; 490 if (target_num != 0) { 491 struct ep11_target_dev __user *uptr; 492 493 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 494 if (!targets) { 495 rc = -ENOMEM; 496 goto out; 497 } 498 499 uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 500 if (copy_from_user(targets, uptr, 501 target_num * sizeof(*targets))) { 502 rc = -EFAULT; 503 goto out; 504 } 505 } 506 507 rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); 508 if (rc) 509 goto out_free; 510 511 pref_zc = NULL; 512 pref_zq = NULL; 513 spin_lock(&zcrypt_list_lock); 514 for_each_zcrypt_card(zc) { 515 /* Check for online EP11 cards */ 516 if (!zc->online || !(zc->card->functions & 0x04000000)) 517 continue; 518 /* Check for user selected EP11 card */ 519 if (targets && 520 !is_desired_ep11_card(zc->card->id, target_num, targets)) 521 continue; 522 /* get weight index of the card device */ 523 weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 524 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 525 continue; 526 for_each_zcrypt_queue(zq, zc) { 527 /* check if device is online and eligible */ 528 if (!zq->online || 529 !zq->ops->send_ep11_cprb || 530 (targets && 531 !is_desired_ep11_queue(zq->queue->qid, 532 target_num, targets))) 533 continue; 534 if (zcrypt_queue_compare(zq, pref_zq, 535 weight, pref_weight)) 536 continue; 537 pref_zc = zc; 538 pref_zq = zq; 539 pref_weight = weight; 540 } 541 } 542 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 543 spin_unlock(&zcrypt_list_lock); 544 545 if (!pref_zq) { 546 rc = -ENODEV; 547 goto out_free; 548 } 549 550 qid = pref_zq->queue->qid; 551 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 552 553 spin_lock(&zcrypt_list_lock); 554 zcrypt_drop_queue(pref_zc, pref_zq, weight); 555 spin_unlock(&zcrypt_list_lock); 556 557 out_free: 558 kfree(targets); 559 out: 560 trace_s390_zcrypt_rep(xcrb, func_code, rc, 561 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 562 return rc; 563 } 564 565 static long zcrypt_rng(char *buffer) 566 { 567 struct zcrypt_card *zc, *pref_zc; 568 struct zcrypt_queue *zq, *pref_zq; 569 unsigned int weight, pref_weight; 570 unsigned int func_code; 571 struct ap_message ap_msg; 572 unsigned int domain; 573 int qid = 0, rc = -ENODEV; 574 575 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 576 577 rc = get_rng_fc(&ap_msg, &func_code, &domain); 578 if (rc) 579 goto out; 580 581 pref_zc = NULL; 582 pref_zq = NULL; 583 spin_lock(&zcrypt_list_lock); 584 for_each_zcrypt_card(zc) { 585 /* Check for online CCA cards */ 586 if (!zc->online || !(zc->card->functions & 0x10000000)) 587 continue; 588 /* get weight index of the card device */ 589 weight = zc->speed_rating[func_code]; 590 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 591 continue; 592 for_each_zcrypt_queue(zq, zc) { 593 /* check if device is online and eligible */ 594 if (!zq->online || !zq->ops->rng) 595 continue; 596 if (zcrypt_queue_compare(zq, pref_zq, 597 weight, pref_weight)) 598 continue; 599 pref_zc = zc; 600 pref_zq = zq; 601 pref_weight = weight; 602 } 603 } 604 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 605 spin_unlock(&zcrypt_list_lock); 606 607 if (!pref_zq) 608 return -ENODEV; 609 610 qid = pref_zq->queue->qid; 611 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 612 613 spin_lock(&zcrypt_list_lock); 614 zcrypt_drop_queue(pref_zc, pref_zq, weight); 615 spin_unlock(&zcrypt_list_lock); 616 617 out: 618 trace_s390_zcrypt_rep(buffer, func_code, rc, 619 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 620 return rc; 621 } 622 623 void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) 624 { 625 struct zcrypt_card *zc; 626 struct zcrypt_queue *zq; 627 struct zcrypt_device_status *stat; 628 629 memset(matrix, 0, sizeof(*matrix)); 630 spin_lock(&zcrypt_list_lock); 631 for_each_zcrypt_card(zc) { 632 for_each_zcrypt_queue(zq, zc) { 633 stat = matrix->device; 634 stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS; 635 stat += AP_QID_QUEUE(zq->queue->qid); 636 stat->hwtype = zc->card->ap_dev.device_type; 637 stat->functions = zc->card->functions >> 26; 638 stat->qid = zq->queue->qid; 639 stat->online = zq->online ? 0x01 : 0x00; 640 } 641 } 642 spin_unlock(&zcrypt_list_lock); 643 } 644 EXPORT_SYMBOL(zcrypt_device_status_mask); 645 646 static void zcrypt_status_mask(char status[AP_DEVICES]) 647 { 648 struct zcrypt_card *zc; 649 struct zcrypt_queue *zq; 650 651 memset(status, 0, sizeof(char) * AP_DEVICES); 652 spin_lock(&zcrypt_list_lock); 653 for_each_zcrypt_card(zc) { 654 for_each_zcrypt_queue(zq, zc) { 655 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 656 continue; 657 status[AP_QID_CARD(zq->queue->qid)] = 658 zc->online ? zc->user_space_type : 0x0d; 659 } 660 } 661 spin_unlock(&zcrypt_list_lock); 662 } 663 664 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) 665 { 666 struct zcrypt_card *zc; 667 struct zcrypt_queue *zq; 668 669 memset(qdepth, 0, sizeof(char) * AP_DEVICES); 670 spin_lock(&zcrypt_list_lock); 671 local_bh_disable(); 672 for_each_zcrypt_card(zc) { 673 for_each_zcrypt_queue(zq, zc) { 674 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 675 continue; 676 spin_lock(&zq->queue->lock); 677 qdepth[AP_QID_CARD(zq->queue->qid)] = 678 zq->queue->pendingq_count + 679 zq->queue->requestq_count; 680 spin_unlock(&zq->queue->lock); 681 } 682 } 683 local_bh_enable(); 684 spin_unlock(&zcrypt_list_lock); 685 } 686 687 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) 688 { 689 struct zcrypt_card *zc; 690 struct zcrypt_queue *zq; 691 692 memset(reqcnt, 0, sizeof(int) * AP_DEVICES); 693 spin_lock(&zcrypt_list_lock); 694 local_bh_disable(); 695 for_each_zcrypt_card(zc) { 696 for_each_zcrypt_queue(zq, zc) { 697 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 698 continue; 699 spin_lock(&zq->queue->lock); 700 reqcnt[AP_QID_CARD(zq->queue->qid)] = 701 zq->queue->total_request_count; 702 spin_unlock(&zq->queue->lock); 703 } 704 } 705 local_bh_enable(); 706 spin_unlock(&zcrypt_list_lock); 707 } 708 709 static int zcrypt_pendingq_count(void) 710 { 711 struct zcrypt_card *zc; 712 struct zcrypt_queue *zq; 713 int pendingq_count; 714 715 pendingq_count = 0; 716 spin_lock(&zcrypt_list_lock); 717 local_bh_disable(); 718 for_each_zcrypt_card(zc) { 719 for_each_zcrypt_queue(zq, zc) { 720 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 721 continue; 722 spin_lock(&zq->queue->lock); 723 pendingq_count += zq->queue->pendingq_count; 724 spin_unlock(&zq->queue->lock); 725 } 726 } 727 local_bh_enable(); 728 spin_unlock(&zcrypt_list_lock); 729 return pendingq_count; 730 } 731 732 static int zcrypt_requestq_count(void) 733 { 734 struct zcrypt_card *zc; 735 struct zcrypt_queue *zq; 736 int requestq_count; 737 738 requestq_count = 0; 739 spin_lock(&zcrypt_list_lock); 740 local_bh_disable(); 741 for_each_zcrypt_card(zc) { 742 for_each_zcrypt_queue(zq, zc) { 743 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 744 continue; 745 spin_lock(&zq->queue->lock); 746 requestq_count += zq->queue->requestq_count; 747 spin_unlock(&zq->queue->lock); 748 } 749 } 750 local_bh_enable(); 751 spin_unlock(&zcrypt_list_lock); 752 return requestq_count; 753 } 754 755 static int zcrypt_count_type(int type) 756 { 757 struct zcrypt_card *zc; 758 struct zcrypt_queue *zq; 759 int device_count; 760 761 device_count = 0; 762 spin_lock(&zcrypt_list_lock); 763 for_each_zcrypt_card(zc) { 764 if (zc->card->id != type) 765 continue; 766 for_each_zcrypt_queue(zq, zc) { 767 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 768 continue; 769 device_count++; 770 } 771 } 772 spin_unlock(&zcrypt_list_lock); 773 return device_count; 774 } 775 776 /** 777 * zcrypt_ica_status(): Old, depracted combi status call. 778 * 779 * Old, deprecated combi status call. 780 */ 781 static long zcrypt_ica_status(struct file *filp, unsigned long arg) 782 { 783 struct ica_z90_status *pstat; 784 int ret; 785 786 pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); 787 if (!pstat) 788 return -ENOMEM; 789 pstat->totalcount = zcrypt_device_count; 790 pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); 791 pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); 792 pstat->requestqWaitCount = zcrypt_requestq_count(); 793 pstat->pendingqWaitCount = zcrypt_pendingq_count(); 794 pstat->totalOpenCount = atomic_read(&zcrypt_open_count); 795 pstat->cryptoDomain = ap_domain_index; 796 zcrypt_status_mask(pstat->status); 797 zcrypt_qdepth_mask(pstat->qdepth); 798 ret = 0; 799 if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) 800 ret = -EFAULT; 801 kfree(pstat); 802 return ret; 803 } 804 805 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 806 unsigned long arg) 807 { 808 int rc; 809 810 switch (cmd) { 811 case ICARSAMODEXPO: { 812 struct ica_rsa_modexpo __user *umex = (void __user *) arg; 813 struct ica_rsa_modexpo mex; 814 if (copy_from_user(&mex, umex, sizeof(mex))) 815 return -EFAULT; 816 do { 817 rc = zcrypt_rsa_modexpo(&mex); 818 } while (rc == -EAGAIN); 819 /* on failure: retry once again after a requested rescan */ 820 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 821 do { 822 rc = zcrypt_rsa_modexpo(&mex); 823 } while (rc == -EAGAIN); 824 if (rc) 825 return rc; 826 return put_user(mex.outputdatalength, &umex->outputdatalength); 827 } 828 case ICARSACRT: { 829 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; 830 struct ica_rsa_modexpo_crt crt; 831 if (copy_from_user(&crt, ucrt, sizeof(crt))) 832 return -EFAULT; 833 do { 834 rc = zcrypt_rsa_crt(&crt); 835 } while (rc == -EAGAIN); 836 /* on failure: retry once again after a requested rescan */ 837 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 838 do { 839 rc = zcrypt_rsa_crt(&crt); 840 } while (rc == -EAGAIN); 841 if (rc) 842 return rc; 843 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 844 } 845 case ZSECSENDCPRB: { 846 struct ica_xcRB __user *uxcRB = (void __user *) arg; 847 struct ica_xcRB xcRB; 848 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 849 return -EFAULT; 850 do { 851 rc = zcrypt_send_cprb(&xcRB); 852 } while (rc == -EAGAIN); 853 /* on failure: retry once again after a requested rescan */ 854 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 855 do { 856 rc = zcrypt_send_cprb(&xcRB); 857 } while (rc == -EAGAIN); 858 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 859 return -EFAULT; 860 return rc; 861 } 862 case ZSENDEP11CPRB: { 863 struct ep11_urb __user *uxcrb = (void __user *)arg; 864 struct ep11_urb xcrb; 865 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 866 return -EFAULT; 867 do { 868 rc = zcrypt_send_ep11_cprb(&xcrb); 869 } while (rc == -EAGAIN); 870 /* on failure: retry once again after a requested rescan */ 871 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 872 do { 873 rc = zcrypt_send_ep11_cprb(&xcrb); 874 } while (rc == -EAGAIN); 875 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 876 return -EFAULT; 877 return rc; 878 } 879 case ZDEVICESTATUS: { 880 struct zcrypt_device_matrix *device_status; 881 882 device_status = kzalloc(sizeof(struct zcrypt_device_matrix), 883 GFP_KERNEL); 884 if (!device_status) 885 return -ENOMEM; 886 887 zcrypt_device_status_mask(device_status); 888 889 if (copy_to_user((char __user *) arg, device_status, 890 sizeof(struct zcrypt_device_matrix))) { 891 kfree(device_status); 892 return -EFAULT; 893 } 894 895 kfree(device_status); 896 return 0; 897 } 898 case Z90STAT_STATUS_MASK: { 899 char status[AP_DEVICES]; 900 zcrypt_status_mask(status); 901 if (copy_to_user((char __user *) arg, status, 902 sizeof(char) * AP_DEVICES)) 903 return -EFAULT; 904 return 0; 905 } 906 case Z90STAT_QDEPTH_MASK: { 907 char qdepth[AP_DEVICES]; 908 zcrypt_qdepth_mask(qdepth); 909 if (copy_to_user((char __user *) arg, qdepth, 910 sizeof(char) * AP_DEVICES)) 911 return -EFAULT; 912 return 0; 913 } 914 case Z90STAT_PERDEV_REQCNT: { 915 int reqcnt[AP_DEVICES]; 916 zcrypt_perdev_reqcnt(reqcnt); 917 if (copy_to_user((int __user *) arg, reqcnt, 918 sizeof(int) * AP_DEVICES)) 919 return -EFAULT; 920 return 0; 921 } 922 case Z90STAT_REQUESTQ_COUNT: 923 return put_user(zcrypt_requestq_count(), (int __user *) arg); 924 case Z90STAT_PENDINGQ_COUNT: 925 return put_user(zcrypt_pendingq_count(), (int __user *) arg); 926 case Z90STAT_TOTALOPEN_COUNT: 927 return put_user(atomic_read(&zcrypt_open_count), 928 (int __user *) arg); 929 case Z90STAT_DOMAIN_INDEX: 930 return put_user(ap_domain_index, (int __user *) arg); 931 /* 932 * Deprecated ioctls. Don't add another device count ioctl, 933 * you can count them yourself in the user space with the 934 * output of the Z90STAT_STATUS_MASK ioctl. 935 */ 936 case ICAZ90STATUS: 937 return zcrypt_ica_status(filp, arg); 938 case Z90STAT_TOTALCOUNT: 939 return put_user(zcrypt_device_count, (int __user *) arg); 940 case Z90STAT_PCICACOUNT: 941 return put_user(zcrypt_count_type(ZCRYPT_PCICA), 942 (int __user *) arg); 943 case Z90STAT_PCICCCOUNT: 944 return put_user(zcrypt_count_type(ZCRYPT_PCICC), 945 (int __user *) arg); 946 case Z90STAT_PCIXCCMCL2COUNT: 947 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), 948 (int __user *) arg); 949 case Z90STAT_PCIXCCMCL3COUNT: 950 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 951 (int __user *) arg); 952 case Z90STAT_PCIXCCCOUNT: 953 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + 954 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 955 (int __user *) arg); 956 case Z90STAT_CEX2CCOUNT: 957 return put_user(zcrypt_count_type(ZCRYPT_CEX2C), 958 (int __user *) arg); 959 case Z90STAT_CEX2ACOUNT: 960 return put_user(zcrypt_count_type(ZCRYPT_CEX2A), 961 (int __user *) arg); 962 default: 963 /* unknown ioctl number */ 964 return -ENOIOCTLCMD; 965 } 966 } 967 968 #ifdef CONFIG_COMPAT 969 /* 970 * ioctl32 conversion routines 971 */ 972 struct compat_ica_rsa_modexpo { 973 compat_uptr_t inputdata; 974 unsigned int inputdatalength; 975 compat_uptr_t outputdata; 976 unsigned int outputdatalength; 977 compat_uptr_t b_key; 978 compat_uptr_t n_modulus; 979 }; 980 981 static long trans_modexpo32(struct file *filp, unsigned int cmd, 982 unsigned long arg) 983 { 984 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 985 struct compat_ica_rsa_modexpo mex32; 986 struct ica_rsa_modexpo mex64; 987 long rc; 988 989 if (copy_from_user(&mex32, umex32, sizeof(mex32))) 990 return -EFAULT; 991 mex64.inputdata = compat_ptr(mex32.inputdata); 992 mex64.inputdatalength = mex32.inputdatalength; 993 mex64.outputdata = compat_ptr(mex32.outputdata); 994 mex64.outputdatalength = mex32.outputdatalength; 995 mex64.b_key = compat_ptr(mex32.b_key); 996 mex64.n_modulus = compat_ptr(mex32.n_modulus); 997 do { 998 rc = zcrypt_rsa_modexpo(&mex64); 999 } while (rc == -EAGAIN); 1000 /* on failure: retry once again after a requested rescan */ 1001 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1002 do { 1003 rc = zcrypt_rsa_modexpo(&mex64); 1004 } while (rc == -EAGAIN); 1005 if (rc) 1006 return rc; 1007 return put_user(mex64.outputdatalength, 1008 &umex32->outputdatalength); 1009 } 1010 1011 struct compat_ica_rsa_modexpo_crt { 1012 compat_uptr_t inputdata; 1013 unsigned int inputdatalength; 1014 compat_uptr_t outputdata; 1015 unsigned int outputdatalength; 1016 compat_uptr_t bp_key; 1017 compat_uptr_t bq_key; 1018 compat_uptr_t np_prime; 1019 compat_uptr_t nq_prime; 1020 compat_uptr_t u_mult_inv; 1021 }; 1022 1023 static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, 1024 unsigned long arg) 1025 { 1026 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 1027 struct compat_ica_rsa_modexpo_crt crt32; 1028 struct ica_rsa_modexpo_crt crt64; 1029 long rc; 1030 1031 if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) 1032 return -EFAULT; 1033 crt64.inputdata = compat_ptr(crt32.inputdata); 1034 crt64.inputdatalength = crt32.inputdatalength; 1035 crt64.outputdata= compat_ptr(crt32.outputdata); 1036 crt64.outputdatalength = crt32.outputdatalength; 1037 crt64.bp_key = compat_ptr(crt32.bp_key); 1038 crt64.bq_key = compat_ptr(crt32.bq_key); 1039 crt64.np_prime = compat_ptr(crt32.np_prime); 1040 crt64.nq_prime = compat_ptr(crt32.nq_prime); 1041 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 1042 do { 1043 rc = zcrypt_rsa_crt(&crt64); 1044 } while (rc == -EAGAIN); 1045 /* on failure: retry once again after a requested rescan */ 1046 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1047 do { 1048 rc = zcrypt_rsa_crt(&crt64); 1049 } while (rc == -EAGAIN); 1050 if (rc) 1051 return rc; 1052 return put_user(crt64.outputdatalength, 1053 &ucrt32->outputdatalength); 1054 } 1055 1056 struct compat_ica_xcRB { 1057 unsigned short agent_ID; 1058 unsigned int user_defined; 1059 unsigned short request_ID; 1060 unsigned int request_control_blk_length; 1061 unsigned char padding1[16 - sizeof (compat_uptr_t)]; 1062 compat_uptr_t request_control_blk_addr; 1063 unsigned int request_data_length; 1064 char padding2[16 - sizeof (compat_uptr_t)]; 1065 compat_uptr_t request_data_address; 1066 unsigned int reply_control_blk_length; 1067 char padding3[16 - sizeof (compat_uptr_t)]; 1068 compat_uptr_t reply_control_blk_addr; 1069 unsigned int reply_data_length; 1070 char padding4[16 - sizeof (compat_uptr_t)]; 1071 compat_uptr_t reply_data_addr; 1072 unsigned short priority_window; 1073 unsigned int status; 1074 } __attribute__((packed)); 1075 1076 static long trans_xcRB32(struct file *filp, unsigned int cmd, 1077 unsigned long arg) 1078 { 1079 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 1080 struct compat_ica_xcRB xcRB32; 1081 struct ica_xcRB xcRB64; 1082 long rc; 1083 1084 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) 1085 return -EFAULT; 1086 xcRB64.agent_ID = xcRB32.agent_ID; 1087 xcRB64.user_defined = xcRB32.user_defined; 1088 xcRB64.request_ID = xcRB32.request_ID; 1089 xcRB64.request_control_blk_length = 1090 xcRB32.request_control_blk_length; 1091 xcRB64.request_control_blk_addr = 1092 compat_ptr(xcRB32.request_control_blk_addr); 1093 xcRB64.request_data_length = 1094 xcRB32.request_data_length; 1095 xcRB64.request_data_address = 1096 compat_ptr(xcRB32.request_data_address); 1097 xcRB64.reply_control_blk_length = 1098 xcRB32.reply_control_blk_length; 1099 xcRB64.reply_control_blk_addr = 1100 compat_ptr(xcRB32.reply_control_blk_addr); 1101 xcRB64.reply_data_length = xcRB32.reply_data_length; 1102 xcRB64.reply_data_addr = 1103 compat_ptr(xcRB32.reply_data_addr); 1104 xcRB64.priority_window = xcRB32.priority_window; 1105 xcRB64.status = xcRB32.status; 1106 do { 1107 rc = zcrypt_send_cprb(&xcRB64); 1108 } while (rc == -EAGAIN); 1109 /* on failure: retry once again after a requested rescan */ 1110 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1111 do { 1112 rc = zcrypt_send_cprb(&xcRB64); 1113 } while (rc == -EAGAIN); 1114 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1115 xcRB32.reply_data_length = xcRB64.reply_data_length; 1116 xcRB32.status = xcRB64.status; 1117 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) 1118 return -EFAULT; 1119 return rc; 1120 } 1121 1122 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1123 unsigned long arg) 1124 { 1125 if (cmd == ICARSAMODEXPO) 1126 return trans_modexpo32(filp, cmd, arg); 1127 if (cmd == ICARSACRT) 1128 return trans_modexpo_crt32(filp, cmd, arg); 1129 if (cmd == ZSECSENDCPRB) 1130 return trans_xcRB32(filp, cmd, arg); 1131 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1132 } 1133 #endif 1134 1135 /* 1136 * Misc device file operations. 1137 */ 1138 static const struct file_operations zcrypt_fops = { 1139 .owner = THIS_MODULE, 1140 .read = zcrypt_read, 1141 .write = zcrypt_write, 1142 .unlocked_ioctl = zcrypt_unlocked_ioctl, 1143 #ifdef CONFIG_COMPAT 1144 .compat_ioctl = zcrypt_compat_ioctl, 1145 #endif 1146 .open = zcrypt_open, 1147 .release = zcrypt_release, 1148 .llseek = no_llseek, 1149 }; 1150 1151 /* 1152 * Misc device. 1153 */ 1154 static struct miscdevice zcrypt_misc_device = { 1155 .minor = MISC_DYNAMIC_MINOR, 1156 .name = "z90crypt", 1157 .fops = &zcrypt_fops, 1158 }; 1159 1160 /* 1161 * Deprecated /proc entry support. 1162 */ 1163 static struct proc_dir_entry *zcrypt_entry; 1164 1165 static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len) 1166 { 1167 int i; 1168 1169 for (i = 0; i < len; i++) 1170 seq_printf(m, "%01x", (unsigned int) addr[i]); 1171 seq_putc(m, ' '); 1172 } 1173 1174 static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len) 1175 { 1176 int inl, c, cx; 1177 1178 seq_printf(m, " "); 1179 inl = 0; 1180 for (c = 0; c < (len / 16); c++) { 1181 sprintcl(m, addr+inl, 16); 1182 inl += 16; 1183 } 1184 cx = len%16; 1185 if (cx) { 1186 sprintcl(m, addr+inl, cx); 1187 inl += cx; 1188 } 1189 seq_putc(m, '\n'); 1190 } 1191 1192 static void sprinthx(unsigned char *title, struct seq_file *m, 1193 unsigned char *addr, unsigned int len) 1194 { 1195 int inl, r, rx; 1196 1197 seq_printf(m, "\n%s\n", title); 1198 inl = 0; 1199 for (r = 0; r < (len / 64); r++) { 1200 sprintrw(m, addr+inl, 64); 1201 inl += 64; 1202 } 1203 rx = len % 64; 1204 if (rx) { 1205 sprintrw(m, addr+inl, rx); 1206 inl += rx; 1207 } 1208 seq_putc(m, '\n'); 1209 } 1210 1211 static void sprinthx4(unsigned char *title, struct seq_file *m, 1212 unsigned int *array, unsigned int len) 1213 { 1214 seq_printf(m, "\n%s\n", title); 1215 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false); 1216 seq_putc(m, '\n'); 1217 } 1218 1219 static int zcrypt_proc_show(struct seq_file *m, void *v) 1220 { 1221 char workarea[sizeof(int) * AP_DEVICES]; 1222 1223 seq_printf(m, "\nzcrypt version: %d.%d.%d\n", 1224 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); 1225 seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index); 1226 seq_printf(m, "Total device count: %d\n", zcrypt_device_count); 1227 seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA)); 1228 seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC)); 1229 seq_printf(m, "PCIXCC MCL2 count: %d\n", 1230 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); 1231 seq_printf(m, "PCIXCC MCL3 count: %d\n", 1232 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); 1233 seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C)); 1234 seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A)); 1235 seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C)); 1236 seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A)); 1237 seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count()); 1238 seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count()); 1239 seq_printf(m, "Total open handles: %d\n\n", 1240 atomic_read(&zcrypt_open_count)); 1241 zcrypt_status_mask(workarea); 1242 sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " 1243 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", 1244 m, workarea, AP_DEVICES); 1245 zcrypt_qdepth_mask(workarea); 1246 sprinthx("Waiting work element counts", m, workarea, AP_DEVICES); 1247 zcrypt_perdev_reqcnt((int *) workarea); 1248 sprinthx4("Per-device successfully completed request counts", 1249 m, (unsigned int *) workarea, AP_DEVICES); 1250 return 0; 1251 } 1252 1253 static int zcrypt_proc_open(struct inode *inode, struct file *file) 1254 { 1255 return single_open(file, zcrypt_proc_show, NULL); 1256 } 1257 1258 static void zcrypt_disable_card(int index) 1259 { 1260 struct zcrypt_card *zc; 1261 struct zcrypt_queue *zq; 1262 1263 spin_lock(&zcrypt_list_lock); 1264 for_each_zcrypt_card(zc) { 1265 for_each_zcrypt_queue(zq, zc) { 1266 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1267 continue; 1268 zq->online = 0; 1269 ap_flush_queue(zq->queue); 1270 } 1271 } 1272 spin_unlock(&zcrypt_list_lock); 1273 } 1274 1275 static void zcrypt_enable_card(int index) 1276 { 1277 struct zcrypt_card *zc; 1278 struct zcrypt_queue *zq; 1279 1280 spin_lock(&zcrypt_list_lock); 1281 for_each_zcrypt_card(zc) { 1282 for_each_zcrypt_queue(zq, zc) { 1283 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1284 continue; 1285 zq->online = 1; 1286 ap_flush_queue(zq->queue); 1287 } 1288 } 1289 spin_unlock(&zcrypt_list_lock); 1290 } 1291 1292 static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, 1293 size_t count, loff_t *pos) 1294 { 1295 unsigned char *lbuf, *ptr; 1296 size_t local_count; 1297 int j; 1298 1299 if (count <= 0) 1300 return 0; 1301 1302 #define LBUFSIZE 1200UL 1303 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 1304 if (!lbuf) 1305 return 0; 1306 1307 local_count = min(LBUFSIZE - 1, count); 1308 if (copy_from_user(lbuf, buffer, local_count) != 0) { 1309 kfree(lbuf); 1310 return -EFAULT; 1311 } 1312 lbuf[local_count] = '\0'; 1313 1314 ptr = strstr(lbuf, "Online devices"); 1315 if (!ptr) 1316 goto out; 1317 ptr = strstr(ptr, "\n"); 1318 if (!ptr) 1319 goto out; 1320 ptr++; 1321 1322 if (strstr(ptr, "Waiting work element counts") == NULL) 1323 goto out; 1324 1325 for (j = 0; j < 64 && *ptr; ptr++) { 1326 /* 1327 * '0' for no device, '1' for PCICA, '2' for PCICC, 1328 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, 1329 * '5' for CEX2C and '6' for CEX2A' 1330 * '7' for CEX3C and '8' for CEX3A 1331 */ 1332 if (*ptr >= '0' && *ptr <= '8') 1333 j++; 1334 else if (*ptr == 'd' || *ptr == 'D') 1335 zcrypt_disable_card(j++); 1336 else if (*ptr == 'e' || *ptr == 'E') 1337 zcrypt_enable_card(j++); 1338 else if (*ptr != ' ' && *ptr != '\t') 1339 break; 1340 } 1341 out: 1342 kfree(lbuf); 1343 return count; 1344 } 1345 1346 static const struct file_operations zcrypt_proc_fops = { 1347 .owner = THIS_MODULE, 1348 .open = zcrypt_proc_open, 1349 .read = seq_read, 1350 .llseek = seq_lseek, 1351 .release = single_release, 1352 .write = zcrypt_proc_write, 1353 }; 1354 1355 static int zcrypt_rng_device_count; 1356 static u32 *zcrypt_rng_buffer; 1357 static int zcrypt_rng_buffer_index; 1358 static DEFINE_MUTEX(zcrypt_rng_mutex); 1359 1360 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) 1361 { 1362 int rc; 1363 1364 /* 1365 * We don't need locking here because the RNG API guarantees serialized 1366 * read method calls. 1367 */ 1368 if (zcrypt_rng_buffer_index == 0) { 1369 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1370 /* on failure: retry once again after a requested rescan */ 1371 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1372 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1373 if (rc < 0) 1374 return -EIO; 1375 zcrypt_rng_buffer_index = rc / sizeof *data; 1376 } 1377 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; 1378 return sizeof *data; 1379 } 1380 1381 static struct hwrng zcrypt_rng_dev = { 1382 .name = "zcrypt", 1383 .data_read = zcrypt_rng_data_read, 1384 .quality = 990, 1385 }; 1386 1387 int zcrypt_rng_device_add(void) 1388 { 1389 int rc = 0; 1390 1391 mutex_lock(&zcrypt_rng_mutex); 1392 if (zcrypt_rng_device_count == 0) { 1393 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); 1394 if (!zcrypt_rng_buffer) { 1395 rc = -ENOMEM; 1396 goto out; 1397 } 1398 zcrypt_rng_buffer_index = 0; 1399 if (!zcrypt_hwrng_seed) 1400 zcrypt_rng_dev.quality = 0; 1401 rc = hwrng_register(&zcrypt_rng_dev); 1402 if (rc) 1403 goto out_free; 1404 zcrypt_rng_device_count = 1; 1405 } else 1406 zcrypt_rng_device_count++; 1407 mutex_unlock(&zcrypt_rng_mutex); 1408 return 0; 1409 1410 out_free: 1411 free_page((unsigned long) zcrypt_rng_buffer); 1412 out: 1413 mutex_unlock(&zcrypt_rng_mutex); 1414 return rc; 1415 } 1416 1417 void zcrypt_rng_device_remove(void) 1418 { 1419 mutex_lock(&zcrypt_rng_mutex); 1420 zcrypt_rng_device_count--; 1421 if (zcrypt_rng_device_count == 0) { 1422 hwrng_unregister(&zcrypt_rng_dev); 1423 free_page((unsigned long) zcrypt_rng_buffer); 1424 } 1425 mutex_unlock(&zcrypt_rng_mutex); 1426 } 1427 1428 int __init zcrypt_debug_init(void) 1429 { 1430 zcrypt_dbf_info = debug_register("zcrypt", 1, 1, 1431 DBF_MAX_SPRINTF_ARGS * sizeof(long)); 1432 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 1433 debug_set_level(zcrypt_dbf_info, DBF_ERR); 1434 1435 return 0; 1436 } 1437 1438 void zcrypt_debug_exit(void) 1439 { 1440 debug_unregister(zcrypt_dbf_info); 1441 } 1442 1443 /** 1444 * zcrypt_api_init(): Module initialization. 1445 * 1446 * The module initialization code. 1447 */ 1448 int __init zcrypt_api_init(void) 1449 { 1450 int rc; 1451 1452 rc = zcrypt_debug_init(); 1453 if (rc) 1454 goto out; 1455 1456 atomic_set(&zcrypt_rescan_req, 0); 1457 1458 /* Register the request sprayer. */ 1459 rc = misc_register(&zcrypt_misc_device); 1460 if (rc < 0) 1461 goto out; 1462 1463 /* Set up the proc file system */ 1464 zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, 1465 &zcrypt_proc_fops); 1466 if (!zcrypt_entry) { 1467 rc = -ENOMEM; 1468 goto out_misc; 1469 } 1470 1471 zcrypt_msgtype6_init(); 1472 zcrypt_msgtype50_init(); 1473 return 0; 1474 1475 out_misc: 1476 misc_deregister(&zcrypt_misc_device); 1477 out: 1478 return rc; 1479 } 1480 1481 /** 1482 * zcrypt_api_exit(): Module termination. 1483 * 1484 * The module termination code. 1485 */ 1486 void __exit zcrypt_api_exit(void) 1487 { 1488 remove_proc_entry("driver/z90crypt", NULL); 1489 misc_deregister(&zcrypt_misc_device); 1490 zcrypt_msgtype6_exit(); 1491 zcrypt_msgtype50_exit(); 1492 zcrypt_debug_exit(); 1493 } 1494 1495 module_init(zcrypt_api_init); 1496 module_exit(zcrypt_api_exit); 1497