1 /* 2 * zcrypt 2.1.0 3 * 4 * Copyright IBM Corp. 2001, 2012 5 * Author(s): Robert Burroughs 6 * Eric Rossman (edrossma@us.ibm.com) 7 * Cornelia Huck <cornelia.huck@de.ibm.com> 8 * 9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 * Ralph Wuerthner <rwuerthn@de.ibm.com> 12 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2, or (at your option) 17 * any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program; if not, write to the Free Software 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 */ 28 29 #include <linux/module.h> 30 #include <linux/init.h> 31 #include <linux/interrupt.h> 32 #include <linux/miscdevice.h> 33 #include <linux/fs.h> 34 #include <linux/proc_fs.h> 35 #include <linux/seq_file.h> 36 #include <linux/compat.h> 37 #include <linux/slab.h> 38 #include <linux/atomic.h> 39 #include <linux/uaccess.h> 40 #include <linux/hw_random.h> 41 #include <linux/debugfs.h> 42 #include <asm/debug.h> 43 44 #define CREATE_TRACE_POINTS 45 #include <asm/trace/zcrypt.h> 46 47 #include "zcrypt_api.h" 48 #include "zcrypt_debug.h" 49 50 #include "zcrypt_msgtype6.h" 51 #include "zcrypt_msgtype50.h" 52 53 /* 54 * Module description. 55 */ 56 MODULE_AUTHOR("IBM Corporation"); 57 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ 58 "Copyright IBM Corp. 2001, 2012"); 59 MODULE_LICENSE("GPL"); 60 61 /* 62 * zcrypt tracepoint functions 63 */ 64 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); 65 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); 66 67 static int zcrypt_hwrng_seed = 1; 68 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); 69 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); 70 71 DEFINE_SPINLOCK(zcrypt_list_lock); 72 LIST_HEAD(zcrypt_card_list); 73 int zcrypt_device_count; 74 75 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 76 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 77 78 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 79 EXPORT_SYMBOL(zcrypt_rescan_req); 80 81 static LIST_HEAD(zcrypt_ops_list); 82 83 /* Zcrypt related debug feature stuff. */ 84 debug_info_t *zcrypt_dbf_info; 85 86 /** 87 * Process a rescan of the transport layer. 88 * 89 * Returns 1, if the rescan has been processed, otherwise 0. 90 */ 91 static inline int zcrypt_process_rescan(void) 92 { 93 if (atomic_read(&zcrypt_rescan_req)) { 94 atomic_set(&zcrypt_rescan_req, 0); 95 atomic_inc(&zcrypt_rescan_count); 96 ap_bus_force_rescan(); 97 ZCRYPT_DBF(DBF_INFO, "rescan count=%07d", 98 atomic_inc_return(&zcrypt_rescan_count)); 99 return 1; 100 } 101 return 0; 102 } 103 104 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 105 { 106 list_add_tail(&zops->list, &zcrypt_ops_list); 107 } 108 109 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 110 { 111 list_del_init(&zops->list); 112 } 113 114 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) 115 { 116 struct zcrypt_ops *zops; 117 118 list_for_each_entry(zops, &zcrypt_ops_list, list) 119 if ((zops->variant == variant) && 120 (!strncmp(zops->name, name, sizeof(zops->name)))) 121 return zops; 122 return NULL; 123 } 124 EXPORT_SYMBOL(zcrypt_msgtype); 125 126 /** 127 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 128 * 129 * This function is not supported beyond zcrypt 1.3.1. 130 */ 131 static ssize_t zcrypt_read(struct file *filp, char __user *buf, 132 size_t count, loff_t *f_pos) 133 { 134 return -EPERM; 135 } 136 137 /** 138 * zcrypt_write(): Not allowed. 139 * 140 * Write is is not allowed 141 */ 142 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 143 size_t count, loff_t *f_pos) 144 { 145 return -EPERM; 146 } 147 148 /** 149 * zcrypt_open(): Count number of users. 150 * 151 * Device open function to count number of users. 152 */ 153 static int zcrypt_open(struct inode *inode, struct file *filp) 154 { 155 atomic_inc(&zcrypt_open_count); 156 return nonseekable_open(inode, filp); 157 } 158 159 /** 160 * zcrypt_release(): Count number of users. 161 * 162 * Device close function to count number of users. 163 */ 164 static int zcrypt_release(struct inode *inode, struct file *filp) 165 { 166 atomic_dec(&zcrypt_open_count); 167 return 0; 168 } 169 170 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 171 struct zcrypt_queue *zq, 172 unsigned int weight) 173 { 174 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 175 return NULL; 176 zcrypt_queue_get(zq); 177 get_device(&zq->queue->ap_dev.device); 178 atomic_add(weight, &zc->load); 179 atomic_add(weight, &zq->load); 180 zq->request_count++; 181 return zq; 182 } 183 184 static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 185 struct zcrypt_queue *zq, 186 unsigned int weight) 187 { 188 struct module *mod = zq->queue->ap_dev.drv->driver.owner; 189 190 zq->request_count--; 191 atomic_sub(weight, &zc->load); 192 atomic_sub(weight, &zq->load); 193 put_device(&zq->queue->ap_dev.device); 194 zcrypt_queue_put(zq); 195 module_put(mod); 196 } 197 198 static inline bool zcrypt_card_compare(struct zcrypt_card *zc, 199 struct zcrypt_card *pref_zc, 200 unsigned weight, unsigned pref_weight) 201 { 202 if (!pref_zc) 203 return false; 204 weight += atomic_read(&zc->load); 205 pref_weight += atomic_read(&pref_zc->load); 206 if (weight == pref_weight) 207 return atomic_read(&zc->card->total_request_count) > 208 atomic_read(&pref_zc->card->total_request_count); 209 return weight > pref_weight; 210 } 211 212 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, 213 struct zcrypt_queue *pref_zq, 214 unsigned weight, unsigned pref_weight) 215 { 216 if (!pref_zq) 217 return false; 218 weight += atomic_read(&zq->load); 219 pref_weight += atomic_read(&pref_zq->load); 220 if (weight == pref_weight) 221 return &zq->queue->total_request_count > 222 &pref_zq->queue->total_request_count; 223 return weight > pref_weight; 224 } 225 226 /* 227 * zcrypt ioctls. 228 */ 229 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 230 { 231 struct zcrypt_card *zc, *pref_zc; 232 struct zcrypt_queue *zq, *pref_zq; 233 unsigned int weight, pref_weight; 234 unsigned int func_code; 235 int qid = 0, rc = -ENODEV; 236 237 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 238 239 if (mex->outputdatalength < mex->inputdatalength) { 240 rc = -EINVAL; 241 goto out; 242 } 243 244 /* 245 * As long as outputdatalength is big enough, we can set the 246 * outputdatalength equal to the inputdatalength, since that is the 247 * number of bytes we will copy in any case 248 */ 249 mex->outputdatalength = mex->inputdatalength; 250 251 rc = get_rsa_modex_fc(mex, &func_code); 252 if (rc) 253 goto out; 254 255 pref_zc = NULL; 256 pref_zq = NULL; 257 spin_lock(&zcrypt_list_lock); 258 for_each_zcrypt_card(zc) { 259 /* Check for online accelarator and CCA cards */ 260 if (!zc->online || !(zc->card->functions & 0x18000000)) 261 continue; 262 /* Check for size limits */ 263 if (zc->min_mod_size > mex->inputdatalength || 264 zc->max_mod_size < mex->inputdatalength) 265 continue; 266 /* get weight index of the card device */ 267 weight = zc->speed_rating[func_code]; 268 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 269 continue; 270 for_each_zcrypt_queue(zq, zc) { 271 /* check if device is online and eligible */ 272 if (!zq->online || !zq->ops->rsa_modexpo) 273 continue; 274 if (zcrypt_queue_compare(zq, pref_zq, 275 weight, pref_weight)) 276 continue; 277 pref_zc = zc; 278 pref_zq = zq; 279 pref_weight = weight; 280 } 281 } 282 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 283 spin_unlock(&zcrypt_list_lock); 284 285 if (!pref_zq) { 286 rc = -ENODEV; 287 goto out; 288 } 289 290 qid = pref_zq->queue->qid; 291 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 292 293 spin_lock(&zcrypt_list_lock); 294 zcrypt_drop_queue(pref_zc, pref_zq, weight); 295 spin_unlock(&zcrypt_list_lock); 296 297 out: 298 trace_s390_zcrypt_rep(mex, func_code, rc, 299 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 300 return rc; 301 } 302 303 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 304 { 305 struct zcrypt_card *zc, *pref_zc; 306 struct zcrypt_queue *zq, *pref_zq; 307 unsigned int weight, pref_weight; 308 unsigned int func_code; 309 int qid = 0, rc = -ENODEV; 310 311 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 312 313 if (crt->outputdatalength < crt->inputdatalength) { 314 rc = -EINVAL; 315 goto out; 316 } 317 318 /* 319 * As long as outputdatalength is big enough, we can set the 320 * outputdatalength equal to the inputdatalength, since that is the 321 * number of bytes we will copy in any case 322 */ 323 crt->outputdatalength = crt->inputdatalength; 324 325 rc = get_rsa_crt_fc(crt, &func_code); 326 if (rc) 327 goto out; 328 329 pref_zc = NULL; 330 pref_zq = NULL; 331 spin_lock(&zcrypt_list_lock); 332 for_each_zcrypt_card(zc) { 333 /* Check for online accelarator and CCA cards */ 334 if (!zc->online || !(zc->card->functions & 0x18000000)) 335 continue; 336 /* Check for size limits */ 337 if (zc->min_mod_size > crt->inputdatalength || 338 zc->max_mod_size < crt->inputdatalength) 339 continue; 340 /* get weight index of the card device */ 341 weight = zc->speed_rating[func_code]; 342 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 343 continue; 344 for_each_zcrypt_queue(zq, zc) { 345 /* check if device is online and eligible */ 346 if (!zq->online || !zq->ops->rsa_modexpo_crt) 347 continue; 348 if (zcrypt_queue_compare(zq, pref_zq, 349 weight, pref_weight)) 350 continue; 351 pref_zc = zc; 352 pref_zq = zq; 353 pref_weight = weight; 354 } 355 } 356 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 357 spin_unlock(&zcrypt_list_lock); 358 359 if (!pref_zq) { 360 rc = -ENODEV; 361 goto out; 362 } 363 364 qid = pref_zq->queue->qid; 365 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 366 367 spin_lock(&zcrypt_list_lock); 368 zcrypt_drop_queue(pref_zc, pref_zq, weight); 369 spin_unlock(&zcrypt_list_lock); 370 371 out: 372 trace_s390_zcrypt_rep(crt, func_code, rc, 373 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 374 return rc; 375 } 376 377 static long zcrypt_send_cprb(struct ica_xcRB *xcRB) 378 { 379 struct zcrypt_card *zc, *pref_zc; 380 struct zcrypt_queue *zq, *pref_zq; 381 struct ap_message ap_msg; 382 unsigned int weight, pref_weight; 383 unsigned int func_code; 384 unsigned short *domain; 385 int qid = 0, rc = -ENODEV; 386 387 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 388 389 rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); 390 if (rc) 391 goto out; 392 393 pref_zc = NULL; 394 pref_zq = NULL; 395 spin_lock(&zcrypt_list_lock); 396 for_each_zcrypt_card(zc) { 397 /* Check for online CCA cards */ 398 if (!zc->online || !(zc->card->functions & 0x10000000)) 399 continue; 400 /* Check for user selected CCA card */ 401 if (xcRB->user_defined != AUTOSELECT && 402 xcRB->user_defined != zc->card->id) 403 continue; 404 /* get weight index of the card device */ 405 weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 406 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 407 continue; 408 for_each_zcrypt_queue(zq, zc) { 409 /* check if device is online and eligible */ 410 if (!zq->online || 411 !zq->ops->send_cprb || 412 ((*domain != (unsigned short) AUTOSELECT) && 413 (*domain != AP_QID_QUEUE(zq->queue->qid)))) 414 continue; 415 if (zcrypt_queue_compare(zq, pref_zq, 416 weight, pref_weight)) 417 continue; 418 pref_zc = zc; 419 pref_zq = zq; 420 pref_weight = weight; 421 } 422 } 423 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 424 spin_unlock(&zcrypt_list_lock); 425 426 if (!pref_zq) { 427 rc = -ENODEV; 428 goto out; 429 } 430 431 /* in case of auto select, provide the correct domain */ 432 qid = pref_zq->queue->qid; 433 if (*domain == (unsigned short) AUTOSELECT) 434 *domain = AP_QID_QUEUE(qid); 435 436 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 437 438 spin_lock(&zcrypt_list_lock); 439 zcrypt_drop_queue(pref_zc, pref_zq, weight); 440 spin_unlock(&zcrypt_list_lock); 441 442 out: 443 trace_s390_zcrypt_rep(xcRB, func_code, rc, 444 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 445 return rc; 446 } 447 448 static bool is_desired_ep11_card(unsigned int dev_id, 449 unsigned short target_num, 450 struct ep11_target_dev *targets) 451 { 452 while (target_num-- > 0) { 453 if (dev_id == targets->ap_id) 454 return true; 455 targets++; 456 } 457 return false; 458 } 459 460 static bool is_desired_ep11_queue(unsigned int dev_qid, 461 unsigned short target_num, 462 struct ep11_target_dev *targets) 463 { 464 while (target_num-- > 0) { 465 if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) 466 return true; 467 targets++; 468 } 469 return false; 470 } 471 472 static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 473 { 474 struct zcrypt_card *zc, *pref_zc; 475 struct zcrypt_queue *zq, *pref_zq; 476 struct ep11_target_dev *targets; 477 unsigned short target_num; 478 unsigned int weight, pref_weight; 479 unsigned int func_code; 480 struct ap_message ap_msg; 481 int qid = 0, rc = -ENODEV; 482 483 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 484 485 target_num = (unsigned short) xcrb->targets_num; 486 487 /* empty list indicates autoselect (all available targets) */ 488 targets = NULL; 489 if (target_num != 0) { 490 struct ep11_target_dev __user *uptr; 491 492 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 493 if (!targets) { 494 rc = -ENOMEM; 495 goto out; 496 } 497 498 uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 499 if (copy_from_user(targets, uptr, 500 target_num * sizeof(*targets))) { 501 rc = -EFAULT; 502 goto out; 503 } 504 } 505 506 rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); 507 if (rc) 508 goto out_free; 509 510 pref_zc = NULL; 511 pref_zq = NULL; 512 spin_lock(&zcrypt_list_lock); 513 for_each_zcrypt_card(zc) { 514 /* Check for online EP11 cards */ 515 if (!zc->online || !(zc->card->functions & 0x04000000)) 516 continue; 517 /* Check for user selected EP11 card */ 518 if (targets && 519 !is_desired_ep11_card(zc->card->id, target_num, targets)) 520 continue; 521 /* get weight index of the card device */ 522 weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 523 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 524 continue; 525 for_each_zcrypt_queue(zq, zc) { 526 /* check if device is online and eligible */ 527 if (!zq->online || 528 !zq->ops->send_ep11_cprb || 529 (targets && 530 !is_desired_ep11_queue(zq->queue->qid, 531 target_num, targets))) 532 continue; 533 if (zcrypt_queue_compare(zq, pref_zq, 534 weight, pref_weight)) 535 continue; 536 pref_zc = zc; 537 pref_zq = zq; 538 pref_weight = weight; 539 } 540 } 541 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 542 spin_unlock(&zcrypt_list_lock); 543 544 if (!pref_zq) { 545 rc = -ENODEV; 546 goto out_free; 547 } 548 549 qid = pref_zq->queue->qid; 550 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 551 552 spin_lock(&zcrypt_list_lock); 553 zcrypt_drop_queue(pref_zc, pref_zq, weight); 554 spin_unlock(&zcrypt_list_lock); 555 556 out_free: 557 kfree(targets); 558 out: 559 trace_s390_zcrypt_rep(xcrb, func_code, rc, 560 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 561 return rc; 562 } 563 564 static long zcrypt_rng(char *buffer) 565 { 566 struct zcrypt_card *zc, *pref_zc; 567 struct zcrypt_queue *zq, *pref_zq; 568 unsigned int weight, pref_weight; 569 unsigned int func_code; 570 struct ap_message ap_msg; 571 unsigned int domain; 572 int qid = 0, rc = -ENODEV; 573 574 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 575 576 rc = get_rng_fc(&ap_msg, &func_code, &domain); 577 if (rc) 578 goto out; 579 580 pref_zc = NULL; 581 pref_zq = NULL; 582 spin_lock(&zcrypt_list_lock); 583 for_each_zcrypt_card(zc) { 584 /* Check for online CCA cards */ 585 if (!zc->online || !(zc->card->functions & 0x10000000)) 586 continue; 587 /* get weight index of the card device */ 588 weight = zc->speed_rating[func_code]; 589 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 590 continue; 591 for_each_zcrypt_queue(zq, zc) { 592 /* check if device is online and eligible */ 593 if (!zq->online || !zq->ops->rng) 594 continue; 595 if (zcrypt_queue_compare(zq, pref_zq, 596 weight, pref_weight)) 597 continue; 598 pref_zc = zc; 599 pref_zq = zq; 600 pref_weight = weight; 601 } 602 } 603 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 604 spin_unlock(&zcrypt_list_lock); 605 606 if (!pref_zq) 607 return -ENODEV; 608 609 qid = pref_zq->queue->qid; 610 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 611 612 spin_lock(&zcrypt_list_lock); 613 zcrypt_drop_queue(pref_zc, pref_zq, weight); 614 spin_unlock(&zcrypt_list_lock); 615 616 out: 617 trace_s390_zcrypt_rep(buffer, func_code, rc, 618 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 619 return rc; 620 } 621 622 static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) 623 { 624 struct zcrypt_card *zc; 625 struct zcrypt_queue *zq; 626 struct zcrypt_device_status *stat; 627 628 memset(matrix, 0, sizeof(*matrix)); 629 spin_lock(&zcrypt_list_lock); 630 for_each_zcrypt_card(zc) { 631 for_each_zcrypt_queue(zq, zc) { 632 stat = matrix->device; 633 stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS; 634 stat += AP_QID_QUEUE(zq->queue->qid); 635 stat->hwtype = zc->card->ap_dev.device_type; 636 stat->functions = zc->card->functions >> 26; 637 stat->qid = zq->queue->qid; 638 stat->online = zq->online ? 0x01 : 0x00; 639 } 640 } 641 spin_unlock(&zcrypt_list_lock); 642 } 643 EXPORT_SYMBOL(zcrypt_device_status_mask); 644 645 static void zcrypt_status_mask(char status[AP_DEVICES]) 646 { 647 struct zcrypt_card *zc; 648 struct zcrypt_queue *zq; 649 650 memset(status, 0, sizeof(char) * AP_DEVICES); 651 spin_lock(&zcrypt_list_lock); 652 for_each_zcrypt_card(zc) { 653 for_each_zcrypt_queue(zq, zc) { 654 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 655 continue; 656 status[AP_QID_CARD(zq->queue->qid)] = 657 zc->online ? zc->user_space_type : 0x0d; 658 } 659 } 660 spin_unlock(&zcrypt_list_lock); 661 } 662 663 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) 664 { 665 struct zcrypt_card *zc; 666 struct zcrypt_queue *zq; 667 668 memset(qdepth, 0, sizeof(char) * AP_DEVICES); 669 spin_lock(&zcrypt_list_lock); 670 local_bh_disable(); 671 for_each_zcrypt_card(zc) { 672 for_each_zcrypt_queue(zq, zc) { 673 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 674 continue; 675 spin_lock(&zq->queue->lock); 676 qdepth[AP_QID_CARD(zq->queue->qid)] = 677 zq->queue->pendingq_count + 678 zq->queue->requestq_count; 679 spin_unlock(&zq->queue->lock); 680 } 681 } 682 local_bh_enable(); 683 spin_unlock(&zcrypt_list_lock); 684 } 685 686 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) 687 { 688 struct zcrypt_card *zc; 689 struct zcrypt_queue *zq; 690 691 memset(reqcnt, 0, sizeof(int) * AP_DEVICES); 692 spin_lock(&zcrypt_list_lock); 693 local_bh_disable(); 694 for_each_zcrypt_card(zc) { 695 for_each_zcrypt_queue(zq, zc) { 696 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 697 continue; 698 spin_lock(&zq->queue->lock); 699 reqcnt[AP_QID_CARD(zq->queue->qid)] = 700 zq->queue->total_request_count; 701 spin_unlock(&zq->queue->lock); 702 } 703 } 704 local_bh_enable(); 705 spin_unlock(&zcrypt_list_lock); 706 } 707 708 static int zcrypt_pendingq_count(void) 709 { 710 struct zcrypt_card *zc; 711 struct zcrypt_queue *zq; 712 int pendingq_count; 713 714 pendingq_count = 0; 715 spin_lock(&zcrypt_list_lock); 716 local_bh_disable(); 717 for_each_zcrypt_card(zc) { 718 for_each_zcrypt_queue(zq, zc) { 719 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 720 continue; 721 spin_lock(&zq->queue->lock); 722 pendingq_count += zq->queue->pendingq_count; 723 spin_unlock(&zq->queue->lock); 724 } 725 } 726 local_bh_enable(); 727 spin_unlock(&zcrypt_list_lock); 728 return pendingq_count; 729 } 730 731 static int zcrypt_requestq_count(void) 732 { 733 struct zcrypt_card *zc; 734 struct zcrypt_queue *zq; 735 int requestq_count; 736 737 requestq_count = 0; 738 spin_lock(&zcrypt_list_lock); 739 local_bh_disable(); 740 for_each_zcrypt_card(zc) { 741 for_each_zcrypt_queue(zq, zc) { 742 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 743 continue; 744 spin_lock(&zq->queue->lock); 745 requestq_count += zq->queue->requestq_count; 746 spin_unlock(&zq->queue->lock); 747 } 748 } 749 local_bh_enable(); 750 spin_unlock(&zcrypt_list_lock); 751 return requestq_count; 752 } 753 754 static int zcrypt_count_type(int type) 755 { 756 struct zcrypt_card *zc; 757 struct zcrypt_queue *zq; 758 int device_count; 759 760 device_count = 0; 761 spin_lock(&zcrypt_list_lock); 762 for_each_zcrypt_card(zc) { 763 if (zc->card->id != type) 764 continue; 765 for_each_zcrypt_queue(zq, zc) { 766 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 767 continue; 768 device_count++; 769 } 770 } 771 spin_unlock(&zcrypt_list_lock); 772 return device_count; 773 } 774 775 /** 776 * zcrypt_ica_status(): Old, depracted combi status call. 777 * 778 * Old, deprecated combi status call. 779 */ 780 static long zcrypt_ica_status(struct file *filp, unsigned long arg) 781 { 782 struct ica_z90_status *pstat; 783 int ret; 784 785 pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); 786 if (!pstat) 787 return -ENOMEM; 788 pstat->totalcount = zcrypt_device_count; 789 pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); 790 pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); 791 pstat->requestqWaitCount = zcrypt_requestq_count(); 792 pstat->pendingqWaitCount = zcrypt_pendingq_count(); 793 pstat->totalOpenCount = atomic_read(&zcrypt_open_count); 794 pstat->cryptoDomain = ap_domain_index; 795 zcrypt_status_mask(pstat->status); 796 zcrypt_qdepth_mask(pstat->qdepth); 797 ret = 0; 798 if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) 799 ret = -EFAULT; 800 kfree(pstat); 801 return ret; 802 } 803 804 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 805 unsigned long arg) 806 { 807 int rc; 808 809 switch (cmd) { 810 case ICARSAMODEXPO: { 811 struct ica_rsa_modexpo __user *umex = (void __user *) arg; 812 struct ica_rsa_modexpo mex; 813 if (copy_from_user(&mex, umex, sizeof(mex))) 814 return -EFAULT; 815 do { 816 rc = zcrypt_rsa_modexpo(&mex); 817 } while (rc == -EAGAIN); 818 /* on failure: retry once again after a requested rescan */ 819 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 820 do { 821 rc = zcrypt_rsa_modexpo(&mex); 822 } while (rc == -EAGAIN); 823 if (rc) 824 return rc; 825 return put_user(mex.outputdatalength, &umex->outputdatalength); 826 } 827 case ICARSACRT: { 828 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; 829 struct ica_rsa_modexpo_crt crt; 830 if (copy_from_user(&crt, ucrt, sizeof(crt))) 831 return -EFAULT; 832 do { 833 rc = zcrypt_rsa_crt(&crt); 834 } while (rc == -EAGAIN); 835 /* on failure: retry once again after a requested rescan */ 836 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 837 do { 838 rc = zcrypt_rsa_crt(&crt); 839 } while (rc == -EAGAIN); 840 if (rc) 841 return rc; 842 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 843 } 844 case ZSECSENDCPRB: { 845 struct ica_xcRB __user *uxcRB = (void __user *) arg; 846 struct ica_xcRB xcRB; 847 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 848 return -EFAULT; 849 do { 850 rc = zcrypt_send_cprb(&xcRB); 851 } while (rc == -EAGAIN); 852 /* on failure: retry once again after a requested rescan */ 853 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 854 do { 855 rc = zcrypt_send_cprb(&xcRB); 856 } while (rc == -EAGAIN); 857 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 858 return -EFAULT; 859 return rc; 860 } 861 case ZSENDEP11CPRB: { 862 struct ep11_urb __user *uxcrb = (void __user *)arg; 863 struct ep11_urb xcrb; 864 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 865 return -EFAULT; 866 do { 867 rc = zcrypt_send_ep11_cprb(&xcrb); 868 } while (rc == -EAGAIN); 869 /* on failure: retry once again after a requested rescan */ 870 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 871 do { 872 rc = zcrypt_send_ep11_cprb(&xcrb); 873 } while (rc == -EAGAIN); 874 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 875 return -EFAULT; 876 return rc; 877 } 878 case ZDEVICESTATUS: { 879 struct zcrypt_device_matrix *device_status; 880 881 device_status = kzalloc(sizeof(struct zcrypt_device_matrix), 882 GFP_KERNEL); 883 if (!device_status) 884 return -ENOMEM; 885 886 zcrypt_device_status_mask(device_status); 887 888 if (copy_to_user((char __user *) arg, device_status, 889 sizeof(struct zcrypt_device_matrix))) { 890 kfree(device_status); 891 return -EFAULT; 892 } 893 894 kfree(device_status); 895 return 0; 896 } 897 case Z90STAT_STATUS_MASK: { 898 char status[AP_DEVICES]; 899 zcrypt_status_mask(status); 900 if (copy_to_user((char __user *) arg, status, 901 sizeof(char) * AP_DEVICES)) 902 return -EFAULT; 903 return 0; 904 } 905 case Z90STAT_QDEPTH_MASK: { 906 char qdepth[AP_DEVICES]; 907 zcrypt_qdepth_mask(qdepth); 908 if (copy_to_user((char __user *) arg, qdepth, 909 sizeof(char) * AP_DEVICES)) 910 return -EFAULT; 911 return 0; 912 } 913 case Z90STAT_PERDEV_REQCNT: { 914 int reqcnt[AP_DEVICES]; 915 zcrypt_perdev_reqcnt(reqcnt); 916 if (copy_to_user((int __user *) arg, reqcnt, 917 sizeof(int) * AP_DEVICES)) 918 return -EFAULT; 919 return 0; 920 } 921 case Z90STAT_REQUESTQ_COUNT: 922 return put_user(zcrypt_requestq_count(), (int __user *) arg); 923 case Z90STAT_PENDINGQ_COUNT: 924 return put_user(zcrypt_pendingq_count(), (int __user *) arg); 925 case Z90STAT_TOTALOPEN_COUNT: 926 return put_user(atomic_read(&zcrypt_open_count), 927 (int __user *) arg); 928 case Z90STAT_DOMAIN_INDEX: 929 return put_user(ap_domain_index, (int __user *) arg); 930 /* 931 * Deprecated ioctls. Don't add another device count ioctl, 932 * you can count them yourself in the user space with the 933 * output of the Z90STAT_STATUS_MASK ioctl. 934 */ 935 case ICAZ90STATUS: 936 return zcrypt_ica_status(filp, arg); 937 case Z90STAT_TOTALCOUNT: 938 return put_user(zcrypt_device_count, (int __user *) arg); 939 case Z90STAT_PCICACOUNT: 940 return put_user(zcrypt_count_type(ZCRYPT_PCICA), 941 (int __user *) arg); 942 case Z90STAT_PCICCCOUNT: 943 return put_user(zcrypt_count_type(ZCRYPT_PCICC), 944 (int __user *) arg); 945 case Z90STAT_PCIXCCMCL2COUNT: 946 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), 947 (int __user *) arg); 948 case Z90STAT_PCIXCCMCL3COUNT: 949 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 950 (int __user *) arg); 951 case Z90STAT_PCIXCCCOUNT: 952 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + 953 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 954 (int __user *) arg); 955 case Z90STAT_CEX2CCOUNT: 956 return put_user(zcrypt_count_type(ZCRYPT_CEX2C), 957 (int __user *) arg); 958 case Z90STAT_CEX2ACOUNT: 959 return put_user(zcrypt_count_type(ZCRYPT_CEX2A), 960 (int __user *) arg); 961 default: 962 /* unknown ioctl number */ 963 return -ENOIOCTLCMD; 964 } 965 } 966 967 #ifdef CONFIG_COMPAT 968 /* 969 * ioctl32 conversion routines 970 */ 971 struct compat_ica_rsa_modexpo { 972 compat_uptr_t inputdata; 973 unsigned int inputdatalength; 974 compat_uptr_t outputdata; 975 unsigned int outputdatalength; 976 compat_uptr_t b_key; 977 compat_uptr_t n_modulus; 978 }; 979 980 static long trans_modexpo32(struct file *filp, unsigned int cmd, 981 unsigned long arg) 982 { 983 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 984 struct compat_ica_rsa_modexpo mex32; 985 struct ica_rsa_modexpo mex64; 986 long rc; 987 988 if (copy_from_user(&mex32, umex32, sizeof(mex32))) 989 return -EFAULT; 990 mex64.inputdata = compat_ptr(mex32.inputdata); 991 mex64.inputdatalength = mex32.inputdatalength; 992 mex64.outputdata = compat_ptr(mex32.outputdata); 993 mex64.outputdatalength = mex32.outputdatalength; 994 mex64.b_key = compat_ptr(mex32.b_key); 995 mex64.n_modulus = compat_ptr(mex32.n_modulus); 996 do { 997 rc = zcrypt_rsa_modexpo(&mex64); 998 } while (rc == -EAGAIN); 999 /* on failure: retry once again after a requested rescan */ 1000 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1001 do { 1002 rc = zcrypt_rsa_modexpo(&mex64); 1003 } while (rc == -EAGAIN); 1004 if (rc) 1005 return rc; 1006 return put_user(mex64.outputdatalength, 1007 &umex32->outputdatalength); 1008 } 1009 1010 struct compat_ica_rsa_modexpo_crt { 1011 compat_uptr_t inputdata; 1012 unsigned int inputdatalength; 1013 compat_uptr_t outputdata; 1014 unsigned int outputdatalength; 1015 compat_uptr_t bp_key; 1016 compat_uptr_t bq_key; 1017 compat_uptr_t np_prime; 1018 compat_uptr_t nq_prime; 1019 compat_uptr_t u_mult_inv; 1020 }; 1021 1022 static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, 1023 unsigned long arg) 1024 { 1025 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 1026 struct compat_ica_rsa_modexpo_crt crt32; 1027 struct ica_rsa_modexpo_crt crt64; 1028 long rc; 1029 1030 if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) 1031 return -EFAULT; 1032 crt64.inputdata = compat_ptr(crt32.inputdata); 1033 crt64.inputdatalength = crt32.inputdatalength; 1034 crt64.outputdata= compat_ptr(crt32.outputdata); 1035 crt64.outputdatalength = crt32.outputdatalength; 1036 crt64.bp_key = compat_ptr(crt32.bp_key); 1037 crt64.bq_key = compat_ptr(crt32.bq_key); 1038 crt64.np_prime = compat_ptr(crt32.np_prime); 1039 crt64.nq_prime = compat_ptr(crt32.nq_prime); 1040 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 1041 do { 1042 rc = zcrypt_rsa_crt(&crt64); 1043 } while (rc == -EAGAIN); 1044 /* on failure: retry once again after a requested rescan */ 1045 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1046 do { 1047 rc = zcrypt_rsa_crt(&crt64); 1048 } while (rc == -EAGAIN); 1049 if (rc) 1050 return rc; 1051 return put_user(crt64.outputdatalength, 1052 &ucrt32->outputdatalength); 1053 } 1054 1055 struct compat_ica_xcRB { 1056 unsigned short agent_ID; 1057 unsigned int user_defined; 1058 unsigned short request_ID; 1059 unsigned int request_control_blk_length; 1060 unsigned char padding1[16 - sizeof (compat_uptr_t)]; 1061 compat_uptr_t request_control_blk_addr; 1062 unsigned int request_data_length; 1063 char padding2[16 - sizeof (compat_uptr_t)]; 1064 compat_uptr_t request_data_address; 1065 unsigned int reply_control_blk_length; 1066 char padding3[16 - sizeof (compat_uptr_t)]; 1067 compat_uptr_t reply_control_blk_addr; 1068 unsigned int reply_data_length; 1069 char padding4[16 - sizeof (compat_uptr_t)]; 1070 compat_uptr_t reply_data_addr; 1071 unsigned short priority_window; 1072 unsigned int status; 1073 } __attribute__((packed)); 1074 1075 static long trans_xcRB32(struct file *filp, unsigned int cmd, 1076 unsigned long arg) 1077 { 1078 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 1079 struct compat_ica_xcRB xcRB32; 1080 struct ica_xcRB xcRB64; 1081 long rc; 1082 1083 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) 1084 return -EFAULT; 1085 xcRB64.agent_ID = xcRB32.agent_ID; 1086 xcRB64.user_defined = xcRB32.user_defined; 1087 xcRB64.request_ID = xcRB32.request_ID; 1088 xcRB64.request_control_blk_length = 1089 xcRB32.request_control_blk_length; 1090 xcRB64.request_control_blk_addr = 1091 compat_ptr(xcRB32.request_control_blk_addr); 1092 xcRB64.request_data_length = 1093 xcRB32.request_data_length; 1094 xcRB64.request_data_address = 1095 compat_ptr(xcRB32.request_data_address); 1096 xcRB64.reply_control_blk_length = 1097 xcRB32.reply_control_blk_length; 1098 xcRB64.reply_control_blk_addr = 1099 compat_ptr(xcRB32.reply_control_blk_addr); 1100 xcRB64.reply_data_length = xcRB32.reply_data_length; 1101 xcRB64.reply_data_addr = 1102 compat_ptr(xcRB32.reply_data_addr); 1103 xcRB64.priority_window = xcRB32.priority_window; 1104 xcRB64.status = xcRB32.status; 1105 do { 1106 rc = zcrypt_send_cprb(&xcRB64); 1107 } while (rc == -EAGAIN); 1108 /* on failure: retry once again after a requested rescan */ 1109 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1110 do { 1111 rc = zcrypt_send_cprb(&xcRB64); 1112 } while (rc == -EAGAIN); 1113 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1114 xcRB32.reply_data_length = xcRB64.reply_data_length; 1115 xcRB32.status = xcRB64.status; 1116 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) 1117 return -EFAULT; 1118 return rc; 1119 } 1120 1121 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1122 unsigned long arg) 1123 { 1124 if (cmd == ICARSAMODEXPO) 1125 return trans_modexpo32(filp, cmd, arg); 1126 if (cmd == ICARSACRT) 1127 return trans_modexpo_crt32(filp, cmd, arg); 1128 if (cmd == ZSECSENDCPRB) 1129 return trans_xcRB32(filp, cmd, arg); 1130 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1131 } 1132 #endif 1133 1134 /* 1135 * Misc device file operations. 1136 */ 1137 static const struct file_operations zcrypt_fops = { 1138 .owner = THIS_MODULE, 1139 .read = zcrypt_read, 1140 .write = zcrypt_write, 1141 .unlocked_ioctl = zcrypt_unlocked_ioctl, 1142 #ifdef CONFIG_COMPAT 1143 .compat_ioctl = zcrypt_compat_ioctl, 1144 #endif 1145 .open = zcrypt_open, 1146 .release = zcrypt_release, 1147 .llseek = no_llseek, 1148 }; 1149 1150 /* 1151 * Misc device. 1152 */ 1153 static struct miscdevice zcrypt_misc_device = { 1154 .minor = MISC_DYNAMIC_MINOR, 1155 .name = "z90crypt", 1156 .fops = &zcrypt_fops, 1157 }; 1158 1159 /* 1160 * Deprecated /proc entry support. 1161 */ 1162 static struct proc_dir_entry *zcrypt_entry; 1163 1164 static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len) 1165 { 1166 int i; 1167 1168 for (i = 0; i < len; i++) 1169 seq_printf(m, "%01x", (unsigned int) addr[i]); 1170 seq_putc(m, ' '); 1171 } 1172 1173 static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len) 1174 { 1175 int inl, c, cx; 1176 1177 seq_printf(m, " "); 1178 inl = 0; 1179 for (c = 0; c < (len / 16); c++) { 1180 sprintcl(m, addr+inl, 16); 1181 inl += 16; 1182 } 1183 cx = len%16; 1184 if (cx) { 1185 sprintcl(m, addr+inl, cx); 1186 inl += cx; 1187 } 1188 seq_putc(m, '\n'); 1189 } 1190 1191 static void sprinthx(unsigned char *title, struct seq_file *m, 1192 unsigned char *addr, unsigned int len) 1193 { 1194 int inl, r, rx; 1195 1196 seq_printf(m, "\n%s\n", title); 1197 inl = 0; 1198 for (r = 0; r < (len / 64); r++) { 1199 sprintrw(m, addr+inl, 64); 1200 inl += 64; 1201 } 1202 rx = len % 64; 1203 if (rx) { 1204 sprintrw(m, addr+inl, rx); 1205 inl += rx; 1206 } 1207 seq_putc(m, '\n'); 1208 } 1209 1210 static void sprinthx4(unsigned char *title, struct seq_file *m, 1211 unsigned int *array, unsigned int len) 1212 { 1213 seq_printf(m, "\n%s\n", title); 1214 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false); 1215 seq_putc(m, '\n'); 1216 } 1217 1218 static int zcrypt_proc_show(struct seq_file *m, void *v) 1219 { 1220 char workarea[sizeof(int) * AP_DEVICES]; 1221 1222 seq_printf(m, "\nzcrypt version: %d.%d.%d\n", 1223 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); 1224 seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index); 1225 seq_printf(m, "Total device count: %d\n", zcrypt_device_count); 1226 seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA)); 1227 seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC)); 1228 seq_printf(m, "PCIXCC MCL2 count: %d\n", 1229 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); 1230 seq_printf(m, "PCIXCC MCL3 count: %d\n", 1231 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); 1232 seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C)); 1233 seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A)); 1234 seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C)); 1235 seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A)); 1236 seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count()); 1237 seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count()); 1238 seq_printf(m, "Total open handles: %d\n\n", 1239 atomic_read(&zcrypt_open_count)); 1240 zcrypt_status_mask(workarea); 1241 sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " 1242 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", 1243 m, workarea, AP_DEVICES); 1244 zcrypt_qdepth_mask(workarea); 1245 sprinthx("Waiting work element counts", m, workarea, AP_DEVICES); 1246 zcrypt_perdev_reqcnt((int *) workarea); 1247 sprinthx4("Per-device successfully completed request counts", 1248 m, (unsigned int *) workarea, AP_DEVICES); 1249 return 0; 1250 } 1251 1252 static int zcrypt_proc_open(struct inode *inode, struct file *file) 1253 { 1254 return single_open(file, zcrypt_proc_show, NULL); 1255 } 1256 1257 static void zcrypt_disable_card(int index) 1258 { 1259 struct zcrypt_card *zc; 1260 struct zcrypt_queue *zq; 1261 1262 spin_lock(&zcrypt_list_lock); 1263 for_each_zcrypt_card(zc) { 1264 for_each_zcrypt_queue(zq, zc) { 1265 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1266 continue; 1267 zq->online = 0; 1268 ap_flush_queue(zq->queue); 1269 } 1270 } 1271 spin_unlock(&zcrypt_list_lock); 1272 } 1273 1274 static void zcrypt_enable_card(int index) 1275 { 1276 struct zcrypt_card *zc; 1277 struct zcrypt_queue *zq; 1278 1279 spin_lock(&zcrypt_list_lock); 1280 for_each_zcrypt_card(zc) { 1281 for_each_zcrypt_queue(zq, zc) { 1282 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1283 continue; 1284 zq->online = 1; 1285 ap_flush_queue(zq->queue); 1286 } 1287 } 1288 spin_unlock(&zcrypt_list_lock); 1289 } 1290 1291 static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, 1292 size_t count, loff_t *pos) 1293 { 1294 unsigned char *lbuf, *ptr; 1295 size_t local_count; 1296 int j; 1297 1298 if (count <= 0) 1299 return 0; 1300 1301 #define LBUFSIZE 1200UL 1302 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 1303 if (!lbuf) 1304 return 0; 1305 1306 local_count = min(LBUFSIZE - 1, count); 1307 if (copy_from_user(lbuf, buffer, local_count) != 0) { 1308 kfree(lbuf); 1309 return -EFAULT; 1310 } 1311 lbuf[local_count] = '\0'; 1312 1313 ptr = strstr(lbuf, "Online devices"); 1314 if (!ptr) 1315 goto out; 1316 ptr = strstr(ptr, "\n"); 1317 if (!ptr) 1318 goto out; 1319 ptr++; 1320 1321 if (strstr(ptr, "Waiting work element counts") == NULL) 1322 goto out; 1323 1324 for (j = 0; j < 64 && *ptr; ptr++) { 1325 /* 1326 * '0' for no device, '1' for PCICA, '2' for PCICC, 1327 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, 1328 * '5' for CEX2C and '6' for CEX2A' 1329 * '7' for CEX3C and '8' for CEX3A 1330 */ 1331 if (*ptr >= '0' && *ptr <= '8') 1332 j++; 1333 else if (*ptr == 'd' || *ptr == 'D') 1334 zcrypt_disable_card(j++); 1335 else if (*ptr == 'e' || *ptr == 'E') 1336 zcrypt_enable_card(j++); 1337 else if (*ptr != ' ' && *ptr != '\t') 1338 break; 1339 } 1340 out: 1341 kfree(lbuf); 1342 return count; 1343 } 1344 1345 static const struct file_operations zcrypt_proc_fops = { 1346 .owner = THIS_MODULE, 1347 .open = zcrypt_proc_open, 1348 .read = seq_read, 1349 .llseek = seq_lseek, 1350 .release = single_release, 1351 .write = zcrypt_proc_write, 1352 }; 1353 1354 static int zcrypt_rng_device_count; 1355 static u32 *zcrypt_rng_buffer; 1356 static int zcrypt_rng_buffer_index; 1357 static DEFINE_MUTEX(zcrypt_rng_mutex); 1358 1359 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) 1360 { 1361 int rc; 1362 1363 /* 1364 * We don't need locking here because the RNG API guarantees serialized 1365 * read method calls. 1366 */ 1367 if (zcrypt_rng_buffer_index == 0) { 1368 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1369 /* on failure: retry once again after a requested rescan */ 1370 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1371 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1372 if (rc < 0) 1373 return -EIO; 1374 zcrypt_rng_buffer_index = rc / sizeof *data; 1375 } 1376 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; 1377 return sizeof *data; 1378 } 1379 1380 static struct hwrng zcrypt_rng_dev = { 1381 .name = "zcrypt", 1382 .data_read = zcrypt_rng_data_read, 1383 .quality = 990, 1384 }; 1385 1386 int zcrypt_rng_device_add(void) 1387 { 1388 int rc = 0; 1389 1390 mutex_lock(&zcrypt_rng_mutex); 1391 if (zcrypt_rng_device_count == 0) { 1392 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); 1393 if (!zcrypt_rng_buffer) { 1394 rc = -ENOMEM; 1395 goto out; 1396 } 1397 zcrypt_rng_buffer_index = 0; 1398 if (!zcrypt_hwrng_seed) 1399 zcrypt_rng_dev.quality = 0; 1400 rc = hwrng_register(&zcrypt_rng_dev); 1401 if (rc) 1402 goto out_free; 1403 zcrypt_rng_device_count = 1; 1404 } else 1405 zcrypt_rng_device_count++; 1406 mutex_unlock(&zcrypt_rng_mutex); 1407 return 0; 1408 1409 out_free: 1410 free_page((unsigned long) zcrypt_rng_buffer); 1411 out: 1412 mutex_unlock(&zcrypt_rng_mutex); 1413 return rc; 1414 } 1415 1416 void zcrypt_rng_device_remove(void) 1417 { 1418 mutex_lock(&zcrypt_rng_mutex); 1419 zcrypt_rng_device_count--; 1420 if (zcrypt_rng_device_count == 0) { 1421 hwrng_unregister(&zcrypt_rng_dev); 1422 free_page((unsigned long) zcrypt_rng_buffer); 1423 } 1424 mutex_unlock(&zcrypt_rng_mutex); 1425 } 1426 1427 int __init zcrypt_debug_init(void) 1428 { 1429 zcrypt_dbf_info = debug_register("zcrypt", 1, 1, 1430 DBF_MAX_SPRINTF_ARGS * sizeof(long)); 1431 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 1432 debug_set_level(zcrypt_dbf_info, DBF_ERR); 1433 1434 return 0; 1435 } 1436 1437 void zcrypt_debug_exit(void) 1438 { 1439 debug_unregister(zcrypt_dbf_info); 1440 } 1441 1442 /** 1443 * zcrypt_api_init(): Module initialization. 1444 * 1445 * The module initialization code. 1446 */ 1447 int __init zcrypt_api_init(void) 1448 { 1449 int rc; 1450 1451 rc = zcrypt_debug_init(); 1452 if (rc) 1453 goto out; 1454 1455 atomic_set(&zcrypt_rescan_req, 0); 1456 1457 /* Register the request sprayer. */ 1458 rc = misc_register(&zcrypt_misc_device); 1459 if (rc < 0) 1460 goto out; 1461 1462 /* Set up the proc file system */ 1463 zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, 1464 &zcrypt_proc_fops); 1465 if (!zcrypt_entry) { 1466 rc = -ENOMEM; 1467 goto out_misc; 1468 } 1469 1470 zcrypt_msgtype6_init(); 1471 zcrypt_msgtype50_init(); 1472 return 0; 1473 1474 out_misc: 1475 misc_deregister(&zcrypt_misc_device); 1476 out: 1477 return rc; 1478 } 1479 1480 /** 1481 * zcrypt_api_exit(): Module termination. 1482 * 1483 * The module termination code. 1484 */ 1485 void __exit zcrypt_api_exit(void) 1486 { 1487 remove_proc_entry("driver/z90crypt", NULL); 1488 misc_deregister(&zcrypt_misc_device); 1489 zcrypt_msgtype6_exit(); 1490 zcrypt_msgtype50_exit(); 1491 zcrypt_debug_exit(); 1492 } 1493 1494 module_init(zcrypt_api_init); 1495 module_exit(zcrypt_api_exit); 1496