/*
 * linux/drivers/s390/crypto/ap_bus.c
 *
 * Copyright (C) 2006 IBM Corporation
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
 *	      Felix Beck <felix.beck@de.ibm.com>
 *
 * Adjunct processor bus.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/reset.h>
#include <asm/airq.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

#include "ap_bus.h"

/* Some prototypes. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
		   "Copyright 2006 IBM Corporation");
MODULE_LICENSE("GPL");

/*
 * Module parameter
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, 0000);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

static struct device *ap_root_device = NULL;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);

/*
 * Workqueue & timer for bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/*
 * Tasklet & timer for AP request polling and interrupts
 */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static void *ap_interrupt_indicator;
static struct hrtimer ap_poll_timer;
/* In LPAR poll with 4kHz frequency, i.e. every 250000 nanoseconds.
 * If running under z/VM, change to 1500000 nanoseconds to match the
 * z/VM polling rate. */
static unsigned long long poll_timeout = 250000;
/**
 * ap_using_interrupts() - Returns non-zero if interrupt support is
 * available.
 */
static inline int ap_using_interrupts(void)
{
	return ap_interrupt_indicator != NULL;
}

/**
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la	  %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	return reg1;
}

/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	unsigned long long facility_bits[2];

	if (stfle(facility_bits, 2) <= 1)
		return 0;
	if (!(facility_bits[0] & (1ULL << 61)) ||
	    !(facility_bits[1] & (1ULL << 62)))
		return 0;
	return 1;
}

/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}

/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

#ifdef CONFIG_64BIT
/**
 * ap_queue_interruption_control(): Enable interruption for a specific AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Returns AP queue status.
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;
	asm volatile(
		".long 0xb2af0000"		/* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc" );
	return reg1_out;
}
#endif
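/*
 * Note added for orientation (not in the original source): the helpers above
 * all issue the same PQAP instruction (opcode 0xb2af); they differ only in
 * the function code that is OR'ed into the queue id loaded into register 0.
 * ap_test_queue() uses the plain qid (TAPQ), ap_reset_queue() ORs in
 * 0x01000000 (RAPQ), and ap_queue_interruption_control() ORs in 0x03000000
 * (AQIC).
 */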
/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_queue_interruption_control(). Based
 * on the return value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	return -EINVAL;
#endif
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		"   brc	  2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);
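/*
 * Minimal usage sketch for the exported ap_send()/ap_recv() pair (added for
 * illustration, not part of the original file; buffer handling and the retry
 * policy are simplified).  A caller tags the request with a psmid of its own
 * choosing and later matches the reply against that psmid:
 *
 *	unsigned long long psmid;
 *	void *reply = (void *) get_zeroed_page(GFP_KERNEL);
 *	int rc;
 *
 *	rc = ap_send(qid, 0x1122334455667788ULL, msg, msg_len);
 *	if (rc)
 *		goto out;	// -EBUSY: retry later, -ENODEV: queue gone
 *	do {
 *		msleep(10);	// back off instead of busy waiting
 *		rc = ap_recv(qid, &psmid, reply, PAGE_SIZE);
 *	} while (rc == -ENOENT || rc == -EBUSY);
 */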
/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially.	The response is incomplete, hence the
 * DQAP is repeated.
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;


	asm volatile(
		"0: .long 0xb2ae0064\n"
		"   brc	  6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc" );
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/**
 * ap_query_queue(): Check if an AP queue is available.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * The test is repeated for AP_MAX_RESET times.
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			break;
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_INVALID_ADDRESS:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		case AP_RESPONSE_BUSY:
			break;
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}
/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Reset an AP queue and wait for it to become available again.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fall through */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
		/* If interruption mode is supported by the machine,
		 * but an AP can not be enabled for interruption then
		 * the AP will be discarded. */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}

/**
 * ap_increase_queue_count(): Arm request timeout.
 * @ap_dev: Pointer to an AP device.
 *
 * Arm request timeout if an AP device was idle and a new request is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}

/**
 * ap_decrease_queue_count(): Decrease queue count.
 * @ap_dev: Pointer to an AP device.
 *
 * If AP device is still alive, re-schedule request timeout if there are still
 * pending requests.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * The timeout timer should be disabled now - since
		 * del_timer_sync() is very expensive, we just tell via the
		 * reset flag to ignore the pending timeout timer.
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}
/*
 * AP device related attributes.
 */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}

static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_modalias.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};

/**
 * ap_bus_match()
 * @dev: Pointer to device
 * @drv: Pointer to device_driver
 *
 * AP bus device/driver matching: decide whether a driver can handle a
 * device by comparing device types.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type.
 */
static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
};
static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;
	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	if (!rc) {
		spin_lock_bh(&ap_device_list_lock);
		list_add(&ap_dev->list, &ap_device_list);
		spin_unlock_bh(&ap_device_list_lock);
	}
	return rc;
}

/**
 * __ap_flush_queue(): Flush requests.
 * @ap_dev: Pointer to the AP device
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
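/*
 * Illustrative sketch (not part of the original file) of how a card driver
 * hooks into this bus through the exported interface above.  Names prefixed
 * with "example_" are hypothetical; the field names (ids, probe, remove,
 * receive, request_timeout) are the ones this bus code actually uses:
 *
 *	static struct ap_device_id example_ids[] = {
 *		{ .dev_type = 10,
 *		  .match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE },
 *		{ },
 *	};
 *
 *	static struct ap_driver example_driver = {
 *		.ids = example_ids,
 *		.probe = example_probe,	      // set up per-device state, e.g. ap_dev->reply
 *		.remove = example_remove,
 *		.receive = example_receive,   // completion callback for queued messages
 *		.request_timeout = 60 * HZ,   // used to arm ap_dev->timeout
 *	};
 *
 *	rc = ap_driver_register(&example_driver, THIS_MODULE, "example");
 */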
/*
 * AP bus attributes.
 */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			ap_using_interrupts() ? 1 : 0);
}

static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);

static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	}
	else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer,
			     hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};
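/*
 * For orientation (summary added here, not in the original file): once the
 * bus is registered these attributes appear under /sys/bus/ap/, e.g.
 *
 *	echo 30 > /sys/bus/ap/config_time	# rescan interval in seconds (5..120)
 *	echo 1 > /sys/bus/ap/poll_thread	# start the polling kthread
 *	echo 500000 > /sys/bus/ap/poll_timeout	# poll interval in ns (1..120000000000)
 *	cat /sys/bus/ap/ap_domain		# selected domain, read-only
 *	cat /sys/bus/ap/ap_interrupts		# 1 if adapter interrupts are in use
 *
 * The per-device attributes defined earlier (hwtype, depth, request_count,
 * modalias) show up under the card nodes, e.g. /sys/devices/ap/card00/.
 */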
/**
 * ap_select_domain(): Select an AP domain.
 *
 * Pick one of the 16 AP domains.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	int rc, i, j;

	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			ap_qid_t qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}

/**
 * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
 *
 * Find the device type if query queue returned a device type of 0.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg));
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}

static void ap_interrupt_handler(void *unused1, void *unused2)
{
	tasklet_schedule(&ap_tasklet);
}
/**
 * __ap_scan_bus(): Match helper for the AP bus scan.
 * @dev: Pointer to device
 * @data: Pointer to data (the queue id to look for)
 *
 * Used via bus_find_device() during the bus scan to check whether a
 * device for the given queue id already exists.
 */
static int __ap_scan_bus(struct device *dev, void *data)
{
	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
}

static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	kfree(ap_dev);
}

static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	int rc, i;

	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		rc = ap_query_queue(qid, &queue_depth, &device_type);
		if (dev) {
			if (rc == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		if (device_type == 0)
			ap_probe_device_type(ap_dev);
		else
			ap_dev->device_type = device_type;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		dev_set_name(&ap_dev->device, "card%02x",
			     AP_QID_DEVICE(ap_dev->qid));
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			kfree(ap_dev);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		}
		else
			device_unregister(&ap_dev->device);
	}
}

static void ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}

/**
 * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet
 */
static inline void ap_schedule_poll_timer(void)
{
	if (ap_using_interrupts())
		return;
	if (hrtimer_is_queued(&ap_poll_timer))
		return;
	hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout),
		      HRTIMER_MODE_ABS);
}
/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Poll AP device for pending replies and send new messages. If either
 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
 * Returns 0.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}
/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Queue a message to a device. Returns 0 if successful.
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}

void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);
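/*
 * Illustrative request flow from a driver's point of view (added summary,
 * not from the original file; "example_" names are hypothetical):
 *
 *	struct ap_message *ap_msg = ...;	// driver-owned request
 *
 *	ap_msg->psmid = example_unique_id();	// used to match the reply
 *	ap_msg->message = request_buffer;
 *	ap_msg->length = request_len;
 *	ap_queue_message(ap_dev, ap_msg);
 *	// The reply (or an ERR_PTR error code) arrives asynchronously via
 *	// ap_dev->drv->receive().  To abort before that happens:
 *	ap_cancel_message(ap_dev, ap_msg);
 */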
/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet using a high resolution timer.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}

/**
 * ap_reset(): Reset a not responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
}

static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
	spin_lock(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	spin_unlock(&ap_dev->lock);
	return 0;
}

/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags has been set arm the poll timer.
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the indicator if interrupts are used. Thus new interrupts can
	 * be received. Resetting it at the beginning of the tasklet is
	 * important so that no requests on any AP get lost.
	 */
	if (ap_using_interrupts())
		xchg((u8 *)ap_interrupt_indicator, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			__ap_poll_device(ap_dev, &flags);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}
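/*
 * Summary added for orientation (not in the original file): finished
 * requests are harvested in one of three ways.  With adapter interrupts,
 * ap_interrupt_handler() schedules the ap_tasklet above.  Without
 * interrupts, ap_schedule_poll_timer() arms the high resolution timer whose
 * expiry handler, ap_poll_timeout(), schedules the same tasklet.
 * Optionally, the low priority poll thread below polls whenever requests
 * are outstanding and the CPU has nothing better to do.
 */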
/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, 19);
	while (1) {
		if (need_resched()) {
			schedule();
			continue;
		}
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			__ap_poll_device(ap_dev, &flags);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}

static int ap_poll_thread_start(void)
{
	int rc;

	if (ap_using_interrupts())
		return 0;
	mutex_lock(&ap_poll_thread_mutex);
	if (!ap_poll_kthread) {
		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
		if (rc)
			ap_poll_kthread = NULL;
	}
	else
		rc = 0;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}

static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}

/**
 * ap_request_timeout(): Handling of request timeouts
 * @data: Holds the AP device.
 *
 * Handles request timeouts.
 */
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	if (ap_dev->reset == AP_RESET_ARMED) {
		ap_dev->reset = AP_RESET_DO;

		if (ap_using_interrupts())
			tasklet_schedule(&ap_tasklet);
	}
}

static void ap_reset_domain(void)
{
	int i;

	if (ap_domain_index != -1)
		for (i = 0; i < AP_DEVICES; i++)
			ap_reset_queue(AP_MKQID(i, ap_domain_index));
}

static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++)
		for (j = 0; j < AP_DEVICES; j++)
			ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};
/**
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		isc_register(AP_ISC);
		ap_interrupt_indicator = s390_register_adapter_interrupt(
			&ap_interrupt_handler, NULL, AP_ISC);
		if (IS_ERR(ap_interrupt_indicator)) {
			ap_interrupt_indicator = NULL;
			isc_unregister(AP_ISC);
		}
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
		isc_unregister(AP_ISC);
	}
	return rc;
}

static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * ap_module_exit(): The module termination code
 *
 * Terminates the module.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
		isc_unregister(AP_ISC);
	}
}

#ifndef CONFIG_ZCRYPT_MONOLITHIC
module_init(ap_module_init);
module_exit(ap_module_exit);
#endif