/*
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
 *	      Felix Beck <felix.beck@de.ibm.com>
 *	      Holger Dengler <hd@linux.vnet.ibm.com>
 *
 * Adjunct processor bus.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/reset.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <asm/facility.h>
#include <linux/crypto.h>

#include "ap_bus.h"

/* Some prototypes. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(struct airq_struct *airq);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);
static void ap_query_configuration(void);

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
		   "Copyright IBM Corp. 2006, 2012");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("z90crypt");

/*
 * Module parameter
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

static struct device *ap_root_device = NULL;
static struct ap_config_info *ap_configuration;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);

/*
 * Workqueue & timer for bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/*
 * Tasklet & timer for AP request polling and interrupts
 */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static struct hrtimer ap_poll_timer;
/* In LPAR we poll with a 4kHz frequency, i.e. every 250000 nanoseconds.
 * Under z/VM this is changed to 1500000 nanoseconds to match the z/VM
 * polling rate. */
static unsigned long long poll_timeout = 250000;

/* Suspend flag */
static int ap_suspend_flag;
/* Flag to check if the domain was set through the module parameter domain=.
 * This is important when suspend and resume are done in a z/VM environment
 * where the domain might change. */
static int user_set_domain = 0;
static struct bus_type ap_bus_type;

/* Adapter interrupt definitions */
static int ap_airq_flag;

static struct airq_struct ap_airq = {
	.handler = ap_interrupt_handler,
	.isc = AP_ISC,
};

/**
 * ap_using_interrupts() - Returns non-zero if interrupt support is
 * available.
 */
static inline int ap_using_interrupts(void)
{
	return ap_airq_flag;
}

/**
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	return test_facility(65);
}

/**
 * ap_configuration_available(): Test if AP configuration
 * information is available.
 *
 * Returns 1 if AP configuration information is available.
 */
static int ap_configuration_available(void)
{
	return test_facility(12);
}

/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}

/**
 * ap_query_facilities(): PQAP(TAPQ) query facilities.
 * @qid: The AP queue number
 *
 * Returns content of general register 2 after the PQAP(TAPQ)
 * instruction was called.
210 */ 211 static inline unsigned long ap_query_facilities(ap_qid_t qid) 212 { 213 register unsigned long reg0 asm ("0") = qid | 0x00800000UL; 214 register unsigned long reg1 asm ("1"); 215 register unsigned long reg2 asm ("2") = 0UL; 216 217 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ 218 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 219 return reg2; 220 } 221 222 /** 223 * ap_reset_queue(): Reset adjunct processor queue. 224 * @qid: The AP queue number 225 * 226 * Returns AP queue status structure. 227 */ 228 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) 229 { 230 register unsigned long reg0 asm ("0") = qid | 0x01000000UL; 231 register struct ap_queue_status reg1 asm ("1"); 232 register unsigned long reg2 asm ("2") = 0UL; 233 234 asm volatile( 235 ".long 0xb2af0000" /* PQAP(RAPQ) */ 236 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 237 return reg1; 238 } 239 240 /** 241 * ap_queue_interruption_control(): Enable interruption for a specific AP. 242 * @qid: The AP queue number 243 * @ind: The notification indicator byte 244 * 245 * Returns AP queue status. 246 */ 247 static inline struct ap_queue_status 248 ap_queue_interruption_control(ap_qid_t qid, void *ind) 249 { 250 register unsigned long reg0 asm ("0") = qid | 0x03000000UL; 251 register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC; 252 register struct ap_queue_status reg1_out asm ("1"); 253 register void *reg2 asm ("2") = ind; 254 asm volatile( 255 ".long 0xb2af0000" /* PQAP(AQIC) */ 256 : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) 257 : 258 : "cc" ); 259 return reg1_out; 260 } 261 262 static inline struct ap_queue_status 263 __ap_query_functions(ap_qid_t qid, unsigned int *functions) 264 { 265 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23); 266 register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID; 267 register unsigned long reg2 asm ("2"); 268 269 asm volatile( 270 ".long 0xb2af0000\n" /* PQAP(TAPQ) */ 271 "0:\n" 272 EX_TABLE(0b, 0b) 273 : "+d" (reg0), "+d" (reg1), "=d" (reg2) 274 : 275 : "cc"); 276 277 *functions = (unsigned int)(reg2 >> 32); 278 return reg1; 279 } 280 281 static inline int __ap_query_configuration(struct ap_config_info *config) 282 { 283 register unsigned long reg0 asm ("0") = 0x04000000UL; 284 register unsigned long reg1 asm ("1") = -EINVAL; 285 register unsigned char *reg2 asm ("2") = (unsigned char *)config; 286 287 asm volatile( 288 ".long 0xb2af0000\n" /* PQAP(QCI) */ 289 "0: la %1,0\n" 290 "1:\n" 291 EX_TABLE(0b, 1b) 292 : "+d" (reg0), "+d" (reg1), "+d" (reg2) 293 : 294 : "cc"); 295 296 return reg1; 297 } 298 299 /** 300 * ap_query_functions(): Query supported functions. 301 * @qid: The AP queue number 302 * @functions: Pointer to functions field. 303 * 304 * Returns 305 * 0 on success. 306 * -ENODEV if queue not valid. 307 * -EBUSY if device busy. 
308 * -EINVAL if query function is not supported 309 */ 310 static int ap_query_functions(ap_qid_t qid, unsigned int *functions) 311 { 312 struct ap_queue_status status; 313 int i; 314 status = __ap_query_functions(qid, functions); 315 316 for (i = 0; i < AP_MAX_RESET; i++) { 317 if (ap_queue_status_invalid_test(&status)) 318 return -ENODEV; 319 320 switch (status.response_code) { 321 case AP_RESPONSE_NORMAL: 322 return 0; 323 case AP_RESPONSE_RESET_IN_PROGRESS: 324 case AP_RESPONSE_BUSY: 325 break; 326 case AP_RESPONSE_Q_NOT_AVAIL: 327 case AP_RESPONSE_DECONFIGURED: 328 case AP_RESPONSE_CHECKSTOPPED: 329 case AP_RESPONSE_INVALID_ADDRESS: 330 return -ENODEV; 331 case AP_RESPONSE_OTHERWISE_CHANGED: 332 break; 333 default: 334 break; 335 } 336 if (i < AP_MAX_RESET - 1) { 337 udelay(5); 338 status = __ap_query_functions(qid, functions); 339 } 340 } 341 return -EBUSY; 342 } 343 344 /** 345 * ap_queue_enable_interruption(): Enable interruption on an AP. 346 * @qid: The AP queue number 347 * @ind: the notification indicator byte 348 * 349 * Enables interruption on AP queue via ap_queue_interruption_control(). Based 350 * on the return value it waits a while and tests the AP queue if interrupts 351 * have been switched on using ap_test_queue(). 352 */ 353 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) 354 { 355 struct ap_queue_status status; 356 int t_depth, t_device_type, rc, i; 357 358 rc = -EBUSY; 359 status = ap_queue_interruption_control(qid, ind); 360 361 for (i = 0; i < AP_MAX_RESET; i++) { 362 switch (status.response_code) { 363 case AP_RESPONSE_NORMAL: 364 if (status.int_enabled) 365 return 0; 366 break; 367 case AP_RESPONSE_RESET_IN_PROGRESS: 368 case AP_RESPONSE_BUSY: 369 if (i < AP_MAX_RESET - 1) { 370 udelay(5); 371 status = ap_queue_interruption_control(qid, 372 ind); 373 continue; 374 } 375 break; 376 case AP_RESPONSE_Q_NOT_AVAIL: 377 case AP_RESPONSE_DECONFIGURED: 378 case AP_RESPONSE_CHECKSTOPPED: 379 case AP_RESPONSE_INVALID_ADDRESS: 380 return -ENODEV; 381 case AP_RESPONSE_OTHERWISE_CHANGED: 382 if (status.int_enabled) 383 return 0; 384 break; 385 default: 386 break; 387 } 388 if (i < AP_MAX_RESET - 1) { 389 udelay(5); 390 status = ap_test_queue(qid, &t_depth, &t_device_type); 391 } 392 } 393 return rc; 394 } 395 396 /** 397 * __ap_send(): Send message to adjunct processor queue. 398 * @qid: The AP queue number 399 * @psmid: The program supplied message identifier 400 * @msg: The message text 401 * @length: The message length 402 * @special: Special Bit 403 * 404 * Returns AP queue status structure. 405 * Condition code 1 on NQAP can't happen because the L bit is 1. 406 * Condition code 2 on NQAP also means the send is incomplete, 407 * because a segment boundary was reached. The NQAP is repeated. 
408 */ 409 static inline struct ap_queue_status 410 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, 411 unsigned int special) 412 { 413 typedef struct { char _[length]; } msgblock; 414 register unsigned long reg0 asm ("0") = qid | 0x40000000UL; 415 register struct ap_queue_status reg1 asm ("1"); 416 register unsigned long reg2 asm ("2") = (unsigned long) msg; 417 register unsigned long reg3 asm ("3") = (unsigned long) length; 418 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); 419 register unsigned long reg5 asm ("5") = psmid & 0xffffffff; 420 421 if (special == 1) 422 reg0 |= 0x400000UL; 423 424 asm volatile ( 425 "0: .long 0xb2ad0042\n" /* NQAP */ 426 " brc 2,0b" 427 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) 428 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) 429 : "cc" ); 430 return reg1; 431 } 432 433 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) 434 { 435 struct ap_queue_status status; 436 437 status = __ap_send(qid, psmid, msg, length, 0); 438 switch (status.response_code) { 439 case AP_RESPONSE_NORMAL: 440 return 0; 441 case AP_RESPONSE_Q_FULL: 442 case AP_RESPONSE_RESET_IN_PROGRESS: 443 return -EBUSY; 444 case AP_RESPONSE_REQ_FAC_NOT_INST: 445 return -EINVAL; 446 default: /* Device is gone. */ 447 return -ENODEV; 448 } 449 } 450 EXPORT_SYMBOL(ap_send); 451 452 /** 453 * __ap_recv(): Receive message from adjunct processor queue. 454 * @qid: The AP queue number 455 * @psmid: Pointer to program supplied message identifier 456 * @msg: The message text 457 * @length: The message length 458 * 459 * Returns AP queue status structure. 460 * Condition code 1 on DQAP means the receive has taken place 461 * but only partially. The response is incomplete, hence the 462 * DQAP is repeated. 463 * Condition code 2 on DQAP also means the receive is incomplete, 464 * this time because a segment boundary was reached. Again, the 465 * DQAP is repeated. 466 * Note that gpr2 is used by the DQAP instruction to keep track of 467 * any 'residual' length, in case the instruction gets interrupted. 468 * Hence it gets zeroed before the instruction. 
469 */ 470 static inline struct ap_queue_status 471 __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 472 { 473 typedef struct { char _[length]; } msgblock; 474 register unsigned long reg0 asm("0") = qid | 0x80000000UL; 475 register struct ap_queue_status reg1 asm ("1"); 476 register unsigned long reg2 asm("2") = 0UL; 477 register unsigned long reg4 asm("4") = (unsigned long) msg; 478 register unsigned long reg5 asm("5") = (unsigned long) length; 479 register unsigned long reg6 asm("6") = 0UL; 480 register unsigned long reg7 asm("7") = 0UL; 481 482 483 asm volatile( 484 "0: .long 0xb2ae0064\n" /* DQAP */ 485 " brc 6,0b\n" 486 : "+d" (reg0), "=d" (reg1), "+d" (reg2), 487 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), 488 "=m" (*(msgblock *) msg) : : "cc" ); 489 *psmid = (((unsigned long long) reg6) << 32) + reg7; 490 return reg1; 491 } 492 493 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 494 { 495 struct ap_queue_status status; 496 497 status = __ap_recv(qid, psmid, msg, length); 498 switch (status.response_code) { 499 case AP_RESPONSE_NORMAL: 500 return 0; 501 case AP_RESPONSE_NO_PENDING_REPLY: 502 if (status.queue_empty) 503 return -ENOENT; 504 return -EBUSY; 505 case AP_RESPONSE_RESET_IN_PROGRESS: 506 return -EBUSY; 507 default: 508 return -ENODEV; 509 } 510 } 511 EXPORT_SYMBOL(ap_recv); 512 513 /** 514 * ap_query_queue(): Check if an AP queue is available. 515 * @qid: The AP queue number 516 * @queue_depth: Pointer to queue depth value 517 * @device_type: Pointer to device type value 518 * 519 * The test is repeated for AP_MAX_RESET times. 520 */ 521 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) 522 { 523 struct ap_queue_status status; 524 int t_depth, t_device_type, rc, i; 525 526 rc = -EBUSY; 527 for (i = 0; i < AP_MAX_RESET; i++) { 528 status = ap_test_queue(qid, &t_depth, &t_device_type); 529 switch (status.response_code) { 530 case AP_RESPONSE_NORMAL: 531 *queue_depth = t_depth + 1; 532 *device_type = t_device_type; 533 rc = 0; 534 break; 535 case AP_RESPONSE_Q_NOT_AVAIL: 536 rc = -ENODEV; 537 break; 538 case AP_RESPONSE_RESET_IN_PROGRESS: 539 break; 540 case AP_RESPONSE_DECONFIGURED: 541 rc = -ENODEV; 542 break; 543 case AP_RESPONSE_CHECKSTOPPED: 544 rc = -ENODEV; 545 break; 546 case AP_RESPONSE_INVALID_ADDRESS: 547 rc = -ENODEV; 548 break; 549 case AP_RESPONSE_OTHERWISE_CHANGED: 550 break; 551 case AP_RESPONSE_BUSY: 552 break; 553 default: 554 BUG(); 555 } 556 if (rc != -EBUSY) 557 break; 558 if (i < AP_MAX_RESET - 1) 559 udelay(5); 560 } 561 return rc; 562 } 563 564 /** 565 * ap_init_queue(): Reset an AP queue. 566 * @qid: The AP queue number 567 * 568 * Reset an AP queue and wait for it to become available again. 569 */ 570 static int ap_init_queue(ap_qid_t qid) 571 { 572 struct ap_queue_status status; 573 int rc, dummy, i; 574 575 rc = -ENODEV; 576 status = ap_reset_queue(qid); 577 for (i = 0; i < AP_MAX_RESET; i++) { 578 switch (status.response_code) { 579 case AP_RESPONSE_NORMAL: 580 if (status.queue_empty) 581 rc = 0; 582 break; 583 case AP_RESPONSE_Q_NOT_AVAIL: 584 case AP_RESPONSE_DECONFIGURED: 585 case AP_RESPONSE_CHECKSTOPPED: 586 i = AP_MAX_RESET; /* return with -ENODEV */ 587 break; 588 case AP_RESPONSE_RESET_IN_PROGRESS: 589 rc = -EBUSY; 590 case AP_RESPONSE_BUSY: 591 default: 592 break; 593 } 594 if (rc != -ENODEV && rc != -EBUSY) 595 break; 596 if (i < AP_MAX_RESET - 1) { 597 /* Time we are waiting until we give up (0.7sec * 90). 
			 * Since the actual request (in progress) will not be
			 * interrupted immediately for the reset command,
			 * we have to be patient. In the worst case we have to
			 * wait 60sec + reset time (some msec).
			 */
			schedule_timeout(AP_RESET_TIMEOUT);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
		/* If interruption mode is supported by the machine,
		 * but an AP cannot be enabled for interruption then
		 * the AP will be discarded. */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}

/**
 * ap_increase_queue_count(): Arm request timeout.
 * @ap_dev: Pointer to an AP device.
 *
 * Arm request timeout if an AP device was idle and a new request is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}

/**
 * ap_decrease_queue_count(): Decrease queue count.
 * @ap_dev: Pointer to an AP device.
 *
 * If AP device is still alive, re-schedule request timeout if there are still
 * pending requests.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * The timeout timer should be disabled now - since
		 * del_timer_sync() is very expensive, we just tell via the
		 * reset flag to ignore the pending timeout timer.
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}

/*
 * AP device related attributes.
661 */ 662 static ssize_t ap_hwtype_show(struct device *dev, 663 struct device_attribute *attr, char *buf) 664 { 665 struct ap_device *ap_dev = to_ap_dev(dev); 666 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); 667 } 668 669 static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); 670 671 static ssize_t ap_raw_hwtype_show(struct device *dev, 672 struct device_attribute *attr, char *buf) 673 { 674 struct ap_device *ap_dev = to_ap_dev(dev); 675 676 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype); 677 } 678 679 static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL); 680 681 static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, 682 char *buf) 683 { 684 struct ap_device *ap_dev = to_ap_dev(dev); 685 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); 686 } 687 688 static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); 689 static ssize_t ap_request_count_show(struct device *dev, 690 struct device_attribute *attr, 691 char *buf) 692 { 693 struct ap_device *ap_dev = to_ap_dev(dev); 694 int rc; 695 696 spin_lock_bh(&ap_dev->lock); 697 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count); 698 spin_unlock_bh(&ap_dev->lock); 699 return rc; 700 } 701 702 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 703 704 static ssize_t ap_requestq_count_show(struct device *dev, 705 struct device_attribute *attr, char *buf) 706 { 707 struct ap_device *ap_dev = to_ap_dev(dev); 708 int rc; 709 710 spin_lock_bh(&ap_dev->lock); 711 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count); 712 spin_unlock_bh(&ap_dev->lock); 713 return rc; 714 } 715 716 static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); 717 718 static ssize_t ap_pendingq_count_show(struct device *dev, 719 struct device_attribute *attr, char *buf) 720 { 721 struct ap_device *ap_dev = to_ap_dev(dev); 722 int rc; 723 724 spin_lock_bh(&ap_dev->lock); 725 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count); 726 spin_unlock_bh(&ap_dev->lock); 727 return rc; 728 } 729 730 static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); 731 732 static ssize_t ap_modalias_show(struct device *dev, 733 struct device_attribute *attr, char *buf) 734 { 735 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type); 736 } 737 738 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); 739 740 static ssize_t ap_functions_show(struct device *dev, 741 struct device_attribute *attr, char *buf) 742 { 743 struct ap_device *ap_dev = to_ap_dev(dev); 744 return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions); 745 } 746 747 static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); 748 749 static struct attribute *ap_dev_attrs[] = { 750 &dev_attr_hwtype.attr, 751 &dev_attr_raw_hwtype.attr, 752 &dev_attr_depth.attr, 753 &dev_attr_request_count.attr, 754 &dev_attr_requestq_count.attr, 755 &dev_attr_pendingq_count.attr, 756 &dev_attr_modalias.attr, 757 &dev_attr_ap_functions.attr, 758 NULL 759 }; 760 static struct attribute_group ap_dev_attr_group = { 761 .attrs = ap_dev_attrs 762 }; 763 764 /** 765 * ap_bus_match() 766 * @dev: Pointer to device 767 * @drv: Pointer to device_driver 768 * 769 * AP bus driver registration/unregistration. 
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type.
 */
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/* Disable scanning for devices; we do not want to
		 * rediscover them after they have been removed.
		 */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished.
*/ 837 do { 838 flags = 0; 839 spin_lock_bh(&ap_dev->lock); 840 __ap_poll_device(ap_dev, &flags); 841 spin_unlock_bh(&ap_dev->lock); 842 } while ((flags & 1) || (flags & 2)); 843 844 spin_lock_bh(&ap_dev->lock); 845 ap_dev->unregistered = 1; 846 spin_unlock_bh(&ap_dev->lock); 847 848 return 0; 849 } 850 851 static int ap_bus_resume(struct device *dev) 852 { 853 struct ap_device *ap_dev = to_ap_dev(dev); 854 int rc; 855 856 if (ap_suspend_flag) { 857 ap_suspend_flag = 0; 858 if (ap_interrupts_available()) { 859 if (!ap_using_interrupts()) { 860 rc = register_adapter_interrupt(&ap_airq); 861 ap_airq_flag = (rc == 0); 862 } 863 } else { 864 if (ap_using_interrupts()) { 865 unregister_adapter_interrupt(&ap_airq); 866 ap_airq_flag = 0; 867 } 868 } 869 ap_query_configuration(); 870 if (!user_set_domain) { 871 ap_domain_index = -1; 872 ap_select_domain(); 873 } 874 init_timer(&ap_config_timer); 875 ap_config_timer.function = ap_config_timeout; 876 ap_config_timer.data = 0; 877 ap_config_timer.expires = jiffies + ap_config_time * HZ; 878 add_timer(&ap_config_timer); 879 ap_work_queue = create_singlethread_workqueue("kapwork"); 880 if (!ap_work_queue) 881 return -ENOMEM; 882 tasklet_enable(&ap_tasklet); 883 if (!ap_using_interrupts()) 884 ap_schedule_poll_timer(); 885 else 886 tasklet_schedule(&ap_tasklet); 887 if (ap_thread_flag) 888 rc = ap_poll_thread_start(); 889 else 890 rc = 0; 891 } else 892 rc = 0; 893 if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) { 894 spin_lock_bh(&ap_dev->lock); 895 ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid), 896 ap_domain_index); 897 spin_unlock_bh(&ap_dev->lock); 898 } 899 queue_work(ap_work_queue, &ap_config_work); 900 901 return rc; 902 } 903 904 static struct bus_type ap_bus_type = { 905 .name = "ap", 906 .match = &ap_bus_match, 907 .uevent = &ap_uevent, 908 .suspend = ap_bus_suspend, 909 .resume = ap_bus_resume 910 }; 911 912 static int ap_device_probe(struct device *dev) 913 { 914 struct ap_device *ap_dev = to_ap_dev(dev); 915 struct ap_driver *ap_drv = to_ap_drv(dev->driver); 916 int rc; 917 918 ap_dev->drv = ap_drv; 919 920 spin_lock_bh(&ap_device_list_lock); 921 list_add(&ap_dev->list, &ap_device_list); 922 spin_unlock_bh(&ap_device_list_lock); 923 924 rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; 925 if (rc) { 926 spin_lock_bh(&ap_device_list_lock); 927 list_del_init(&ap_dev->list); 928 spin_unlock_bh(&ap_device_list_lock); 929 } 930 return rc; 931 } 932 933 /** 934 * __ap_flush_queue(): Flush requests. 935 * @ap_dev: Pointer to the AP device 936 * 937 * Flush all requests from the request/pending queue of an AP device. 
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);

void ap_bus_force_rescan(void)
{
	/* Reconfigure the AP bus rescan timer. */
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
	/* Process an asynchronous bus rescan. */
	queue_work(ap_work_queue, &ap_config_work);
	flush_work(&ap_config_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);

/*
 * ap_test_config(): helper function to extract the nr-th bit
 *		     within the unsigned int array field.
 */
static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	if (nr > 0xFFu)
		return 0;
	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
}

/*
 * ap_test_config_card_id(): Test, whether an AP card ID is configured.
 * @id AP card ID
 *
 * Returns 0 if the card is not configured
 *	   1 if the card is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_card_id(unsigned int id)
{
	if (!ap_configuration)
		return 1;
	return ap_test_config(ap_configuration->apm, id);
}

/*
 * ap_test_config_domain(): Test, whether an AP usage domain is configured.
 * @domain AP usage domain ID
 *
 * Returns 0 if the usage domain is not configured
 *	   1 if the usage domain is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_domain(unsigned int domain)
{
	if (!ap_configuration) {	/* QCI not supported */
		if (domain < 16)
			return 1;	/* then domains 0...15 are configured */
		else
			return 0;
	} else {
		return ap_test_config(ap_configuration->aqm, domain);
	}
}

/*
 * AP bus attributes.
1058 */ 1059 static ssize_t ap_domain_show(struct bus_type *bus, char *buf) 1060 { 1061 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); 1062 } 1063 1064 static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); 1065 1066 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) 1067 { 1068 if (ap_configuration != NULL) { /* QCI not supported */ 1069 if (test_facility(76)) { /* format 1 - 256 bit domain field */ 1070 return snprintf(buf, PAGE_SIZE, 1071 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1072 ap_configuration->adm[0], ap_configuration->adm[1], 1073 ap_configuration->adm[2], ap_configuration->adm[3], 1074 ap_configuration->adm[4], ap_configuration->adm[5], 1075 ap_configuration->adm[6], ap_configuration->adm[7]); 1076 } else { /* format 0 - 16 bit domain field */ 1077 return snprintf(buf, PAGE_SIZE, "%08x%08x\n", 1078 ap_configuration->adm[0], ap_configuration->adm[1]); 1079 } 1080 } else { 1081 return snprintf(buf, PAGE_SIZE, "not supported\n"); 1082 } 1083 } 1084 1085 static BUS_ATTR(ap_control_domain_mask, 0444, 1086 ap_control_domain_mask_show, NULL); 1087 1088 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) 1089 { 1090 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); 1091 } 1092 1093 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) 1094 { 1095 return snprintf(buf, PAGE_SIZE, "%d\n", 1096 ap_using_interrupts() ? 1 : 0); 1097 } 1098 1099 static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL); 1100 1101 static ssize_t ap_config_time_store(struct bus_type *bus, 1102 const char *buf, size_t count) 1103 { 1104 int time; 1105 1106 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120) 1107 return -EINVAL; 1108 ap_config_time = time; 1109 if (!timer_pending(&ap_config_timer) || 1110 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) { 1111 ap_config_timer.expires = jiffies + ap_config_time * HZ; 1112 add_timer(&ap_config_timer); 1113 } 1114 return count; 1115 } 1116 1117 static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store); 1118 1119 static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf) 1120 { 1121 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 
1 : 0); 1122 } 1123 1124 static ssize_t ap_poll_thread_store(struct bus_type *bus, 1125 const char *buf, size_t count) 1126 { 1127 int flag, rc; 1128 1129 if (sscanf(buf, "%d\n", &flag) != 1) 1130 return -EINVAL; 1131 if (flag) { 1132 rc = ap_poll_thread_start(); 1133 if (rc) 1134 return rc; 1135 } 1136 else 1137 ap_poll_thread_stop(); 1138 return count; 1139 } 1140 1141 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); 1142 1143 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf) 1144 { 1145 return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout); 1146 } 1147 1148 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, 1149 size_t count) 1150 { 1151 unsigned long long time; 1152 ktime_t hr_time; 1153 1154 /* 120 seconds = maximum poll interval */ 1155 if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || 1156 time > 120000000000ULL) 1157 return -EINVAL; 1158 poll_timeout = time; 1159 hr_time = ktime_set(0, poll_timeout); 1160 1161 if (!hrtimer_is_queued(&ap_poll_timer) || 1162 !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) { 1163 hrtimer_set_expires(&ap_poll_timer, hr_time); 1164 hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS); 1165 } 1166 return count; 1167 } 1168 1169 static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store); 1170 1171 static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf) 1172 { 1173 ap_qid_t qid; 1174 int i, nd, max_domain_id = -1; 1175 unsigned long fbits; 1176 1177 if (ap_configuration) { 1178 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) { 1179 for (i = 0; i < AP_DEVICES; i++) { 1180 if (!ap_test_config_card_id(i)) 1181 continue; 1182 qid = AP_MKQID(i, ap_domain_index); 1183 fbits = ap_query_facilities(qid); 1184 if (fbits & (1UL << 57)) { 1185 /* the N bit is 0, Nd field is filled */ 1186 nd = (int)((fbits & 0x00FF0000UL)>>16); 1187 if (nd > 0) 1188 max_domain_id = nd; 1189 else 1190 max_domain_id = 15; 1191 } else { 1192 /* N bit is 1, max 16 domains */ 1193 max_domain_id = 15; 1194 } 1195 break; 1196 } 1197 } 1198 } else { 1199 /* no APXA support, older machines with max 16 domains */ 1200 max_domain_id = 15; 1201 } 1202 return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id); 1203 } 1204 1205 static BUS_ATTR(ap_max_domain_id, 0444, ap_max_domain_id_show, NULL); 1206 1207 static struct bus_attribute *const ap_bus_attrs[] = { 1208 &bus_attr_ap_domain, 1209 &bus_attr_ap_control_domain_mask, 1210 &bus_attr_config_time, 1211 &bus_attr_poll_thread, 1212 &bus_attr_ap_interrupts, 1213 &bus_attr_poll_timeout, 1214 &bus_attr_ap_max_domain_id, 1215 NULL, 1216 }; 1217 1218 /** 1219 * ap_query_configuration(): Query AP configuration information. 1220 * 1221 * Query information of installed cards and configured domains from AP. 1222 */ 1223 static void ap_query_configuration(void) 1224 { 1225 if (ap_configuration_available()) { 1226 if (!ap_configuration) 1227 ap_configuration = 1228 kzalloc(sizeof(struct ap_config_info), 1229 GFP_KERNEL); 1230 if (ap_configuration) 1231 __ap_query_configuration(ap_configuration); 1232 } else 1233 ap_configuration = NULL; 1234 } 1235 1236 /** 1237 * ap_select_domain(): Select an AP domain. 1238 * 1239 * Pick one of the 16 AP domains. 
1240 */ 1241 static int ap_select_domain(void) 1242 { 1243 int queue_depth, device_type, count, max_count, best_domain; 1244 ap_qid_t qid; 1245 int rc, i, j; 1246 1247 /* IF APXA isn't installed, only 16 domains could be defined */ 1248 if (!ap_configuration->ap_extended && (ap_domain_index > 15)) 1249 return -EINVAL; 1250 1251 /* 1252 * We want to use a single domain. Either the one specified with 1253 * the "domain=" parameter or the domain with the maximum number 1254 * of devices. 1255 */ 1256 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) 1257 /* Domain has already been selected. */ 1258 return 0; 1259 best_domain = -1; 1260 max_count = 0; 1261 for (i = 0; i < AP_DOMAINS; i++) { 1262 if (!ap_test_config_domain(i)) 1263 continue; 1264 count = 0; 1265 for (j = 0; j < AP_DEVICES; j++) { 1266 if (!ap_test_config_card_id(j)) 1267 continue; 1268 qid = AP_MKQID(j, i); 1269 rc = ap_query_queue(qid, &queue_depth, &device_type); 1270 if (rc) 1271 continue; 1272 count++; 1273 } 1274 if (count > max_count) { 1275 max_count = count; 1276 best_domain = i; 1277 } 1278 } 1279 if (best_domain >= 0){ 1280 ap_domain_index = best_domain; 1281 return 0; 1282 } 1283 return -ENODEV; 1284 } 1285 1286 /** 1287 * ap_probe_device_type(): Find the device type of an AP. 1288 * @ap_dev: pointer to the AP device. 1289 * 1290 * Find the device type if query queue returned a device type of 0. 1291 */ 1292 static int ap_probe_device_type(struct ap_device *ap_dev) 1293 { 1294 static unsigned char msg[] = { 1295 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, 1296 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1297 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00, 1298 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1299 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50, 1300 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01, 1301 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00, 1302 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00, 1303 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1304 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00, 1305 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1306 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00, 1307 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00, 1308 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1309 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00, 1310 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1311 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1312 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1313 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1314 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1315 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1316 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00, 1317 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1318 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00, 1319 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20, 1320 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53, 1321 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22, 1322 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, 1323 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88, 1324 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66, 1325 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44, 1326 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22, 1327 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, 1328 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77, 1329 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00, 1330 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00, 1331 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01, 1332 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c, 1333 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68, 1334 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66, 1335 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0, 1336 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8, 1337 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04, 1338 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57, 1339 
0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d, 1340 }; 1341 struct ap_queue_status status; 1342 unsigned long long psmid; 1343 char *reply; 1344 int rc, i; 1345 1346 reply = (void *) get_zeroed_page(GFP_KERNEL); 1347 if (!reply) { 1348 rc = -ENOMEM; 1349 goto out; 1350 } 1351 1352 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL, 1353 msg, sizeof(msg), 0); 1354 if (status.response_code != AP_RESPONSE_NORMAL) { 1355 rc = -ENODEV; 1356 goto out_free; 1357 } 1358 1359 /* Wait for the test message to complete. */ 1360 for (i = 0; i < 6; i++) { 1361 mdelay(300); 1362 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096); 1363 if (status.response_code == AP_RESPONSE_NORMAL && 1364 psmid == 0x0102030405060708ULL) 1365 break; 1366 } 1367 if (i < 6) { 1368 /* Got an answer. */ 1369 if (reply[0] == 0x00 && reply[1] == 0x86) 1370 ap_dev->device_type = AP_DEVICE_TYPE_PCICC; 1371 else 1372 ap_dev->device_type = AP_DEVICE_TYPE_PCICA; 1373 rc = 0; 1374 } else 1375 rc = -ENODEV; 1376 1377 out_free: 1378 free_page((unsigned long) reply); 1379 out: 1380 return rc; 1381 } 1382 1383 static void ap_interrupt_handler(struct airq_struct *airq) 1384 { 1385 inc_irq_stat(IRQIO_APB); 1386 tasklet_schedule(&ap_tasklet); 1387 } 1388 1389 /** 1390 * __ap_scan_bus(): Scan the AP bus. 1391 * @dev: Pointer to device 1392 * @data: Pointer to data 1393 * 1394 * Scan the AP bus for new devices. 1395 */ 1396 static int __ap_scan_bus(struct device *dev, void *data) 1397 { 1398 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; 1399 } 1400 1401 static void ap_device_release(struct device *dev) 1402 { 1403 struct ap_device *ap_dev = to_ap_dev(dev); 1404 1405 kfree(ap_dev); 1406 } 1407 1408 static void ap_scan_bus(struct work_struct *unused) 1409 { 1410 struct ap_device *ap_dev; 1411 struct device *dev; 1412 ap_qid_t qid; 1413 int queue_depth, device_type; 1414 unsigned int device_functions; 1415 int rc, i; 1416 1417 ap_query_configuration(); 1418 if (ap_select_domain() != 0) { 1419 return; 1420 } 1421 for (i = 0; i < AP_DEVICES; i++) { 1422 qid = AP_MKQID(i, ap_domain_index); 1423 dev = bus_find_device(&ap_bus_type, NULL, 1424 (void *)(unsigned long)qid, 1425 __ap_scan_bus); 1426 if (ap_test_config_card_id(i)) 1427 rc = ap_query_queue(qid, &queue_depth, &device_type); 1428 else 1429 rc = -ENODEV; 1430 if (dev) { 1431 if (rc == -EBUSY) { 1432 set_current_state(TASK_UNINTERRUPTIBLE); 1433 schedule_timeout(AP_RESET_TIMEOUT); 1434 rc = ap_query_queue(qid, &queue_depth, 1435 &device_type); 1436 } 1437 ap_dev = to_ap_dev(dev); 1438 spin_lock_bh(&ap_dev->lock); 1439 if (rc || ap_dev->unregistered) { 1440 spin_unlock_bh(&ap_dev->lock); 1441 if (ap_dev->unregistered) 1442 i--; 1443 device_unregister(dev); 1444 put_device(dev); 1445 continue; 1446 } 1447 spin_unlock_bh(&ap_dev->lock); 1448 put_device(dev); 1449 continue; 1450 } 1451 if (rc) 1452 continue; 1453 rc = ap_init_queue(qid); 1454 if (rc) 1455 continue; 1456 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL); 1457 if (!ap_dev) 1458 break; 1459 ap_dev->qid = qid; 1460 ap_dev->queue_depth = queue_depth; 1461 ap_dev->unregistered = 1; 1462 spin_lock_init(&ap_dev->lock); 1463 INIT_LIST_HEAD(&ap_dev->pendingq); 1464 INIT_LIST_HEAD(&ap_dev->requestq); 1465 INIT_LIST_HEAD(&ap_dev->list); 1466 setup_timer(&ap_dev->timeout, ap_request_timeout, 1467 (unsigned long) ap_dev); 1468 switch (device_type) { 1469 case 0: 1470 /* device type probing for old cards */ 1471 if (ap_probe_device_type(ap_dev)) { 1472 kfree(ap_dev); 1473 continue; 1474 } 1475 break; 1476 default: 1477 
ap_dev->device_type = device_type; 1478 } 1479 ap_dev->raw_hwtype = device_type; 1480 1481 rc = ap_query_functions(qid, &device_functions); 1482 if (!rc) 1483 ap_dev->functions = device_functions; 1484 else 1485 ap_dev->functions = 0u; 1486 1487 ap_dev->device.bus = &ap_bus_type; 1488 ap_dev->device.parent = ap_root_device; 1489 if (dev_set_name(&ap_dev->device, "card%02x", 1490 AP_QID_DEVICE(ap_dev->qid))) { 1491 kfree(ap_dev); 1492 continue; 1493 } 1494 ap_dev->device.release = ap_device_release; 1495 rc = device_register(&ap_dev->device); 1496 if (rc) { 1497 put_device(&ap_dev->device); 1498 continue; 1499 } 1500 /* Add device attributes. */ 1501 rc = sysfs_create_group(&ap_dev->device.kobj, 1502 &ap_dev_attr_group); 1503 if (!rc) { 1504 spin_lock_bh(&ap_dev->lock); 1505 ap_dev->unregistered = 0; 1506 spin_unlock_bh(&ap_dev->lock); 1507 } 1508 else 1509 device_unregister(&ap_dev->device); 1510 } 1511 } 1512 1513 static void 1514 ap_config_timeout(unsigned long ptr) 1515 { 1516 queue_work(ap_work_queue, &ap_config_work); 1517 ap_config_timer.expires = jiffies + ap_config_time * HZ; 1518 add_timer(&ap_config_timer); 1519 } 1520 1521 /** 1522 * __ap_schedule_poll_timer(): Schedule poll timer. 1523 * 1524 * Set up the timer to run the poll tasklet 1525 */ 1526 static inline void __ap_schedule_poll_timer(void) 1527 { 1528 ktime_t hr_time; 1529 1530 spin_lock_bh(&ap_poll_timer_lock); 1531 if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag) 1532 goto out; 1533 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { 1534 hr_time = ktime_set(0, poll_timeout); 1535 hrtimer_forward_now(&ap_poll_timer, hr_time); 1536 hrtimer_restart(&ap_poll_timer); 1537 } 1538 out: 1539 spin_unlock_bh(&ap_poll_timer_lock); 1540 } 1541 1542 /** 1543 * ap_schedule_poll_timer(): Schedule poll timer. 1544 * 1545 * Set up the timer to run the poll tasklet 1546 */ 1547 static inline void ap_schedule_poll_timer(void) 1548 { 1549 if (ap_using_interrupts()) 1550 return; 1551 __ap_schedule_poll_timer(); 1552 } 1553 1554 /** 1555 * ap_poll_read(): Receive pending reply messages from an AP device. 1556 * @ap_dev: pointer to the AP device 1557 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1558 * required, bit 2^1 is set if the poll timer needs to get armed 1559 * 1560 * Returns 0 if the device is still present, -ENODEV if not. 1561 */ 1562 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 1563 { 1564 struct ap_queue_status status; 1565 struct ap_message *ap_msg; 1566 1567 if (ap_dev->queue_count <= 0) 1568 return 0; 1569 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid, 1570 ap_dev->reply->message, ap_dev->reply->length); 1571 switch (status.response_code) { 1572 case AP_RESPONSE_NORMAL: 1573 atomic_dec(&ap_poll_requests); 1574 ap_decrease_queue_count(ap_dev); 1575 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { 1576 if (ap_msg->psmid != ap_dev->reply->psmid) 1577 continue; 1578 list_del_init(&ap_msg->list); 1579 ap_dev->pendingq_count--; 1580 ap_msg->receive(ap_dev, ap_msg, ap_dev->reply); 1581 break; 1582 } 1583 if (ap_dev->queue_count > 0) 1584 *flags |= 1; 1585 break; 1586 case AP_RESPONSE_NO_PENDING_REPLY: 1587 if (status.queue_empty) { 1588 /* The card shouldn't forget requests but who knows. 
*/ 1589 atomic_sub(ap_dev->queue_count, &ap_poll_requests); 1590 ap_dev->queue_count = 0; 1591 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); 1592 ap_dev->requestq_count += ap_dev->pendingq_count; 1593 ap_dev->pendingq_count = 0; 1594 } else 1595 *flags |= 2; 1596 break; 1597 default: 1598 return -ENODEV; 1599 } 1600 return 0; 1601 } 1602 1603 /** 1604 * ap_poll_write(): Send messages from the request queue to an AP device. 1605 * @ap_dev: pointer to the AP device 1606 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1607 * required, bit 2^1 is set if the poll timer needs to get armed 1608 * 1609 * Returns 0 if the device is still present, -ENODEV if not. 1610 */ 1611 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 1612 { 1613 struct ap_queue_status status; 1614 struct ap_message *ap_msg; 1615 1616 if (ap_dev->requestq_count <= 0 || 1617 ap_dev->queue_count >= ap_dev->queue_depth) 1618 return 0; 1619 /* Start the next request on the queue. */ 1620 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); 1621 status = __ap_send(ap_dev->qid, ap_msg->psmid, 1622 ap_msg->message, ap_msg->length, ap_msg->special); 1623 switch (status.response_code) { 1624 case AP_RESPONSE_NORMAL: 1625 atomic_inc(&ap_poll_requests); 1626 ap_increase_queue_count(ap_dev); 1627 list_move_tail(&ap_msg->list, &ap_dev->pendingq); 1628 ap_dev->requestq_count--; 1629 ap_dev->pendingq_count++; 1630 if (ap_dev->queue_count < ap_dev->queue_depth && 1631 ap_dev->requestq_count > 0) 1632 *flags |= 1; 1633 *flags |= 2; 1634 break; 1635 case AP_RESPONSE_RESET_IN_PROGRESS: 1636 __ap_schedule_poll_timer(); 1637 case AP_RESPONSE_Q_FULL: 1638 *flags |= 2; 1639 break; 1640 case AP_RESPONSE_MESSAGE_TOO_BIG: 1641 case AP_RESPONSE_REQ_FAC_NOT_INST: 1642 return -EINVAL; 1643 default: 1644 return -ENODEV; 1645 } 1646 return 0; 1647 } 1648 1649 /** 1650 * ap_poll_queue(): Poll AP device for pending replies and send new messages. 1651 * @ap_dev: pointer to the bus device 1652 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1653 * required, bit 2^1 is set if the poll timer needs to get armed 1654 * 1655 * Poll AP device for pending replies and send new messages. If either 1656 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device. 1657 * Returns 0. 1658 */ 1659 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags) 1660 { 1661 int rc; 1662 1663 rc = ap_poll_read(ap_dev, flags); 1664 if (rc) 1665 return rc; 1666 return ap_poll_write(ap_dev, flags); 1667 } 1668 1669 /** 1670 * __ap_queue_message(): Queue a message to a device. 1671 * @ap_dev: pointer to the AP device 1672 * @ap_msg: the message to be queued 1673 * 1674 * Queue a message to a device. Returns 0 if successful. 
1675 */ 1676 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1677 { 1678 struct ap_queue_status status; 1679 1680 if (list_empty(&ap_dev->requestq) && 1681 ap_dev->queue_count < ap_dev->queue_depth) { 1682 status = __ap_send(ap_dev->qid, ap_msg->psmid, 1683 ap_msg->message, ap_msg->length, 1684 ap_msg->special); 1685 switch (status.response_code) { 1686 case AP_RESPONSE_NORMAL: 1687 list_add_tail(&ap_msg->list, &ap_dev->pendingq); 1688 atomic_inc(&ap_poll_requests); 1689 ap_dev->pendingq_count++; 1690 ap_increase_queue_count(ap_dev); 1691 ap_dev->total_request_count++; 1692 break; 1693 case AP_RESPONSE_Q_FULL: 1694 case AP_RESPONSE_RESET_IN_PROGRESS: 1695 list_add_tail(&ap_msg->list, &ap_dev->requestq); 1696 ap_dev->requestq_count++; 1697 ap_dev->total_request_count++; 1698 return -EBUSY; 1699 case AP_RESPONSE_REQ_FAC_NOT_INST: 1700 case AP_RESPONSE_MESSAGE_TOO_BIG: 1701 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL)); 1702 return -EINVAL; 1703 default: /* Device is gone. */ 1704 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1705 return -ENODEV; 1706 } 1707 } else { 1708 list_add_tail(&ap_msg->list, &ap_dev->requestq); 1709 ap_dev->requestq_count++; 1710 ap_dev->total_request_count++; 1711 return -EBUSY; 1712 } 1713 ap_schedule_poll_timer(); 1714 return 0; 1715 } 1716 1717 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1718 { 1719 unsigned long flags; 1720 int rc; 1721 1722 /* For asynchronous message handling a valid receive-callback 1723 * is required. */ 1724 BUG_ON(!ap_msg->receive); 1725 1726 spin_lock_bh(&ap_dev->lock); 1727 if (!ap_dev->unregistered) { 1728 /* Make room on the queue by polling for finished requests. */ 1729 rc = ap_poll_queue(ap_dev, &flags); 1730 if (!rc) 1731 rc = __ap_queue_message(ap_dev, ap_msg); 1732 if (!rc) 1733 wake_up(&ap_poll_wait); 1734 if (rc == -ENODEV) 1735 ap_dev->unregistered = 1; 1736 } else { 1737 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1738 rc = -ENODEV; 1739 } 1740 spin_unlock_bh(&ap_dev->lock); 1741 if (rc == -ENODEV) 1742 device_unregister(&ap_dev->device); 1743 } 1744 EXPORT_SYMBOL(ap_queue_message); 1745 1746 /** 1747 * ap_cancel_message(): Cancel a crypto request. 1748 * @ap_dev: The AP device that has the message queued 1749 * @ap_msg: The message that is to be removed 1750 * 1751 * Cancel a crypto request. This is done by removing the request 1752 * from the device pending or request queue. Note that the 1753 * request stays on the AP queue. When it finishes the message 1754 * reply will be discarded because the psmid can't be found. 1755 */ 1756 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1757 { 1758 struct ap_message *tmp; 1759 1760 spin_lock_bh(&ap_dev->lock); 1761 if (!list_empty(&ap_msg->list)) { 1762 list_for_each_entry(tmp, &ap_dev->pendingq, list) 1763 if (tmp->psmid == ap_msg->psmid) { 1764 ap_dev->pendingq_count--; 1765 goto found; 1766 } 1767 ap_dev->requestq_count--; 1768 found: 1769 list_del_init(&ap_msg->list); 1770 } 1771 spin_unlock_bh(&ap_dev->lock); 1772 } 1773 EXPORT_SYMBOL(ap_cancel_message); 1774 1775 /** 1776 * ap_poll_timeout(): AP receive polling for finished AP requests. 1777 * @unused: Unused pointer. 1778 * 1779 * Schedules the AP tasklet using a high resolution timer. 1780 */ 1781 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) 1782 { 1783 tasklet_schedule(&ap_tasklet); 1784 return HRTIMER_NORESTART; 1785 } 1786 1787 /** 1788 * ap_reset(): Reset a not responding AP device. 
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
	else
		__ap_schedule_poll_timer();
}

static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	return 0;
}

/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags has been set arm the poll timer.
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the indicator if interrupts are used, so that new
	 * interrupts can be received. Doing this at the beginning of
	 * the tasklet is therefore important: no requests on any AP
	 * get lost.
	 */
	if (ap_using_interrupts())
		xchg(ap_airq.lsi_ptr, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}

/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
1864 */ 1865 static int ap_poll_thread(void *data) 1866 { 1867 DECLARE_WAITQUEUE(wait, current); 1868 unsigned long flags; 1869 int requests; 1870 struct ap_device *ap_dev; 1871 1872 set_user_nice(current, MAX_NICE); 1873 while (1) { 1874 if (ap_suspend_flag) 1875 return 0; 1876 if (need_resched()) { 1877 schedule(); 1878 continue; 1879 } 1880 add_wait_queue(&ap_poll_wait, &wait); 1881 set_current_state(TASK_INTERRUPTIBLE); 1882 if (kthread_should_stop()) 1883 break; 1884 requests = atomic_read(&ap_poll_requests); 1885 if (requests <= 0) 1886 schedule(); 1887 set_current_state(TASK_RUNNING); 1888 remove_wait_queue(&ap_poll_wait, &wait); 1889 1890 flags = 0; 1891 spin_lock_bh(&ap_device_list_lock); 1892 list_for_each_entry(ap_dev, &ap_device_list, list) { 1893 spin_lock(&ap_dev->lock); 1894 __ap_poll_device(ap_dev, &flags); 1895 spin_unlock(&ap_dev->lock); 1896 } 1897 spin_unlock_bh(&ap_device_list_lock); 1898 } 1899 set_current_state(TASK_RUNNING); 1900 remove_wait_queue(&ap_poll_wait, &wait); 1901 return 0; 1902 } 1903 1904 static int ap_poll_thread_start(void) 1905 { 1906 int rc; 1907 1908 if (ap_using_interrupts() || ap_suspend_flag) 1909 return 0; 1910 mutex_lock(&ap_poll_thread_mutex); 1911 if (!ap_poll_kthread) { 1912 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); 1913 rc = PTR_RET(ap_poll_kthread); 1914 if (rc) 1915 ap_poll_kthread = NULL; 1916 } 1917 else 1918 rc = 0; 1919 mutex_unlock(&ap_poll_thread_mutex); 1920 return rc; 1921 } 1922 1923 static void ap_poll_thread_stop(void) 1924 { 1925 mutex_lock(&ap_poll_thread_mutex); 1926 if (ap_poll_kthread) { 1927 kthread_stop(ap_poll_kthread); 1928 ap_poll_kthread = NULL; 1929 } 1930 mutex_unlock(&ap_poll_thread_mutex); 1931 } 1932 1933 /** 1934 * ap_request_timeout(): Handling of request timeouts 1935 * @data: Holds the AP device. 1936 * 1937 * Handles request timeouts. 1938 */ 1939 static void ap_request_timeout(unsigned long data) 1940 { 1941 struct ap_device *ap_dev = (struct ap_device *) data; 1942 1943 if (ap_dev->reset == AP_RESET_ARMED) { 1944 ap_dev->reset = AP_RESET_DO; 1945 1946 if (ap_using_interrupts()) 1947 tasklet_schedule(&ap_tasklet); 1948 } 1949 } 1950 1951 static void ap_reset_domain(void) 1952 { 1953 int i; 1954 1955 if (ap_domain_index != -1) 1956 for (i = 0; i < AP_DEVICES; i++) 1957 ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1958 } 1959 1960 static void ap_reset_all(void) 1961 { 1962 int i, j; 1963 1964 for (i = 0; i < AP_DOMAINS; i++) { 1965 if (!ap_test_config_domain(i)) 1966 continue; 1967 for (j = 0; j < AP_DEVICES; j++) { 1968 if (!ap_test_config_card_id(j)) 1969 continue; 1970 ap_reset_queue(AP_MKQID(j, i)); 1971 } 1972 } 1973 } 1974 1975 static struct reset_call ap_reset_call = { 1976 .fn = ap_reset_all, 1977 }; 1978 1979 /** 1980 * ap_module_init(): The module initialization code. 1981 * 1982 * Initializes the module. 1983 */ 1984 int __init ap_module_init(void) 1985 { 1986 int rc, i; 1987 1988 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { 1989 pr_warning("%d is not a valid cryptographic domain\n", 1990 ap_domain_index); 1991 return -EINVAL; 1992 } 1993 /* In resume callback we need to know if the user had set the domain. 1994 * If so, we can not just reset it. 
	 */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		rc = register_adapter_interrupt(&ap_airq);
		ap_airq_flag = (rc == 0);
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = PTR_RET(ap_root_device);
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	ap_query_configuration();
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Set up the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Set up the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
	return rc;
}

static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * ap_module_exit(): The module termination code
 *
 * Terminates the module.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
				      __ap_match_all))) {
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
}

module_init(ap_module_init);
module_exit(ap_module_exit);