/*
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
 *	      Felix Beck <felix.beck@de.ibm.com>
 *	      Holger Dengler <hd@linux.vnet.ibm.com>
 *
 * Adjunct processor bus.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/reset.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <asm/facility.h>
#include <linux/crypto.h>

#include "ap_bus.h"

/* Some prototypes. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(struct airq_struct *airq);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);
static void ap_query_configuration(void);

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
		   "Copyright IBM Corp. 2006, 2012");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("z90crypt");

/*
 * Module parameter
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

static struct device *ap_root_device = NULL;
static struct ap_config_info *ap_configuration;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);

/*
 * Workqueue & timer for bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/*
 * Tasklet & timer for AP request polling and interrupts
 */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static struct hrtimer ap_poll_timer;
/* In LPAR poll with 4kHz frequency, i.e. every 250000 nanoseconds.
 * Under z/VM change to 1500000 nanoseconds to adjust to z/VM polling. */
static unsigned long long poll_timeout = 250000;

/* Suspend flag */
static int ap_suspend_flag;
/* Flag to check if the domain was set through the module parameter domain=.
 * This is important when suspend and resume is done in a z/VM environment
 * where the domain might change. */
static int user_set_domain = 0;
static struct bus_type ap_bus_type;

/* Adapter interrupt definitions */
static int ap_airq_flag;

static struct airq_struct ap_airq = {
	.handler = ap_interrupt_handler,
	.isc = AP_ISC,
};

/**
 * ap_using_interrupts() - Returns non-zero if interrupt support is
 * available.
 */
static inline int ap_using_interrupts(void)
{
	return ap_airq_flag;
}

/**
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	return test_facility(2) && test_facility(65);
}

/**
 * ap_configuration_available(): Test if AP configuration
 * information is available.
 *
 * Returns 1 if AP configuration information is available.
 */
#ifdef CONFIG_64BIT
static int ap_configuration_available(void)
{
	return test_facility(2) && test_facility(12);
}
#endif

/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}

/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

#ifdef CONFIG_64BIT
/**
 * ap_queue_interruption_control(): Enable interruption for a specific AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Returns AP queue status.
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;
	asm volatile(
		".long 0xb2af0000"		/* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc");
	return reg1_out;
}
#endif

#ifdef CONFIG_64BIT
static inline struct ap_queue_status
__ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
	register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
	register unsigned long reg2 asm ("2");

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0:\n"
		EX_TABLE(0b, 0b)
		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
		:
		: "cc");

	*functions = (unsigned int)(reg2 >> 32);
	return reg1;
}
#endif

#ifdef CONFIG_64BIT
static inline int __ap_query_configuration(struct ap_config_info *config)
{
	register unsigned long reg0 asm ("0") = 0x04000000UL;
	register unsigned long reg1 asm ("1") = -EINVAL;
	register unsigned char *reg2 asm ("2") = (unsigned char *)config;

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2)
		:
		: "cc");

	return reg1;
}
#endif

/**
 * ap_query_functions(): Query supported functions.
 * @qid: The AP queue number
 * @functions: Pointer to functions field.
 *
 * Returns
 *	0 on success.
 *	-ENODEV if queue not valid.
 *	-EBUSY if device busy.
 *	-EINVAL if query function is not supported
 */
static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int i;
	status = __ap_query_functions(qid, functions);

	for (i = 0; i < AP_MAX_RESET; i++) {
		if (ap_queue_status_invalid_test(&status))
			return -ENODEV;

		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			return 0;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = __ap_query_functions(qid, functions);
		}
	}
	return -EBUSY;
#else
	return -EINVAL;
#endif
}

/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_queue_interruption_control(). Based
 * on the return value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			if (i < AP_MAX_RESET - 1) {
				udelay(5);
				status = ap_queue_interruption_control(qid,
								       ind);
				continue;
			}
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	return -EINVAL;
#endif
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  unsigned int special)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = psmid & 0xffffffff;

	if (special == 1)
		reg0 |= 0x400000UL;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		"   brc   2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc");
	return reg1;
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially. The response is incomplete, hence the
 * DQAP is repeated.
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;

	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		"   brc   6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc");
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/**
 * ap_query_queue(): Check if an AP queue is available.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * The test is repeated for AP_MAX_RESET times.
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			break;
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_INVALID_ADDRESS:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		case AP_RESPONSE_BUSY:
			break;
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}

/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Reset an AP queue and wait for it to become available again.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fall through */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			/* Time we are waiting until we give up (0.7sec * 90).
			 * Since the actual request (in progress) will not be
			 * interrupted immediately for the reset command,
			 * we have to be patient. In the worst case we have to
			 * wait 60sec + reset time (some msec).
			 */
			schedule_timeout(AP_RESET_TIMEOUT);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
		/* If interruption mode is supported by the machine,
		 * but an AP can not be enabled for interruption then
		 * the AP will be discarded. */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}

/**
 * ap_increase_queue_count(): Arm request timeout.
 * @ap_dev: Pointer to an AP device.
 *
 * Arm request timeout if an AP device was idle and a new request is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}

/**
 * ap_decrease_queue_count(): Decrease queue count.
 * @ap_dev: Pointer to an AP device.
 *
 * If AP device is still alive, re-schedule request timeout if there are still
 * pending requests.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * The timeout timer should be disabled now - since
		 * del_timer_sync() is very expensive, we just tell via the
		 * reset flag to ignore the pending timeout timer.
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}

/*
 * AP device related attributes.
 */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}

static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

static ssize_t ap_raw_hwtype_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->raw_hwtype);
}

static DEVICE_ATTR(raw_hwtype, 0444, ap_raw_hwtype_show, NULL);

static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

static ssize_t ap_requestq_count_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);

static ssize_t ap_pendingq_count_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);

static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
}

static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);

static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_raw_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_modalias.attr,
	&dev_attr_ap_functions.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};

/**
 * ap_bus_match()
 * @dev: Pointer to device
 * @drv: Pointer to device_driver
 *
 * AP bus driver registration/unregistration.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type.
 */
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/* Disable scanning for devices, so that we do not scan
		 * for them again after removing them.
		 */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished. */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}

static int ap_bus_resume(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	if (ap_suspend_flag) {
		ap_suspend_flag = 0;
		if (ap_interrupts_available()) {
			if (!ap_using_interrupts()) {
				rc = register_adapter_interrupt(&ap_airq);
				ap_airq_flag = (rc == 0);
			}
		} else {
			if (ap_using_interrupts()) {
				unregister_adapter_interrupt(&ap_airq);
				ap_airq_flag = 0;
			}
		}
		ap_query_configuration();
		if (!user_set_domain) {
			ap_domain_index = -1;
			ap_select_domain();
		}
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
		else
			rc = 0;
	} else
		rc = 0;
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};

static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;

	spin_lock_bh(&ap_device_list_lock);
	list_add(&ap_dev->list, &ap_device_list);
	spin_unlock_bh(&ap_device_list_lock);

	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	if (rc) {
		spin_lock_bh(&ap_device_list_lock);
		list_del_init(&ap_dev->list);
		spin_unlock_bh(&ap_device_list_lock);
	}
	return rc;
}

/**
 * __ap_flush_queue(): Flush requests.
 * @ap_dev: Pointer to the AP device
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);

void ap_bus_force_rescan(void)
{
	/* Re-arm the AP bus rescan timer. */
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
	/* Process an asynchronous bus rescan. */
	queue_work(ap_work_queue, &ap_config_work);
	flush_work(&ap_config_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);

/*
 * AP bus attributes.
 */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
{
	if (ap_configuration != NULL) {
		if (test_facility(76)) {
			/* format 1 - 256 bit domain field */
			return snprintf(buf, PAGE_SIZE,
				"0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
				ap_configuration->adm[0], ap_configuration->adm[1],
				ap_configuration->adm[2], ap_configuration->adm[3],
				ap_configuration->adm[4], ap_configuration->adm[5],
				ap_configuration->adm[6], ap_configuration->adm[7]);
		} else {
			/* format 0 - 16 bit domain field */
			return snprintf(buf, PAGE_SIZE, "%08x%08x\n",
				ap_configuration->adm[0], ap_configuration->adm[1]);
		}
	} else {
		/* QCI not supported */
		return snprintf(buf, PAGE_SIZE, "not supported\n");
	}
}

static BUS_ATTR(ap_control_domain_mask, 0444,
		ap_control_domain_mask_show, NULL);

static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			ap_using_interrupts() ? 1 : 0);
}

static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);

static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	} else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer,
			     hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_ap_control_domain_mask,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};

static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	if (nr > 0xFFu)
		return 0;
	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
}

/*
 * ap_test_config_card_id(): Test whether an AP card ID is configured.
 * @id: AP card ID
 *
 * Returns 0 if the card is not configured
 *	   1 if the card is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_card_id(unsigned int id)
{
	if (!ap_configuration)
		return 1;
	return ap_test_config(ap_configuration->apm, id);
}

/*
 * ap_test_config_domain(): Test whether an AP usage domain is configured.
 * @domain: AP usage domain ID
 *
 * Returns 0 if the usage domain is not configured
 *	   1 if the usage domain is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_domain(unsigned int domain)
{
	if (!ap_configuration)	  /* QCI not supported */
		if (domain < 16)
			return 1; /* then domains 0...15 are configured */
		else
			return 0;
	else
		return ap_test_config(ap_configuration->aqm, domain);
}

/**
 * ap_query_configuration(): Query AP configuration information.
 *
 * Query information of installed cards and configured domains from AP.
 */
static void ap_query_configuration(void)
{
#ifdef CONFIG_64BIT
	if (ap_configuration_available()) {
		if (!ap_configuration)
			ap_configuration =
				kzalloc(sizeof(struct ap_config_info),
					GFP_KERNEL);
		if (ap_configuration)
			__ap_query_configuration(ap_configuration);
	} else
		ap_configuration = NULL;
#else
	ap_configuration = NULL;
#endif
}

/**
 * ap_select_domain(): Select an AP domain.
 *
 * Pick one of the 16 AP domains.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	ap_qid_t qid;
	int rc, i, j;

	/* If APXA is not installed, only 16 domains can be defined
	 * (ap_configuration is NULL when QCI is not supported).
	 */
	if (ap_configuration && !ap_configuration->ap_extended &&
	    (ap_domain_index > 15))
		return -EINVAL;

	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		if (!ap_test_config_domain(i))
			continue;
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			if (!ap_test_config_card_id(j))
				continue;
			qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}

/**
 * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
 *
 * Find the device type if query queue returned a device type of 0.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg), 0);
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}

static void ap_interrupt_handler(struct airq_struct *airq)
{
	inc_irq_stat(IRQIO_APB);
	tasklet_schedule(&ap_tasklet);
}

/**
 * __ap_scan_bus(): Scan the AP bus.
 * @dev: Pointer to device
 * @data: Pointer to data
 *
 * Scan the AP bus for new devices.
 */
static int __ap_scan_bus(struct device *dev, void *data)
{
	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
}

static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	kfree(ap_dev);
}

static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	unsigned int device_functions;
	int rc, i;

	ap_query_configuration();
	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		if (ap_test_config_card_id(i))
			rc = ap_query_queue(qid, &queue_depth, &device_type);
		else
			rc = -ENODEV;
		if (dev) {
			if (rc == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				if (ap_dev->unregistered)
					i--;
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		switch (device_type) {
		case 0:
			/* device type probing for old cards */
			if (ap_probe_device_type(ap_dev)) {
				kfree(ap_dev);
				continue;
			}
			break;
		case 11:
			ap_dev->device_type = 10;
			break;
		default:
			ap_dev->device_type = device_type;
		}
		ap_dev->raw_hwtype = device_type;

		rc = ap_query_functions(qid, &device_functions);
		if (!rc)
			ap_dev->functions = device_functions;
		else
			ap_dev->functions = 0u;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		if (dev_set_name(&ap_dev->device, "card%02x",
				 AP_QID_DEVICE(ap_dev->qid))) {
			kfree(ap_dev);
			continue;
		}
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			put_device(&ap_dev->device);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		} else
			device_unregister(&ap_dev->device);
	}
}

static void ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}

/**
 * __ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet
 */
static inline void __ap_schedule_poll_timer(void)
{
	ktime_t hr_time;

	spin_lock_bh(&ap_poll_timer_lock);
	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
		goto out;
	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
out:
	spin_unlock_bh(&ap_poll_timer_lock);
}

/**
 * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet
 */
static inline void ap_schedule_poll_timer(void)
{
	if (ap_using_interrupts())
		return;
	__ap_schedule_poll_timer();
}

/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		__ap_schedule_poll_timer();
		/* fall through */
	case AP_RESPONSE_Q_FULL:
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Poll AP device for pending replies and send new messages. If either
 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
 * Returns 0.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}

/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Queue a message to a device. Returns 0 if successful.
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length,
				   ap_msg->special);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_REQ_FAC_NOT_INST:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}

void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	/* For asynchronous message handling a valid receive-callback
	 * is required. */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet using a high resolution timer.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}

/**
 * ap_reset(): Reset a not responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
	else
		__ap_schedule_poll_timer();
}

static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	return 0;
}

/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags has been set arm the poll timer.
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the indicator if interrupts are used. Thus new interrupts can
	 * be received. Doing this at the beginning of the tasklet is important
	 * so that no requests on any AP get lost.
	 */
	if (ap_using_interrupts())
		xchg(ap_airq.lsi_ptr, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}

/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, MAX_NICE);
	while (1) {
		if (ap_suspend_flag)
			return 0;
		if (need_resched()) {
			schedule();
			continue;
		}
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}

static int ap_poll_thread_start(void)
{
	int rc;

	if (ap_using_interrupts() || ap_suspend_flag)
		return 0;
	mutex_lock(&ap_poll_thread_mutex);
	if (!ap_poll_kthread) {
		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
		rc = PTR_RET(ap_poll_kthread);
		if (rc)
			ap_poll_kthread = NULL;
	} else
		rc = 0;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}

static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}

/**
 * ap_request_timeout(): Handling of request timeouts
 * @data: Holds the AP device.
 *
 * Handles request timeouts.
 */
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	if (ap_dev->reset == AP_RESET_ARMED) {
		ap_dev->reset = AP_RESET_DO;

		if (ap_using_interrupts())
			tasklet_schedule(&ap_tasklet);
	}
}

static void ap_reset_domain(void)
{
	int i;

	if (ap_domain_index != -1)
		for (i = 0; i < AP_DEVICES; i++)
			ap_reset_queue(AP_MKQID(i, ap_domain_index));
}

static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++) {
		if (!ap_test_config_domain(i))
			continue;
		for (j = 0; j < AP_DEVICES; j++) {
			if (!ap_test_config_card_id(j))
				continue;
			ap_reset_queue(AP_MKQID(j, i));
		}
	}
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};

/**
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/* In the resume callback we need to know if the user had set the
	 * domain. If so, we can not just reset it.
	 */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		rc = register_adapter_interrupt(&ap_airq);
		ap_airq_flag = (rc == 0);
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = PTR_RET(ap_root_device);
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	ap_query_configuration();
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
	return rc;
}

static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * ap_module_exit(): The module termination code
 *
 * Terminates the module.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
				      __ap_match_all))) {
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
}

module_init(ap_module_init);
module_exit(ap_module_exit);
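
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * a minimal AP device driver would hook into this bus roughly as shown
 * below. The device type value (10), the names sample_* and the timeout
 * value are hypothetical; the fields used (ids, probe, remove,
 * request_timeout) and the calls ap_driver_register(), ap_flush_queue()
 * and AP_DEVICE_ID_MATCH_DEVICE_TYPE correspond to the interfaces
 * exported and matched by the code above.
 *
 *	static struct ap_device_id sample_ids[] = {
 *		{ .dev_type = 10,
 *		  .match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE },
 *		{ },	// terminating entry, match_flags == 0
 *	};
 *
 *	static int sample_probe(struct ap_device *ap_dev)
 *	{
 *		// set up ap_dev->reply and driver private data here
 *		return 0;
 *	}
 *
 *	static void sample_remove(struct ap_device *ap_dev)
 *	{
 *		// drop all queued requests before the device goes away
 *		ap_flush_queue(ap_dev);
 *	}
 *
 *	static struct ap_driver sample_driver = {
 *		.ids = sample_ids,
 *		.probe = sample_probe,
 *		.remove = sample_remove,
 *		.request_timeout = 60 * HZ,	// used to arm ap_dev->timeout
 *	};
 *
 *	// in the driver's module init:
 *	//	rc = ap_driver_register(&sample_driver, THIS_MODULE, "sample");
 *	// and in its module exit:
 *	//	ap_driver_unregister(&sample_driver);
 */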