1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Linux for s390 qdio support, buffer handling, qdio API and module support. 4 * 5 * Copyright IBM Corp. 2000, 2008 6 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 7 * Jan Glauber <jang@linux.vnet.ibm.com> 8 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> 9 */ 10 #include <linux/module.h> 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/delay.h> 14 #include <linux/gfp.h> 15 #include <linux/io.h> 16 #include <linux/atomic.h> 17 #include <asm/debug.h> 18 #include <asm/qdio.h> 19 #include <asm/ipl.h> 20 21 #include "cio.h" 22 #include "css.h" 23 #include "device.h" 24 #include "qdio.h" 25 #include "qdio_debug.h" 26 27 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ 28 "Jan Glauber <jang@linux.vnet.ibm.com>"); 29 MODULE_DESCRIPTION("QDIO base support"); 30 MODULE_LICENSE("GPL"); 31 32 static inline int do_siga_sync(unsigned long schid, 33 unsigned long out_mask, unsigned long in_mask, 34 unsigned int fc) 35 { 36 int cc; 37 38 asm volatile( 39 " lgr 0,%[fc]\n" 40 " lgr 1,%[schid]\n" 41 " lgr 2,%[out]\n" 42 " lgr 3,%[in]\n" 43 " siga 0\n" 44 " ipm %[cc]\n" 45 " srl %[cc],28\n" 46 : [cc] "=&d" (cc) 47 : [fc] "d" (fc), [schid] "d" (schid), 48 [out] "d" (out_mask), [in] "d" (in_mask) 49 : "cc", "0", "1", "2", "3"); 50 return cc; 51 } 52 53 static inline int do_siga_input(unsigned long schid, unsigned long mask, 54 unsigned long fc) 55 { 56 int cc; 57 58 asm volatile( 59 " lgr 0,%[fc]\n" 60 " lgr 1,%[schid]\n" 61 " lgr 2,%[mask]\n" 62 " siga 0\n" 63 " ipm %[cc]\n" 64 " srl %[cc],28\n" 65 : [cc] "=&d" (cc) 66 : [fc] "d" (fc), [schid] "d" (schid), [mask] "d" (mask) 67 : "cc", "0", "1", "2"); 68 return cc; 69 } 70 71 /** 72 * do_siga_output - perform SIGA-w/wt function 73 * @schid: subchannel id or in case of QEBSM the subchannel token 74 * @mask: which output queues to process 75 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer 76 * @fc: function code to perform 77 * @aob: asynchronous operation block 78 * 79 * Returns condition code. 80 * Note: For IQDC unicast queues only the highest priority queue is processed. 81 */ 82 static inline int do_siga_output(unsigned long schid, unsigned long mask, 83 unsigned int *bb, unsigned long fc, 84 unsigned long aob) 85 { 86 int cc; 87 88 asm volatile( 89 " lgr 0,%[fc]\n" 90 " lgr 1,%[schid]\n" 91 " lgr 2,%[mask]\n" 92 " lgr 3,%[aob]\n" 93 " siga 0\n" 94 " lgr %[fc],0\n" 95 " ipm %[cc]\n" 96 " srl %[cc],28\n" 97 : [cc] "=&d" (cc), [fc] "+&d" (fc) 98 : [schid] "d" (schid), [mask] "d" (mask), [aob] "d" (aob) 99 : "cc", "0", "1", "2", "3"); 100 *bb = fc >> 31; 101 return cc; 102 } 103 104 /** 105 * qdio_do_eqbs - extract buffer states for QEBSM 106 * @q: queue to manipulate 107 * @state: state of the extracted buffers 108 * @start: buffer number to start at 109 * @count: count of buffers to examine 110 * @auto_ack: automatically acknowledge buffers 111 * 112 * Returns the number of successfully extracted equal buffer states. 113 * Stops processing if a state is different from the last buffers state. 114 */ 115 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 116 int start, int count, int auto_ack) 117 { 118 int tmp_count = count, tmp_start = start, nr = q->nr; 119 unsigned int ccq = 0; 120 121 qperf_inc(q, eqbs); 122 123 if (!q->is_input_q) 124 nr += q->irq_ptr->nr_input_qs; 125 again: 126 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, 127 auto_ack); 128 129 switch (ccq) { 130 case 0: 131 case 32: 132 /* all done, or next buffer state different */ 133 return count - tmp_count; 134 case 96: 135 /* not all buffers processed */ 136 qperf_inc(q, eqbs_partial); 137 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "EQBS part:%02x", 138 tmp_count); 139 return count - tmp_count; 140 case 97: 141 /* no buffer processed */ 142 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); 143 goto again; 144 default: 145 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); 146 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); 147 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 148 q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr, 149 q->first_to_check, count, q->irq_ptr->int_parm); 150 return 0; 151 } 152 } 153 154 /** 155 * qdio_do_sqbs - set buffer states for QEBSM 156 * @q: queue to manipulate 157 * @state: new state of the buffers 158 * @start: first buffer number to change 159 * @count: how many buffers to change 160 * 161 * Returns the number of successfully changed buffers. 162 * Does retrying until the specified count of buffer states is set or an 163 * error occurs. 164 */ 165 static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, 166 int count) 167 { 168 unsigned int ccq = 0; 169 int tmp_count = count, tmp_start = start; 170 int nr = q->nr; 171 172 if (!count) 173 return 0; 174 qperf_inc(q, sqbs); 175 176 if (!q->is_input_q) 177 nr += q->irq_ptr->nr_input_qs; 178 again: 179 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); 180 181 switch (ccq) { 182 case 0: 183 case 32: 184 /* all done, or active buffer adapter-owned */ 185 WARN_ON_ONCE(tmp_count); 186 return count - tmp_count; 187 case 96: 188 /* not all buffers processed */ 189 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); 190 qperf_inc(q, sqbs_partial); 191 goto again; 192 default: 193 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); 194 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); 195 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 196 q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr, 197 q->first_to_check, count, q->irq_ptr->int_parm); 198 return 0; 199 } 200 } 201 202 /* 203 * Returns number of examined buffers and their common state in *state. 204 * Requested number of buffers-to-examine must be > 0. 205 */ 206 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, 207 unsigned char *state, unsigned int count, 208 int auto_ack) 209 { 210 unsigned char __state = 0; 211 int i = 1; 212 213 if (is_qebsm(q)) 214 return qdio_do_eqbs(q, state, bufnr, count, auto_ack); 215 216 /* get initial state: */ 217 __state = q->slsb.val[bufnr]; 218 219 /* Bail out early if there is no work on the queue: */ 220 if (__state & SLSB_OWNER_CU) 221 goto out; 222 223 for (; i < count; i++) { 224 bufnr = next_buf(bufnr); 225 226 /* stop if next state differs from initial state: */ 227 if (q->slsb.val[bufnr] != __state) 228 break; 229 } 230 231 out: 232 *state = __state; 233 return i; 234 } 235 236 static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, 237 unsigned char *state, int auto_ack) 238 { 239 return get_buf_states(q, bufnr, state, 1, auto_ack); 240 } 241 242 /* wrap-around safe setting of slsb states, returns number of changed buffers */ 243 static inline int set_buf_states(struct qdio_q *q, int bufnr, 244 unsigned char state, int count) 245 { 246 int i; 247 248 if (is_qebsm(q)) 249 return qdio_do_sqbs(q, state, bufnr, count); 250 251 /* Ensure that all preceding changes to the SBALs are visible: */ 252 mb(); 253 254 for (i = 0; i < count; i++) { 255 WRITE_ONCE(q->slsb.val[bufnr], state); 256 bufnr = next_buf(bufnr); 257 } 258 259 /* Make our SLSB changes visible: */ 260 mb(); 261 262 return count; 263 } 264 265 static inline int set_buf_state(struct qdio_q *q, int bufnr, 266 unsigned char state) 267 { 268 return set_buf_states(q, bufnr, state, 1); 269 } 270 271 /* set slsb states to initial state */ 272 static void qdio_init_buf_states(struct qdio_irq *irq_ptr) 273 { 274 struct qdio_q *q; 275 int i; 276 277 for_each_input_queue(irq_ptr, q, i) 278 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, 279 QDIO_MAX_BUFFERS_PER_Q); 280 for_each_output_queue(irq_ptr, q, i) 281 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, 282 QDIO_MAX_BUFFERS_PER_Q); 283 } 284 285 static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, 286 unsigned int input) 287 { 288 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 289 unsigned int fc = QDIO_SIGA_SYNC; 290 int cc; 291 292 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 293 qperf_inc(q, siga_sync); 294 295 if (is_qebsm(q)) { 296 schid = q->irq_ptr->sch_token; 297 fc |= QDIO_SIGA_QEBSM_FLAG; 298 } 299 300 cc = do_siga_sync(schid, output, input, fc); 301 if (unlikely(cc)) 302 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); 303 return (cc) ? -EIO : 0; 304 } 305 306 static inline int qdio_sync_input_queue(struct qdio_q *q) 307 { 308 return qdio_siga_sync(q, 0, q->mask); 309 } 310 311 static inline int qdio_sync_output_queue(struct qdio_q *q) 312 { 313 return qdio_siga_sync(q, q->mask, 0); 314 } 315 316 static inline int qdio_siga_sync_q(struct qdio_q *q) 317 { 318 if (q->is_input_q) 319 return qdio_sync_input_queue(q); 320 else 321 return qdio_sync_output_queue(q); 322 } 323 324 static int qdio_siga_output(struct qdio_q *q, unsigned int count, 325 unsigned int *busy_bit, unsigned long aob) 326 { 327 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 328 unsigned int fc = QDIO_SIGA_WRITE; 329 u64 start_time = 0; 330 int retries = 0, cc; 331 332 if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) { 333 if (count > 1) 334 fc = QDIO_SIGA_WRITEM; 335 else if (aob) 336 fc = QDIO_SIGA_WRITEQ; 337 } 338 339 if (is_qebsm(q)) { 340 schid = q->irq_ptr->sch_token; 341 fc |= QDIO_SIGA_QEBSM_FLAG; 342 } 343 again: 344 cc = do_siga_output(schid, q->mask, busy_bit, fc, aob); 345 346 /* hipersocket busy condition */ 347 if (unlikely(*busy_bit)) { 348 retries++; 349 350 if (!start_time) { 351 start_time = get_tod_clock_fast(); 352 goto again; 353 } 354 if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE) 355 goto again; 356 } 357 if (retries) { 358 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, 359 "%4x cc2 BB1:%1d", SCH_NO(q), q->nr); 360 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries); 361 } 362 return cc; 363 } 364 365 static inline int qdio_siga_input(struct qdio_q *q) 366 { 367 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 368 unsigned int fc = QDIO_SIGA_READ; 369 int cc; 370 371 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 372 qperf_inc(q, siga_read); 373 374 if (is_qebsm(q)) { 375 schid = q->irq_ptr->sch_token; 376 fc |= QDIO_SIGA_QEBSM_FLAG; 377 } 378 379 cc = do_siga_input(schid, q->mask, fc); 380 if (unlikely(cc)) 381 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); 382 return (cc) ? -EIO : 0; 383 } 384 385 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, 386 unsigned char *state) 387 { 388 if (qdio_need_siga_sync(q->irq_ptr)) 389 qdio_siga_sync_q(q); 390 return get_buf_state(q, bufnr, state, 0); 391 } 392 393 static inline void qdio_stop_polling(struct qdio_q *q) 394 { 395 if (!q->u.in.batch_count) 396 return; 397 398 qperf_inc(q, stop_polling); 399 400 /* show the card that we are not polling anymore */ 401 set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT, 402 q->u.in.batch_count); 403 q->u.in.batch_count = 0; 404 } 405 406 static inline void account_sbals(struct qdio_q *q, unsigned int count) 407 { 408 q->q_stats.nr_sbal_total += count; 409 q->q_stats.nr_sbals[ilog2(count)]++; 410 } 411 412 static void process_buffer_error(struct qdio_q *q, unsigned int start, 413 int count) 414 { 415 /* special handling for no target buffer empty */ 416 if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q && 417 q->sbal[start]->element[15].sflags == 0x10) { 418 qperf_inc(q, target_full); 419 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); 420 return; 421 } 422 423 DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); 424 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); 425 DBF_ERROR("FTC:%3d C:%3d", start, count); 426 DBF_ERROR("F14:%2x F15:%2x", 427 q->sbal[start]->element[14].sflags, 428 q->sbal[start]->element[15].sflags); 429 } 430 431 static inline void inbound_handle_work(struct qdio_q *q, unsigned int start, 432 int count, bool auto_ack) 433 { 434 /* ACK the newest SBAL: */ 435 if (!auto_ack) 436 set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK); 437 438 if (!q->u.in.batch_count) 439 q->u.in.batch_start = start; 440 q->u.in.batch_count += count; 441 } 442 443 static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start, 444 unsigned int *error) 445 { 446 unsigned char state = 0; 447 int count; 448 449 q->timestamp = get_tod_clock_fast(); 450 451 count = atomic_read(&q->nr_buf_used); 452 if (!count) 453 return 0; 454 455 if (qdio_need_siga_sync(q->irq_ptr)) 456 qdio_sync_input_queue(q); 457 458 count = get_buf_states(q, start, &state, count, 1); 459 if (!count) 460 return 0; 461 462 switch (state) { 463 case SLSB_P_INPUT_PRIMED: 464 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, 465 count); 466 467 inbound_handle_work(q, start, count, is_qebsm(q)); 468 if (atomic_sub_return(count, &q->nr_buf_used) == 0) 469 qperf_inc(q, inbound_queue_full); 470 if (q->irq_ptr->perf_stat_enabled) 471 account_sbals(q, count); 472 return count; 473 case SLSB_P_INPUT_ERROR: 474 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in err:%1d %02x", q->nr, 475 count); 476 477 *error = QDIO_ERROR_SLSB_STATE; 478 process_buffer_error(q, start, count); 479 inbound_handle_work(q, start, count, false); 480 if (atomic_sub_return(count, &q->nr_buf_used) == 0) 481 qperf_inc(q, inbound_queue_full); 482 if (q->irq_ptr->perf_stat_enabled) 483 account_sbals_error(q, count); 484 return count; 485 case SLSB_CU_INPUT_EMPTY: 486 if (q->irq_ptr->perf_stat_enabled) 487 q->q_stats.nr_sbal_nop++; 488 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x", 489 q->nr, start); 490 return 0; 491 case SLSB_P_INPUT_NOT_INIT: 492 case SLSB_P_INPUT_ACK: 493 /* We should never see this state, throw a WARN: */ 494 default: 495 dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, 496 "found state %#x at index %u on queue %u\n", 497 state, start, q->nr); 498 return 0; 499 } 500 } 501 502 static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) 503 { 504 unsigned char state = 0; 505 506 if (!atomic_read(&q->nr_buf_used)) 507 return 1; 508 509 if (qdio_need_siga_sync(q->irq_ptr)) 510 qdio_sync_input_queue(q); 511 get_buf_state(q, start, &state, 0); 512 513 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) 514 /* more work coming */ 515 return 0; 516 517 return 1; 518 } 519 520 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start, 521 unsigned int *error) 522 { 523 unsigned char state = 0; 524 int count; 525 526 q->timestamp = get_tod_clock_fast(); 527 528 count = atomic_read(&q->nr_buf_used); 529 if (!count) 530 return 0; 531 532 if (qdio_need_siga_sync(q->irq_ptr)) 533 qdio_sync_output_queue(q); 534 535 count = get_buf_states(q, start, &state, count, 0); 536 if (!count) 537 return 0; 538 539 switch (state) { 540 case SLSB_P_OUTPUT_PENDING: 541 *error = QDIO_ERROR_SLSB_PENDING; 542 fallthrough; 543 case SLSB_P_OUTPUT_EMPTY: 544 /* the adapter got it */ 545 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, 546 "out empty:%1d %02x", q->nr, count); 547 548 atomic_sub(count, &q->nr_buf_used); 549 if (q->irq_ptr->perf_stat_enabled) 550 account_sbals(q, count); 551 return count; 552 case SLSB_P_OUTPUT_ERROR: 553 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out error:%1d %02x", 554 q->nr, count); 555 556 *error = QDIO_ERROR_SLSB_STATE; 557 process_buffer_error(q, start, count); 558 atomic_sub(count, &q->nr_buf_used); 559 if (q->irq_ptr->perf_stat_enabled) 560 account_sbals_error(q, count); 561 return count; 562 case SLSB_CU_OUTPUT_PRIMED: 563 /* the adapter has not fetched the output yet */ 564 if (q->irq_ptr->perf_stat_enabled) 565 q->q_stats.nr_sbal_nop++; 566 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", 567 q->nr); 568 return 0; 569 case SLSB_P_OUTPUT_HALTED: 570 return 0; 571 case SLSB_P_OUTPUT_NOT_INIT: 572 /* We should never see this state, throw a WARN: */ 573 default: 574 dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1, 575 "found state %#x at index %u on queue %u\n", 576 state, start, q->nr); 577 return 0; 578 } 579 } 580 581 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count, 582 unsigned long aob) 583 { 584 int retries = 0, cc; 585 unsigned int busy_bit; 586 587 if (!qdio_need_siga_out(q->irq_ptr)) 588 return 0; 589 590 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); 591 retry: 592 qperf_inc(q, siga_write); 593 594 cc = qdio_siga_output(q, count, &busy_bit, aob); 595 switch (cc) { 596 case 0: 597 break; 598 case 2: 599 if (busy_bit) { 600 while (++retries < QDIO_BUSY_BIT_RETRIES) { 601 mdelay(QDIO_BUSY_BIT_RETRY_DELAY); 602 goto retry; 603 } 604 DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); 605 cc = -EBUSY; 606 } else { 607 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); 608 cc = -ENOBUFS; 609 } 610 break; 611 case 1: 612 case 3: 613 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); 614 cc = -EIO; 615 break; 616 } 617 if (retries) { 618 DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); 619 DBF_ERROR("count:%u", retries); 620 } 621 return cc; 622 } 623 624 static inline void qdio_set_state(struct qdio_irq *irq_ptr, 625 enum qdio_irq_states state) 626 { 627 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); 628 629 irq_ptr->state = state; 630 mb(); 631 } 632 633 static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) 634 { 635 if (irb->esw.esw0.erw.cons) { 636 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); 637 DBF_ERROR_HEX(irb, 64); 638 DBF_ERROR_HEX(irb->ecw, 64); 639 } 640 } 641 642 /* PCI interrupt handler */ 643 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) 644 { 645 if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 646 return; 647 648 qdio_deliver_irq(irq_ptr); 649 irq_ptr->last_data_irq_time = S390_lowcore.int_clock; 650 } 651 652 static void qdio_handle_activate_check(struct qdio_irq *irq_ptr, 653 unsigned long intparm, int cstat, 654 int dstat) 655 { 656 struct qdio_q *q; 657 658 DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); 659 DBF_ERROR("intp :%lx", intparm); 660 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 661 662 if (irq_ptr->nr_input_qs) { 663 q = irq_ptr->input_qs[0]; 664 } else if (irq_ptr->nr_output_qs) { 665 q = irq_ptr->output_qs[0]; 666 } else { 667 dump_stack(); 668 goto no_handler; 669 } 670 671 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 672 q->nr, q->first_to_check, 0, irq_ptr->int_parm); 673 no_handler: 674 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 675 /* 676 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen. 677 * Therefore we call the LGR detection function here. 678 */ 679 lgr_info_log(); 680 } 681 682 static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat, 683 int dstat) 684 { 685 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); 686 687 if (cstat) 688 goto error; 689 if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) 690 goto error; 691 if (!(dstat & DEV_STAT_DEV_END)) 692 goto error; 693 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); 694 return; 695 696 error: 697 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); 698 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 699 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 700 } 701 702 /* qdio interrupt handler */ 703 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, 704 struct irb *irb) 705 { 706 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 707 struct subchannel_id schid; 708 int cstat, dstat; 709 710 if (!intparm || !irq_ptr) { 711 ccw_device_get_schid(cdev, &schid); 712 DBF_ERROR("qint:%4x", schid.sch_no); 713 return; 714 } 715 716 if (irq_ptr->perf_stat_enabled) 717 irq_ptr->perf_stat.qdio_int++; 718 719 if (IS_ERR(irb)) { 720 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); 721 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 722 wake_up(&cdev->private->wait_q); 723 return; 724 } 725 qdio_irq_check_sense(irq_ptr, irb); 726 cstat = irb->scsw.cmd.cstat; 727 dstat = irb->scsw.cmd.dstat; 728 729 switch (irq_ptr->state) { 730 case QDIO_IRQ_STATE_INACTIVE: 731 qdio_establish_handle_irq(irq_ptr, cstat, dstat); 732 break; 733 case QDIO_IRQ_STATE_CLEANUP: 734 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 735 break; 736 case QDIO_IRQ_STATE_ESTABLISHED: 737 case QDIO_IRQ_STATE_ACTIVE: 738 if (cstat & SCHN_STAT_PCI) { 739 qdio_int_handler_pci(irq_ptr); 740 return; 741 } 742 if (cstat || dstat) 743 qdio_handle_activate_check(irq_ptr, intparm, cstat, 744 dstat); 745 break; 746 case QDIO_IRQ_STATE_STOPPED: 747 break; 748 default: 749 WARN_ON_ONCE(1); 750 } 751 wake_up(&cdev->private->wait_q); 752 } 753 754 /** 755 * qdio_get_ssqd_desc - get qdio subchannel description 756 * @cdev: ccw device to get description for 757 * @data: where to store the ssqd 758 * 759 * Returns 0 or an error code. The results of the chsc are stored in the 760 * specified structure. 761 */ 762 int qdio_get_ssqd_desc(struct ccw_device *cdev, 763 struct qdio_ssqd_desc *data) 764 { 765 struct subchannel_id schid; 766 767 if (!cdev || !cdev->private) 768 return -EINVAL; 769 770 ccw_device_get_schid(cdev, &schid); 771 DBF_EVENT("get ssqd:%4x", schid.sch_no); 772 return qdio_setup_get_ssqd(NULL, &schid, data); 773 } 774 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); 775 776 static int qdio_cancel_ccw(struct qdio_irq *irq, int how) 777 { 778 struct ccw_device *cdev = irq->cdev; 779 long timeout; 780 int rc; 781 782 spin_lock_irq(get_ccwdev_lock(cdev)); 783 qdio_set_state(irq, QDIO_IRQ_STATE_CLEANUP); 784 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) 785 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); 786 else 787 /* default behaviour is halt */ 788 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); 789 spin_unlock_irq(get_ccwdev_lock(cdev)); 790 if (rc) { 791 DBF_ERROR("%4x SHUTD ERR", irq->schid.sch_no); 792 DBF_ERROR("rc:%4d", rc); 793 return rc; 794 } 795 796 timeout = wait_event_interruptible_timeout(cdev->private->wait_q, 797 irq->state == QDIO_IRQ_STATE_INACTIVE || 798 irq->state == QDIO_IRQ_STATE_ERR, 799 10 * HZ); 800 if (timeout <= 0) 801 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 802 803 return rc; 804 } 805 806 /** 807 * qdio_shutdown - shut down a qdio subchannel 808 * @cdev: associated ccw device 809 * @how: use halt or clear to shutdown 810 */ 811 int qdio_shutdown(struct ccw_device *cdev, int how) 812 { 813 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 814 struct subchannel_id schid; 815 int rc; 816 817 if (!irq_ptr) 818 return -ENODEV; 819 820 WARN_ON_ONCE(irqs_disabled()); 821 ccw_device_get_schid(cdev, &schid); 822 DBF_EVENT("qshutdown:%4x", schid.sch_no); 823 824 mutex_lock(&irq_ptr->setup_mutex); 825 /* 826 * Subchannel was already shot down. We cannot prevent being called 827 * twice since cio may trigger a shutdown asynchronously. 828 */ 829 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 830 mutex_unlock(&irq_ptr->setup_mutex); 831 return 0; 832 } 833 834 /* 835 * Indicate that the device is going down. 836 */ 837 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 838 839 qdio_shutdown_debug_entries(irq_ptr); 840 841 rc = qdio_cancel_ccw(irq_ptr, how); 842 qdio_shutdown_thinint(irq_ptr); 843 qdio_shutdown_irq(irq_ptr); 844 845 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 846 mutex_unlock(&irq_ptr->setup_mutex); 847 if (rc) 848 return rc; 849 return 0; 850 } 851 EXPORT_SYMBOL_GPL(qdio_shutdown); 852 853 /** 854 * qdio_free - free data structures for a qdio subchannel 855 * @cdev: associated ccw device 856 */ 857 int qdio_free(struct ccw_device *cdev) 858 { 859 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 860 struct subchannel_id schid; 861 862 if (!irq_ptr) 863 return -ENODEV; 864 865 ccw_device_get_schid(cdev, &schid); 866 DBF_EVENT("qfree:%4x", schid.sch_no); 867 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); 868 mutex_lock(&irq_ptr->setup_mutex); 869 870 irq_ptr->debug_area = NULL; 871 cdev->private->qdio_data = NULL; 872 mutex_unlock(&irq_ptr->setup_mutex); 873 874 qdio_free_queues(irq_ptr); 875 free_page((unsigned long) irq_ptr->qdr); 876 free_page(irq_ptr->chsc_page); 877 free_page((unsigned long) irq_ptr); 878 return 0; 879 } 880 EXPORT_SYMBOL_GPL(qdio_free); 881 882 /** 883 * qdio_allocate - allocate qdio queues and associated data 884 * @cdev: associated ccw device 885 * @no_input_qs: allocate this number of Input Queues 886 * @no_output_qs: allocate this number of Output Queues 887 */ 888 int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs, 889 unsigned int no_output_qs) 890 { 891 struct subchannel_id schid; 892 struct qdio_irq *irq_ptr; 893 int rc = -ENOMEM; 894 895 ccw_device_get_schid(cdev, &schid); 896 DBF_EVENT("qallocate:%4x", schid.sch_no); 897 898 if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ || 899 no_output_qs > QDIO_MAX_QUEUES_PER_IRQ) 900 return -EINVAL; 901 902 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ 903 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 904 if (!irq_ptr) 905 return -ENOMEM; 906 907 irq_ptr->cdev = cdev; 908 mutex_init(&irq_ptr->setup_mutex); 909 if (qdio_allocate_dbf(irq_ptr)) 910 goto err_dbf; 911 912 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs, 913 no_output_qs); 914 915 /* 916 * Allocate a page for the chsc calls in qdio_establish. 917 * Must be pre-allocated since a zfcp recovery will call 918 * qdio_establish. In case of low memory and swap on a zfcp disk 919 * we may not be able to allocate memory otherwise. 920 */ 921 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); 922 if (!irq_ptr->chsc_page) 923 goto err_chsc; 924 925 /* qdr is used in ccw1.cda which is u32 */ 926 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 927 if (!irq_ptr->qdr) 928 goto err_qdr; 929 930 rc = qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs); 931 if (rc) 932 goto err_queues; 933 934 cdev->private->qdio_data = irq_ptr; 935 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 936 return 0; 937 938 err_queues: 939 free_page((unsigned long) irq_ptr->qdr); 940 err_qdr: 941 free_page(irq_ptr->chsc_page); 942 err_chsc: 943 err_dbf: 944 free_page((unsigned long) irq_ptr); 945 return rc; 946 } 947 EXPORT_SYMBOL_GPL(qdio_allocate); 948 949 static void qdio_trace_init_data(struct qdio_irq *irq, 950 struct qdio_initialize *data) 951 { 952 DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format); 953 DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format); 954 DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR); 955 DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs, 956 data->no_output_qs); 957 DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR); 958 DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR); 959 DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR); 960 DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR); 961 DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *), 962 DBF_ERR); 963 } 964 965 /** 966 * qdio_establish - establish queues on a qdio subchannel 967 * @cdev: associated ccw device 968 * @init_data: initialization data 969 */ 970 int qdio_establish(struct ccw_device *cdev, 971 struct qdio_initialize *init_data) 972 { 973 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 974 struct subchannel_id schid; 975 long timeout; 976 int rc; 977 978 ccw_device_get_schid(cdev, &schid); 979 DBF_EVENT("qestablish:%4x", schid.sch_no); 980 981 if (!irq_ptr) 982 return -ENODEV; 983 984 if (init_data->no_input_qs > irq_ptr->max_input_qs || 985 init_data->no_output_qs > irq_ptr->max_output_qs) 986 return -EINVAL; 987 988 if ((init_data->no_input_qs && !init_data->input_handler) || 989 (init_data->no_output_qs && !init_data->output_handler)) 990 return -EINVAL; 991 992 if (!init_data->input_sbal_addr_array || 993 !init_data->output_sbal_addr_array) 994 return -EINVAL; 995 996 if (!init_data->irq_poll) 997 return -EINVAL; 998 999 mutex_lock(&irq_ptr->setup_mutex); 1000 qdio_trace_init_data(irq_ptr, init_data); 1001 qdio_setup_irq(irq_ptr, init_data); 1002 1003 rc = qdio_establish_thinint(irq_ptr); 1004 if (rc) 1005 goto err_thinint; 1006 1007 /* establish q */ 1008 irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; 1009 irq_ptr->ccw.flags = CCW_FLAG_SLI; 1010 irq_ptr->ccw.count = irq_ptr->equeue.count; 1011 irq_ptr->ccw.cda = (u32) virt_to_phys(irq_ptr->qdr); 1012 1013 spin_lock_irq(get_ccwdev_lock(cdev)); 1014 ccw_device_set_options_mask(cdev, 0); 1015 1016 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); 1017 spin_unlock_irq(get_ccwdev_lock(cdev)); 1018 if (rc) { 1019 DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); 1020 DBF_ERROR("rc:%4x", rc); 1021 goto err_ccw_start; 1022 } 1023 1024 timeout = wait_event_interruptible_timeout(cdev->private->wait_q, 1025 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || 1026 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); 1027 if (timeout <= 0) { 1028 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; 1029 goto err_ccw_timeout; 1030 } 1031 1032 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { 1033 rc = -EIO; 1034 goto err_ccw_error; 1035 } 1036 1037 qdio_setup_ssqd_info(irq_ptr); 1038 1039 /* qebsm is now setup if available, initialize buffer states */ 1040 qdio_init_buf_states(irq_ptr); 1041 1042 mutex_unlock(&irq_ptr->setup_mutex); 1043 qdio_print_subchannel_info(irq_ptr); 1044 qdio_setup_debug_entries(irq_ptr); 1045 return 0; 1046 1047 err_ccw_timeout: 1048 qdio_cancel_ccw(irq_ptr, QDIO_FLAG_CLEANUP_USING_CLEAR); 1049 err_ccw_error: 1050 err_ccw_start: 1051 qdio_shutdown_thinint(irq_ptr); 1052 err_thinint: 1053 qdio_shutdown_irq(irq_ptr); 1054 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1055 mutex_unlock(&irq_ptr->setup_mutex); 1056 return rc; 1057 } 1058 EXPORT_SYMBOL_GPL(qdio_establish); 1059 1060 /** 1061 * qdio_activate - activate queues on a qdio subchannel 1062 * @cdev: associated cdev 1063 */ 1064 int qdio_activate(struct ccw_device *cdev) 1065 { 1066 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1067 struct subchannel_id schid; 1068 int rc; 1069 1070 ccw_device_get_schid(cdev, &schid); 1071 DBF_EVENT("qactivate:%4x", schid.sch_no); 1072 1073 if (!irq_ptr) 1074 return -ENODEV; 1075 1076 mutex_lock(&irq_ptr->setup_mutex); 1077 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 1078 rc = -EBUSY; 1079 goto out; 1080 } 1081 1082 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; 1083 irq_ptr->ccw.flags = CCW_FLAG_SLI; 1084 irq_ptr->ccw.count = irq_ptr->aqueue.count; 1085 irq_ptr->ccw.cda = 0; 1086 1087 spin_lock_irq(get_ccwdev_lock(cdev)); 1088 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); 1089 1090 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 1091 0, DOIO_DENY_PREFETCH); 1092 spin_unlock_irq(get_ccwdev_lock(cdev)); 1093 if (rc) { 1094 DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); 1095 DBF_ERROR("rc:%4x", rc); 1096 goto out; 1097 } 1098 1099 /* wait for subchannel to become active */ 1100 msleep(5); 1101 1102 switch (irq_ptr->state) { 1103 case QDIO_IRQ_STATE_STOPPED: 1104 case QDIO_IRQ_STATE_ERR: 1105 rc = -EIO; 1106 break; 1107 default: 1108 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); 1109 rc = 0; 1110 } 1111 out: 1112 mutex_unlock(&irq_ptr->setup_mutex); 1113 return rc; 1114 } 1115 EXPORT_SYMBOL_GPL(qdio_activate); 1116 1117 /** 1118 * handle_inbound - reset processed input buffers 1119 * @q: queue containing the buffers 1120 * @bufnr: first buffer to process 1121 * @count: how many buffers are emptied 1122 */ 1123 static int handle_inbound(struct qdio_q *q, int bufnr, int count) 1124 { 1125 int overlap; 1126 1127 qperf_inc(q, inbound_call); 1128 1129 /* If any processed SBALs are returned to HW, adjust our tracking: */ 1130 overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr), 1131 q->u.in.batch_count); 1132 if (overlap > 0) { 1133 q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap); 1134 q->u.in.batch_count -= overlap; 1135 } 1136 1137 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); 1138 atomic_add(count, &q->nr_buf_used); 1139 1140 if (qdio_need_siga_in(q->irq_ptr)) 1141 return qdio_siga_input(q); 1142 1143 return 0; 1144 } 1145 1146 /** 1147 * handle_outbound - process filled outbound buffers 1148 * @q: queue containing the buffers 1149 * @bufnr: first buffer to process 1150 * @count: how many buffers are filled 1151 * @aob: asynchronous operation block 1152 */ 1153 static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int count, 1154 struct qaob *aob) 1155 { 1156 unsigned char state = 0; 1157 int used, rc = 0; 1158 1159 qperf_inc(q, outbound_call); 1160 1161 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); 1162 used = atomic_add_return(count, &q->nr_buf_used); 1163 1164 if (used == QDIO_MAX_BUFFERS_PER_Q) 1165 qperf_inc(q, outbound_queue_full); 1166 1167 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1168 unsigned long phys_aob = aob ? virt_to_phys(aob) : 0; 1169 1170 WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256)); 1171 rc = qdio_kick_outbound_q(q, count, phys_aob); 1172 } else if (qdio_need_siga_sync(q->irq_ptr)) { 1173 rc = qdio_sync_output_queue(q); 1174 } else if (count < QDIO_MAX_BUFFERS_PER_Q && 1175 get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && 1176 state == SLSB_CU_OUTPUT_PRIMED) { 1177 /* The previous buffer is not processed yet, tack on. */ 1178 qperf_inc(q, fast_requeue); 1179 } else { 1180 rc = qdio_kick_outbound_q(q, count, 0); 1181 } 1182 1183 return rc; 1184 } 1185 1186 /** 1187 * do_QDIO - process input or output buffers 1188 * @cdev: associated ccw_device for the qdio subchannel 1189 * @callflags: input or output and special flags from the program 1190 * @q_nr: queue number 1191 * @bufnr: buffer number 1192 * @count: how many buffers to process 1193 * @aob: asynchronous operation block (outbound only) 1194 */ 1195 int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 1196 int q_nr, unsigned int bufnr, unsigned int count, struct qaob *aob) 1197 { 1198 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1199 1200 if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) 1201 return -EINVAL; 1202 1203 if (!irq_ptr) 1204 return -ENODEV; 1205 1206 DBF_DEV_EVENT(DBF_INFO, irq_ptr, 1207 "do%02x b:%02x c:%02x", callflags, bufnr, count); 1208 1209 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) 1210 return -EIO; 1211 if (!count) 1212 return 0; 1213 if (callflags & QDIO_FLAG_SYNC_INPUT) 1214 return handle_inbound(irq_ptr->input_qs[q_nr], bufnr, count); 1215 else if (callflags & QDIO_FLAG_SYNC_OUTPUT) 1216 return handle_outbound(irq_ptr->output_qs[q_nr], bufnr, count, aob); 1217 return -EINVAL; 1218 } 1219 EXPORT_SYMBOL_GPL(do_QDIO); 1220 1221 /** 1222 * qdio_start_irq - enable interrupt processing for the device 1223 * @cdev: associated ccw_device for the qdio subchannel 1224 * 1225 * Return codes 1226 * 0 - success 1227 * 1 - irqs not started since new data is available 1228 */ 1229 int qdio_start_irq(struct ccw_device *cdev) 1230 { 1231 struct qdio_q *q; 1232 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1233 unsigned int i; 1234 1235 if (!irq_ptr) 1236 return -ENODEV; 1237 1238 for_each_input_queue(irq_ptr, q, i) 1239 qdio_stop_polling(q); 1240 1241 clear_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state); 1242 1243 /* 1244 * We need to check again to not lose initiative after 1245 * resetting the ACK state. 1246 */ 1247 if (test_nonshared_ind(irq_ptr)) 1248 goto rescan; 1249 1250 for_each_input_queue(irq_ptr, q, i) { 1251 if (!qdio_inbound_q_done(q, q->first_to_check)) 1252 goto rescan; 1253 } 1254 1255 return 0; 1256 1257 rescan: 1258 if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state)) 1259 return 0; 1260 else 1261 return 1; 1262 1263 } 1264 EXPORT_SYMBOL(qdio_start_irq); 1265 1266 static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr, 1267 unsigned int *error) 1268 { 1269 unsigned int start = q->first_to_check; 1270 int count; 1271 1272 *error = 0; 1273 count = q->is_input_q ? get_inbound_buffer_frontier(q, start, error) : 1274 get_outbound_buffer_frontier(q, start, error); 1275 if (count == 0) 1276 return 0; 1277 1278 *bufnr = start; 1279 1280 /* for the next time */ 1281 q->first_to_check = add_buf(start, count); 1282 1283 return count; 1284 } 1285 1286 int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input, 1287 unsigned int *bufnr, unsigned int *error) 1288 { 1289 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1290 struct qdio_q *q; 1291 1292 if (!irq_ptr) 1293 return -ENODEV; 1294 q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr]; 1295 1296 return __qdio_inspect_queue(q, bufnr, error); 1297 } 1298 EXPORT_SYMBOL_GPL(qdio_inspect_queue); 1299 1300 /** 1301 * qdio_stop_irq - disable interrupt processing for the device 1302 * @cdev: associated ccw_device for the qdio subchannel 1303 * 1304 * Return codes 1305 * 0 - interrupts were already disabled 1306 * 1 - interrupts successfully disabled 1307 */ 1308 int qdio_stop_irq(struct ccw_device *cdev) 1309 { 1310 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1311 1312 if (!irq_ptr) 1313 return -ENODEV; 1314 1315 if (test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state)) 1316 return 0; 1317 else 1318 return 1; 1319 } 1320 EXPORT_SYMBOL(qdio_stop_irq); 1321 1322 static int __init init_QDIO(void) 1323 { 1324 int rc; 1325 1326 rc = qdio_debug_init(); 1327 if (rc) 1328 return rc; 1329 rc = qdio_setup_init(); 1330 if (rc) 1331 goto out_debug; 1332 rc = qdio_thinint_init(); 1333 if (rc) 1334 goto out_cache; 1335 return 0; 1336 1337 out_cache: 1338 qdio_setup_exit(); 1339 out_debug: 1340 qdio_debug_exit(); 1341 return rc; 1342 } 1343 1344 static void __exit exit_QDIO(void) 1345 { 1346 qdio_thinint_exit(); 1347 qdio_setup_exit(); 1348 qdio_debug_exit(); 1349 } 1350 1351 module_init(init_QDIO); 1352 module_exit(exit_QDIO); 1353