1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Linux for s390 qdio support, buffer handling, qdio API and module support. 4 * 5 * Copyright IBM Corp. 2000, 2008 6 * Author(s): Utz Bacher <utz.bacher@de.ibm.com> 7 * Jan Glauber <jang@linux.vnet.ibm.com> 8 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com> 9 */ 10 #include <linux/module.h> 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/timer.h> 14 #include <linux/delay.h> 15 #include <linux/gfp.h> 16 #include <linux/io.h> 17 #include <linux/atomic.h> 18 #include <asm/debug.h> 19 #include <asm/qdio.h> 20 #include <asm/ipl.h> 21 22 #include "cio.h" 23 #include "css.h" 24 #include "device.h" 25 #include "qdio.h" 26 #include "qdio_debug.h" 27 28 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\ 29 "Jan Glauber <jang@linux.vnet.ibm.com>"); 30 MODULE_DESCRIPTION("QDIO base support"); 31 MODULE_LICENSE("GPL"); 32 33 static inline int do_siga_sync(unsigned long schid, 34 unsigned int out_mask, unsigned int in_mask, 35 unsigned int fc) 36 { 37 register unsigned long __fc asm ("0") = fc; 38 register unsigned long __schid asm ("1") = schid; 39 register unsigned long out asm ("2") = out_mask; 40 register unsigned long in asm ("3") = in_mask; 41 int cc; 42 43 asm volatile( 44 " siga 0\n" 45 " ipm %0\n" 46 " srl %0,28\n" 47 : "=d" (cc) 48 : "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc"); 49 return cc; 50 } 51 52 static inline int do_siga_input(unsigned long schid, unsigned int mask, 53 unsigned int fc) 54 { 55 register unsigned long __fc asm ("0") = fc; 56 register unsigned long __schid asm ("1") = schid; 57 register unsigned long __mask asm ("2") = mask; 58 int cc; 59 60 asm volatile( 61 " siga 0\n" 62 " ipm %0\n" 63 " srl %0,28\n" 64 : "=d" (cc) 65 : "d" (__fc), "d" (__schid), "d" (__mask) : "cc"); 66 return cc; 67 } 68 69 /** 70 * do_siga_output - perform SIGA-w/wt function 71 * @schid: subchannel id or in case of QEBSM the subchannel token 72 * @mask: which output queues to process 73 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer 74 * @fc: function code to perform 75 * @aob: asynchronous operation block 76 * 77 * Returns condition code. 78 * Note: For IQDC unicast queues only the highest priority queue is processed. 79 */ 80 static inline int do_siga_output(unsigned long schid, unsigned long mask, 81 unsigned int *bb, unsigned int fc, 82 unsigned long aob) 83 { 84 register unsigned long __fc asm("0") = fc; 85 register unsigned long __schid asm("1") = schid; 86 register unsigned long __mask asm("2") = mask; 87 register unsigned long __aob asm("3") = aob; 88 int cc; 89 90 asm volatile( 91 " siga 0\n" 92 " ipm %0\n" 93 " srl %0,28\n" 94 : "=d" (cc), "+d" (__fc), "+d" (__aob) 95 : "d" (__schid), "d" (__mask) 96 : "cc"); 97 *bb = __fc >> 31; 98 return cc; 99 } 100 101 /** 102 * qdio_do_eqbs - extract buffer states for QEBSM 103 * @q: queue to manipulate 104 * @state: state of the extracted buffers 105 * @start: buffer number to start at 106 * @count: count of buffers to examine 107 * @auto_ack: automatically acknowledge buffers 108 * 109 * Returns the number of successfully extracted equal buffer states. 110 * Stops processing if a state is different from the last buffers state. 
111 */ 112 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, 113 int start, int count, int auto_ack) 114 { 115 int tmp_count = count, tmp_start = start, nr = q->nr; 116 unsigned int ccq = 0; 117 118 qperf_inc(q, eqbs); 119 120 if (!q->is_input_q) 121 nr += q->irq_ptr->nr_input_qs; 122 again: 123 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, 124 auto_ack); 125 126 switch (ccq) { 127 case 0: 128 case 32: 129 /* all done, or next buffer state different */ 130 return count - tmp_count; 131 case 96: 132 /* not all buffers processed */ 133 qperf_inc(q, eqbs_partial); 134 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", 135 tmp_count); 136 return count - tmp_count; 137 case 97: 138 /* no buffer processed */ 139 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); 140 goto again; 141 default: 142 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); 143 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); 144 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 145 q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr, 146 q->first_to_kick, count, q->irq_ptr->int_parm); 147 return 0; 148 } 149 } 150 151 /** 152 * qdio_do_sqbs - set buffer states for QEBSM 153 * @q: queue to manipulate 154 * @state: new state of the buffers 155 * @start: first buffer number to change 156 * @count: how many buffers to change 157 * 158 * Returns the number of successfully changed buffers. 159 * Does retrying until the specified count of buffer states is set or an 160 * error occurs. 161 */ 162 static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, 163 int count) 164 { 165 unsigned int ccq = 0; 166 int tmp_count = count, tmp_start = start; 167 int nr = q->nr; 168 169 if (!count) 170 return 0; 171 qperf_inc(q, sqbs); 172 173 if (!q->is_input_q) 174 nr += q->irq_ptr->nr_input_qs; 175 again: 176 ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); 177 178 switch (ccq) { 179 case 0: 180 case 32: 181 /* all done, or active buffer adapter-owned */ 182 WARN_ON_ONCE(tmp_count); 183 return count - tmp_count; 184 case 96: 185 /* not all buffers processed */ 186 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); 187 qperf_inc(q, sqbs_partial); 188 goto again; 189 default: 190 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); 191 DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); 192 DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); 193 q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr, 194 q->first_to_kick, count, q->irq_ptr->int_parm); 195 return 0; 196 } 197 } 198 199 /* 200 * Returns number of examined buffers and their common state in *state. 201 * Requested number of buffers-to-examine must be > 0. 
202 */ 203 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, 204 unsigned char *state, unsigned int count, 205 int auto_ack, int merge_pending) 206 { 207 unsigned char __state = 0; 208 int i = 1; 209 210 if (is_qebsm(q)) 211 return qdio_do_eqbs(q, state, bufnr, count, auto_ack); 212 213 /* get initial state: */ 214 __state = q->slsb.val[bufnr]; 215 216 /* Bail out early if there is no work on the queue: */ 217 if (__state & SLSB_OWNER_CU) 218 goto out; 219 220 if (merge_pending && __state == SLSB_P_OUTPUT_PENDING) 221 __state = SLSB_P_OUTPUT_EMPTY; 222 223 for (; i < count; i++) { 224 bufnr = next_buf(bufnr); 225 226 /* merge PENDING into EMPTY: */ 227 if (merge_pending && 228 q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING && 229 __state == SLSB_P_OUTPUT_EMPTY) 230 continue; 231 232 /* stop if next state differs from initial state: */ 233 if (q->slsb.val[bufnr] != __state) 234 break; 235 } 236 237 out: 238 *state = __state; 239 return i; 240 } 241 242 static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, 243 unsigned char *state, int auto_ack) 244 { 245 return get_buf_states(q, bufnr, state, 1, auto_ack, 0); 246 } 247 248 /* wrap-around safe setting of slsb states, returns number of changed buffers */ 249 static inline int set_buf_states(struct qdio_q *q, int bufnr, 250 unsigned char state, int count) 251 { 252 int i; 253 254 if (is_qebsm(q)) 255 return qdio_do_sqbs(q, state, bufnr, count); 256 257 for (i = 0; i < count; i++) { 258 xchg(&q->slsb.val[bufnr], state); 259 bufnr = next_buf(bufnr); 260 } 261 return count; 262 } 263 264 static inline int set_buf_state(struct qdio_q *q, int bufnr, 265 unsigned char state) 266 { 267 return set_buf_states(q, bufnr, state, 1); 268 } 269 270 /* set slsb states to initial state */ 271 static void qdio_init_buf_states(struct qdio_irq *irq_ptr) 272 { 273 struct qdio_q *q; 274 int i; 275 276 for_each_input_queue(irq_ptr, q, i) 277 set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT, 278 QDIO_MAX_BUFFERS_PER_Q); 279 for_each_output_queue(irq_ptr, q, i) 280 set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT, 281 QDIO_MAX_BUFFERS_PER_Q); 282 } 283 284 static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, 285 unsigned int input) 286 { 287 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 288 unsigned int fc = QDIO_SIGA_SYNC; 289 int cc; 290 291 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 292 qperf_inc(q, siga_sync); 293 294 if (is_qebsm(q)) { 295 schid = q->irq_ptr->sch_token; 296 fc |= QDIO_SIGA_QEBSM_FLAG; 297 } 298 299 cc = do_siga_sync(schid, output, input, fc); 300 if (unlikely(cc)) 301 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); 302 return (cc) ? 
-EIO : 0; 303 } 304 305 static inline int qdio_siga_sync_q(struct qdio_q *q) 306 { 307 if (q->is_input_q) 308 return qdio_siga_sync(q, 0, q->mask); 309 else 310 return qdio_siga_sync(q, q->mask, 0); 311 } 312 313 static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit, 314 unsigned long aob) 315 { 316 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 317 unsigned int fc = QDIO_SIGA_WRITE; 318 u64 start_time = 0; 319 int retries = 0, cc; 320 unsigned long laob = 0; 321 322 if (aob) { 323 fc = QDIO_SIGA_WRITEQ; 324 laob = aob; 325 } 326 327 if (is_qebsm(q)) { 328 schid = q->irq_ptr->sch_token; 329 fc |= QDIO_SIGA_QEBSM_FLAG; 330 } 331 again: 332 cc = do_siga_output(schid, q->mask, busy_bit, fc, laob); 333 334 /* hipersocket busy condition */ 335 if (unlikely(*busy_bit)) { 336 retries++; 337 338 if (!start_time) { 339 start_time = get_tod_clock_fast(); 340 goto again; 341 } 342 if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE) 343 goto again; 344 } 345 if (retries) { 346 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, 347 "%4x cc2 BB1:%1d", SCH_NO(q), q->nr); 348 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries); 349 } 350 return cc; 351 } 352 353 static inline int qdio_siga_input(struct qdio_q *q) 354 { 355 unsigned long schid = *((u32 *) &q->irq_ptr->schid); 356 unsigned int fc = QDIO_SIGA_READ; 357 int cc; 358 359 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 360 qperf_inc(q, siga_read); 361 362 if (is_qebsm(q)) { 363 schid = q->irq_ptr->sch_token; 364 fc |= QDIO_SIGA_QEBSM_FLAG; 365 } 366 367 cc = do_siga_input(schid, q->mask, fc); 368 if (unlikely(cc)) 369 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); 370 return (cc) ? -EIO : 0; 371 } 372 373 #define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0) 374 #define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U) 375 376 static inline void qdio_sync_queues(struct qdio_q *q) 377 { 378 /* PCI capable outbound queues will also be scanned so sync them too */ 379 if (pci_out_supported(q->irq_ptr)) 380 qdio_siga_sync_all(q); 381 else 382 qdio_siga_sync_q(q); 383 } 384 385 int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, 386 unsigned char *state) 387 { 388 if (need_siga_sync(q)) 389 qdio_siga_sync_q(q); 390 return get_buf_state(q, bufnr, state, 0); 391 } 392 393 static inline void qdio_stop_polling(struct qdio_q *q) 394 { 395 if (!q->u.in.polling) 396 return; 397 398 q->u.in.polling = 0; 399 qperf_inc(q, stop_polling); 400 401 /* show the card that we are not polling anymore */ 402 if (is_qebsm(q)) { 403 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, 404 q->u.in.ack_count); 405 q->u.in.ack_count = 0; 406 } else 407 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); 408 } 409 410 static inline void account_sbals(struct qdio_q *q, unsigned int count) 411 { 412 int pos; 413 414 q->q_stats.nr_sbal_total += count; 415 if (count == QDIO_MAX_BUFFERS_MASK) { 416 q->q_stats.nr_sbals[7]++; 417 return; 418 } 419 pos = ilog2(count); 420 q->q_stats.nr_sbals[pos]++; 421 } 422 423 static void process_buffer_error(struct qdio_q *q, unsigned int start, 424 int count) 425 { 426 unsigned char state = (q->is_input_q) ? 
SLSB_P_INPUT_NOT_INIT : 427 SLSB_P_OUTPUT_NOT_INIT; 428 429 q->qdio_error = QDIO_ERROR_SLSB_STATE; 430 431 /* special handling for no target buffer empty */ 432 if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q && 433 q->sbal[start]->element[15].sflags == 0x10) { 434 qperf_inc(q, target_full); 435 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); 436 goto set; 437 } 438 439 DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); 440 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); 441 DBF_ERROR("FTC:%3d C:%3d", start, count); 442 DBF_ERROR("F14:%2x F15:%2x", 443 q->sbal[start]->element[14].sflags, 444 q->sbal[start]->element[15].sflags); 445 446 set: 447 /* 448 * Interrupts may be avoided as long as the error is present 449 * so change the buffer state immediately to avoid starvation. 450 */ 451 set_buf_states(q, start, state, count); 452 } 453 454 static inline void inbound_primed(struct qdio_q *q, unsigned int start, 455 int count) 456 { 457 int new; 458 459 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count); 460 461 /* for QEBSM the ACK was already set by EQBS */ 462 if (is_qebsm(q)) { 463 if (!q->u.in.polling) { 464 q->u.in.polling = 1; 465 q->u.in.ack_count = count; 466 q->u.in.ack_start = start; 467 return; 468 } 469 470 /* delete the previous ACK's */ 471 set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, 472 q->u.in.ack_count); 473 q->u.in.ack_count = count; 474 q->u.in.ack_start = start; 475 return; 476 } 477 478 /* 479 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling 480 * or by the next inbound run. 481 */ 482 new = add_buf(start, count - 1); 483 if (q->u.in.polling) { 484 /* reset the previous ACK but first set the new one */ 485 set_buf_state(q, new, SLSB_P_INPUT_ACK); 486 set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); 487 } else { 488 q->u.in.polling = 1; 489 set_buf_state(q, new, SLSB_P_INPUT_ACK); 490 } 491 492 q->u.in.ack_start = new; 493 count--; 494 if (!count) 495 return; 496 /* need to change ALL buffers to get more interrupts */ 497 set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count); 498 } 499 500 static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) 501 { 502 unsigned char state = 0; 503 int count; 504 505 q->timestamp = get_tod_clock_fast(); 506 507 /* 508 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 509 * would return 0. 510 */ 511 count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); 512 if (!count) 513 return 0; 514 515 /* 516 * No siga sync here, as a PCI or we after a thin interrupt 517 * already sync'ed the queues. 
518 */ 519 count = get_buf_states(q, start, &state, count, 1, 0); 520 if (!count) 521 return 0; 522 523 switch (state) { 524 case SLSB_P_INPUT_PRIMED: 525 inbound_primed(q, start, count); 526 if (atomic_sub_return(count, &q->nr_buf_used) == 0) 527 qperf_inc(q, inbound_queue_full); 528 if (q->irq_ptr->perf_stat_enabled) 529 account_sbals(q, count); 530 return count; 531 case SLSB_P_INPUT_ERROR: 532 process_buffer_error(q, start, count); 533 if (atomic_sub_return(count, &q->nr_buf_used) == 0) 534 qperf_inc(q, inbound_queue_full); 535 if (q->irq_ptr->perf_stat_enabled) 536 account_sbals_error(q, count); 537 return count; 538 case SLSB_CU_INPUT_EMPTY: 539 case SLSB_P_INPUT_NOT_INIT: 540 case SLSB_P_INPUT_ACK: 541 if (q->irq_ptr->perf_stat_enabled) 542 q->q_stats.nr_sbal_nop++; 543 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x", 544 q->nr, start); 545 return 0; 546 default: 547 WARN_ON_ONCE(1); 548 return 0; 549 } 550 } 551 552 static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start) 553 { 554 int count; 555 556 count = get_inbound_buffer_frontier(q, start); 557 558 if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) 559 q->u.in.timestamp = get_tod_clock(); 560 561 return count; 562 } 563 564 static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) 565 { 566 unsigned char state = 0; 567 568 if (!atomic_read(&q->nr_buf_used)) 569 return 1; 570 571 if (need_siga_sync(q)) 572 qdio_siga_sync_q(q); 573 get_buf_state(q, start, &state, 0); 574 575 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) 576 /* more work coming */ 577 return 0; 578 579 if (is_thinint_irq(q->irq_ptr)) 580 return 1; 581 582 /* don't poll under z/VM */ 583 if (MACHINE_IS_VM) 584 return 1; 585 586 /* 587 * At this point we know, that inbound first_to_check 588 * has (probably) not moved (see qdio_inbound_processing). 
589 */ 590 if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 591 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start); 592 return 1; 593 } else 594 return 0; 595 } 596 597 static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) 598 { 599 unsigned char state = 0; 600 int j, b = start; 601 602 for (j = 0; j < count; ++j) { 603 get_buf_state(q, b, &state, 0); 604 if (state == SLSB_P_OUTPUT_PENDING) { 605 struct qaob *aob = q->u.out.aobs[b]; 606 if (aob == NULL) 607 continue; 608 609 q->u.out.sbal_state[b].flags |= 610 QDIO_OUTBUF_STATE_FLAG_PENDING; 611 q->u.out.aobs[b] = NULL; 612 } 613 b = next_buf(b); 614 } 615 } 616 617 static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, 618 int bufnr) 619 { 620 unsigned long phys_aob = 0; 621 622 if (!q->aobs[bufnr]) { 623 struct qaob *aob = qdio_allocate_aob(); 624 q->aobs[bufnr] = aob; 625 } 626 if (q->aobs[bufnr]) { 627 q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; 628 phys_aob = virt_to_phys(q->aobs[bufnr]); 629 WARN_ON_ONCE(phys_aob & 0xFF); 630 } 631 632 q->sbal_state[bufnr].flags = 0; 633 return phys_aob; 634 } 635 636 static void qdio_kick_handler(struct qdio_q *q, unsigned int count) 637 { 638 int start = q->first_to_kick; 639 640 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 641 return; 642 643 if (q->is_input_q) { 644 qperf_inc(q, inbound_handler); 645 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); 646 } else { 647 qperf_inc(q, outbound_handler); 648 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", 649 start, count); 650 } 651 652 q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, 653 q->irq_ptr->int_parm); 654 655 /* for the next time */ 656 q->first_to_kick = add_buf(start, count); 657 q->qdio_error = 0; 658 } 659 660 static inline int qdio_tasklet_schedule(struct qdio_q *q) 661 { 662 if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { 663 tasklet_schedule(&q->tasklet); 664 return 0; 665 } 666 return -EPERM; 667 } 668 669 static void __qdio_inbound_processing(struct qdio_q *q) 670 { 671 unsigned int start = q->first_to_check; 672 int count; 673 674 qperf_inc(q, tasklet_inbound); 675 676 count = qdio_inbound_q_moved(q, start); 677 if (count == 0) 678 return; 679 680 start = add_buf(start, count); 681 q->first_to_check = start; 682 qdio_kick_handler(q, count); 683 684 if (!qdio_inbound_q_done(q, start)) { 685 /* means poll time is not yet over */ 686 qperf_inc(q, tasklet_inbound_resched); 687 if (!qdio_tasklet_schedule(q)) 688 return; 689 } 690 691 qdio_stop_polling(q); 692 /* 693 * We need to check again to not lose initiative after 694 * resetting the ACK state. 
695 */ 696 if (!qdio_inbound_q_done(q, start)) { 697 qperf_inc(q, tasklet_inbound_resched2); 698 qdio_tasklet_schedule(q); 699 } 700 } 701 702 void qdio_inbound_processing(unsigned long data) 703 { 704 struct qdio_q *q = (struct qdio_q *)data; 705 __qdio_inbound_processing(q); 706 } 707 708 static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) 709 { 710 unsigned char state = 0; 711 int count; 712 713 q->timestamp = get_tod_clock_fast(); 714 715 if (need_siga_sync(q)) 716 if (((queue_type(q) != QDIO_IQDIO_QFMT) && 717 !pci_out_supported(q->irq_ptr)) || 718 (queue_type(q) == QDIO_IQDIO_QFMT && 719 multicast_outbound(q))) 720 qdio_siga_sync_q(q); 721 722 count = atomic_read(&q->nr_buf_used); 723 if (!count) 724 return 0; 725 726 count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq); 727 if (!count) 728 return 0; 729 730 switch (state) { 731 case SLSB_P_OUTPUT_EMPTY: 732 case SLSB_P_OUTPUT_PENDING: 733 /* the adapter got it */ 734 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, 735 "out empty:%1d %02x", q->nr, count); 736 737 atomic_sub(count, &q->nr_buf_used); 738 if (q->irq_ptr->perf_stat_enabled) 739 account_sbals(q, count); 740 return count; 741 case SLSB_P_OUTPUT_ERROR: 742 process_buffer_error(q, start, count); 743 atomic_sub(count, &q->nr_buf_used); 744 if (q->irq_ptr->perf_stat_enabled) 745 account_sbals_error(q, count); 746 return count; 747 case SLSB_CU_OUTPUT_PRIMED: 748 /* the adapter has not fetched the output yet */ 749 if (q->irq_ptr->perf_stat_enabled) 750 q->q_stats.nr_sbal_nop++; 751 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", 752 q->nr); 753 return 0; 754 case SLSB_P_OUTPUT_NOT_INIT: 755 case SLSB_P_OUTPUT_HALTED: 756 return 0; 757 default: 758 WARN_ON_ONCE(1); 759 return 0; 760 } 761 } 762 763 /* all buffers processed? 
*/ 764 static inline int qdio_outbound_q_done(struct qdio_q *q) 765 { 766 return atomic_read(&q->nr_buf_used) == 0; 767 } 768 769 static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) 770 { 771 int count; 772 773 count = get_outbound_buffer_frontier(q, start); 774 775 if (count) { 776 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); 777 if (q->u.out.use_cq) 778 qdio_handle_aobs(q, start, count); 779 } 780 781 return count; 782 } 783 784 static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) 785 { 786 int retries = 0, cc; 787 unsigned int busy_bit; 788 789 if (!need_siga_out(q)) 790 return 0; 791 792 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); 793 retry: 794 qperf_inc(q, siga_write); 795 796 cc = qdio_siga_output(q, &busy_bit, aob); 797 switch (cc) { 798 case 0: 799 break; 800 case 2: 801 if (busy_bit) { 802 while (++retries < QDIO_BUSY_BIT_RETRIES) { 803 mdelay(QDIO_BUSY_BIT_RETRY_DELAY); 804 goto retry; 805 } 806 DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); 807 cc = -EBUSY; 808 } else { 809 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); 810 cc = -ENOBUFS; 811 } 812 break; 813 case 1: 814 case 3: 815 DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); 816 cc = -EIO; 817 break; 818 } 819 if (retries) { 820 DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); 821 DBF_ERROR("count:%u", retries); 822 } 823 return cc; 824 } 825 826 static void __qdio_outbound_processing(struct qdio_q *q) 827 { 828 unsigned int start = q->first_to_check; 829 int count; 830 831 qperf_inc(q, tasklet_outbound); 832 WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0); 833 834 count = qdio_outbound_q_moved(q, start); 835 if (count) { 836 q->first_to_check = add_buf(start, count); 837 qdio_kick_handler(q, count); 838 } 839 840 if (queue_type(q) == QDIO_ZFCP_QFMT && !pci_out_supported(q->irq_ptr) && 841 !qdio_outbound_q_done(q)) 842 goto sched; 843 844 if (q->u.out.pci_out_enabled) 845 return; 846 847 /* 848 * Now we know that queue type is either qeth without pci enabled 849 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY 850 * is noticed and outbound_handler is called after some time. 
851 */ 852 if (qdio_outbound_q_done(q)) 853 del_timer_sync(&q->u.out.timer); 854 else 855 if (!timer_pending(&q->u.out.timer) && 856 likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) 857 mod_timer(&q->u.out.timer, jiffies + 10 * HZ); 858 return; 859 860 sched: 861 qdio_tasklet_schedule(q); 862 } 863 864 /* outbound tasklet */ 865 void qdio_outbound_processing(unsigned long data) 866 { 867 struct qdio_q *q = (struct qdio_q *)data; 868 __qdio_outbound_processing(q); 869 } 870 871 void qdio_outbound_timer(struct timer_list *t) 872 { 873 struct qdio_q *q = from_timer(q, t, u.out.timer); 874 875 qdio_tasklet_schedule(q); 876 } 877 878 static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq) 879 { 880 struct qdio_q *out; 881 int i; 882 883 if (!pci_out_supported(irq) || !irq->scan_threshold) 884 return; 885 886 for_each_output_queue(irq, out, i) 887 if (!qdio_outbound_q_done(out)) 888 qdio_tasklet_schedule(out); 889 } 890 891 static void __tiqdio_inbound_processing(struct qdio_q *q) 892 { 893 unsigned int start = q->first_to_check; 894 int count; 895 896 qperf_inc(q, tasklet_inbound); 897 if (need_siga_sync(q) && need_siga_sync_after_ai(q)) 898 qdio_sync_queues(q); 899 900 /* The interrupt could be caused by a PCI request: */ 901 qdio_check_outbound_pci_queues(q->irq_ptr); 902 903 count = qdio_inbound_q_moved(q, start); 904 if (count == 0) 905 return; 906 907 start = add_buf(start, count); 908 q->first_to_check = start; 909 qdio_kick_handler(q, count); 910 911 if (!qdio_inbound_q_done(q, start)) { 912 qperf_inc(q, tasklet_inbound_resched); 913 if (!qdio_tasklet_schedule(q)) 914 return; 915 } 916 917 qdio_stop_polling(q); 918 /* 919 * We need to check again to not lose initiative after 920 * resetting the ACK state. 921 */ 922 if (!qdio_inbound_q_done(q, start)) { 923 qperf_inc(q, tasklet_inbound_resched2); 924 qdio_tasklet_schedule(q); 925 } 926 } 927 928 void tiqdio_inbound_processing(unsigned long data) 929 { 930 struct qdio_q *q = (struct qdio_q *)data; 931 __tiqdio_inbound_processing(q); 932 } 933 934 static inline void qdio_set_state(struct qdio_irq *irq_ptr, 935 enum qdio_irq_states state) 936 { 937 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); 938 939 irq_ptr->state = state; 940 mb(); 941 } 942 943 static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) 944 { 945 if (irb->esw.esw0.erw.cons) { 946 DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); 947 DBF_ERROR_HEX(irb, 64); 948 DBF_ERROR_HEX(irb->ecw, 64); 949 } 950 } 951 952 /* PCI interrupt handler */ 953 static void qdio_int_handler_pci(struct qdio_irq *irq_ptr) 954 { 955 int i; 956 struct qdio_q *q; 957 958 if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 959 return; 960 961 for_each_input_queue(irq_ptr, q, i) { 962 if (q->u.in.queue_start_poll) { 963 /* skip if polling is enabled or already in work */ 964 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, 965 &q->u.in.queue_irq_state)) { 966 qperf_inc(q, int_discarded); 967 continue; 968 } 969 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, 970 q->irq_ptr->int_parm); 971 } else { 972 tasklet_schedule(&q->tasklet); 973 } 974 } 975 976 if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold) 977 return; 978 979 for_each_output_queue(irq_ptr, q, i) { 980 if (qdio_outbound_q_done(q)) 981 continue; 982 if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) 983 qdio_siga_sync_q(q); 984 qdio_tasklet_schedule(q); 985 } 986 } 987 988 static void qdio_handle_activate_check(struct ccw_device *cdev, 989 unsigned long intparm, int cstat, 
int dstat) 990 { 991 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 992 struct qdio_q *q; 993 int count; 994 995 DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); 996 DBF_ERROR("intp :%lx", intparm); 997 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 998 999 if (irq_ptr->nr_input_qs) { 1000 q = irq_ptr->input_qs[0]; 1001 } else if (irq_ptr->nr_output_qs) { 1002 q = irq_ptr->output_qs[0]; 1003 } else { 1004 dump_stack(); 1005 goto no_handler; 1006 } 1007 1008 count = sub_buf(q->first_to_check, q->first_to_kick); 1009 q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE, 1010 q->nr, q->first_to_kick, count, irq_ptr->int_parm); 1011 no_handler: 1012 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 1013 /* 1014 * In case of z/VM LGR (Live Guest Migration) QDIO recovery will happen. 1015 * Therefore we call the LGR detection function here. 1016 */ 1017 lgr_info_log(); 1018 } 1019 1020 static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, 1021 int dstat) 1022 { 1023 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1024 1025 DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); 1026 1027 if (cstat) 1028 goto error; 1029 if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END)) 1030 goto error; 1031 if (!(dstat & DEV_STAT_DEV_END)) 1032 goto error; 1033 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); 1034 return; 1035 1036 error: 1037 DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); 1038 DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); 1039 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 1040 } 1041 1042 /* qdio interrupt handler */ 1043 void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, 1044 struct irb *irb) 1045 { 1046 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1047 struct subchannel_id schid; 1048 int cstat, dstat; 1049 1050 if (!intparm || !irq_ptr) { 1051 ccw_device_get_schid(cdev, &schid); 1052 DBF_ERROR("qint:%4x", schid.sch_no); 1053 return; 1054 } 1055 1056 if (irq_ptr->perf_stat_enabled) 1057 irq_ptr->perf_stat.qdio_int++; 1058 1059 if (IS_ERR(irb)) { 1060 DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); 1061 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); 1062 wake_up(&cdev->private->wait_q); 1063 return; 1064 } 1065 qdio_irq_check_sense(irq_ptr, irb); 1066 cstat = irb->scsw.cmd.cstat; 1067 dstat = irb->scsw.cmd.dstat; 1068 1069 switch (irq_ptr->state) { 1070 case QDIO_IRQ_STATE_INACTIVE: 1071 qdio_establish_handle_irq(cdev, cstat, dstat); 1072 break; 1073 case QDIO_IRQ_STATE_CLEANUP: 1074 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1075 break; 1076 case QDIO_IRQ_STATE_ESTABLISHED: 1077 case QDIO_IRQ_STATE_ACTIVE: 1078 if (cstat & SCHN_STAT_PCI) { 1079 qdio_int_handler_pci(irq_ptr); 1080 return; 1081 } 1082 if (cstat || dstat) 1083 qdio_handle_activate_check(cdev, intparm, cstat, 1084 dstat); 1085 break; 1086 case QDIO_IRQ_STATE_STOPPED: 1087 break; 1088 default: 1089 WARN_ON_ONCE(1); 1090 } 1091 wake_up(&cdev->private->wait_q); 1092 } 1093 1094 /** 1095 * qdio_get_ssqd_desc - get qdio subchannel description 1096 * @cdev: ccw device to get description for 1097 * @data: where to store the ssqd 1098 * 1099 * Returns 0 or an error code. The results of the chsc are stored in the 1100 * specified structure. 
1101 */ 1102 int qdio_get_ssqd_desc(struct ccw_device *cdev, 1103 struct qdio_ssqd_desc *data) 1104 { 1105 struct subchannel_id schid; 1106 1107 if (!cdev || !cdev->private) 1108 return -EINVAL; 1109 1110 ccw_device_get_schid(cdev, &schid); 1111 DBF_EVENT("get ssqd:%4x", schid.sch_no); 1112 return qdio_setup_get_ssqd(NULL, &schid, data); 1113 } 1114 EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); 1115 1116 static void qdio_shutdown_queues(struct ccw_device *cdev) 1117 { 1118 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1119 struct qdio_q *q; 1120 int i; 1121 1122 for_each_input_queue(irq_ptr, q, i) 1123 tasklet_kill(&q->tasklet); 1124 1125 for_each_output_queue(irq_ptr, q, i) { 1126 del_timer_sync(&q->u.out.timer); 1127 tasklet_kill(&q->tasklet); 1128 } 1129 } 1130 1131 /** 1132 * qdio_shutdown - shut down a qdio subchannel 1133 * @cdev: associated ccw device 1134 * @how: use halt or clear to shutdown 1135 */ 1136 int qdio_shutdown(struct ccw_device *cdev, int how) 1137 { 1138 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1139 struct subchannel_id schid; 1140 int rc; 1141 1142 if (!irq_ptr) 1143 return -ENODEV; 1144 1145 WARN_ON_ONCE(irqs_disabled()); 1146 ccw_device_get_schid(cdev, &schid); 1147 DBF_EVENT("qshutdown:%4x", schid.sch_no); 1148 1149 mutex_lock(&irq_ptr->setup_mutex); 1150 /* 1151 * Subchannel was already shot down. We cannot prevent being called 1152 * twice since cio may trigger a shutdown asynchronously. 1153 */ 1154 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 1155 mutex_unlock(&irq_ptr->setup_mutex); 1156 return 0; 1157 } 1158 1159 /* 1160 * Indicate that the device is going down. Scheduling the queue 1161 * tasklets is forbidden from here on. 1162 */ 1163 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); 1164 1165 tiqdio_remove_input_queues(irq_ptr); 1166 qdio_shutdown_queues(cdev); 1167 qdio_shutdown_debug_entries(irq_ptr); 1168 1169 /* cleanup subchannel */ 1170 spin_lock_irq(get_ccwdev_lock(cdev)); 1171 1172 if (how & QDIO_FLAG_CLEANUP_USING_CLEAR) 1173 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); 1174 else 1175 /* default behaviour is halt */ 1176 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); 1177 if (rc) { 1178 DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); 1179 DBF_ERROR("rc:%4d", rc); 1180 goto no_cleanup; 1181 } 1182 1183 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); 1184 spin_unlock_irq(get_ccwdev_lock(cdev)); 1185 wait_event_interruptible_timeout(cdev->private->wait_q, 1186 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || 1187 irq_ptr->state == QDIO_IRQ_STATE_ERR, 1188 10 * HZ); 1189 spin_lock_irq(get_ccwdev_lock(cdev)); 1190 1191 no_cleanup: 1192 qdio_shutdown_thinint(irq_ptr); 1193 1194 /* restore interrupt handler */ 1195 if ((void *)cdev->handler == (void *)qdio_int_handler) { 1196 cdev->handler = irq_ptr->orig_handler; 1197 cdev->private->intparm = 0; 1198 } 1199 spin_unlock_irq(get_ccwdev_lock(cdev)); 1200 1201 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1202 mutex_unlock(&irq_ptr->setup_mutex); 1203 if (rc) 1204 return rc; 1205 return 0; 1206 } 1207 EXPORT_SYMBOL_GPL(qdio_shutdown); 1208 1209 /** 1210 * qdio_free - free data structures for a qdio subchannel 1211 * @cdev: associated ccw device 1212 */ 1213 int qdio_free(struct ccw_device *cdev) 1214 { 1215 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1216 struct subchannel_id schid; 1217 1218 if (!irq_ptr) 1219 return -ENODEV; 1220 1221 ccw_device_get_schid(cdev, &schid); 1222 DBF_EVENT("qfree:%4x", schid.sch_no); 1223 DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned"); 1224 
mutex_lock(&irq_ptr->setup_mutex); 1225 1226 irq_ptr->debug_area = NULL; 1227 cdev->private->qdio_data = NULL; 1228 mutex_unlock(&irq_ptr->setup_mutex); 1229 1230 qdio_release_memory(irq_ptr); 1231 return 0; 1232 } 1233 EXPORT_SYMBOL_GPL(qdio_free); 1234 1235 /** 1236 * qdio_allocate - allocate qdio queues and associated data 1237 * @init_data: initialization data 1238 */ 1239 int qdio_allocate(struct qdio_initialize *init_data) 1240 { 1241 struct subchannel_id schid; 1242 struct qdio_irq *irq_ptr; 1243 1244 ccw_device_get_schid(init_data->cdev, &schid); 1245 DBF_EVENT("qallocate:%4x", schid.sch_no); 1246 1247 if ((init_data->no_input_qs && !init_data->input_handler) || 1248 (init_data->no_output_qs && !init_data->output_handler)) 1249 return -EINVAL; 1250 1251 if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) || 1252 (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)) 1253 return -EINVAL; 1254 1255 if ((!init_data->input_sbal_addr_array) || 1256 (!init_data->output_sbal_addr_array)) 1257 return -EINVAL; 1258 1259 /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ 1260 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1261 if (!irq_ptr) 1262 goto out_err; 1263 1264 mutex_init(&irq_ptr->setup_mutex); 1265 if (qdio_allocate_dbf(init_data, irq_ptr)) 1266 goto out_rel; 1267 1268 /* 1269 * Allocate a page for the chsc calls in qdio_establish. 1270 * Must be pre-allocated since a zfcp recovery will call 1271 * qdio_establish. In case of low memory and swap on a zfcp disk 1272 * we may not be able to allocate memory otherwise. 1273 */ 1274 irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL); 1275 if (!irq_ptr->chsc_page) 1276 goto out_rel; 1277 1278 /* qdr is used in ccw1.cda which is u32 */ 1279 irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1280 if (!irq_ptr->qdr) 1281 goto out_rel; 1282 1283 if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, 1284 init_data->no_output_qs)) 1285 goto out_rel; 1286 1287 init_data->cdev->private->qdio_data = irq_ptr; 1288 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); 1289 return 0; 1290 out_rel: 1291 qdio_release_memory(irq_ptr); 1292 out_err: 1293 return -ENOMEM; 1294 } 1295 EXPORT_SYMBOL_GPL(qdio_allocate); 1296 1297 static void qdio_detect_hsicq(struct qdio_irq *irq_ptr) 1298 { 1299 struct qdio_q *q = irq_ptr->input_qs[0]; 1300 int i, use_cq = 0; 1301 1302 if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT) 1303 use_cq = 1; 1304 1305 for_each_output_queue(irq_ptr, q, i) { 1306 if (use_cq) { 1307 if (multicast_outbound(q)) 1308 continue; 1309 if (qdio_enable_async_operation(&q->u.out) < 0) { 1310 use_cq = 0; 1311 continue; 1312 } 1313 } else 1314 qdio_disable_async_operation(&q->u.out); 1315 } 1316 DBF_EVENT("use_cq:%d", use_cq); 1317 } 1318 1319 /** 1320 * qdio_establish - establish queues on a qdio subchannel 1321 * @init_data: initialization data 1322 */ 1323 int qdio_establish(struct qdio_initialize *init_data) 1324 { 1325 struct ccw_device *cdev = init_data->cdev; 1326 struct subchannel_id schid; 1327 struct qdio_irq *irq_ptr; 1328 int rc; 1329 1330 ccw_device_get_schid(cdev, &schid); 1331 DBF_EVENT("qestablish:%4x", schid.sch_no); 1332 1333 irq_ptr = cdev->private->qdio_data; 1334 if (!irq_ptr) 1335 return -ENODEV; 1336 1337 mutex_lock(&irq_ptr->setup_mutex); 1338 qdio_setup_irq(init_data); 1339 1340 rc = qdio_establish_thinint(irq_ptr); 1341 if (rc) { 1342 mutex_unlock(&irq_ptr->setup_mutex); 1343 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1344 return rc; 1345 } 1346 1347 /* establish 
q */ 1348 irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd; 1349 irq_ptr->ccw.flags = CCW_FLAG_SLI; 1350 irq_ptr->ccw.count = irq_ptr->equeue.count; 1351 irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr); 1352 1353 spin_lock_irq(get_ccwdev_lock(cdev)); 1354 ccw_device_set_options_mask(cdev, 0); 1355 1356 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); 1357 spin_unlock_irq(get_ccwdev_lock(cdev)); 1358 if (rc) { 1359 DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); 1360 DBF_ERROR("rc:%4x", rc); 1361 mutex_unlock(&irq_ptr->setup_mutex); 1362 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1363 return rc; 1364 } 1365 1366 wait_event_interruptible_timeout(cdev->private->wait_q, 1367 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || 1368 irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ); 1369 1370 if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) { 1371 mutex_unlock(&irq_ptr->setup_mutex); 1372 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 1373 return -EIO; 1374 } 1375 1376 qdio_setup_ssqd_info(irq_ptr); 1377 1378 qdio_detect_hsicq(irq_ptr); 1379 1380 /* qebsm is now setup if available, initialize buffer states */ 1381 qdio_init_buf_states(irq_ptr); 1382 1383 mutex_unlock(&irq_ptr->setup_mutex); 1384 qdio_print_subchannel_info(irq_ptr, cdev); 1385 qdio_setup_debug_entries(irq_ptr, cdev); 1386 return 0; 1387 } 1388 EXPORT_SYMBOL_GPL(qdio_establish); 1389 1390 /** 1391 * qdio_activate - activate queues on a qdio subchannel 1392 * @cdev: associated cdev 1393 */ 1394 int qdio_activate(struct ccw_device *cdev) 1395 { 1396 struct subchannel_id schid; 1397 struct qdio_irq *irq_ptr; 1398 int rc; 1399 1400 ccw_device_get_schid(cdev, &schid); 1401 DBF_EVENT("qactivate:%4x", schid.sch_no); 1402 1403 irq_ptr = cdev->private->qdio_data; 1404 if (!irq_ptr) 1405 return -ENODEV; 1406 1407 mutex_lock(&irq_ptr->setup_mutex); 1408 if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) { 1409 rc = -EBUSY; 1410 goto out; 1411 } 1412 1413 irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd; 1414 irq_ptr->ccw.flags = CCW_FLAG_SLI; 1415 irq_ptr->ccw.count = irq_ptr->aqueue.count; 1416 irq_ptr->ccw.cda = 0; 1417 1418 spin_lock_irq(get_ccwdev_lock(cdev)); 1419 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); 1420 1421 rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, 1422 0, DOIO_DENY_PREFETCH); 1423 spin_unlock_irq(get_ccwdev_lock(cdev)); 1424 if (rc) { 1425 DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); 1426 DBF_ERROR("rc:%4x", rc); 1427 goto out; 1428 } 1429 1430 if (is_thinint_irq(irq_ptr)) 1431 tiqdio_add_input_queues(irq_ptr); 1432 1433 /* wait for subchannel to become active */ 1434 msleep(5); 1435 1436 switch (irq_ptr->state) { 1437 case QDIO_IRQ_STATE_STOPPED: 1438 case QDIO_IRQ_STATE_ERR: 1439 rc = -EIO; 1440 break; 1441 default: 1442 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); 1443 rc = 0; 1444 } 1445 out: 1446 mutex_unlock(&irq_ptr->setup_mutex); 1447 return rc; 1448 } 1449 EXPORT_SYMBOL_GPL(qdio_activate); 1450 1451 static inline int buf_in_between(int bufnr, int start, int count) 1452 { 1453 int end = add_buf(start, count); 1454 1455 if (end > start) { 1456 if (bufnr >= start && bufnr < end) 1457 return 1; 1458 else 1459 return 0; 1460 } 1461 1462 /* wrap-around case */ 1463 if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) || 1464 (bufnr < end)) 1465 return 1; 1466 else 1467 return 0; 1468 } 1469 1470 /** 1471 * handle_inbound - reset processed input buffers 1472 * @q: queue containing the buffers 1473 * @callflags: flags 1474 * @bufnr: first buffer to process 1475 * @count: 
how many buffers are emptied 1476 */ 1477 static int handle_inbound(struct qdio_q *q, unsigned int callflags, 1478 int bufnr, int count) 1479 { 1480 int diff; 1481 1482 qperf_inc(q, inbound_call); 1483 1484 if (!q->u.in.polling) 1485 goto set; 1486 1487 /* protect against stop polling setting an ACK for an emptied slsb */ 1488 if (count == QDIO_MAX_BUFFERS_PER_Q) { 1489 /* overwriting everything, just delete polling status */ 1490 q->u.in.polling = 0; 1491 q->u.in.ack_count = 0; 1492 goto set; 1493 } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { 1494 if (is_qebsm(q)) { 1495 /* partial overwrite, just update ack_start */ 1496 diff = add_buf(bufnr, count); 1497 diff = sub_buf(diff, q->u.in.ack_start); 1498 q->u.in.ack_count -= diff; 1499 if (q->u.in.ack_count <= 0) { 1500 q->u.in.polling = 0; 1501 q->u.in.ack_count = 0; 1502 goto set; 1503 } 1504 q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); 1505 } 1506 else 1507 /* the only ACK will be deleted, so stop polling */ 1508 q->u.in.polling = 0; 1509 } 1510 1511 set: 1512 count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); 1513 atomic_add(count, &q->nr_buf_used); 1514 1515 if (need_siga_in(q)) 1516 return qdio_siga_input(q); 1517 1518 return 0; 1519 } 1520 1521 /** 1522 * handle_outbound - process filled outbound buffers 1523 * @q: queue containing the buffers 1524 * @callflags: flags 1525 * @bufnr: first buffer to process 1526 * @count: how many buffers are filled 1527 */ 1528 static int handle_outbound(struct qdio_q *q, unsigned int callflags, 1529 int bufnr, int count) 1530 { 1531 const unsigned int scan_threshold = q->irq_ptr->scan_threshold; 1532 unsigned char state = 0; 1533 int used, rc = 0; 1534 1535 qperf_inc(q, outbound_call); 1536 1537 count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); 1538 used = atomic_add_return(count, &q->nr_buf_used); 1539 1540 if (used == QDIO_MAX_BUFFERS_PER_Q) 1541 qperf_inc(q, outbound_queue_full); 1542 1543 if (callflags & QDIO_FLAG_PCI_OUT) { 1544 q->u.out.pci_out_enabled = 1; 1545 qperf_inc(q, pci_request_int); 1546 } else 1547 q->u.out.pci_out_enabled = 0; 1548 1549 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1550 unsigned long phys_aob = 0; 1551 1552 /* One SIGA-W per buffer required for unicast HSI */ 1553 WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); 1554 1555 if (q->u.out.use_cq) 1556 phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); 1557 1558 rc = qdio_kick_outbound_q(q, phys_aob); 1559 } else if (need_siga_sync(q)) { 1560 rc = qdio_siga_sync_q(q); 1561 } else if (count < QDIO_MAX_BUFFERS_PER_Q && 1562 get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && 1563 state == SLSB_CU_OUTPUT_PRIMED) { 1564 /* The previous buffer is not processed yet, tack on. 
*/ 1565 qperf_inc(q, fast_requeue); 1566 } else { 1567 rc = qdio_kick_outbound_q(q, 0); 1568 } 1569 1570 /* Let drivers implement their own completion scanning: */ 1571 if (!scan_threshold) 1572 return rc; 1573 1574 /* in case of SIGA errors we must process the error immediately */ 1575 if (used >= scan_threshold || rc) 1576 qdio_tasklet_schedule(q); 1577 else 1578 /* free the SBALs in case of no further traffic */ 1579 if (!timer_pending(&q->u.out.timer) && 1580 likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) 1581 mod_timer(&q->u.out.timer, jiffies + HZ); 1582 return rc; 1583 } 1584 1585 /** 1586 * do_QDIO - process input or output buffers 1587 * @cdev: associated ccw_device for the qdio subchannel 1588 * @callflags: input or output and special flags from the program 1589 * @q_nr: queue number 1590 * @bufnr: buffer number 1591 * @count: how many buffers to process 1592 */ 1593 int do_QDIO(struct ccw_device *cdev, unsigned int callflags, 1594 int q_nr, unsigned int bufnr, unsigned int count) 1595 { 1596 struct qdio_irq *irq_ptr; 1597 1598 if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) 1599 return -EINVAL; 1600 1601 irq_ptr = cdev->private->qdio_data; 1602 if (!irq_ptr) 1603 return -ENODEV; 1604 1605 DBF_DEV_EVENT(DBF_INFO, irq_ptr, 1606 "do%02x b:%02x c:%02x", callflags, bufnr, count); 1607 1608 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) 1609 return -EIO; 1610 if (!count) 1611 return 0; 1612 if (callflags & QDIO_FLAG_SYNC_INPUT) 1613 return handle_inbound(irq_ptr->input_qs[q_nr], 1614 callflags, bufnr, count); 1615 else if (callflags & QDIO_FLAG_SYNC_OUTPUT) 1616 return handle_outbound(irq_ptr->output_qs[q_nr], 1617 callflags, bufnr, count); 1618 return -EINVAL; 1619 } 1620 EXPORT_SYMBOL_GPL(do_QDIO); 1621 1622 /** 1623 * qdio_start_irq - process input buffers 1624 * @cdev: associated ccw_device for the qdio subchannel 1625 * @nr: input queue number 1626 * 1627 * Return codes 1628 * 0 - success 1629 * 1 - irqs not started since new data is available 1630 */ 1631 int qdio_start_irq(struct ccw_device *cdev, int nr) 1632 { 1633 struct qdio_q *q; 1634 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1635 1636 if (!irq_ptr) 1637 return -ENODEV; 1638 q = irq_ptr->input_qs[nr]; 1639 1640 clear_nonshared_ind(irq_ptr); 1641 qdio_stop_polling(q); 1642 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state); 1643 1644 /* 1645 * We need to check again to not lose initiative after 1646 * resetting the ACK state. 1647 */ 1648 if (test_nonshared_ind(irq_ptr)) 1649 goto rescan; 1650 if (!qdio_inbound_q_done(q, q->first_to_check)) 1651 goto rescan; 1652 return 0; 1653 1654 rescan: 1655 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, 1656 &q->u.in.queue_irq_state)) 1657 return 0; 1658 else 1659 return 1; 1660 1661 } 1662 EXPORT_SYMBOL(qdio_start_irq); 1663 1664 static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr, 1665 unsigned int *error) 1666 { 1667 unsigned int start = q->first_to_check; 1668 int count; 1669 1670 count = q->is_input_q ? 
qdio_inbound_q_moved(q, start) : 1671 qdio_outbound_q_moved(q, start); 1672 if (count == 0) 1673 return 0; 1674 1675 *bufnr = start; 1676 *error = q->qdio_error; 1677 1678 /* for the next time */ 1679 q->first_to_check = add_buf(start, count); 1680 q->qdio_error = 0; 1681 1682 return count; 1683 } 1684 1685 int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input, 1686 unsigned int *bufnr, unsigned int *error) 1687 { 1688 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1689 struct qdio_q *q; 1690 1691 if (!irq_ptr) 1692 return -ENODEV; 1693 q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr]; 1694 1695 if (need_siga_sync(q)) 1696 qdio_siga_sync_q(q); 1697 1698 return __qdio_inspect_queue(q, bufnr, error); 1699 } 1700 EXPORT_SYMBOL_GPL(qdio_inspect_queue); 1701 1702 /** 1703 * qdio_get_next_buffers - process input buffers 1704 * @cdev: associated ccw_device for the qdio subchannel 1705 * @nr: input queue number 1706 * @bufnr: first filled buffer number 1707 * @error: buffers are in error state 1708 * 1709 * Return codes 1710 * < 0 - error 1711 * = 0 - no new buffers found 1712 * > 0 - number of processed buffers 1713 */ 1714 int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr, 1715 int *error) 1716 { 1717 struct qdio_q *q; 1718 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1719 1720 if (!irq_ptr) 1721 return -ENODEV; 1722 q = irq_ptr->input_qs[nr]; 1723 1724 /* 1725 * Cannot rely on automatic sync after interrupt since queues may 1726 * also be examined without interrupt. 1727 */ 1728 if (need_siga_sync(q)) 1729 qdio_sync_queues(q); 1730 1731 qdio_check_outbound_pci_queues(irq_ptr); 1732 1733 /* Note: upper-layer MUST stop processing immediately here ... */ 1734 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) 1735 return -EIO; 1736 1737 return __qdio_inspect_queue(q, bufnr, error); 1738 } 1739 EXPORT_SYMBOL(qdio_get_next_buffers); 1740 1741 /** 1742 * qdio_stop_irq - disable interrupt processing for the device 1743 * @cdev: associated ccw_device for the qdio subchannel 1744 * @nr: input queue number 1745 * 1746 * Return codes 1747 * 0 - interrupts were already disabled 1748 * 1 - interrupts successfully disabled 1749 */ 1750 int qdio_stop_irq(struct ccw_device *cdev, int nr) 1751 { 1752 struct qdio_q *q; 1753 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1754 1755 if (!irq_ptr) 1756 return -ENODEV; 1757 q = irq_ptr->input_qs[nr]; 1758 1759 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, 1760 &q->u.in.queue_irq_state)) 1761 return 0; 1762 else 1763 return 1; 1764 } 1765 EXPORT_SYMBOL(qdio_stop_irq); 1766 1767 /** 1768 * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info. 1769 * @schid: Subchannel ID. 1770 * @cnc: Boolean Change-Notification Control 1771 * @response: Response code will be stored at this address 1772 * @cb: Callback function will be executed for each element 1773 * of the address list 1774 * @priv: Pointer to pass to the callback function. 1775 * 1776 * Performs "Store-network-bridging-information list" operation and calls 1777 * the callback function for every entry in the list. If "change- 1778 * notification-control" is set, further changes in the address list 1779 * will be reported via the IPA command. 
1780 */ 1781 int qdio_pnso_brinfo(struct subchannel_id schid, 1782 int cnc, u16 *response, 1783 void (*cb)(void *priv, enum qdio_brinfo_entry_type type, 1784 void *entry), 1785 void *priv) 1786 { 1787 struct chsc_pnso_area *rr; 1788 int rc; 1789 u32 prev_instance = 0; 1790 int isfirstblock = 1; 1791 int i, size, elems; 1792 1793 rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL); 1794 if (rr == NULL) 1795 return -ENOMEM; 1796 do { 1797 /* on the first iteration, naihdr.resume_token will be zero */ 1798 rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc); 1799 if (rc != 0 && rc != -EBUSY) 1800 goto out; 1801 if (rr->response.code != 1) { 1802 rc = -EIO; 1803 continue; 1804 } else 1805 rc = 0; 1806 1807 if (cb == NULL) 1808 continue; 1809 1810 size = rr->naihdr.naids; 1811 elems = (rr->response.length - 1812 sizeof(struct chsc_header) - 1813 sizeof(struct chsc_brinfo_naihdr)) / 1814 size; 1815 1816 if (!isfirstblock && (rr->naihdr.instance != prev_instance)) { 1817 /* Inform the caller that they need to scrap */ 1818 /* the data that was already reported via cb */ 1819 rc = -EAGAIN; 1820 break; 1821 } 1822 isfirstblock = 0; 1823 prev_instance = rr->naihdr.instance; 1824 for (i = 0; i < elems; i++) 1825 switch (size) { 1826 case sizeof(struct qdio_brinfo_entry_l3_ipv6): 1827 (*cb)(priv, l3_ipv6_addr, 1828 &rr->entries.l3_ipv6[i]); 1829 break; 1830 case sizeof(struct qdio_brinfo_entry_l3_ipv4): 1831 (*cb)(priv, l3_ipv4_addr, 1832 &rr->entries.l3_ipv4[i]); 1833 break; 1834 case sizeof(struct qdio_brinfo_entry_l2): 1835 (*cb)(priv, l2_addr_lnid, 1836 &rr->entries.l2[i]); 1837 break; 1838 default: 1839 WARN_ON_ONCE(1); 1840 rc = -EIO; 1841 goto out; 1842 } 1843 } while (rr->response.code == 0x0107 || /* channel busy */ 1844 (rr->response.code == 1 && /* list stored */ 1845 /* resume token is non-zero => list incomplete */ 1846 (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2))); 1847 (*response) = rr->response.code; 1848 1849 out: 1850 free_page((unsigned long)rr); 1851 return rc; 1852 } 1853 EXPORT_SYMBOL_GPL(qdio_pnso_brinfo); 1854 1855 static int __init init_QDIO(void) 1856 { 1857 int rc; 1858 1859 rc = qdio_debug_init(); 1860 if (rc) 1861 return rc; 1862 rc = qdio_setup_init(); 1863 if (rc) 1864 goto out_debug; 1865 rc = tiqdio_allocate_memory(); 1866 if (rc) 1867 goto out_cache; 1868 rc = tiqdio_register_thinints(); 1869 if (rc) 1870 goto out_ti; 1871 return 0; 1872 1873 out_ti: 1874 tiqdio_free_memory(); 1875 out_cache: 1876 qdio_setup_exit(); 1877 out_debug: 1878 qdio_debug_exit(); 1879 return rc; 1880 } 1881 1882 static void __exit exit_QDIO(void) 1883 { 1884 tiqdio_unregister_thinints(); 1885 tiqdio_free_memory(); 1886 qdio_setup_exit(); 1887 qdio_debug_exit(); 1888 } 1889 1890 module_init(init_QDIO); 1891 module_exit(exit_QDIO); 1892
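/*
 * Example usage (illustrative sketch only, kept under #if 0 so it is never
 * built): an upper-layer driver such as qeth or zfcp typically drives the
 * API exported above roughly as shown below. The function names, the choice
 * of queue 0 and the error-handling policy are hypothetical; only the qdio
 * calls themselves correspond to the interface implemented in this file.
 */
#if 0
/*
 * Bring a subchannel up: allocate, establish and activate the queues, then
 * hand all input buffers to the adapter. @init_data is assumed to be fully
 * populated by the caller (cdev, queue counts, handlers, SBAL arrays).
 */
static int example_qdio_bringup(struct ccw_device *cdev,
				struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		goto out_free;

	rc = qdio_activate(cdev);
	if (rc)
		goto out_shutdown;

	/* Give all 128 buffers of input queue 0 to the adapter: */
	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q);
	if (rc)
		goto out_shutdown;
	return 0;

out_shutdown:
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
out_free:
	qdio_free(cdev);
	return rc;
}

/*
 * Drain input queue @nr in polling mode (adapter interrupts for the queue
 * are assumed to be disabled already, e.g. after the queue_start_poll
 * callback fired), return the consumed buffers and re-enable interrupts.
 */
static void example_qdio_poll_input(struct ccw_device *cdev, int nr)
{
	int bufnr, error, count;

	do {
		while ((count = qdio_get_next_buffers(cdev, nr, &bufnr,
						      &error)) > 0) {
			/*
			 * ... consume @count buffers starting at @bufnr here
			 * (checking @error first), then hand them back:
			 */
			do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, nr, bufnr, count);
		}
		/*
		 * qdio_start_irq() returns 1 if new data arrived while
		 * interrupts were off, so scan once more in that case.
		 */
	} while (qdio_start_irq(cdev, nr) == 1);
}
#endif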