/*
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright IBM Corp. 2000, 2008
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/ipl.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns condition code.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc,
				 unsigned long aob)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	register unsigned long __aob asm("3") = aob;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc), "+d" (__fc), "+d" (__aob)
		: "d" (__schid), "d" (__mask)
		: "cc");
	*bb = __fc >> 31;
	return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* no buffer processed */
	if (ccq == 97)
		return 1;
	/* not all buffers processed */
	if (ccq == 96)
		return 2;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffer's state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
	unsigned int ccq = 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);
	if (!rc)
		return count - tmp_count;

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc == 2) {
		BUG_ON(tmp_count == count);
		qperf_inc(q, eqbs_partial);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
			      tmp_count);
		/*
		 * Retry once, if that fails bail out and process the
		 * extracted buffers before trying again.
		 */
		if (!retried++)
			goto again;
		else
			return count - tmp_count;
	}

	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (!rc) {
		WARN_ON(tmp_count);
		return count - tmp_count;
	}

	if (rc == 1 || rc == 2) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}

	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
	return 0;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack, int merge_pending)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state) {
			__state = q->slsb.val[bufnr];
			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
				__state = SLSB_P_OUTPUT_EMPTY;
		} else if (merge_pending) {
			if ((q->slsb.val[bufnr] & __state) != __state)
				break;
		} else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}
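
/*
 * Illustrative example for the non-QEBSM scan above (hypothetical buffer
 * numbers, not taken from a real trace): if buffers 5..7 are in state
 * SLSB_P_INPUT_PRIMED, buffer 8 is SLSB_CU_INPUT_EMPTY and merge_pending
 * is 0, then
 *
 *	get_buf_states(q, 5, &state, 5, 0, 0);
 *
 * stops at the first state change and returns 3 with state set to
 * SLSB_P_INPUT_PRIMED; the remaining buffers are picked up by a later scan.
 */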

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
			    unsigned long aob)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;
	unsigned long laob = 0;

	if (q->u.out.use_cq && aob != 0) {
		fc = QDIO_SIGA_WRITEQ;
		laob = aob;
	}

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
		     (aob && fc != QDIO_SIGA_WRITEQ));
	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return (cc) ? -EIO : 0;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error = QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		goto set;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

set:
	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock();

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI interrupt or the thin-interrupt
	 * handler has already synced the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static inline int contains_aobs(struct qdio_q *q)
{
	return !q->is_input_q && q->u.out.use_cq;
}

static inline void qdio_trace_aob(struct qdio_irq *irq, struct qdio_q *q,
				  int i, struct qaob *aob)
{
	int tmp;

	DBF_DEV_EVENT(DBF_INFO, irq, "AOB%d:%lx", i,
		      (unsigned long) virt_to_phys(aob));
	DBF_DEV_EVENT(DBF_INFO, irq, "RES00:%lx",
		      (unsigned long) aob->res0[0]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES01:%lx",
		      (unsigned long) aob->res0[1]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES02:%lx",
		      (unsigned long) aob->res0[2]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES03:%lx",
		      (unsigned long) aob->res0[3]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES04:%lx",
		      (unsigned long) aob->res0[4]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES05:%lx",
		      (unsigned long) aob->res0[5]);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES1:%x", aob->res1);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES2:%x", aob->res2);
	DBF_DEV_EVENT(DBF_INFO, irq, "RES3:%x", aob->res3);
	DBF_DEV_EVENT(DBF_INFO, irq, "AORC:%u", aob->aorc);
	DBF_DEV_EVENT(DBF_INFO, irq, "FLAGS:%u", aob->flags);
	DBF_DEV_EVENT(DBF_INFO, irq, "CBTBS:%u", aob->cbtbs);
	DBF_DEV_EVENT(DBF_INFO, irq, "SBC:%u", aob->sb_count);
	for (tmp = 0; tmp < QDIO_MAX_ELEMENTS_PER_BUFFER; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "SBA%d:%lx", tmp,
			      (unsigned long) aob->sba[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rSBA%d:%lx", tmp,
			      (unsigned long) q->sbal[i]->element[tmp].addr);
		DBF_DEV_EVENT(DBF_INFO, irq, "DC%d:%u", tmp, aob->dcount[tmp]);
		DBF_DEV_EVENT(DBF_INFO, irq, "rDC%d:%u", tmp,
			      q->sbal[i]->element[tmp].length);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER0:%lx", (unsigned long) aob->user0);
	for (tmp = 0; tmp < 2; ++tmp) {
		DBF_DEV_EVENT(DBF_INFO, irq, "RES4%d:%lx", tmp,
			      (unsigned long) aob->res4[tmp]);
	}
	DBF_DEV_EVENT(DBF_INFO, irq, "USER1:%lx", (unsigned long) aob->user1);
	DBF_DEV_EVENT(DBF_INFO, irq, "USER2:%lx", (unsigned long) aob->user2);
}

static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
{
	unsigned char state = 0;
	int j, b = start;

	if (!contains_aobs(q))
		return;

	for (j = 0; j < count; ++j) {
		get_buf_state(q, b, &state, 0);
		if (state == SLSB_P_OUTPUT_PENDING) {
			struct qaob *aob = q->u.out.aobs[b];
			if (aob == NULL)
				continue;

			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].flags |=
				QDIO_OUTBUF_STATE_FLAG_PENDING;
			q->u.out.aobs[b] = NULL;
		} else if (state == SLSB_P_OUTPUT_EMPTY) {
			BUG_ON(q->u.out.sbal_state == NULL);
			q->u.out.sbal_state[b].aob = NULL;
		}
		b = next_buf(b);
	}
}

static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
						int bufnr)
{
	unsigned long phys_aob = 0;

	if (!q->use_cq)
		goto out;

	if (!q->aobs[bufnr]) {
		struct qaob *aob = qdio_allocate_aob();
		q->aobs[bufnr] = aob;
	}
	if (q->aobs[bufnr]) {
		BUG_ON(q->sbal_state == NULL);
		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
		q->sbal_state[bufnr].aob = q->aobs[bufnr];
		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
		phys_aob = virt_to_phys(q->aobs[bufnr]);
		BUG_ON(phys_aob & 0xFF);
	}

out:
	return phys_aob;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	qdio_handle_aobs(q, start, count);

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	q->timestamp = get_clock();

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_OUTPUT_PENDING:
		BUG();
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
			      "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);

		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
			      q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}

out:
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if (bufnr != q->last_move) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit, aob);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc = -EBUSY;
		} else {
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
			cc = -ENOBUFS;
		}
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		cc = -EIO;
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets. Make sure buffer switch from PRIMED to EMPTY
	 * is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
					     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else {
			tasklet_schedule(&q->tasklet);
		}
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				       unsigned long intparm, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int count;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}

	count = sub_buf(q->first_to_check, q->first_to_kick);
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
	/*
	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will
	 * happen. Therefore we call the LGR detection function here.
	 */
	lgr_info_log();
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{

	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q = irq_ptr->input_qs[0];
	int i, use_cq = 0;

	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
		use_cq = 1;

	for_each_output_queue(irq_ptr, q, i) {
		if (use_cq) {
			if (qdio_enable_async_operation(&q->u.out) < 0) {
				use_cq = 0;
				continue;
			}
		} else
			qdio_disable_async_operation(&q->u.out);
	}
	DBF_EVENT("use_cq:%d", use_cq);
}

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);

	qdio_detect_hsicq(irq_ptr);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
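
/*
 * Worked example for the wrap-around case above (hypothetical numbers,
 * assuming the usual 128-buffer ring): with start = 120 and count = 16,
 * add_buf() yields end = 8, so the range covers buffers 120..127 and 0..7.
 * buf_in_between(2, 120, 16) and buf_in_between(125, 120, 16) both return 1,
 * while buf_in_between(50, 120, 16) returns 0.
 */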

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	if (need_siga_in(q))
		return qdio_siga_input(q);

	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		unsigned long phys_aob = 0;

		/* One SIGA-W per buffer required for unicast HSI */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);

		rc = qdio_kick_outbound_q(q, phys_aob);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q, 0);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}
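
/*
 * Illustrative do_QDIO() calls as an upper-layer driver might issue them
 * (the parameter values are made up for the example): returning 16 processed
 * buffers on input queue 0 starting at buffer 40, and posting one filled
 * buffer on output queue 1 at buffer 7 together with a PCI request:
 *
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 40, 16);
 *	do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT, 1, 7, 1);
 */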

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EIO;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	clear_nonshared_ind(irq_ptr);
	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (test_nonshared_ind(irq_ptr))
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);
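
/*
 * Sketch of the interrupt-avoidance flow built on qdio_start_irq(),
 * qdio_get_next_buffers() and qdio_stop_irq() (illustrative pseudo-driver
 * logic; the upper-layer poller itself is hypothetical):
 *
 *	the queue_start_poll callback fires and the driver schedules its poller
 *	poller:
 *		while ((n = qdio_get_next_buffers(cdev, 0, &bufnr, &err)) > 0)
 *			consume n buffers starting at bufnr;
 *		if (n == 0 && qdio_start_irq(cdev, 0) == 0)
 *			done, interrupt delivery is enabled again;
 *		else
 *			re-schedule the poller and keep scanning;
 */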

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);