/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");

static inline int do_siga_sync(unsigned long schid,
			       unsigned int out_mask, unsigned int in_mask,
			       unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}

static inline int do_siga_input(unsigned long schid, unsigned int mask,
				unsigned int fc)
{
	register unsigned long __fc asm ("0") = fc;
	register unsigned long __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"	siga	0\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"	siga	0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}

static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, eqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qperf_inc(q, eqbs_partial);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qperf_inc(q, sqbs);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qperf_inc(q, sqbs_partial);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
				unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

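	/* sanity check, assumed addition: catch out-of-range buffer numbers and counts early */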
	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}

static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
				 unsigned int input)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_SYNC;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qperf_inc(q, siga_sync);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_sync(schid, output, input, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

static inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_WRITE;
	u64 start_time = 0;
	int retries = 0, cc;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}
again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (unlikely(*busy_bit)) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
		retries++;

		if (!start_time) {
			start_time = get_clock();
			goto again;
		}
		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	if (retries) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
	unsigned int fc = QDIO_SIGA_READ;
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qperf_inc(q, siga_read);

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= QDIO_SIGA_QEBSM_FLAG;
	}

	cc = do_siga_input(schid, q->mask, fc);
	if (unlikely(cc))
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)

static inline void qdio_sync_queues(struct qdio_q *q)
{
	/* PCI capable outbound queues will also be scanned so sync them too */
	if (pci_out_supported(q))
		qdio_siga_sync_all(q);
	else
		qdio_siga_sync_q(q);
}

int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
			unsigned char *state)
{
	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	return get_buf_states(q, bufnr, state, 1, 0);
}

static inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qperf_inc(q, stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}

static inline void account_sbals(struct qdio_q *q, int count)
{
	int pos = 0;

	q->q_stats.nr_sbal_total += count;
	if (count == QDIO_MAX_BUFFERS_MASK) {
		q->q_stats.nr_sbals[7]++;
		return;
	}
	while (count >>= 1)
		pos++;
	q->q_stats.nr_sbals[pos]++;
}

static void process_buffer_error(struct qdio_q *q, int count)
{
	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
					SLSB_P_OUTPUT_NOT_INIT;

	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
		qperf_inc(q, target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].sflags,
		  q->sbal[q->first_to_check]->element[15].sflags);

	/*
	 * Interrupts may be avoided as long as the error is present
	 * so change the buffer state immediately to avoid starvation.
	 */
	set_buf_states(q, q->first_to_check, state, count);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACK's */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;
	/* need to change ALL buffers to get more interrupts */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
}

static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		goto out;

	/*
	 * No siga sync here, as a PCI or we after a thin interrupt
	 * already sync'ed the queues.
	 */
	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (atomic_sub(count, &q->nr_buf_used) == 0)
			qperf_inc(q, inbound_queue_full);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_INPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

static int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
			q->u.in.timestamp = get_clock();
		return 1;
	} else
		return 0;
}

static inline int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	if (need_siga_sync(q))
		qdio_siga_sync_q(q);
	get_buf_state(q, q->first_to_check, &state, 0);

	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
		/* more work coming */
		return 0;

	if (is_thinint_irq(q->irq_ptr))
		return 1;

	/* don't poll under z/VM */
	if (MACHINE_IS_VM)
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
			      q->first_to_check);
		return 1;
	} else
		return 0;
}

static void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qperf_inc(q, inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
	} else {
		qperf_inc(q, outbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
			      start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		/* means poll time is not yet over */
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}

static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state = 0;

	if (need_siga_sync(q))
		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
		    !pci_out_supported(q)) ||
		    (queue_type(q) == QDIO_IQDIO_QFMT &&
		    multicast_outbound(q)))
			qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals(q, count);
		break;
	case SLSB_P_OUTPUT_ERROR:
		process_buffer_error(q, count);
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		if (q->irq_ptr->perf_stat_enabled)
			account_sbals_error(q, count);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		if (q->irq_ptr->perf_stat_enabled)
			q->q_stats.nr_sbal_nop++;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}

static int qdio_kick_outbound_q(struct qdio_q *q)
{
	int retries = 0, cc;
	unsigned int busy_bit;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
retry:
	qperf_inc(q, siga_write);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			while (++retries < QDIO_BUSY_BIT_RETRIES) {
				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
				goto retry;
			}
			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	if (retries) {
		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
		DBF_ERROR("count:%u", retries);
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static void __tiqdio_inbound_processing(struct qdio_q *q)
{
	qperf_inc(q, tasklet_inbound);
	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
		qdio_sync_queues(q);

	/*
	 * The interrupt could be caused by a PCI request. Check the
	 * PCI capable outbound queues.
	 */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
			tasklet_schedule(&q->tasklet);
			return;
		}
	}

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q)) {
		qperf_inc(q, tasklet_inbound_resched2);
		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
			tasklet_schedule(&q->tasklet);
	}
}

void tiqdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__tiqdio_inbound_processing(q);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	for_each_input_queue(irq_ptr, q, i) {
		if (q->u.in.queue_start_poll) {
			/* skip if polling is enabled or already in work */
			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
				     &q->u.in.queue_irq_state)) {
				qperf_inc(q, int_discarded);
				continue;
			}
			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
						 q->irq_ptr->int_parm);
		} else
			tasklet_schedule(&q->tasklet);
	}

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");

	if (cstat)
		goto error;
	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
		goto error;
	if (!(dstat & DEV_STAT_DEV_END))
		goto error;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
	return;

error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
	if (irq_ptr->perf_stat_enabled)
		irq_ptr->perf_stat.qdio_int++;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
			wake_up(&cdev->private->wait_q);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);
	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			return;
		}
		if (cstat || dstat)
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
		break;
	case QDIO_IRQ_STATE_STOPPED:
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{

	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
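		/* assumed clarification: the subchannel is still inactive, i.e. not yet established */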
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	qperf_inc(q, inbound_call);

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		}
		else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state = 0;
	int used, rc = 0;

	qperf_inc(q, outbound_call);
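	/* assumed clarification: hand the filled SBALs to the adapter by marking them output primed */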

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (used == QDIO_MAX_BUFFERS_PER_Q)
		qperf_inc(q, outbound_queue_full);

	if (callflags & QDIO_FLAG_PCI_OUT) {
		q->u.out.pci_out_enabled = 1;
		qperf_inc(q, pci_request_int);
	} else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		/* One SIGA-W per buffer required for unicast HiperSockets. */
		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));

		rc = qdio_kick_outbound_q(q);
	} else if (need_siga_sync(q)) {
		rc = qdio_siga_sync_q(q);
	} else {
		/* try to fast requeue buffers */
		get_buf_state(q, prev_buf(bufnr), &state, 0);
		if (state != SLSB_CU_OUTPUT_PRIMED)
			rc = qdio_kick_outbound_q(q);
		else
			qperf_inc(q, fast_requeue);
	}

	/* in case of SIGA errors we must process the error immediately */
	if (used >= q->u.out.scan_threshold || rc)
		tasklet_schedule(&q->tasklet);
	else
		/* free the SBALs in case of no further traffic */
		if (!timer_pending(&q->u.out.timer))
			mod_timer(&q->u.out.timer, jiffies + HZ);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, unsigned int bufnr, unsigned int count)
{
	struct qdio_irq *irq_ptr;

	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
		return -EINVAL;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
		      "do%02x b:%02x c:%02x", callflags, bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;
	if (!count)
		return 0;
	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);

/**
 * qdio_start_irq - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - success
 *   1 - irqs not started since new data is available
 */
int qdio_start_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	WARN_ON(queue_irqs_enabled(q));

	if (!shared_ind(q->irq_ptr->dsci))
		xchg(q->irq_ptr->dsci, 0);

	qdio_stop_polling(q);
	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);

	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
		goto rescan;
	if (!qdio_inbound_q_done(q))
		goto rescan;
	return 0;

rescan:
	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;

}
EXPORT_SYMBOL(qdio_start_irq);

/**
 * qdio_get_next_buffers - process input buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 * @bufnr: first filled buffer number
 * @error: buffers are in error state
 *
 * Return codes
 *   < 0 - error
 *   = 0 - no new buffers found
 *   > 0 - number of processed buffers
 */
int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
			  int *error)
{
	struct qdio_q *q;
	int start, end;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];
	WARN_ON(queue_irqs_enabled(q));

	/*
	 * Cannot rely on automatic sync after interrupt since queues may
	 * also be examined without interrupt.
	 */
	if (need_siga_sync(q))
		qdio_sync_queues(q);

	/* check the PCI capable outbound queues. */
	qdio_check_outbound_after_thinint(q);

	if (!qdio_inbound_q_moved(q))
		return 0;

	/* Note: upper-layer MUST stop processing immediately here ... */
	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return -EIO;

	start = q->first_to_kick;
	end = q->first_to_check;
	*bufnr = start;
	*error = q->qdio_error;

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
	return sub_buf(end, start);
}
EXPORT_SYMBOL(qdio_get_next_buffers);

/**
 * qdio_stop_irq - disable interrupt processing for the device
 * @cdev: associated ccw_device for the qdio subchannel
 * @nr: input queue number
 *
 * Return codes
 *   0 - interrupts were already disabled
 *   1 - interrupts successfully disabled
 */
int qdio_stop_irq(struct ccw_device *cdev, int nr)
{
	struct qdio_q *q;
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;
	q = irq_ptr->input_qs[nr];

	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
			     &q->u.in.queue_irq_state))
		return 0;
	else
		return 1;
}
EXPORT_SYMBOL(qdio_stop_irq);

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_debug_init();
	if (rc)
		return rc;
	rc = qdio_setup_init();
	if (rc)
		goto out_debug;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_ti;
	return 0;

out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
out_debug:
	qdio_debug_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_setup_exit();
	qdio_debug_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);