/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <scsi/scsi_tcq.h>

/* Local response-queue/IOCB processing helpers (defined later in this file). */
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number (unused)
 * @dev_id: SCSI driver HA context (a struct rsp_que *)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock(&ha->hardware_lock);
	vha = qla2x00_get_rsp_host(rsp);
	/* Bounded loop: service at most 50 interrupt causes per invocation. */
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);	/* read back to flush */

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				/* 0x4000-0x7fff: mailbox command completion. */
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				/* 0x8000-0xbfff: asynchronous event. */
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    vha->host_no, mb[0]));
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock(&ha->hardware_lock);

	/* Wake any thread sleeping on a mailbox command completion. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number (unused)
 * @dev_id: SCSI driver HA context (a struct rsp_que *)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock(&ha->hardware_lock);
	vha = qla2x00_get_rsp_host(rsp);
	/* Bounded loop: service at most 50 interrupt causes per invocation. */
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha, "Parity error -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);
			else
				qla_printk(KERN_INFO, ha, "RISC paused -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);	/* read back to flush */

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		/* Low byte of host status encodes the interrupt cause. */
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			/* Mailbox command completion; mb0 is in upper word. */
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			/* Asynchronous event. */
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			/* Fast-post of a single 16-bit command handle. */
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			/* SCSI completion; extra handle word in mailbox 2. */
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    vha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock(&ha->hardware_lock);

	/* Wake any thread sleeping on a mailbox command completion. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register value (command completion status)
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers.
 */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		/* On ISP2200, mailboxes 8+ live at a different offset. */
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		/* Mailboxes 4 and 5 need a debounced read on this hardware. */
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, vha->host_no, ha->mcp->mb[0]));
	} else {
		/* Completion arrived with no mailbox command outstanding. */
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, vha->host_no));
	}
}

/*
 * qla81xx_idc_event() - Handle an ISP81xx Inter-Driver Communication AEN.
 * @vha: SCSI driver HA context
 * @aen: IDC event code (MBA_IDC_COMPLETE / MBA_IDC_NOTIFY / MBA_IDC_TIME_EXT)
 * @descr: event descriptor; bits 8-11 carry the ACK timeout for NOTIFY
 *
 * Snapshots mailboxes 1-7 and, for a NOTIFY with a non-zero timeout,
 * schedules an IDC ACK from process context (the ACK needs a mailbox
 * command, which cannot be issued from interrupt context).
 */
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	/* Indexed by (aen & 0xff): 0=Complete, 1=Notify, 2=Time Extension. */
	static char	*event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
	    event[aen & 0xff],
	    mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	DEBUG2(printk("scsi(%ld): Inter-Driver Commucation %s -- "
	    "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		qla_printk(KERN_WARNING, vha->hw,
		    "IDC failed to post ACK.\n");
}

/**
 * qla2x00_async_event() - Process asynchronous events.
305 * @ha: SCSI driver HA context 306 * @mb: Mailbox registers (0 - 3) 307 */ 308 void 309 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 310 { 311 #define LS_UNKNOWN 2 312 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" }; 313 char *link_speed; 314 uint16_t handle_cnt; 315 uint16_t cnt; 316 uint32_t handles[5]; 317 struct qla_hw_data *ha = vha->hw; 318 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 319 uint32_t rscn_entry, host_pid; 320 uint8_t rscn_queue_index; 321 unsigned long flags; 322 323 /* Setup to process RIO completion. */ 324 handle_cnt = 0; 325 if (IS_QLA81XX(ha)) 326 goto skip_rio; 327 switch (mb[0]) { 328 case MBA_SCSI_COMPLETION: 329 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 330 handle_cnt = 1; 331 break; 332 case MBA_CMPLT_1_16BIT: 333 handles[0] = mb[1]; 334 handle_cnt = 1; 335 mb[0] = MBA_SCSI_COMPLETION; 336 break; 337 case MBA_CMPLT_2_16BIT: 338 handles[0] = mb[1]; 339 handles[1] = mb[2]; 340 handle_cnt = 2; 341 mb[0] = MBA_SCSI_COMPLETION; 342 break; 343 case MBA_CMPLT_3_16BIT: 344 handles[0] = mb[1]; 345 handles[1] = mb[2]; 346 handles[2] = mb[3]; 347 handle_cnt = 3; 348 mb[0] = MBA_SCSI_COMPLETION; 349 break; 350 case MBA_CMPLT_4_16BIT: 351 handles[0] = mb[1]; 352 handles[1] = mb[2]; 353 handles[2] = mb[3]; 354 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 355 handle_cnt = 4; 356 mb[0] = MBA_SCSI_COMPLETION; 357 break; 358 case MBA_CMPLT_5_16BIT: 359 handles[0] = mb[1]; 360 handles[1] = mb[2]; 361 handles[2] = mb[3]; 362 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 363 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 364 handle_cnt = 5; 365 mb[0] = MBA_SCSI_COMPLETION; 366 break; 367 case MBA_CMPLT_2_32BIT: 368 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 369 handles[1] = le32_to_cpu( 370 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 371 RD_MAILBOX_REG(ha, reg, 6)); 372 handle_cnt = 2; 373 mb[0] = MBA_SCSI_COMPLETION; 374 break; 375 
default: 376 break; 377 } 378 skip_rio: 379 switch (mb[0]) { 380 case MBA_SCSI_COMPLETION: /* Fast Post */ 381 if (!vha->flags.online) 382 break; 383 384 for (cnt = 0; cnt < handle_cnt; cnt++) 385 qla2x00_process_completed_request(vha, rsp->req, 386 handles[cnt]); 387 break; 388 389 case MBA_RESET: /* Reset */ 390 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", 391 vha->host_no)); 392 393 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 394 break; 395 396 case MBA_SYSTEM_ERR: /* System Error */ 397 qla_printk(KERN_INFO, ha, 398 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n", 399 mb[1], mb[2], mb[3]); 400 401 ha->isp_ops->fw_dump(vha, 1); 402 403 if (IS_FWI2_CAPABLE(ha)) { 404 if (mb[1] == 0 && mb[2] == 0) { 405 qla_printk(KERN_ERR, ha, 406 "Unrecoverable Hardware Error: adapter " 407 "marked OFFLINE!\n"); 408 vha->flags.online = 0; 409 } else 410 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 411 } else if (mb[1] == 0) { 412 qla_printk(KERN_INFO, ha, 413 "Unrecoverable Hardware Error: adapter marked " 414 "OFFLINE!\n"); 415 vha->flags.online = 0; 416 } else 417 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 418 break; 419 420 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 421 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n", 422 vha->host_no)); 423 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n"); 424 425 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 426 break; 427 428 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 429 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n", 430 vha->host_no)); 431 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n"); 432 433 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 434 break; 435 436 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 437 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n", 438 vha->host_no)); 439 break; 440 441 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 442 DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no, 443 mb[1])); 444 
qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]); 445 446 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 447 atomic_set(&vha->loop_state, LOOP_DOWN); 448 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 449 qla2x00_mark_all_devices_lost(vha, 1); 450 } 451 452 if (vha->vp_idx) { 453 atomic_set(&vha->vp_state, VP_FAILED); 454 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 455 } 456 457 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 458 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 459 460 vha->flags.management_server_logged_in = 0; 461 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 462 break; 463 464 case MBA_LOOP_UP: /* Loop Up Event */ 465 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 466 link_speed = link_speeds[0]; 467 ha->link_data_rate = PORT_SPEED_1GB; 468 } else { 469 link_speed = link_speeds[LS_UNKNOWN]; 470 if (mb[1] < 5) 471 link_speed = link_speeds[mb[1]]; 472 else if (mb[1] == 0x13) 473 link_speed = link_speeds[5]; 474 ha->link_data_rate = mb[1]; 475 } 476 477 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n", 478 vha->host_no, link_speed)); 479 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n", 480 link_speed); 481 482 vha->flags.management_server_logged_in = 0; 483 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 484 break; 485 486 case MBA_LOOP_DOWN: /* Loop Down Event */ 487 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN " 488 "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3])); 489 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n", 490 mb[1], mb[2], mb[3]); 491 492 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 493 atomic_set(&vha->loop_state, LOOP_DOWN); 494 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 495 vha->device_flags |= DFLG_NO_CABLE; 496 qla2x00_mark_all_devices_lost(vha, 1); 497 } 498 499 if (vha->vp_idx) { 500 atomic_set(&vha->vp_state, VP_FAILED); 501 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 502 } 503 504 vha->flags.management_server_logged_in = 0; 505 
ha->link_data_rate = PORT_SPEED_UNKNOWN; 506 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 507 break; 508 509 case MBA_LIP_RESET: /* LIP reset occurred */ 510 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n", 511 vha->host_no, mb[1])); 512 qla_printk(KERN_INFO, ha, 513 "LIP reset occurred (%x).\n", mb[1]); 514 515 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 516 atomic_set(&vha->loop_state, LOOP_DOWN); 517 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 518 qla2x00_mark_all_devices_lost(vha, 1); 519 } 520 521 if (vha->vp_idx) { 522 atomic_set(&vha->vp_state, VP_FAILED); 523 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 524 } 525 526 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 527 528 ha->operating_mode = LOOP; 529 vha->flags.management_server_logged_in = 0; 530 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 531 break; 532 533 /* case MBA_DCBX_COMPLETE: */ 534 case MBA_POINT_TO_POINT: /* Point-to-Point */ 535 if (IS_QLA2100(ha)) 536 break; 537 538 if (IS_QLA81XX(ha)) 539 DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x " 540 "%04x\n", vha->host_no, mb[1], mb[2], mb[3])); 541 else 542 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE " 543 "received.\n", vha->host_no)); 544 545 /* 546 * Until there's a transition from loop down to loop up, treat 547 * this as loop down only. 
548 */ 549 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 550 atomic_set(&vha->loop_state, LOOP_DOWN); 551 if (!atomic_read(&vha->loop_down_timer)) 552 atomic_set(&vha->loop_down_timer, 553 LOOP_DOWN_TIME); 554 qla2x00_mark_all_devices_lost(vha, 1); 555 } 556 557 if (vha->vp_idx) { 558 atomic_set(&vha->vp_state, VP_FAILED); 559 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 560 } 561 562 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 563 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 564 565 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 566 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 567 568 ha->flags.gpsc_supported = 1; 569 vha->flags.management_server_logged_in = 0; 570 break; 571 572 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 573 if (IS_QLA2100(ha)) 574 break; 575 576 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection " 577 "received.\n", 578 vha->host_no)); 579 qla_printk(KERN_INFO, ha, 580 "Configuration change detected: value=%x.\n", mb[1]); 581 582 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 583 atomic_set(&vha->loop_state, LOOP_DOWN); 584 if (!atomic_read(&vha->loop_down_timer)) 585 atomic_set(&vha->loop_down_timer, 586 LOOP_DOWN_TIME); 587 qla2x00_mark_all_devices_lost(vha, 1); 588 } 589 590 if (vha->vp_idx) { 591 atomic_set(&vha->vp_state, VP_FAILED); 592 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 593 } 594 595 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 596 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 597 break; 598 599 case MBA_PORT_UPDATE: /* Port database update */ 600 /* Only handle SCNs for our Vport index. */ 601 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) 602 break; 603 604 /* 605 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 606 * event etc. earlier indicating loop is down) then process 607 * it. Otherwise ignore it and Wait for RSCN to come in. 
608 */ 609 atomic_set(&vha->loop_down_timer, 0); 610 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 611 atomic_read(&vha->loop_state) != LOOP_DEAD) { 612 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE " 613 "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1], 614 mb[2], mb[3])); 615 break; 616 } 617 618 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n", 619 vha->host_no)); 620 DEBUG(printk(KERN_INFO 621 "scsi(%ld): Port database changed %04x %04x %04x.\n", 622 vha->host_no, mb[1], mb[2], mb[3])); 623 624 /* 625 * Mark all devices as missing so we will login again. 626 */ 627 atomic_set(&vha->loop_state, LOOP_UP); 628 629 qla2x00_mark_all_devices_lost(vha, 1); 630 631 vha->flags.rscn_queue_overflow = 1; 632 633 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 634 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 635 break; 636 637 case MBA_RSCN_UPDATE: /* State Change Registration */ 638 /* Check if the Vport has issued a SCR */ 639 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 640 break; 641 /* Only handle SCNs for our Vport index. */ 642 if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff)) 643 break; 644 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n", 645 vha->host_no)); 646 DEBUG(printk(KERN_INFO 647 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n", 648 vha->host_no, mb[1], mb[2], mb[3])); 649 650 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 651 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 652 | vha->d_id.b.al_pa; 653 if (rscn_entry == host_pid) { 654 DEBUG(printk(KERN_INFO 655 "scsi(%ld): Ignoring RSCN update to local host " 656 "port ID (%06x)\n", 657 vha->host_no, host_pid)); 658 break; 659 } 660 661 /* Ignore reserved bits from RSCN-payload. 
*/ 662 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 663 rscn_queue_index = vha->rscn_in_ptr + 1; 664 if (rscn_queue_index == MAX_RSCN_COUNT) 665 rscn_queue_index = 0; 666 if (rscn_queue_index != vha->rscn_out_ptr) { 667 vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry; 668 vha->rscn_in_ptr = rscn_queue_index; 669 } else { 670 vha->flags.rscn_queue_overflow = 1; 671 } 672 673 atomic_set(&vha->loop_state, LOOP_UPDATE); 674 atomic_set(&vha->loop_down_timer, 0); 675 vha->flags.management_server_logged_in = 0; 676 677 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 678 set_bit(RSCN_UPDATE, &vha->dpc_flags); 679 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 680 break; 681 682 /* case MBA_RIO_RESPONSE: */ 683 case MBA_ZIO_RESPONSE: 684 DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n", 685 vha->host_no)); 686 687 if (IS_FWI2_CAPABLE(ha)) 688 qla24xx_process_response_queue(rsp); 689 else 690 qla2x00_process_response_queue(rsp); 691 break; 692 693 case MBA_DISCARD_RND_FRAME: 694 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x " 695 "%04x.\n", vha->host_no, mb[1], mb[2], mb[3])); 696 break; 697 698 case MBA_TRACE_NOTIFICATION: 699 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n", 700 vha->host_no, mb[1], mb[2])); 701 break; 702 703 case MBA_ISP84XX_ALERT: 704 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- " 705 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 706 707 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 708 switch (mb[1]) { 709 case A84_PANIC_RECOVERY: 710 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery " 711 "%04x %04x\n", mb[2], mb[3]); 712 break; 713 case A84_OP_LOGIN_COMPLETE: 714 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 715 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 716 "firmware version %x\n", ha->cs84xx->op_fw_version)); 717 break; 718 case A84_DIAG_LOGIN_COMPLETE: 719 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 720 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:" 721 "diagnostic 
firmware version %x\n", 722 ha->cs84xx->diag_fw_version)); 723 break; 724 case A84_GOLD_LOGIN_COMPLETE: 725 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 726 ha->cs84xx->fw_update = 1; 727 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold " 728 "firmware version %x\n", 729 ha->cs84xx->gold_fw_version)); 730 break; 731 default: 732 qla_printk(KERN_ERR, ha, 733 "Alert 84xx: Invalid Alert %04x %04x %04x\n", 734 mb[1], mb[2], mb[3]); 735 } 736 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 737 break; 738 case MBA_DCBX_START: 739 DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n", 740 vha->host_no, mb[1], mb[2], mb[3])); 741 break; 742 case MBA_DCBX_PARAM_UPDATE: 743 DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- " 744 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 745 break; 746 case MBA_FCF_CONF_ERR: 747 DEBUG2(printk("scsi(%ld): FCF Configuration Error -- " 748 "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3])); 749 break; 750 case MBA_IDC_COMPLETE: 751 case MBA_IDC_NOTIFY: 752 case MBA_IDC_TIME_EXT: 753 qla81xx_idc_event(vha, mb[0], mb[1]); 754 break; 755 } 756 757 if (!vha->vp_idx && ha->num_vhosts) 758 qla2x00_alert_all_vps(rsp, mb); 759 } 760 761 static void 762 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data) 763 { 764 fc_port_t *fcport = data; 765 struct scsi_qla_host *vha = fcport->vha; 766 struct qla_hw_data *ha = vha->hw; 767 struct req_que *req = NULL; 768 769 req = ha->req_q_map[vha->req_ques[0]]; 770 if (!req) 771 return; 772 if (req->max_q_depth <= sdev->queue_depth) 773 return; 774 775 if (sdev->ordered_tags) 776 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, 777 sdev->queue_depth + 1); 778 else 779 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 780 sdev->queue_depth + 1); 781 782 fcport->last_ramp_up = jiffies; 783 784 DEBUG2(qla_printk(KERN_INFO, ha, 785 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n", 786 fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun, 787 
	    sdev->queue_depth));
}

/*
 * qla2x00_adjust_sdev_qdepth_down() - per-device callback to lower queue
 * depth in response to a QUEUE FULL condition.
 * @sdev: SCSI device to adjust
 * @data: fc_port_t of the port that reported QUEUE FULL
 */
static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
	fc_port_t	*fcport = data;

	/* scsi_track_queue_full() returns 0 when no change was made. */
	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
	    fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

/*
 * qla2x00_ramp_up_queue_depth() - opportunistically raise queue depth.
 *
 * Ramps up only after ql2xqfullrampup seconds have elapsed since both
 * the last ramp-up and the last QUEUE FULL seen on this port.
 */
static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
								srb_t *sp)
{
	fc_port_t	*fcport;
	struct scsi_device *sdev;

	sdev = sp->cmd->device;
	if (sdev->queue_depth >= req->max_q_depth)
		return;

	fcport = sp->fcport;
	if (time_before(jiffies,
	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
		return;
	if (time_before(jiffies,
	    fcport->last_queue_full + ql2xqfullrampup * HZ))
		return;

	/* Bump the depth by one on every device of this target. */
	starget_for_each_device(sdev->sdev_target, fcport,
	    qla2x00_adjust_sdev_qdepth_up);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue the command was issued on
 * @index: SRB handle/index of the completed command
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
		    vha->host_no, index));
		qla_printk(KERN_WARNING, ha,
		    "Invalid SCSI completion handle %d.\n", index);

		/* A bogus handle from firmware warrants a full ISP abort. */
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot.
 */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;

		qla2x00_ramp_up_queue_depth(vha, req, sp);
		qla2x00_sp_compl(ha, sp);
	} else {
		/* Firmware completed a handle we have no command for. */
		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
		    vha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Invalid ISP SCSI completion handle\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue to drain
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = qla2x00_get_rsp_host(rsp);

	if (!vha->flags.online)
		return;

	/* Consume entries until we reach one already marked processed. */
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		/* Advance the ring pointer, wrapping at the end of the ring. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", vha->host_no));

			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			/* Fast-post entry carrying multiple 16-bit handles. */
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t
				    *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    vha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

/*
 * qla2x00_handle_sense() - Copy FCP sense data into the midlayer buffer.
 * @sp: SRB of the command that returned CHECK CONDITION
 * @sense_data: sense bytes from the status IOCB
 * @sense_len: total sense length reported by firmware
 *
 * Only 32 bytes of sense fit in the first status IOCB; any remainder is
 * delivered in status-continuation entries, so the SRB is stashed in
 * vha->status_srb for qla2x00_status_cont_entry() to finish the copy.
 */
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
{
	struct scsi_cmnd *cp = sp->cmd;

	/* Never copy more than the midlayer sense buffer can hold. */
	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	if (sp->request_sense_length > 32)
		sense_len = 32;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
	if (sp->request_sense_length != 0)
		sp->fcport->vha->status_srb = sp;

	DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
	    "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
	    cp->device->channel, cp->device->id, cp->device->lun, cp,
	    cp->serial_number));
	if (sense_len)
		DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
977 * @ha: SCSI driver HA context 978 * @pkt: Entry pointer 979 */ 980 static void 981 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 982 { 983 srb_t *sp; 984 fc_port_t *fcport; 985 struct scsi_cmnd *cp; 986 sts_entry_t *sts; 987 struct sts_entry_24xx *sts24; 988 uint16_t comp_status; 989 uint16_t scsi_status; 990 uint8_t lscsi_status; 991 int32_t resid; 992 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len; 993 uint8_t *rsp_info, *sense_data; 994 struct qla_hw_data *ha = vha->hw; 995 struct req_que *req = rsp->req; 996 997 sts = (sts_entry_t *) pkt; 998 sts24 = (struct sts_entry_24xx *) pkt; 999 if (IS_FWI2_CAPABLE(ha)) { 1000 comp_status = le16_to_cpu(sts24->comp_status); 1001 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1002 } else { 1003 comp_status = le16_to_cpu(sts->comp_status); 1004 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1005 } 1006 1007 /* Fast path completion. */ 1008 if (comp_status == CS_COMPLETE && scsi_status == 0) { 1009 qla2x00_process_completed_request(vha, req, sts->handle); 1010 1011 return; 1012 } 1013 1014 /* Validate handle. 
*/ 1015 if (sts->handle < MAX_OUTSTANDING_COMMANDS) { 1016 sp = req->outstanding_cmds[sts->handle]; 1017 req->outstanding_cmds[sts->handle] = NULL; 1018 } else 1019 sp = NULL; 1020 1021 if (sp == NULL) { 1022 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n", 1023 vha->host_no)); 1024 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n"); 1025 1026 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1027 qla2xxx_wake_dpc(vha); 1028 return; 1029 } 1030 cp = sp->cmd; 1031 if (cp == NULL) { 1032 DEBUG2(printk("scsi(%ld): Command already returned back to OS " 1033 "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp)); 1034 qla_printk(KERN_WARNING, ha, 1035 "Command is NULL: already returned to OS (sp=%p)\n", sp); 1036 1037 return; 1038 } 1039 1040 lscsi_status = scsi_status & STATUS_MASK; 1041 1042 fcport = sp->fcport; 1043 1044 sense_len = rsp_info_len = resid_len = fw_resid_len = 0; 1045 if (IS_FWI2_CAPABLE(ha)) { 1046 sense_len = le32_to_cpu(sts24->sense_len); 1047 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 1048 resid_len = le32_to_cpu(sts24->rsp_residual_count); 1049 fw_resid_len = le32_to_cpu(sts24->residual_len); 1050 rsp_info = sts24->data; 1051 sense_data = sts24->data; 1052 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 1053 } else { 1054 sense_len = le16_to_cpu(sts->req_sense_length); 1055 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 1056 resid_len = le32_to_cpu(sts->residual_length); 1057 rsp_info = sts->rsp_info; 1058 sense_data = sts->req_sense_data; 1059 } 1060 1061 /* Check for any FCP transport errors. */ 1062 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 1063 /* Sense data lies beyond any FCP RESPONSE data. */ 1064 if (IS_FWI2_CAPABLE(ha)) 1065 sense_data += rsp_info_len; 1066 if (rsp_info_len > 3 && rsp_info[3]) { 1067 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol " 1068 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..." 
1069 "retrying command\n", vha->host_no, 1070 cp->device->channel, cp->device->id, 1071 cp->device->lun, rsp_info_len, rsp_info[0], 1072 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4], 1073 rsp_info[5], rsp_info[6], rsp_info[7])); 1074 1075 cp->result = DID_BUS_BUSY << 16; 1076 qla2x00_sp_compl(ha, sp); 1077 return; 1078 } 1079 } 1080 1081 /* Check for overrun. */ 1082 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 1083 scsi_status & SS_RESIDUAL_OVER) 1084 comp_status = CS_DATA_OVERRUN; 1085 1086 /* 1087 * Based on Host and scsi status generate status code for Linux 1088 */ 1089 switch (comp_status) { 1090 case CS_COMPLETE: 1091 case CS_QUEUE_FULL: 1092 if (scsi_status == 0) { 1093 cp->result = DID_OK << 16; 1094 break; 1095 } 1096 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 1097 resid = resid_len; 1098 scsi_set_resid(cp, resid); 1099 1100 if (!lscsi_status && 1101 ((unsigned)(scsi_bufflen(cp) - resid) < 1102 cp->underflow)) { 1103 qla_printk(KERN_INFO, ha, 1104 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1105 "detected (%x of %x bytes)...returning " 1106 "error status.\n", vha->host_no, 1107 cp->device->channel, cp->device->id, 1108 cp->device->lun, resid, 1109 scsi_bufflen(cp)); 1110 1111 cp->result = DID_ERROR << 16; 1112 break; 1113 } 1114 } 1115 cp->result = DID_OK << 16 | lscsi_status; 1116 1117 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1118 DEBUG2(printk(KERN_INFO 1119 "scsi(%ld): QUEUE FULL status detected " 1120 "0x%x-0x%x.\n", vha->host_no, comp_status, 1121 scsi_status)); 1122 1123 /* Adjust queue depth for all luns on the port. 
*/ 1124 fcport->last_queue_full = jiffies; 1125 starget_for_each_device(cp->device->sdev_target, 1126 fcport, qla2x00_adjust_sdev_qdepth_down); 1127 break; 1128 } 1129 if (lscsi_status != SS_CHECK_CONDITION) 1130 break; 1131 1132 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1133 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1134 break; 1135 1136 qla2x00_handle_sense(sp, sense_data, sense_len); 1137 break; 1138 1139 case CS_DATA_UNDERRUN: 1140 resid = resid_len; 1141 /* Use F/W calculated residual length. */ 1142 if (IS_FWI2_CAPABLE(ha)) { 1143 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1144 lscsi_status = 0; 1145 } else if (resid != fw_resid_len) { 1146 scsi_status &= ~SS_RESIDUAL_UNDER; 1147 lscsi_status = 0; 1148 } 1149 resid = fw_resid_len; 1150 } 1151 1152 if (scsi_status & SS_RESIDUAL_UNDER) { 1153 scsi_set_resid(cp, resid); 1154 } else { 1155 DEBUG2(printk(KERN_INFO 1156 "scsi(%ld:%d:%d) UNDERRUN status detected " 1157 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x " 1158 "os_underflow=0x%x\n", vha->host_no, 1159 cp->device->id, cp->device->lun, comp_status, 1160 scsi_status, resid_len, resid, cp->cmnd[0], 1161 cp->underflow)); 1162 1163 } 1164 1165 /* 1166 * Check to see if SCSI Status is non zero. If so report SCSI 1167 * Status. 1168 */ 1169 if (lscsi_status != 0) { 1170 cp->result = DID_OK << 16 | lscsi_status; 1171 1172 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 1173 DEBUG2(printk(KERN_INFO 1174 "scsi(%ld): QUEUE FULL status detected " 1175 "0x%x-0x%x.\n", vha->host_no, comp_status, 1176 scsi_status)); 1177 1178 /* 1179 * Adjust queue depth for all luns on the 1180 * port. 
1181 */ 1182 fcport->last_queue_full = jiffies; 1183 starget_for_each_device( 1184 cp->device->sdev_target, fcport, 1185 qla2x00_adjust_sdev_qdepth_down); 1186 break; 1187 } 1188 if (lscsi_status != SS_CHECK_CONDITION) 1189 break; 1190 1191 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 1192 if (!(scsi_status & SS_SENSE_LEN_VALID)) 1193 break; 1194 1195 qla2x00_handle_sense(sp, sense_data, sense_len); 1196 } else { 1197 /* 1198 * If RISC reports underrun and target does not report 1199 * it then we must have a lost frame, so tell upper 1200 * layer to retry it by reporting a bus busy. 1201 */ 1202 if (!(scsi_status & SS_RESIDUAL_UNDER)) { 1203 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped " 1204 "frame(s) detected (%x of %x bytes)..." 1205 "retrying command.\n", 1206 vha->host_no, cp->device->channel, 1207 cp->device->id, cp->device->lun, resid, 1208 scsi_bufflen(cp))); 1209 1210 cp->result = DID_BUS_BUSY << 16; 1211 break; 1212 } 1213 1214 /* Handle mid-layer underflow */ 1215 if ((unsigned)(scsi_bufflen(cp) - resid) < 1216 cp->underflow) { 1217 qla_printk(KERN_INFO, ha, 1218 "scsi(%ld:%d:%d:%d): Mid-layer underflow " 1219 "detected (%x of %x bytes)...returning " 1220 "error status.\n", vha->host_no, 1221 cp->device->channel, cp->device->id, 1222 cp->device->lun, resid, 1223 scsi_bufflen(cp)); 1224 1225 cp->result = DID_ERROR << 16; 1226 break; 1227 } 1228 1229 /* Everybody online, looking good... 
*/ 1230 cp->result = DID_OK << 16; 1231 } 1232 break; 1233 1234 case CS_DATA_OVERRUN: 1235 DEBUG2(printk(KERN_INFO 1236 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n", 1237 vha->host_no, cp->device->id, cp->device->lun, comp_status, 1238 scsi_status)); 1239 DEBUG2(printk(KERN_INFO 1240 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", 1241 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3], 1242 cp->cmnd[4], cp->cmnd[5])); 1243 DEBUG2(printk(KERN_INFO 1244 "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR " 1245 "status!\n", 1246 cp->serial_number, scsi_bufflen(cp), resid_len)); 1247 1248 cp->result = DID_ERROR << 16; 1249 break; 1250 1251 case CS_PORT_LOGGED_OUT: 1252 case CS_PORT_CONFIG_CHG: 1253 case CS_PORT_BUSY: 1254 case CS_INCOMPLETE: 1255 case CS_PORT_UNAVAILABLE: 1256 /* 1257 * If the port is in Target Down state, return all IOs for this 1258 * Target with DID_NO_CONNECT ELSE Queue the IOs in the 1259 * retry_queue. 1260 */ 1261 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down " 1262 "pid=%ld, compl status=0x%x, port state=0x%x\n", 1263 vha->host_no, cp->device->id, cp->device->lun, 1264 cp->serial_number, comp_status, 1265 atomic_read(&fcport->state))); 1266 1267 /* 1268 * We are going to have the fc class block the rport 1269 * while we try to recover so instruct the mid layer 1270 * to requeue until the class decides how to handle this. 1271 */ 1272 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1273 if (atomic_read(&fcport->state) == FCS_ONLINE) 1274 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1275 break; 1276 1277 case CS_RESET: 1278 DEBUG2(printk(KERN_INFO 1279 "scsi(%ld): RESET status detected 0x%x-0x%x.\n", 1280 vha->host_no, comp_status, scsi_status)); 1281 1282 cp->result = DID_RESET << 16; 1283 break; 1284 1285 case CS_ABORTED: 1286 /* 1287 * hv2.19.12 - DID_ABORT does not retry the request if we 1288 * aborted this request then abort otherwise it must be a 1289 * reset. 
1290 */ 1291 DEBUG2(printk(KERN_INFO 1292 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n", 1293 vha->host_no, comp_status, scsi_status)); 1294 1295 cp->result = DID_RESET << 16; 1296 break; 1297 1298 case CS_TIMEOUT: 1299 /* 1300 * We are going to have the fc class block the rport 1301 * while we try to recover so instruct the mid layer 1302 * to requeue until the class decides how to handle this. 1303 */ 1304 cp->result = DID_TRANSPORT_DISRUPTED << 16; 1305 1306 if (IS_FWI2_CAPABLE(ha)) { 1307 DEBUG2(printk(KERN_INFO 1308 "scsi(%ld:%d:%d:%d): TIMEOUT status detected " 1309 "0x%x-0x%x\n", vha->host_no, cp->device->channel, 1310 cp->device->id, cp->device->lun, comp_status, 1311 scsi_status)); 1312 break; 1313 } 1314 DEBUG2(printk(KERN_INFO 1315 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x " 1316 "sflags=%x.\n", vha->host_no, cp->device->channel, 1317 cp->device->id, cp->device->lun, comp_status, scsi_status, 1318 le16_to_cpu(sts->status_flags))); 1319 1320 /* Check to see if logout occurred. */ 1321 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT)) 1322 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1323 break; 1324 1325 default: 1326 DEBUG3(printk("scsi(%ld): Error detected (unknown status) " 1327 "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status)); 1328 qla_printk(KERN_INFO, ha, 1329 "Unknown status detected 0x%x-0x%x.\n", 1330 comp_status, scsi_status); 1331 1332 cp->result = DID_ERROR << 16; 1333 break; 1334 } 1335 1336 /* Place command on done queue. */ 1337 if (vha->status_srb == NULL) 1338 qla2x00_sp_compl(ha, sp); 1339 } 1340 1341 /** 1342 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 1343 * @ha: SCSI driver HA context 1344 * @pkt: Entry pointer 1345 * 1346 * Extended sense data. 
1347 */ 1348 static void 1349 qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) 1350 { 1351 uint8_t sense_sz = 0; 1352 struct qla_hw_data *ha = vha->hw; 1353 srb_t *sp = vha->status_srb; 1354 struct scsi_cmnd *cp; 1355 1356 if (sp != NULL && sp->request_sense_length != 0) { 1357 cp = sp->cmd; 1358 if (cp == NULL) { 1359 DEBUG2(printk("%s(): Cmd already returned back to OS " 1360 "sp=%p.\n", __func__, sp)); 1361 qla_printk(KERN_INFO, ha, 1362 "cmd is NULL: already returned to OS (sp=%p)\n", 1363 sp); 1364 1365 vha->status_srb = NULL; 1366 return; 1367 } 1368 1369 if (sp->request_sense_length > sizeof(pkt->data)) { 1370 sense_sz = sizeof(pkt->data); 1371 } else { 1372 sense_sz = sp->request_sense_length; 1373 } 1374 1375 /* Move sense data. */ 1376 if (IS_FWI2_CAPABLE(ha)) 1377 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 1378 memcpy(sp->request_sense_ptr, pkt->data, sense_sz); 1379 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz)); 1380 1381 sp->request_sense_ptr += sense_sz; 1382 sp->request_sense_length -= sense_sz; 1383 1384 /* Place command on done queue. */ 1385 if (sp->request_sense_length == 0) { 1386 vha->status_srb = NULL; 1387 qla2x00_sp_compl(ha, sp); 1388 } 1389 } 1390 } 1391 1392 /** 1393 * qla2x00_error_entry() - Process an error entry. 
* @vha: SCSI driver HA context
* @rsp: response queue the error IOCB arrived on
* @pkt: Entry pointer
*/
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = rsp->req;
#if defined(QL_DEBUG_LEVEL_2)
	/* Decode the entry-status flags for debug builds only. */
	if (pkt->entry_status & RF_INV_E_ORDER)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
	else if (pkt->entry_status & RF_INV_E_COUNT)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
	else if (pkt->entry_status & RF_INV_E_PARAM)
		qla_printk(KERN_ERR, ha,
		    "%s: Invalid Entry Parameter\n", __func__);
	else if (pkt->entry_status & RF_INV_E_TYPE)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
	else if (pkt->entry_status & RF_BUSY)
		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
	else
		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = req->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[pkt->handle] = NULL;

		/*
		 * Bad payload or header.
		 * NOTE(review): both the first and the final branch resolve to
		 * DID_ERROR; only RF_BUSY differs (DID_BUS_BUSY).  Kept as-is
		 * to preserve the explicit classification of entry faults.
		 */
		if (pkt->entry_status &
		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
			sp->cmd->result = DID_ERROR << 16;
		} else if (pkt->entry_status & RF_BUSY) {
			sp->cmd->result = DID_BUS_BUSY << 16;
		} else {
			sp->cmd->result = DID_ERROR << 16;
		}
		qla2x00_sp_compl(ha, sp);

	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
		/*
		 * A command-type IOCB with an unknown handle: firmware and
		 * driver state disagree -- schedule a full ISP abort.
		 */
		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
		    vha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Error entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register value (completion status from host_status)
 *
 * Copies the returned mailbox registers into ha->mailbox_out[] so the
 * sleeping mailbox-command issuer can pick them up.
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	/* mb0 came in through host_status; read mailbox1..N from hardware. */
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, vha->host_no, ha->mcp->mb[0]));
	} else {
		/* Completion with no command context outstanding. */
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, vha->host_no));
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
* @rsp: response queue to drain
*
* Walks the response ring until an entry still carries the
* RESPONSE_PROCESSED signature, dispatching each IOCB by type.
* Caller must hold ha->hardware_lock.
*/
void
qla24xx_process_response_queue(struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct scsi_qla_host *vha;

	vha = qla2x00_get_rsp_host(rsp);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		/* Advance ring index, wrapping at the end of the ring. */
		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", vha->host_no));

			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
			/* Mark consumed; wmb() orders the signature store. */
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    vha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		/* Mark this entry consumed before telling firmware. */
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index -- hand the processed entries back to firmware. */
	WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

/*
 * Probe RISC diagnostic state on ISP25xx/ISP81xx after a RISC pause.
 * Pokes the register window interface (0x7C00 window selector --
 * presumably a diagnostic bank; confirm against firmware docs) and logs
 * an additional code when BIT_3 of iobase_c8 is set.
 */
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	/* Read-back -- flush the posted window-select write. */
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	/* Poll (up to 10000 x 10us) for BIT_0 acknowledging the select. */
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	/* First window timed out -- retry with window 0x0003. */
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");

done:
	/* Restore window 0; read-back flushes the posted write. */
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
*/
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock(&ha->hardware_lock);
	vha = qla2x00_get_rsp_host(rsp);
	/* Bounded loop (50 passes) so a stuck interrupt cannot spin forever. */
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);

			qla2xxx_check_risc_status(vha);

			/* Capture state, then schedule an ISP abort via DPC. */
			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		/* Low byte of host_status encodes the interrupt source. */
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			/* Mailbox completion; mb0 is in the upper 16 bits. */
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			/* Asynchronous event. */
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(rsp);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    vha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		/* Relaxed read-back to flush the posted interrupt-clear. */
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock(&ha->hardware_lock);

	/* Wake any thread sleeping on a polled mailbox command. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}

/*
 * MSI-X vector handler: response-queue updates (ISP24xx flavour).
 * Clears the RISC interrupt after draining the queue.
 * NOTE(review): spin_lock_irq()/spin_unlock_irq() in a hard-irq handler
 * unconditionally re-enables local interrupts on unlock -- verify this is
 * intended for this kernel version (irqsave variants are the safer form).
 */
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irq(&ha->hardware_lock);

	qla24xx_process_response_queue(rsp);
	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);

	spin_unlock_irq(&ha->hardware_lock);

	return IRQ_HANDLED;
}

/*
 * MSI-X vector handler: response-queue updates (ISP25xx multi-queue
 * flavour).  Unlike the 24xx variant it does not touch HCCR.
 */
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irq(&ha->hardware_lock);

	qla24xx_process_response_queue(rsp);

	spin_unlock_irq(&ha->hardware_lock);

	return IRQ_HANDLED;
}

/*
 * MSI-X default vector handler: mailbox completions, async events and the
 * base response queue.  Same dispatch as qla24xx_intr_handler(), but a
 * single pass (do { } while (0)) -- MSI-X delivers one edge per event.
 */
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		DEBUG(printk(
		    "%s(): NULL response queue pointer\n", __func__));
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irq(&ha->hardware_lock);
	vha = qla2x00_get_rsp_host(rsp);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(rsp);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    vha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock_irq(&ha->hardware_lock);

	/* Wake any thread sleeping on a polled mailbox command. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}

/* Interrupt handling helpers.
*/

/* Static description of one MSI-X vector: slot, name and handler. */
struct qla_init_msix_entry {
	uint16_t entry;
	uint16_t index;
	const char *name;
	irq_handler_t handler;
};

/* Vector 0: mailbox completions, async events, base response queue. */
static struct qla_init_msix_entry base_queue = {
	.entry = 0,
	.index = 0,
	.name = "qla2xxx (default)",
	.handler = qla24xx_msix_default,
};

/* Vector 1 (single-queue mode): response-queue updates. */
static struct qla_init_msix_entry base_rsp_queue = {
	.entry = 1,
	.index = 1,
	.name = "qla2xxx (rsp_q)",
	.handler = qla24xx_msix_rsp_q,
};

/* Vector 1 (multi-queue mode): response-queue updates. */
static struct qla_init_msix_entry multi_rsp_queue = {
	.entry = 1,
	.index = 1,
	.name = "qla2xxx (multi_q)",
	.handler = qla25xx_msix_rsp_q,
};

/*
 * Release every vector that was successfully requested, then tear down
 * MSI-X on the PCI function and free the per-HA vector table.
 */
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
}

/*
 * Enable MSI-X and hook up vectors 0 and 1.  On partial availability
 * (pci_enable_msix() returning a positive vector count) retries with the
 * reduced count.  Returns 0 on success or a negative errno.
 */
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	struct qla_init_msix_entry *msix_queue;

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
			GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		/* Negative errno, or fewer vectors than we minimally need. */
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		qla_printk(KERN_WARNING, ha,
			"MSI-X: Failed to enable support -- %d/%d\n"
			" Retry with %d vectors\n", ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
			/* NOTE: label inside the if -- jumped to from above. */
msix_failed:
			qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
				" support, giving up -- %d/%d\n",
				ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
				ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X for AENs for queue 0 */
	qentry = &ha->msix_entries[0];
	ret = request_irq(qentry->vector, base_queue.handler, 0,
					base_queue.name, rsp);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
			"MSI-X: Unable to register handler -- %x/%d.\n",
			qentry->vector, ret);
		qla24xx_disable_msix(ha);
		goto msix_out;
	}
	qentry->have_irq = 1;
	qentry->rsp = rsp;

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (ha->max_queues > 1 && ha->mqiobase) {
		ha->mqenable = 1;
		msix_queue = &multi_rsp_queue;
		qla_printk(KERN_INFO, ha,
				"MQ enabled, Number of Queue Resources: %d \n",
				ha->max_queues);
	} else {
		ha->mqenable = 0;
		msix_queue = &base_rsp_queue;
	}

	qentry = &ha->msix_entries[1];
	ret = request_irq(qentry->vector, msix_queue->handler, 0,
			msix_queue->name, rsp);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
			"MSI-X: Unable to register handler -- %x/%d.\n",
			qentry->vector, ret);
		qla24xx_disable_msix(ha);
		ha->mqenable = 0;
		goto msix_out;
	}
	qentry->have_irq = 1;
	qentry->rsp = rsp;

msix_out:
	/* The temporary msix_entry array is only needed during setup. */
	kfree(entries);
	return ret;
}

/*
 * Acquire interrupt resources, preferring MSI-X, then MSI, then INTa,
 * and finally clear any stale RISC interrupt state.
 * Returns 0 on success or the last request's error code.
 */
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_QLA8001(ha))
		goto skip_msix;

	/* ISP2432: MSI-X needs a minimum chip revision and FW mode 1. */
	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
		!QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		"MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
			ha->pdev->revision, ha->fw_attributes));

		goto skip_msix;
	}

	/* Known-broken HP subsystem IDs: avoid both MSI-X and MSI. */
	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
		ha->pdev->subsystem_device == 0x7041 ||
		ha->pdev->subsystem_device == 0x1705)) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device));

		goto skip_msi;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
		    ha->fw_attributes));
		goto clear_risc_ints;
	}
	qla_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
		ha->flags.msi_enabled = 1;
	}
skip_msi:

	/* Legacy/MSI path: one shared line-based handler. */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
		goto fail;
	}
	ha->flags.inta_enabled = 1;
clear_risc_ints:

	/*
	 * FIXME: Noted that 8014s were being dropped during NK testing.
	 * Timing deltas during MSI-X/INTa transitions?
	 */
	if (IS_QLA81XX(ha))
		goto fail;
	spin_lock_irq(&ha->hardware_lock);
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
	}
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

/*
 * Release whichever interrupt resources qla2x00_request_irqs() acquired
 * (MSI-X vectors, or the shared line plus optional MSI).
 */
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.inta_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	}
}

/*
 * Resolve which vha a response queue's current IOCB belongs to.  For a
 * non-base queue (rsp->id != 0) peek at the IOCB handle and recover the
 * vha from the matching outstanding command; otherwise (or when the
 * lookup fails) fall back to the base/physical host.
 */
static struct scsi_qla_host *
qla2x00_get_rsp_host(struct rsp_que *rsp)
{
	srb_t *sp;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = NULL;
	struct sts_entry_24xx *pkt;
	struct req_que *req;

	if (rsp->id) {
		pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
		req = rsp->req;
		if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
			sp = req->outstanding_cmds[pkt->handle];
			if (sp)
				vha = sp->fcport->vha;
		}
	}
	if (!vha)
		/* handle it in base queue */
		vha = pci_get_drvdata(ha->pdev);

	return vha;
}

/*
 * Request the MSI-X vector for an additional (multi-queue) response
 * queue.  Returns 0 on success or the request_irq() error code.
 */
int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &multi_rsp_queue;
	struct qla_msix_entry *msix = rsp->msix;
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
			"MSI-X: Unable to register handler -- %x/%d.\n",
			msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;
	return ret;
}