/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
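
/*
 * Each of the polled interrupt handlers in this file follows the same
 * pattern: take ha->hardware_lock, service at most 50 interrupt
 * conditions per invocation (so a stuck interrupt line cannot wedge the
 * CPU), dispatch on the event type (mailbox completion, asynchronous
 * event, or response-queue update), and finally wake any caller sleeping
 * on ha->mbx_intr_comp once an MBX_INTERRUPT has been observed.
 */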

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		/* Mailboxes 4 and 5 need debouncing on these parts. */
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}
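
/*
 * Reduced Interrupt Operation (RIO) lets the firmware report several
 * fast-post SCSI completions with a single interrupt.  The MBA_CMPLT_n_16BIT
 * events handled below carry up to five 16-bit handles in mailboxes 1-3 and
 * 6-7, while the 32-bit variants pack one handle per mailbox pair, e.g.:
 *
 *	handles[0] = (mb[2] << 16) | mb[1];
 *
 * Every decoded handle is then completed exactly like an ordinary
 * MBA_SCSI_COMPLETION fast-post event.
 */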

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char	*link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
	char		*link_speed;
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;
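
	/*
	 * For loop-up events, mb[1] carries the firmware link-rate code:
	 * values below 6 index link_speeds[] directly (1/2/4/8/16 Gbps)
	 * and 0x13 denotes a 10 Gbps (FCoE/CNA) link; ISP21xx/ISP22xx
	 * parts are always 1 Gbps.
	 */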

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 6)
				link_speed = link_speeds[mb[1]];
			else if (mb[1] == 0x13)
				link_speed = link_speeds[6];
			ha->link_data_rate = mb[1];
		}

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n", link_speed);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ?
			RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *   OR 0xffff for global event
		 * mb[2] = New login state
		 *   7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *   Event is global, vp_idx is NOT all vps,
		 *   vp_idx does not match
		 *   Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
	struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
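
/*
 * IOCB completion handles are demultiplexed in two steps: the LSW is the
 * index into the originating request queue's outstanding_cmds[] array and
 * the MSW selects the request queue itself (see qla2x00_status_entry()).
 * qla2x00_get_sp_from_handle() validates the index, flags stale or
 * mismatched handles, and clears the slot so a handle cannot be completed
 * twice.
 */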

static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/*
	 * Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller.
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(
	    ((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(
	    ((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/*
	 * Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller.
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x "
			    "comp_status-status=0x%x error subcode 1=0x%x "
			    "error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1],
			    fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x "
			    "comp_status-status=0x%x error subcode 1=0x%x "
			    "error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, fcport->vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, fcport->vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	sp->done(vha, sp, 0);
}
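
/*
 * Response-queue entries are consumed in ring order: each handled entry is
 * overwritten with the RESPONSE_PROCESSED signature (followed by a write
 * barrier) so that, after a ring wrap, stale entries can be told apart
 * from new firmware entries.  Once the ring is drained, the out-pointer
 * register is updated to tell the firmware how far the host has read.
 */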

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA.  In case of errors, the ASC/ASCQ
 * fields in the sense buffer are set with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected an error.
 */
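
/*
 * Layout of the 8-byte DIF tuples compared below: after the
 * host_to_fcp_swap() performed on sts24->data in qla2x00_status_entry(),
 * the actual (ap) and expected (ep) tuples start at offsets 12 and 20 of
 * the status data with the guard word at byte offset 2:
 *
 *	bytes 0-1: application tag
 *	bytes 2-3: guard (CRC)
 *	bytes 4-7: reference tag (LBA)
 */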

static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS) {
		sp = req->outstanding_cmds[handle];
		req->outstanding_cmds[handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero.  If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		break;
	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
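
/*
 * Sense data larger than a single status IOCB can hold is delivered in
 * follow-on status-continuation IOCBs.  qla2x00_handle_sense() parks the
 * in-progress command in rsp->status_srb and records the remaining sense
 * length in the SRB; each continuation entry below copies its chunk and
 * completes the command only once the full sense buffer has arrived.
 */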

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue.\n");

	if (IS_QLA82XX(ha))
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
	else
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case MARKER_TYPE:
			/*
			 * Do nothing in this case; this check prevents
			 * markers from falling into the default case.
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index. */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	/* Re-arm rval so the second window probe can actually run. */
	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
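/*
 * Note: qla2xxx_check_risc_status() above appears to be a best-effort
 * probe of the 0x7C00 diagnostic register window on ISP25xx/81xx/83xx
 * parts, run just before a firmware dump; both polling loops are bounded
 * so a wedged RISC cannot stall the interrupt handler indefinitely.
 */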
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
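/*
 * Note the split between the two MSI-X response-queue handlers: the
 * qla24xx variant above processes the queue inline under hardware_lock,
 * while the qla25xx variant below only acknowledges the interrupt and
 * defers processing to the queue's work item, pinned to a CPU derived
 * from the queue id.
 */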
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* Clear the interrupt, if enabled, for this response queue. */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */
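/*
 * The ordering of msix_entries[] below is significant: entry 0 is the
 * default vector (mailbox completions and async events), entry 1 the
 * first response queue, and entry 2 the handler wired up for additional
 * (multiqueue) response queues by qla25xx_request_irq().
 */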
struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
	    GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support -- %d/%d. "
		    "Retrying with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue. */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		if (IS_QLA82XX(ha)) {
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		} else {
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, msix_entries[i].name, rsp);
		}
		if (ret) {
			ql_log(ql_log_fatal, vha, 0x00cb,
			    "MSI-X: unable to register handler -- %x/%d.\n",
			    qentry->vector, ret);
			qla24xx_disable_msix(ha);
			ha->mqenable = 0;
			goto msix_out;
		}
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

	/* Enable MSI-X vector for response queue update for queue 0. */
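	/*
	 * Multiqueue is only advertised (ha->mqenable) when the chip maps
	 * the MQ register window -- mqiobase, plus msixbase on ISP83xx --
	 * and more than one request or response queue was provisioned.
	 */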
	if (IS_QLA83XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase
		    && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}
	ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	}

clear_risc_ints:

	/*
	 * FIXME: Noted that 8014s were being dropped during NK testing.
	 * Timing deltas during MSI-X/INTa transitions?
	 */
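	/*
	 * ISP81xx/82xx/83xx parts skip the manual clearing below (the
	 * goto fail simply returns the request_irq status); on older
	 * parts any latched host/RISC interrupt state is flushed before
	 * interrupts are first serviced.
	 */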
	if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
		goto fail;
	spin_lock_irq(&ha->hardware_lock);
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
	}
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}

int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;
	return ret;
}
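/*
 * Note: qla25xx_request_irq() above is used when an additional response
 * queue is brought up at runtime; it binds the queue's pre-assigned
 * MSI-X vector to the "multiq" handler from msix_entries[].
 */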