1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <linux/cpu.h> 13 #include <linux/t10-pi.h> 14 #include <scsi/scsi_tcq.h> 15 #include <scsi/scsi_bsg_fc.h> 16 #include <scsi/scsi_eh.h> 17 #include <scsi/fc/fc_fs.h> 18 #include <linux/nvme-fc-driver.h> 19 20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 24 sts_entry_t *); 25 26 const char *const port_state_str[] = { 27 "Unknown", 28 "UNCONFIGURED", 29 "DEAD", 30 "LOST", 31 "ONLINE" 32 }; 33 34 static void qla24xx_purex_iocb(scsi_qla_host_t *vha, void *pkt, 35 void (*process_item)(struct scsi_qla_host *vha, void *pkt)) 36 { 37 struct purex_list *list = &vha->purex_list; 38 struct purex_item *item; 39 ulong flags; 40 41 item = kzalloc(sizeof(*item), GFP_KERNEL); 42 if (!item) { 43 ql_log(ql_log_warn, vha, 0x5092, 44 ">> Failed allocate purex list item.\n"); 45 return; 46 } 47 48 item->vha = vha; 49 item->process_item = process_item; 50 memcpy(&item->iocb, pkt, sizeof(item->iocb)); 51 52 spin_lock_irqsave(&list->lock, flags); 53 list_add_tail(&item->list, &list->head); 54 spin_unlock_irqrestore(&list->lock, flags); 55 56 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); 57 } 58 59 static void 60 qla24xx_process_abts(struct scsi_qla_host *vha, void *pkt) 61 { 62 struct abts_entry_24xx *abts = pkt; 63 struct qla_hw_data *ha = vha->hw; 64 struct els_entry_24xx *rsp_els; 65 struct abts_entry_24xx *abts_rsp; 66 dma_addr_t dma; 67 uint32_t fctl; 68 int rval; 69 70 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); 71 72 ql_log(ql_log_warn, vha, 0x0287, 73 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", 74 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, 75 abts->seq_id, abts->seq_cnt); 76 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 77 "-------- ABTS RCV -------\n"); 78 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 79 (uint8_t *)abts, sizeof(*abts)); 80 81 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, 82 GFP_KERNEL); 83 if (!rsp_els) { 84 ql_log(ql_log_warn, vha, 0x0287, 85 "Failed allocate dma buffer ABTS/ELS RSP.\n"); 86 return; 87 } 88 89 /* terminate exchange */ 90 rsp_els->entry_type = ELS_IOCB_TYPE; 91 rsp_els->entry_count = 1; 92 rsp_els->nport_handle = cpu_to_le16(~0); 93 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; 94 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); 95 ql_dbg(ql_dbg_init, vha, 0x0283, 96 "Sending ELS Response to terminate exchange %#x...\n", 97 abts->rx_xch_addr_to_abort); 98 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 99 "-------- ELS RSP -------\n"); 100 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 101 (uint8_t *)rsp_els, sizeof(*rsp_els)); 102 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); 103 if (rval) { 104 ql_log(ql_log_warn, vha, 0x0288, 105 "%s: iocb failed to execute -> %x\n", __func__, rval); 106 } else if (rsp_els->comp_status) { 107 ql_log(ql_log_warn, vha, 0x0289, 108 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 109 __func__, rsp_els->comp_status, 110 rsp_els->error_subcode_1, rsp_els->error_subcode_2); 111 } else { 112 ql_dbg(ql_dbg_init, vha, 0x028a, 113 "%s: abort exchange done.\n", __func__); 114 } 115 116 /* send ABTS response */ 117 abts_rsp = (void *)rsp_els; 118 memset(abts_rsp, 0, sizeof(*abts_rsp)); 119 abts_rsp->entry_type = ABTS_RSP_TYPE; 120 abts_rsp->entry_count = 1; 121 abts_rsp->nport_handle = abts->nport_handle; 122 abts_rsp->vp_idx = abts->vp_idx; 123 abts_rsp->sof_type = abts->sof_type & 0xf0; 124 abts_rsp->rx_xch_addr = abts->rx_xch_addr; 125 abts_rsp->d_id[0] = abts->s_id[0]; 126 abts_rsp->d_id[1] = abts->s_id[1]; 127 abts_rsp->d_id[2] = abts->s_id[2]; 128 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; 129 abts_rsp->s_id[0] = abts->d_id[0]; 130 abts_rsp->s_id[1] = abts->d_id[1]; 131 abts_rsp->s_id[2] = abts->d_id[2]; 132 abts_rsp->cs_ctl = abts->cs_ctl; 133 /* include flipping bit23 in fctl */ 134 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | 135 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; 136 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; 137 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; 138 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; 139 abts_rsp->type = FC_TYPE_BLD; 140 abts_rsp->rx_id = abts->rx_id; 141 abts_rsp->ox_id = abts->ox_id; 142 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; 143 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; 144 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); 145 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; 146 ql_dbg(ql_dbg_init, vha, 0x028b, 147 "Sending BA ACC response to ABTS %#x...\n", 148 abts->rx_xch_addr_to_abort); 149 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 150 "-------- ELS RSP -------\n"); 151 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 152 (uint8_t *)abts_rsp, sizeof(*abts_rsp)); 153 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); 154 if (rval) { 155 ql_log(ql_log_warn, vha, 0x028c, 156 "%s: iocb failed to execute -> %x\n", __func__, rval); 157 } else if (abts_rsp->comp_status) { 158 ql_log(ql_log_warn, vha, 0x028d, 159 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 160 __func__, abts_rsp->comp_status, 161 abts_rsp->payload.error.subcode1, 162 abts_rsp->payload.error.subcode2); 163 } else { 164 ql_dbg(ql_dbg_init, vha, 0x028ea, 165 "%s: done.\n", __func__); 166 } 167 168 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); 169 } 170 171 /** 172 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 173 * @irq: interrupt number 174 * @dev_id: SCSI driver HA context 175 * 176 * Called by system whenever the host adapter generates an interrupt. 177 * 178 * Returns handled flag. 179 */ 180 irqreturn_t 181 qla2100_intr_handler(int irq, void *dev_id) 182 { 183 scsi_qla_host_t *vha; 184 struct qla_hw_data *ha; 185 struct device_reg_2xxx __iomem *reg; 186 int status; 187 unsigned long iter; 188 uint16_t hccr; 189 uint16_t mb[8]; 190 struct rsp_que *rsp; 191 unsigned long flags; 192 193 rsp = (struct rsp_que *) dev_id; 194 if (!rsp) { 195 ql_log(ql_log_info, NULL, 0x505d, 196 "%s: NULL response queue pointer.\n", __func__); 197 return (IRQ_NONE); 198 } 199 200 ha = rsp->hw; 201 reg = &ha->iobase->isp; 202 status = 0; 203 204 spin_lock_irqsave(&ha->hardware_lock, flags); 205 vha = pci_get_drvdata(ha->pdev); 206 for (iter = 50; iter--; ) { 207 hccr = rd_reg_word(®->hccr); 208 if (qla2x00_check_reg16_for_disconnect(vha, hccr)) 209 break; 210 if (hccr & HCCR_RISC_PAUSE) { 211 if (pci_channel_offline(ha->pdev)) 212 break; 213 214 /* 215 * Issue a "HARD" reset in order for the RISC interrupt 216 * bit to be cleared. Schedule a big hammer to get 217 * out of the RISC PAUSED state. 218 */ 219 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 220 rd_reg_word(®->hccr); 221 222 ha->isp_ops->fw_dump(vha); 223 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 224 break; 225 } else if ((rd_reg_word(®->istatus) & ISR_RISC_INT) == 0) 226 break; 227 228 if (rd_reg_word(®->semaphore) & BIT_0) { 229 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 230 rd_reg_word(®->hccr); 231 232 /* Get mailbox data. */ 233 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 234 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 235 qla2x00_mbx_completion(vha, mb[0]); 236 status |= MBX_INTERRUPT; 237 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 238 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 239 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 240 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 241 qla2x00_async_event(vha, rsp, mb); 242 } else { 243 /*EMPTY*/ 244 ql_dbg(ql_dbg_async, vha, 0x5025, 245 "Unrecognized interrupt type (%d).\n", 246 mb[0]); 247 } 248 /* Release mailbox registers. */ 249 wrt_reg_word(®->semaphore, 0); 250 rd_reg_word(®->semaphore); 251 } else { 252 qla2x00_process_response_queue(rsp); 253 254 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 255 rd_reg_word(®->hccr); 256 } 257 } 258 qla2x00_handle_mbx_completion(ha, status); 259 spin_unlock_irqrestore(&ha->hardware_lock, flags); 260 261 return (IRQ_HANDLED); 262 } 263 264 bool 265 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 266 { 267 /* Check for PCI disconnection */ 268 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { 269 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 270 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 271 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 272 /* 273 * Schedule this (only once) on the default system 274 * workqueue so that all the adapter workqueues and the 275 * DPC thread can be shutdown cleanly. 276 */ 277 schedule_work(&vha->hw->board_disable); 278 } 279 return true; 280 } else 281 return false; 282 } 283 284 bool 285 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) 286 { 287 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); 288 } 289 290 /** 291 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 292 * @irq: interrupt number 293 * @dev_id: SCSI driver HA context 294 * 295 * Called by system whenever the host adapter generates an interrupt. 296 * 297 * Returns handled flag. 298 */ 299 irqreturn_t 300 qla2300_intr_handler(int irq, void *dev_id) 301 { 302 scsi_qla_host_t *vha; 303 struct device_reg_2xxx __iomem *reg; 304 int status; 305 unsigned long iter; 306 uint32_t stat; 307 uint16_t hccr; 308 uint16_t mb[8]; 309 struct rsp_que *rsp; 310 struct qla_hw_data *ha; 311 unsigned long flags; 312 313 rsp = (struct rsp_que *) dev_id; 314 if (!rsp) { 315 ql_log(ql_log_info, NULL, 0x5058, 316 "%s: NULL response queue pointer.\n", __func__); 317 return (IRQ_NONE); 318 } 319 320 ha = rsp->hw; 321 reg = &ha->iobase->isp; 322 status = 0; 323 324 spin_lock_irqsave(&ha->hardware_lock, flags); 325 vha = pci_get_drvdata(ha->pdev); 326 for (iter = 50; iter--; ) { 327 stat = rd_reg_dword(®->u.isp2300.host_status); 328 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 329 break; 330 if (stat & HSR_RISC_PAUSED) { 331 if (unlikely(pci_channel_offline(ha->pdev))) 332 break; 333 334 hccr = rd_reg_word(®->hccr); 335 336 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 337 ql_log(ql_log_warn, vha, 0x5026, 338 "Parity error -- HCCR=%x, Dumping " 339 "firmware.\n", hccr); 340 else 341 ql_log(ql_log_warn, vha, 0x5027, 342 "RISC paused -- HCCR=%x, Dumping " 343 "firmware.\n", hccr); 344 345 /* 346 * Issue a "HARD" reset in order for the RISC 347 * interrupt bit to be cleared. Schedule a big 348 * hammer to get out of the RISC PAUSED state. 349 */ 350 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 351 rd_reg_word(®->hccr); 352 353 ha->isp_ops->fw_dump(vha); 354 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 355 break; 356 } else if ((stat & HSR_RISC_INT) == 0) 357 break; 358 359 switch (stat & 0xff) { 360 case 0x1: 361 case 0x2: 362 case 0x10: 363 case 0x11: 364 qla2x00_mbx_completion(vha, MSW(stat)); 365 status |= MBX_INTERRUPT; 366 367 /* Release mailbox registers. */ 368 wrt_reg_word(®->semaphore, 0); 369 break; 370 case 0x12: 371 mb[0] = MSW(stat); 372 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 373 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 374 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 375 qla2x00_async_event(vha, rsp, mb); 376 break; 377 case 0x13: 378 qla2x00_process_response_queue(rsp); 379 break; 380 case 0x15: 381 mb[0] = MBA_CMPLT_1_16BIT; 382 mb[1] = MSW(stat); 383 qla2x00_async_event(vha, rsp, mb); 384 break; 385 case 0x16: 386 mb[0] = MBA_SCSI_COMPLETION; 387 mb[1] = MSW(stat); 388 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 389 qla2x00_async_event(vha, rsp, mb); 390 break; 391 default: 392 ql_dbg(ql_dbg_async, vha, 0x5028, 393 "Unrecognized interrupt type (%d).\n", stat & 0xff); 394 break; 395 } 396 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 397 rd_reg_word_relaxed(®->hccr); 398 } 399 qla2x00_handle_mbx_completion(ha, status); 400 spin_unlock_irqrestore(&ha->hardware_lock, flags); 401 402 return (IRQ_HANDLED); 403 } 404 405 /** 406 * qla2x00_mbx_completion() - Process mailbox command completions. 407 * @vha: SCSI driver HA context 408 * @mb0: Mailbox0 register 409 */ 410 static void 411 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 412 { 413 uint16_t cnt; 414 uint32_t mboxes; 415 __le16 __iomem *wptr; 416 struct qla_hw_data *ha = vha->hw; 417 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 418 419 /* Read all mbox registers? */ 420 WARN_ON_ONCE(ha->mbx_count > 32); 421 mboxes = (1ULL << ha->mbx_count) - 1; 422 if (!ha->mcp) 423 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 424 else 425 mboxes = ha->mcp->in_mb; 426 427 /* Load return mailbox registers. */ 428 ha->flags.mbox_int = 1; 429 ha->mailbox_out[0] = mb0; 430 mboxes >>= 1; 431 wptr = MAILBOX_REG(ha, reg, 1); 432 433 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 434 if (IS_QLA2200(ha) && cnt == 8) 435 wptr = MAILBOX_REG(ha, reg, 8); 436 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 437 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 438 else if (mboxes & BIT_0) 439 ha->mailbox_out[cnt] = rd_reg_word(wptr); 440 441 wptr++; 442 mboxes >>= 1; 443 } 444 } 445 446 static void 447 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 448 { 449 static char *event[] = 450 { "Complete", "Request Notification", "Time Extension" }; 451 int rval; 452 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 453 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 454 __le16 __iomem *wptr; 455 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 456 457 /* Seed data -- mailbox1 -> mailbox7. */ 458 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 459 wptr = ®24->mailbox1; 460 else if (IS_QLA8044(vha->hw)) 461 wptr = ®82->mailbox_out[1]; 462 else 463 return; 464 465 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 466 mb[cnt] = rd_reg_word(wptr); 467 468 ql_dbg(ql_dbg_async, vha, 0x5021, 469 "Inter-Driver Communication %s -- " 470 "%04x %04x %04x %04x %04x %04x %04x.\n", 471 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 472 mb[4], mb[5], mb[6]); 473 switch (aen) { 474 /* Handle IDC Error completion case. */ 475 case MBA_IDC_COMPLETE: 476 if (mb[1] >> 15) { 477 vha->hw->flags.idc_compl_status = 1; 478 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 479 complete(&vha->hw->dcbx_comp); 480 } 481 break; 482 483 case MBA_IDC_NOTIFY: 484 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 485 timeout = (descr >> 8) & 0xf; 486 ql_dbg(ql_dbg_async, vha, 0x5022, 487 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 488 vha->host_no, event[aen & 0xff], timeout); 489 490 if (!timeout) 491 return; 492 rval = qla2x00_post_idc_ack_work(vha, mb); 493 if (rval != QLA_SUCCESS) 494 ql_log(ql_log_warn, vha, 0x5023, 495 "IDC failed to post ACK.\n"); 496 break; 497 case MBA_IDC_TIME_EXT: 498 vha->hw->idc_extend_tmo = descr; 499 ql_dbg(ql_dbg_async, vha, 0x5087, 500 "%lu Inter-Driver Communication %s -- " 501 "Extend timeout by=%d.\n", 502 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 503 break; 504 } 505 } 506 507 #define LS_UNKNOWN 2 508 const char * 509 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 510 { 511 static const char *const link_speeds[] = { 512 "1", "2", "?", "4", "8", "16", "32", "10" 513 }; 514 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) 515 516 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 517 return link_speeds[0]; 518 else if (speed == 0x13) 519 return link_speeds[QLA_LAST_SPEED]; 520 else if (speed < QLA_LAST_SPEED) 521 return link_speeds[speed]; 522 else 523 return link_speeds[LS_UNKNOWN]; 524 } 525 526 static void 527 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 528 { 529 struct qla_hw_data *ha = vha->hw; 530 531 /* 532 * 8200 AEN Interpretation: 533 * mb[0] = AEN code 534 * mb[1] = AEN Reason code 535 * mb[2] = LSW of Peg-Halt Status-1 Register 536 * mb[6] = MSW of Peg-Halt Status-1 Register 537 * mb[3] = LSW of Peg-Halt Status-2 register 538 * mb[7] = MSW of Peg-Halt Status-2 register 539 * mb[4] = IDC Device-State Register value 540 * mb[5] = IDC Driver-Presence Register value 541 */ 542 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 543 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 544 mb[0], mb[1], mb[2], mb[6]); 545 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 546 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 547 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 548 549 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 550 IDC_HEARTBEAT_FAILURE)) { 551 ha->flags.nic_core_hung = 1; 552 ql_log(ql_log_warn, vha, 0x5060, 553 "83XX: F/W Error Reported: Check if reset required.\n"); 554 555 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 556 uint32_t protocol_engine_id, fw_err_code, err_level; 557 558 /* 559 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 560 * - PEG-Halt Status-1 Register: 561 * (LSW = mb[2], MSW = mb[6]) 562 * Bits 0-7 = protocol-engine ID 563 * Bits 8-28 = f/w error code 564 * Bits 29-31 = Error-level 565 * Error-level 0x1 = Non-Fatal error 566 * Error-level 0x2 = Recoverable Fatal error 567 * Error-level 0x4 = UnRecoverable Fatal error 568 * - PEG-Halt Status-2 Register: 569 * (LSW = mb[3], MSW = mb[7]) 570 */ 571 protocol_engine_id = (mb[2] & 0xff); 572 fw_err_code = (((mb[2] & 0xff00) >> 8) | 573 ((mb[6] & 0x1fff) << 8)); 574 err_level = ((mb[6] & 0xe000) >> 13); 575 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 576 "Register: protocol_engine_id=0x%x " 577 "fw_err_code=0x%x err_level=0x%x.\n", 578 protocol_engine_id, fw_err_code, err_level); 579 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 580 "Register: 0x%x%x.\n", mb[7], mb[3]); 581 if (err_level == ERR_LEVEL_NON_FATAL) { 582 ql_log(ql_log_warn, vha, 0x5063, 583 "Not a fatal error, f/w has recovered itself.\n"); 584 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 585 ql_log(ql_log_fatal, vha, 0x5064, 586 "Recoverable Fatal error: Chip reset " 587 "required.\n"); 588 qla83xx_schedule_work(vha, 589 QLA83XX_NIC_CORE_RESET); 590 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 591 ql_log(ql_log_fatal, vha, 0x5065, 592 "Unrecoverable Fatal error: Set FAILED " 593 "state, reboot required.\n"); 594 qla83xx_schedule_work(vha, 595 QLA83XX_NIC_CORE_UNRECOVERABLE); 596 } 597 } 598 599 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 600 uint16_t peg_fw_state, nw_interface_link_up; 601 uint16_t nw_interface_signal_detect, sfp_status; 602 uint16_t htbt_counter, htbt_monitor_enable; 603 uint16_t sfp_additional_info, sfp_multirate; 604 uint16_t sfp_tx_fault, link_speed, dcbx_status; 605 606 /* 607 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 608 * - PEG-to-FC Status Register: 609 * (LSW = mb[2], MSW = mb[6]) 610 * Bits 0-7 = Peg-Firmware state 611 * Bit 8 = N/W Interface Link-up 612 * Bit 9 = N/W Interface signal detected 613 * Bits 10-11 = SFP Status 614 * SFP Status 0x0 = SFP+ transceiver not expected 615 * SFP Status 0x1 = SFP+ transceiver not present 616 * SFP Status 0x2 = SFP+ transceiver invalid 617 * SFP Status 0x3 = SFP+ transceiver present and 618 * valid 619 * Bits 12-14 = Heartbeat Counter 620 * Bit 15 = Heartbeat Monitor Enable 621 * Bits 16-17 = SFP Additional Info 622 * SFP info 0x0 = Unregocnized transceiver for 623 * Ethernet 624 * SFP info 0x1 = SFP+ brand validation failed 625 * SFP info 0x2 = SFP+ speed validation failed 626 * SFP info 0x3 = SFP+ access error 627 * Bit 18 = SFP Multirate 628 * Bit 19 = SFP Tx Fault 629 * Bits 20-22 = Link Speed 630 * Bits 23-27 = Reserved 631 * Bits 28-30 = DCBX Status 632 * DCBX Status 0x0 = DCBX Disabled 633 * DCBX Status 0x1 = DCBX Enabled 634 * DCBX Status 0x2 = DCBX Exchange error 635 * Bit 31 = Reserved 636 */ 637 peg_fw_state = (mb[2] & 0x00ff); 638 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 639 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 640 sfp_status = ((mb[2] & 0x0c00) >> 10); 641 htbt_counter = ((mb[2] & 0x7000) >> 12); 642 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 643 sfp_additional_info = (mb[6] & 0x0003); 644 sfp_multirate = ((mb[6] & 0x0004) >> 2); 645 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 646 link_speed = ((mb[6] & 0x0070) >> 4); 647 dcbx_status = ((mb[6] & 0x7000) >> 12); 648 649 ql_log(ql_log_warn, vha, 0x5066, 650 "Peg-to-Fc Status Register:\n" 651 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 652 "nw_interface_signal_detect=0x%x" 653 "\nsfp_statis=0x%x.\n ", peg_fw_state, 654 nw_interface_link_up, nw_interface_signal_detect, 655 sfp_status); 656 ql_log(ql_log_warn, vha, 0x5067, 657 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 658 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 659 htbt_counter, htbt_monitor_enable, 660 sfp_additional_info, sfp_multirate); 661 ql_log(ql_log_warn, vha, 0x5068, 662 "sfp_tx_fault=0x%x, link_state=0x%x, " 663 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 664 dcbx_status); 665 666 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 667 } 668 669 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 670 ql_log(ql_log_warn, vha, 0x5069, 671 "Heartbeat Failure encountered, chip reset " 672 "required.\n"); 673 674 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 675 } 676 } 677 678 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 679 ql_log(ql_log_info, vha, 0x506a, 680 "IDC Device-State changed = 0x%x.\n", mb[4]); 681 if (ha->flags.nic_core_reset_owner) 682 return; 683 qla83xx_schedule_work(vha, MBA_IDC_AEN); 684 } 685 } 686 687 int 688 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 689 { 690 struct qla_hw_data *ha = vha->hw; 691 scsi_qla_host_t *vp; 692 uint32_t vp_did; 693 unsigned long flags; 694 int ret = 0; 695 696 if (!ha->num_vhosts) 697 return ret; 698 699 spin_lock_irqsave(&ha->vport_slock, flags); 700 list_for_each_entry(vp, &ha->vp_list, list) { 701 vp_did = vp->d_id.b24; 702 if (vp_did == rscn_entry) { 703 ret = 1; 704 break; 705 } 706 } 707 spin_unlock_irqrestore(&ha->vport_slock, flags); 708 709 return ret; 710 } 711 712 fc_port_t * 713 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 714 { 715 fc_port_t *f, *tf; 716 717 f = tf = NULL; 718 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) 719 if (f->loop_id == loop_id) 720 return f; 721 return NULL; 722 } 723 724 fc_port_t * 725 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) 726 { 727 fc_port_t *f, *tf; 728 729 f = tf = NULL; 730 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 731 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { 732 if (incl_deleted) 733 return f; 734 else if (f->deleted == 0) 735 return f; 736 } 737 } 738 return NULL; 739 } 740 741 fc_port_t * 742 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, 743 u8 incl_deleted) 744 { 745 fc_port_t *f, *tf; 746 747 f = tf = NULL; 748 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 749 if (f->d_id.b24 == id->b24) { 750 if (incl_deleted) 751 return f; 752 else if (f->deleted == 0) 753 return f; 754 } 755 } 756 return NULL; 757 } 758 759 /* Shall be called only on supported adapters. */ 760 static void 761 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 762 { 763 struct qla_hw_data *ha = vha->hw; 764 bool reset_isp_needed = 0; 765 766 ql_log(ql_log_warn, vha, 0x02f0, 767 "MPI Heartbeat stop. MPI reset is%s needed. " 768 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", 769 mb[0] & BIT_8 ? "" : " not", 770 mb[0], mb[1], mb[2], mb[3]); 771 772 if ((mb[1] & BIT_8) == 0) 773 return; 774 775 ql_log(ql_log_warn, vha, 0x02f1, 776 "MPI Heartbeat stop. FW dump needed\n"); 777 778 if (ql2xfulldump_on_mpifail) { 779 ha->isp_ops->fw_dump(vha); 780 reset_isp_needed = 1; 781 } 782 783 ha->isp_ops->mpi_fw_dump(vha, 1); 784 785 if (reset_isp_needed) { 786 vha->hw->flags.fw_init_done = 0; 787 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 788 qla2xxx_wake_dpc(vha); 789 } 790 } 791 792 /** 793 * qla2x00_async_event() - Process aynchronous events. 794 * @vha: SCSI driver HA context 795 * @rsp: response queue 796 * @mb: Mailbox registers (0 - 3) 797 */ 798 void 799 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 800 { 801 uint16_t handle_cnt; 802 uint16_t cnt, mbx; 803 uint32_t handles[5]; 804 struct qla_hw_data *ha = vha->hw; 805 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 806 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 807 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 808 uint32_t rscn_entry, host_pid; 809 unsigned long flags; 810 fc_port_t *fcport = NULL; 811 812 if (!vha->hw->flags.fw_started) 813 return; 814 815 /* Setup to process RIO completion. */ 816 handle_cnt = 0; 817 if (IS_CNA_CAPABLE(ha)) 818 goto skip_rio; 819 switch (mb[0]) { 820 case MBA_SCSI_COMPLETION: 821 handles[0] = make_handle(mb[2], mb[1]); 822 handle_cnt = 1; 823 break; 824 case MBA_CMPLT_1_16BIT: 825 handles[0] = mb[1]; 826 handle_cnt = 1; 827 mb[0] = MBA_SCSI_COMPLETION; 828 break; 829 case MBA_CMPLT_2_16BIT: 830 handles[0] = mb[1]; 831 handles[1] = mb[2]; 832 handle_cnt = 2; 833 mb[0] = MBA_SCSI_COMPLETION; 834 break; 835 case MBA_CMPLT_3_16BIT: 836 handles[0] = mb[1]; 837 handles[1] = mb[2]; 838 handles[2] = mb[3]; 839 handle_cnt = 3; 840 mb[0] = MBA_SCSI_COMPLETION; 841 break; 842 case MBA_CMPLT_4_16BIT: 843 handles[0] = mb[1]; 844 handles[1] = mb[2]; 845 handles[2] = mb[3]; 846 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 847 handle_cnt = 4; 848 mb[0] = MBA_SCSI_COMPLETION; 849 break; 850 case MBA_CMPLT_5_16BIT: 851 handles[0] = mb[1]; 852 handles[1] = mb[2]; 853 handles[2] = mb[3]; 854 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 855 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 856 handle_cnt = 5; 857 mb[0] = MBA_SCSI_COMPLETION; 858 break; 859 case MBA_CMPLT_2_32BIT: 860 handles[0] = make_handle(mb[2], mb[1]); 861 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), 862 RD_MAILBOX_REG(ha, reg, 6)); 863 handle_cnt = 2; 864 mb[0] = MBA_SCSI_COMPLETION; 865 break; 866 default: 867 break; 868 } 869 skip_rio: 870 switch (mb[0]) { 871 case MBA_SCSI_COMPLETION: /* Fast Post */ 872 if (!vha->flags.online) 873 break; 874 875 for (cnt = 0; cnt < handle_cnt; cnt++) 876 qla2x00_process_completed_request(vha, rsp->req, 877 handles[cnt]); 878 break; 879 880 case MBA_RESET: /* Reset */ 881 ql_dbg(ql_dbg_async, vha, 0x5002, 882 "Asynchronous RESET.\n"); 883 884 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 885 break; 886 887 case MBA_SYSTEM_ERR: /* System Error */ 888 mbx = 0; 889 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 890 IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 891 u16 m[4]; 892 893 m[0] = rd_reg_word(®24->mailbox4); 894 m[1] = rd_reg_word(®24->mailbox5); 895 m[2] = rd_reg_word(®24->mailbox6); 896 mbx = m[3] = rd_reg_word(®24->mailbox7); 897 898 ql_log(ql_log_warn, vha, 0x5003, 899 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", 900 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); 901 } else 902 ql_log(ql_log_warn, vha, 0x5003, 903 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", 904 mb[1], mb[2], mb[3]); 905 906 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 907 rd_reg_word(®24->mailbox7) & BIT_8) 908 ha->isp_ops->mpi_fw_dump(vha, 1); 909 ha->isp_ops->fw_dump(vha); 910 ha->flags.fw_init_done = 0; 911 QLA_FW_STOPPED(ha); 912 913 if (IS_FWI2_CAPABLE(ha)) { 914 if (mb[1] == 0 && mb[2] == 0) { 915 ql_log(ql_log_fatal, vha, 0x5004, 916 "Unrecoverable Hardware Error: adapter " 917 "marked OFFLINE!\n"); 918 vha->flags.online = 0; 919 vha->device_flags |= DFLG_DEV_FAILED; 920 } else { 921 /* Check to see if MPI timeout occurred */ 922 if ((mbx & MBX_3) && (ha->port_no == 0)) 923 set_bit(MPI_RESET_NEEDED, 924 &vha->dpc_flags); 925 926 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 927 } 928 } else if (mb[1] == 0) { 929 ql_log(ql_log_fatal, vha, 0x5005, 930 "Unrecoverable Hardware Error: adapter marked " 931 "OFFLINE!\n"); 932 vha->flags.online = 0; 933 vha->device_flags |= DFLG_DEV_FAILED; 934 } else 935 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 936 break; 937 938 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 939 ql_log(ql_log_warn, vha, 0x5006, 940 "ISP Request Transfer Error (%x).\n", mb[1]); 941 942 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 943 break; 944 945 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 946 ql_log(ql_log_warn, vha, 0x5007, 947 "ISP Response Transfer Error (%x).\n", mb[1]); 948 949 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 950 break; 951 952 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 953 ql_dbg(ql_dbg_async, vha, 0x5008, 954 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); 955 break; 956 957 case MBA_LOOP_INIT_ERR: 958 ql_log(ql_log_warn, vha, 0x5090, 959 "LOOP INIT ERROR (%x).\n", mb[1]); 960 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 961 break; 962 963 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 964 ha->flags.lip_ae = 1; 965 966 ql_dbg(ql_dbg_async, vha, 0x5009, 967 "LIP occurred (%x).\n", mb[1]); 968 969 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 970 atomic_set(&vha->loop_state, LOOP_DOWN); 971 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 972 qla2x00_mark_all_devices_lost(vha); 973 } 974 975 if (vha->vp_idx) { 976 atomic_set(&vha->vp_state, VP_FAILED); 977 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 978 } 979 980 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 981 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 982 983 vha->flags.management_server_logged_in = 0; 984 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 985 break; 986 987 case MBA_LOOP_UP: /* Loop Up Event */ 988 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 989 ha->link_data_rate = PORT_SPEED_1GB; 990 else 991 ha->link_data_rate = mb[1]; 992 993 ql_log(ql_log_info, vha, 0x500a, 994 "LOOP UP detected (%s Gbps).\n", 995 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 996 997 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 998 if (mb[2] & BIT_0) 999 ql_log(ql_log_info, vha, 0x11a0, 1000 "FEC=enabled (link up).\n"); 1001 } 1002 1003 vha->flags.management_server_logged_in = 0; 1004 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 1005 1006 break; 1007 1008 case MBA_LOOP_DOWN: /* Loop Down Event */ 1009 SAVE_TOPO(ha); 1010 ha->flags.lip_ae = 0; 1011 ha->current_topology = 0; 1012 1013 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 1014 ? rd_reg_word(®24->mailbox4) : 0; 1015 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(®82->mailbox_out[4]) 1016 : mbx; 1017 ql_log(ql_log_info, vha, 0x500b, 1018 "LOOP DOWN detected (%x %x %x %x).\n", 1019 mb[1], mb[2], mb[3], mbx); 1020 1021 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1022 atomic_set(&vha->loop_state, LOOP_DOWN); 1023 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1024 /* 1025 * In case of loop down, restore WWPN from 1026 * NVRAM in case of FA-WWPN capable ISP 1027 * Restore for Physical Port only 1028 */ 1029 if (!vha->vp_idx) { 1030 if (ha->flags.fawwpn_enabled && 1031 (ha->current_topology == ISP_CFG_F)) { 1032 void *wwpn = ha->init_cb->port_name; 1033 1034 memcpy(vha->port_name, wwpn, WWN_SIZE); 1035 fc_host_port_name(vha->host) = 1036 wwn_to_u64(vha->port_name); 1037 ql_dbg(ql_dbg_init + ql_dbg_verbose, 1038 vha, 0x00d8, "LOOP DOWN detected," 1039 "restore WWPN %016llx\n", 1040 wwn_to_u64(vha->port_name)); 1041 } 1042 1043 clear_bit(VP_CONFIG_OK, &vha->vp_flags); 1044 } 1045 1046 vha->device_flags |= DFLG_NO_CABLE; 1047 qla2x00_mark_all_devices_lost(vha); 1048 } 1049 1050 if (vha->vp_idx) { 1051 atomic_set(&vha->vp_state, VP_FAILED); 1052 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1053 } 1054 1055 vha->flags.management_server_logged_in = 0; 1056 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1057 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 1058 break; 1059 1060 case MBA_LIP_RESET: /* LIP reset occurred */ 1061 ql_dbg(ql_dbg_async, vha, 0x500c, 1062 "LIP reset occurred (%x).\n", mb[1]); 1063 1064 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1065 atomic_set(&vha->loop_state, LOOP_DOWN); 1066 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1067 qla2x00_mark_all_devices_lost(vha); 1068 } 1069 1070 if (vha->vp_idx) { 1071 atomic_set(&vha->vp_state, VP_FAILED); 1072 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1073 } 1074 1075 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1076 1077 ha->operating_mode = LOOP; 1078 vha->flags.management_server_logged_in = 0; 1079 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 1080 break; 1081 1082 /* case MBA_DCBX_COMPLETE: */ 1083 case MBA_POINT_TO_POINT: /* Point-to-Point */ 1084 ha->flags.lip_ae = 0; 1085 1086 if (IS_QLA2100(ha)) 1087 break; 1088 1089 if (IS_CNA_CAPABLE(ha)) { 1090 ql_dbg(ql_dbg_async, vha, 0x500d, 1091 "DCBX Completed -- %04x %04x %04x.\n", 1092 mb[1], mb[2], mb[3]); 1093 if (ha->notify_dcbx_comp && !vha->vp_idx) 1094 complete(&ha->dcbx_comp); 1095 1096 } else 1097 ql_dbg(ql_dbg_async, vha, 0x500e, 1098 "Asynchronous P2P MODE received.\n"); 1099 1100 /* 1101 * Until there's a transition from loop down to loop up, treat 1102 * this as loop down only. 1103 */ 1104 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1105 atomic_set(&vha->loop_state, LOOP_DOWN); 1106 if (!atomic_read(&vha->loop_down_timer)) 1107 atomic_set(&vha->loop_down_timer, 1108 LOOP_DOWN_TIME); 1109 if (!N2N_TOPO(ha)) 1110 qla2x00_mark_all_devices_lost(vha); 1111 } 1112 1113 if (vha->vp_idx) { 1114 atomic_set(&vha->vp_state, VP_FAILED); 1115 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1116 } 1117 1118 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 1119 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1120 1121 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1122 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1123 1124 vha->flags.management_server_logged_in = 0; 1125 break; 1126 1127 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 1128 if (IS_QLA2100(ha)) 1129 break; 1130 1131 ql_dbg(ql_dbg_async, vha, 0x500f, 1132 "Configuration change detected: value=%x.\n", mb[1]); 1133 1134 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1135 atomic_set(&vha->loop_state, LOOP_DOWN); 1136 if (!atomic_read(&vha->loop_down_timer)) 1137 atomic_set(&vha->loop_down_timer, 1138 LOOP_DOWN_TIME); 1139 qla2x00_mark_all_devices_lost(vha); 1140 } 1141 1142 if (vha->vp_idx) { 1143 atomic_set(&vha->vp_state, VP_FAILED); 1144 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1145 } 1146 1147 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1148 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1149 break; 1150 1151 case MBA_PORT_UPDATE: /* Port database update */ 1152 /* 1153 * Handle only global and vn-port update events 1154 * 1155 * Relevant inputs: 1156 * mb[1] = N_Port handle of changed port 1157 * OR 0xffff for global event 1158 * mb[2] = New login state 1159 * 7 = Port logged out 1160 * mb[3] = LSB is vp_idx, 0xff = all vps 1161 * 1162 * Skip processing if: 1163 * Event is global, vp_idx is NOT all vps, 1164 * vp_idx does not match 1165 * Event is not global, vp_idx does not match 1166 */ 1167 if (IS_QLA2XXX_MIDTYPE(ha) && 1168 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 1169 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 1170 break; 1171 1172 if (mb[2] == 0x7) { 1173 ql_dbg(ql_dbg_async, vha, 0x5010, 1174 "Port %s %04x %04x %04x.\n", 1175 mb[1] == 0xffff ? "unavailable" : "logout", 1176 mb[1], mb[2], mb[3]); 1177 1178 if (mb[1] == 0xffff) 1179 goto global_port_update; 1180 1181 if (mb[1] == NPH_SNS_LID(ha)) { 1182 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1183 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1184 break; 1185 } 1186 1187 /* use handle_cnt for loop id/nport handle */ 1188 if (IS_FWI2_CAPABLE(ha)) 1189 handle_cnt = NPH_SNS; 1190 else 1191 handle_cnt = SIMPLE_NAME_SERVER; 1192 if (mb[1] == handle_cnt) { 1193 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1194 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1195 break; 1196 } 1197 1198 /* Port logout */ 1199 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1200 if (!fcport) 1201 break; 1202 if (atomic_read(&fcport->state) != FCS_ONLINE) 1203 break; 1204 ql_dbg(ql_dbg_async, vha, 0x508a, 1205 "Marking port lost loopid=%04x portid=%06x.\n", 1206 fcport->loop_id, fcport->d_id.b24); 1207 if (qla_ini_mode_enabled(vha)) { 1208 fcport->logout_on_delete = 0; 1209 qlt_schedule_sess_for_deletion(fcport); 1210 } 1211 break; 1212 1213 global_port_update: 1214 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1215 atomic_set(&vha->loop_state, LOOP_DOWN); 1216 atomic_set(&vha->loop_down_timer, 1217 LOOP_DOWN_TIME); 1218 vha->device_flags |= DFLG_NO_CABLE; 1219 qla2x00_mark_all_devices_lost(vha); 1220 } 1221 1222 if (vha->vp_idx) { 1223 atomic_set(&vha->vp_state, VP_FAILED); 1224 fc_vport_set_state(vha->fc_vport, 1225 FC_VPORT_FAILED); 1226 qla2x00_mark_all_devices_lost(vha); 1227 } 1228 1229 vha->flags.management_server_logged_in = 0; 1230 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1231 break; 1232 } 1233 1234 /* 1235 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1236 * event etc. earlier indicating loop is down) then process 1237 * it. Otherwise ignore it and Wait for RSCN to come in. 1238 */ 1239 atomic_set(&vha->loop_down_timer, 0); 1240 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1241 !ha->flags.n2n_ae && 1242 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1243 ql_dbg(ql_dbg_async, vha, 0x5011, 1244 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1245 mb[1], mb[2], mb[3]); 1246 break; 1247 } 1248 1249 ql_dbg(ql_dbg_async, vha, 0x5012, 1250 "Port database changed %04x %04x %04x.\n", 1251 mb[1], mb[2], mb[3]); 1252 1253 /* 1254 * Mark all devices as missing so we will login again. 1255 */ 1256 atomic_set(&vha->loop_state, LOOP_UP); 1257 vha->scan.scan_retry = 0; 1258 1259 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1260 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1261 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1262 break; 1263 1264 case MBA_RSCN_UPDATE: /* State Change Registration */ 1265 /* Check if the Vport has issued a SCR */ 1266 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1267 break; 1268 /* Only handle SCNs for our Vport index. */ 1269 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1270 break; 1271 1272 ql_dbg(ql_dbg_async, vha, 0x5013, 1273 "RSCN database changed -- %04x %04x %04x.\n", 1274 mb[1], mb[2], mb[3]); 1275 1276 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1277 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1278 | vha->d_id.b.al_pa; 1279 if (rscn_entry == host_pid) { 1280 ql_dbg(ql_dbg_async, vha, 0x5014, 1281 "Ignoring RSCN update to local host " 1282 "port ID (%06x).\n", host_pid); 1283 break; 1284 } 1285 1286 /* Ignore reserved bits from RSCN-payload. */ 1287 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 1288 1289 /* Skip RSCNs for virtual ports on the same physical port */ 1290 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 1291 break; 1292 1293 atomic_set(&vha->loop_down_timer, 0); 1294 vha->flags.management_server_logged_in = 0; 1295 { 1296 struct event_arg ea; 1297 1298 memset(&ea, 0, sizeof(ea)); 1299 ea.id.b24 = rscn_entry; 1300 ea.id.b.rsvd_1 = rscn_entry >> 24; 1301 qla2x00_handle_rscn(vha, &ea); 1302 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1303 } 1304 break; 1305 /* case MBA_RIO_RESPONSE: */ 1306 case MBA_ZIO_RESPONSE: 1307 ql_dbg(ql_dbg_async, vha, 0x5015, 1308 "[R|Z]IO update completion.\n"); 1309 1310 if (IS_FWI2_CAPABLE(ha)) 1311 qla24xx_process_response_queue(vha, rsp); 1312 else 1313 qla2x00_process_response_queue(rsp); 1314 break; 1315 1316 case MBA_DISCARD_RND_FRAME: 1317 ql_dbg(ql_dbg_async, vha, 0x5016, 1318 "Discard RND Frame -- %04x %04x %04x.\n", 1319 mb[1], mb[2], mb[3]); 1320 break; 1321 1322 case MBA_TRACE_NOTIFICATION: 1323 ql_dbg(ql_dbg_async, vha, 0x5017, 1324 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1325 break; 1326 1327 case MBA_ISP84XX_ALERT: 1328 ql_dbg(ql_dbg_async, vha, 0x5018, 1329 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1330 mb[1], mb[2], mb[3]); 1331 1332 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1333 switch (mb[1]) { 1334 case A84_PANIC_RECOVERY: 1335 ql_log(ql_log_info, vha, 0x5019, 1336 "Alert 84XX: panic recovery %04x %04x.\n", 1337 mb[2], mb[3]); 1338 break; 1339 case A84_OP_LOGIN_COMPLETE: 1340 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1341 ql_log(ql_log_info, vha, 0x501a, 1342 "Alert 84XX: firmware version %x.\n", 1343 ha->cs84xx->op_fw_version); 1344 break; 1345 case A84_DIAG_LOGIN_COMPLETE: 1346 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1347 ql_log(ql_log_info, vha, 0x501b, 1348 "Alert 84XX: diagnostic firmware version %x.\n", 1349 ha->cs84xx->diag_fw_version); 1350 break; 1351 case A84_GOLD_LOGIN_COMPLETE: 1352 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1353 ha->cs84xx->fw_update = 1; 1354 ql_log(ql_log_info, vha, 0x501c, 1355 "Alert 84XX: gold firmware version %x.\n", 1356 ha->cs84xx->gold_fw_version); 1357 break; 1358 default: 1359 ql_log(ql_log_warn, vha, 0x501d, 1360 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1361 mb[1], mb[2], mb[3]); 1362 } 1363 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1364 break; 1365 case MBA_DCBX_START: 1366 ql_dbg(ql_dbg_async, vha, 0x501e, 1367 "DCBX Started -- %04x %04x %04x.\n", 1368 mb[1], mb[2], mb[3]); 1369 break; 1370 case MBA_DCBX_PARAM_UPDATE: 1371 ql_dbg(ql_dbg_async, vha, 0x501f, 1372 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1373 mb[1], mb[2], mb[3]); 1374 break; 1375 case MBA_FCF_CONF_ERR: 1376 ql_dbg(ql_dbg_async, vha, 0x5020, 1377 "FCF Configuration Error -- %04x %04x %04x.\n", 1378 mb[1], mb[2], mb[3]); 1379 break; 1380 case MBA_IDC_NOTIFY: 1381 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1382 mb[4] = rd_reg_word(®24->mailbox4); 1383 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1384 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1385 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1386 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1387 /* 1388 * Extend loop down timer since port is active. 1389 */ 1390 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1391 atomic_set(&vha->loop_down_timer, 1392 LOOP_DOWN_TIME); 1393 qla2xxx_wake_dpc(vha); 1394 } 1395 } 1396 /* fall through */ 1397 case MBA_IDC_COMPLETE: 1398 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1399 complete(&ha->lb_portup_comp); 1400 /* Fallthru */ 1401 case MBA_IDC_TIME_EXT: 1402 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1403 IS_QLA8044(ha)) 1404 qla81xx_idc_event(vha, mb[0], mb[1]); 1405 break; 1406 1407 case MBA_IDC_AEN: 1408 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1409 qla27xx_handle_8200_aen(vha, mb); 1410 } else if (IS_QLA83XX(ha)) { 1411 mb[4] = rd_reg_word(®24->mailbox4); 1412 mb[5] = rd_reg_word(®24->mailbox5); 1413 mb[6] = rd_reg_word(®24->mailbox6); 1414 mb[7] = rd_reg_word(®24->mailbox7); 1415 qla83xx_handle_8200_aen(vha, mb); 1416 } else { 1417 ql_dbg(ql_dbg_async, vha, 0x5052, 1418 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", 1419 mb[0], mb[1], mb[2], mb[3]); 1420 } 1421 break; 1422 1423 case MBA_DPORT_DIAGNOSTICS: 1424 ql_dbg(ql_dbg_async, vha, 0x5052, 1425 "D-Port Diagnostics: %04x %04x %04x %04x\n", 1426 mb[0], mb[1], mb[2], mb[3]); 1427 memcpy(vha->dport_data, mb, sizeof(vha->dport_data)); 1428 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1429 static char *results[] = { 1430 "start", "done(pass)", "done(error)", "undefined" }; 1431 static char *types[] = { 1432 "none", "dynamic", "static", "other" }; 1433 uint result = mb[1] >> 0 & 0x3; 1434 uint type = mb[1] >> 6 & 0x3; 1435 uint sw = mb[1] >> 15 & 0x1; 1436 ql_dbg(ql_dbg_async, vha, 0x5052, 1437 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", 1438 results[result], types[type], sw); 1439 if (result == 2) { 1440 static char *reasons[] = { 1441 "reserved", "unexpected reject", 1442 "unexpected phase", "retry exceeded", 1443 "timed out", "not supported", 1444 "user stopped" }; 1445 uint reason = mb[2] >> 0 & 0xf; 1446 uint phase = mb[2] >> 12 & 0xf; 1447 ql_dbg(ql_dbg_async, vha, 0x5052, 1448 "D-Port Diagnostics: reason=%s phase=%u \n", 1449 reason < 7 ? reasons[reason] : "other", 1450 phase >> 1); 1451 } 1452 } 1453 break; 1454 1455 case MBA_TEMPERATURE_ALERT: 1456 ql_dbg(ql_dbg_async, vha, 0x505e, 1457 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1458 if (mb[1] == 0x12) 1459 schedule_work(&ha->board_disable); 1460 break; 1461 1462 case MBA_TRANS_INSERT: 1463 ql_dbg(ql_dbg_async, vha, 0x5091, 1464 "Transceiver Insertion: %04x\n", mb[1]); 1465 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); 1466 break; 1467 1468 case MBA_TRANS_REMOVE: 1469 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); 1470 break; 1471 1472 default: 1473 ql_dbg(ql_dbg_async, vha, 0x5057, 1474 "Unknown AEN:%04x %04x %04x %04x\n", 1475 mb[0], mb[1], mb[2], mb[3]); 1476 } 1477 1478 qlt_async_event(mb[0], vha, mb); 1479 1480 if (!vha->vp_idx && ha->num_vhosts) 1481 qla2x00_alert_all_vps(rsp, mb); 1482 } 1483 1484 /** 1485 * qla2x00_process_completed_request() - Process a Fast Post response. 1486 * @vha: SCSI driver HA context 1487 * @req: request queue 1488 * @index: SRB index 1489 */ 1490 void 1491 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1492 struct req_que *req, uint32_t index) 1493 { 1494 srb_t *sp; 1495 struct qla_hw_data *ha = vha->hw; 1496 1497 /* Validate handle. */ 1498 if (index >= req->num_outstanding_cmds) { 1499 ql_log(ql_log_warn, vha, 0x3014, 1500 "Invalid SCSI command index (%x).\n", index); 1501 1502 if (IS_P3P_TYPE(ha)) 1503 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1504 else 1505 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1506 return; 1507 } 1508 1509 sp = req->outstanding_cmds[index]; 1510 if (sp) { 1511 /* Free outstanding command slot. */ 1512 req->outstanding_cmds[index] = NULL; 1513 1514 /* Save ISP completion status */ 1515 sp->done(sp, DID_OK << 16); 1516 } else { 1517 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1518 1519 if (IS_P3P_TYPE(ha)) 1520 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1521 else 1522 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1523 } 1524 } 1525 1526 srb_t * 1527 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1528 struct req_que *req, void *iocb) 1529 { 1530 struct qla_hw_data *ha = vha->hw; 1531 sts_entry_t *pkt = iocb; 1532 srb_t *sp = NULL; 1533 uint16_t index; 1534 1535 index = LSW(pkt->handle); 1536 if (index >= req->num_outstanding_cmds) { 1537 ql_log(ql_log_warn, vha, 0x5031, 1538 "Invalid command index (%x) type %8ph.\n", 1539 index, iocb); 1540 if (IS_P3P_TYPE(ha)) 1541 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1542 else 1543 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1544 goto done; 1545 } 1546 sp = req->outstanding_cmds[index]; 1547 if (!sp) { 1548 ql_log(ql_log_warn, vha, 0x5032, 1549 "Invalid completion handle (%x) -- timed-out.\n", index); 1550 return sp; 1551 } 1552 if (sp->handle != index) { 1553 ql_log(ql_log_warn, vha, 0x5033, 1554 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 1555 return NULL; 1556 } 1557 1558 req->outstanding_cmds[index] = NULL; 1559 1560 done: 1561 return sp; 1562 } 1563 1564 static void 1565 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1566 struct mbx_entry *mbx) 1567 { 1568 const char func[] = "MBX-IOCB"; 1569 const char *type; 1570 fc_port_t *fcport; 1571 srb_t *sp; 1572 struct srb_iocb *lio; 1573 uint16_t *data; 1574 uint16_t status; 1575 1576 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1577 if (!sp) 1578 return; 1579 1580 lio = &sp->u.iocb_cmd; 1581 type = sp->name; 1582 fcport = sp->fcport; 1583 data = lio->u.logio.data; 1584 1585 data[0] = MBS_COMMAND_ERROR; 1586 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1587 QLA_LOGIO_LOGIN_RETRIED : 0; 1588 if (mbx->entry_status) { 1589 ql_dbg(ql_dbg_async, vha, 0x5043, 1590 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1591 "entry-status=%x status=%x state-flag=%x " 1592 "status-flags=%x.\n", type, sp->handle, 1593 fcport->d_id.b.domain, fcport->d_id.b.area, 1594 fcport->d_id.b.al_pa, mbx->entry_status, 1595 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1596 le16_to_cpu(mbx->status_flags)); 1597 1598 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1599 mbx, sizeof(*mbx)); 1600 1601 goto logio_done; 1602 } 1603 1604 status = le16_to_cpu(mbx->status); 1605 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1606 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1607 status = 0; 1608 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1609 ql_dbg(ql_dbg_async, vha, 0x5045, 1610 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1611 type, sp->handle, fcport->d_id.b.domain, 1612 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1613 le16_to_cpu(mbx->mb1)); 1614 1615 data[0] = MBS_COMMAND_COMPLETE; 1616 if (sp->type == SRB_LOGIN_CMD) { 1617 fcport->port_type = FCT_TARGET; 1618 if (le16_to_cpu(mbx->mb1) & BIT_0) 1619 fcport->port_type = FCT_INITIATOR; 1620 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1621 fcport->flags |= FCF_FCP2_DEVICE; 1622 } 1623 goto logio_done; 1624 } 1625 1626 data[0] = le16_to_cpu(mbx->mb0); 1627 switch (data[0]) { 1628 case MBS_PORT_ID_USED: 1629 data[1] = le16_to_cpu(mbx->mb1); 1630 break; 1631 case MBS_LOOP_ID_USED: 1632 break; 1633 default: 1634 data[0] = MBS_COMMAND_ERROR; 1635 break; 1636 } 1637 1638 ql_log(ql_log_warn, vha, 0x5046, 1639 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1640 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1641 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1642 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1643 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1644 le16_to_cpu(mbx->mb7)); 1645 1646 logio_done: 1647 sp->done(sp, 0); 1648 } 1649 1650 static void 1651 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1652 struct mbx_24xx_entry *pkt) 1653 { 1654 const char func[] = "MBX-IOCB2"; 1655 srb_t *sp; 1656 struct srb_iocb *si; 1657 u16 sz, i; 1658 int res; 1659 1660 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1661 if (!sp) 1662 return; 1663 1664 si = &sp->u.iocb_cmd; 1665 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); 1666 1667 for (i = 0; i < sz; i++) 1668 si->u.mbx.in_mb[i] = pkt->mb[i]; 1669 1670 res = (si->u.mbx.in_mb[0] & MBS_MASK); 1671 1672 sp->done(sp, res); 1673 } 1674 1675 static void 1676 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1677 struct nack_to_isp *pkt) 1678 { 1679 const char func[] = "nack"; 1680 srb_t *sp; 1681 int res = 0; 1682 1683 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1684 if (!sp) 1685 return; 1686 1687 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) 1688 res = QLA_FUNCTION_FAILED; 1689 1690 sp->done(sp, res); 1691 } 1692 1693 static void 1694 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1695 sts_entry_t *pkt, int iocb_type) 1696 { 1697 const char func[] = "CT_IOCB"; 1698 const char *type; 1699 srb_t *sp; 1700 struct bsg_job *bsg_job; 1701 struct fc_bsg_reply *bsg_reply; 1702 uint16_t comp_status; 1703 int res = 0; 1704 1705 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1706 if (!sp) 1707 return; 1708 1709 switch (sp->type) { 1710 case SRB_CT_CMD: 1711 bsg_job = sp->u.bsg_job; 1712 bsg_reply = bsg_job->reply; 1713 1714 type = "ct pass-through"; 1715 1716 comp_status = le16_to_cpu(pkt->comp_status); 1717 1718 /* 1719 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1720 * fc payload to the caller 1721 */ 1722 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1723 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1724 1725 if (comp_status != CS_COMPLETE) { 1726 if (comp_status == CS_DATA_UNDERRUN) { 1727 res = DID_OK << 16; 1728 bsg_reply->reply_payload_rcv_len = 1729 le16_to_cpu(pkt->rsp_info_len); 1730 1731 ql_log(ql_log_warn, vha, 0x5048, 1732 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", 1733 type, comp_status, 1734 bsg_reply->reply_payload_rcv_len); 1735 } else { 1736 ql_log(ql_log_warn, vha, 0x5049, 1737 "CT pass-through-%s error comp_status=0x%x.\n", 1738 type, comp_status); 1739 res = DID_ERROR << 16; 1740 bsg_reply->reply_payload_rcv_len = 0; 1741 } 1742 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1743 pkt, sizeof(*pkt)); 1744 } else { 1745 res = DID_OK << 16; 1746 bsg_reply->reply_payload_rcv_len = 1747 bsg_job->reply_payload.payload_len; 1748 bsg_job->reply_len = 0; 1749 } 1750 break; 1751 case SRB_CT_PTHRU_CMD: 1752 /* 1753 * borrowing sts_entry_24xx.comp_status. 1754 * same location as ct_entry_24xx.comp_status 1755 */ 1756 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 1757 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1758 sp->name); 1759 break; 1760 } 1761 1762 sp->done(sp, res); 1763 } 1764 1765 static void 1766 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1767 struct sts_entry_24xx *pkt, int iocb_type) 1768 { 1769 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; 1770 const char func[] = "ELS_CT_IOCB"; 1771 const char *type; 1772 srb_t *sp; 1773 struct bsg_job *bsg_job; 1774 struct fc_bsg_reply *bsg_reply; 1775 uint16_t comp_status; 1776 uint32_t fw_status[3]; 1777 int res; 1778 struct srb_iocb *els; 1779 1780 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1781 if (!sp) 1782 return; 1783 1784 type = NULL; 1785 switch (sp->type) { 1786 case SRB_ELS_CMD_RPT: 1787 case SRB_ELS_CMD_HST: 1788 type = "els"; 1789 break; 1790 case SRB_CT_CMD: 1791 type = "ct pass-through"; 1792 break; 1793 case SRB_ELS_DCMD: 1794 type = "Driver ELS logo"; 1795 if (iocb_type != ELS_IOCB_TYPE) { 1796 ql_dbg(ql_dbg_user, vha, 0x5047, 1797 "Completing %s: (%p) type=%d.\n", 1798 type, sp, sp->type); 1799 sp->done(sp, 0); 1800 return; 1801 } 1802 break; 1803 case SRB_CT_PTHRU_CMD: 1804 /* borrowing sts_entry_24xx.comp_status. 1805 same location as ct_entry_24xx.comp_status 1806 */ 1807 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, 1808 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1809 sp->name); 1810 sp->done(sp, res); 1811 return; 1812 default: 1813 ql_dbg(ql_dbg_user, vha, 0x503e, 1814 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 1815 return; 1816 } 1817 1818 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1819 fw_status[1] = le32_to_cpu(ese->error_subcode_1); 1820 fw_status[2] = le32_to_cpu(ese->error_subcode_2); 1821 1822 if (iocb_type == ELS_IOCB_TYPE) { 1823 els = &sp->u.iocb_cmd; 1824 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); 1825 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); 1826 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); 1827 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); 1828 if (comp_status == CS_COMPLETE) { 1829 res = DID_OK << 16; 1830 } else { 1831 if (comp_status == CS_DATA_UNDERRUN) { 1832 res = DID_OK << 16; 1833 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( 1834 ese->total_byte_count)); 1835 } else { 1836 els->u.els_plogi.len = 0; 1837 res = DID_ERROR << 16; 1838 } 1839 } 1840 ql_dbg(ql_dbg_user, vha, 0x503f, 1841 "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", 1842 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1843 le32_to_cpu(ese->total_byte_count)); 1844 goto els_ct_done; 1845 } 1846 1847 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1848 * fc payload to the caller 1849 */ 1850 bsg_job = sp->u.bsg_job; 1851 bsg_reply = bsg_job->reply; 1852 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1853 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1854 1855 if (comp_status != CS_COMPLETE) { 1856 if (comp_status == CS_DATA_UNDERRUN) { 1857 res = DID_OK << 16; 1858 bsg_reply->reply_payload_rcv_len = 1859 le32_to_cpu(ese->total_byte_count); 1860 1861 ql_dbg(ql_dbg_user, vha, 0x503f, 1862 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1863 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1864 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1865 le32_to_cpu(ese->total_byte_count)); 1866 } else { 1867 ql_dbg(ql_dbg_user, vha, 0x5040, 1868 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1869 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1870 type, sp->handle, comp_status, 1871 le32_to_cpu(ese->error_subcode_1), 1872 le32_to_cpu(ese->error_subcode_2)); 1873 res = DID_ERROR << 16; 1874 bsg_reply->reply_payload_rcv_len = 0; 1875 } 1876 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), 1877 fw_status, sizeof(fw_status)); 1878 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 1879 pkt, sizeof(*pkt)); 1880 } 1881 else { 1882 res = DID_OK << 16; 1883 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1884 bsg_job->reply_len = 0; 1885 } 1886 els_ct_done: 1887 1888 sp->done(sp, res); 1889 } 1890 1891 static void 1892 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1893 struct logio_entry_24xx *logio) 1894 { 1895 const char func[] = "LOGIO-IOCB"; 1896 const char *type; 1897 fc_port_t *fcport; 1898 srb_t *sp; 1899 struct srb_iocb *lio; 1900 uint16_t *data; 1901 uint32_t iop[2]; 1902 1903 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1904 if (!sp) 1905 return; 1906 1907 lio = &sp->u.iocb_cmd; 1908 type = sp->name; 1909 fcport = sp->fcport; 1910 data = lio->u.logio.data; 1911 1912 data[0] = MBS_COMMAND_ERROR; 1913 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1914 QLA_LOGIO_LOGIN_RETRIED : 0; 1915 if (logio->entry_status) { 1916 ql_log(ql_log_warn, fcport->vha, 0x5034, 1917 "Async-%s error entry - %8phC hdl=%x" 1918 "portid=%02x%02x%02x entry-status=%x.\n", 1919 type, fcport->port_name, sp->handle, fcport->d_id.b.domain, 1920 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1921 logio->entry_status); 1922 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 1923 logio, sizeof(*logio)); 1924 1925 goto logio_done; 1926 } 1927 1928 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1929 ql_dbg(ql_dbg_async, sp->vha, 0x5036, 1930 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", 1931 type, sp->handle, fcport->d_id.b24, fcport->port_name, 1932 le32_to_cpu(logio->io_parameter[0])); 1933 1934 vha->hw->exch_starvation = 0; 1935 data[0] = MBS_COMMAND_COMPLETE; 1936 1937 if (sp->type == SRB_PRLI_CMD) { 1938 lio->u.logio.iop[0] = 1939 le32_to_cpu(logio->io_parameter[0]); 1940 lio->u.logio.iop[1] = 1941 le32_to_cpu(logio->io_parameter[1]); 1942 goto logio_done; 1943 } 1944 1945 if (sp->type != SRB_LOGIN_CMD) 1946 goto logio_done; 1947 1948 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1949 if (iop[0] & BIT_4) { 1950 fcport->port_type = FCT_TARGET; 1951 if (iop[0] & BIT_8) 1952 fcport->flags |= FCF_FCP2_DEVICE; 1953 } else if (iop[0] & BIT_5) 1954 fcport->port_type = FCT_INITIATOR; 1955 1956 if (iop[0] & BIT_7) 1957 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1958 1959 if (logio->io_parameter[7] || logio->io_parameter[8]) 1960 fcport->supported_classes |= FC_COS_CLASS2; 1961 if (logio->io_parameter[9] || logio->io_parameter[10]) 1962 fcport->supported_classes |= FC_COS_CLASS3; 1963 1964 goto logio_done; 1965 } 1966 1967 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1968 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1969 lio->u.logio.iop[0] = iop[0]; 1970 lio->u.logio.iop[1] = iop[1]; 1971 switch (iop[0]) { 1972 case LSC_SCODE_PORTID_USED: 1973 data[0] = MBS_PORT_ID_USED; 1974 data[1] = LSW(iop[1]); 1975 break; 1976 case LSC_SCODE_NPORT_USED: 1977 data[0] = MBS_LOOP_ID_USED; 1978 break; 1979 case LSC_SCODE_CMD_FAILED: 1980 if (iop[1] == 0x0606) { 1981 /* 1982 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, 1983 * Target side acked. 1984 */ 1985 data[0] = MBS_COMMAND_COMPLETE; 1986 goto logio_done; 1987 } 1988 data[0] = MBS_COMMAND_ERROR; 1989 break; 1990 case LSC_SCODE_NOXCB: 1991 vha->hw->exch_starvation++; 1992 if (vha->hw->exch_starvation > 5) { 1993 ql_log(ql_log_warn, vha, 0xd046, 1994 "Exchange starvation. Resetting RISC\n"); 1995 1996 vha->hw->exch_starvation = 0; 1997 1998 if (IS_P3P_TYPE(vha->hw)) 1999 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2000 else 2001 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2002 qla2xxx_wake_dpc(vha); 2003 } 2004 /* fall through */ 2005 default: 2006 data[0] = MBS_COMMAND_ERROR; 2007 break; 2008 } 2009 2010 ql_dbg(ql_dbg_async, sp->vha, 0x5037, 2011 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", 2012 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2013 le16_to_cpu(logio->comp_status), 2014 le32_to_cpu(logio->io_parameter[0]), 2015 le32_to_cpu(logio->io_parameter[1])); 2016 2017 logio_done: 2018 sp->done(sp, 0); 2019 } 2020 2021 static void 2022 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 2023 { 2024 const char func[] = "TMF-IOCB"; 2025 const char *type; 2026 fc_port_t *fcport; 2027 srb_t *sp; 2028 struct srb_iocb *iocb; 2029 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2030 2031 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 2032 if (!sp) 2033 return; 2034 2035 iocb = &sp->u.iocb_cmd; 2036 type = sp->name; 2037 fcport = sp->fcport; 2038 iocb->u.tmf.data = QLA_SUCCESS; 2039 2040 if (sts->entry_status) { 2041 ql_log(ql_log_warn, fcport->vha, 0x5038, 2042 "Async-%s error - hdl=%x entry-status(%x).\n", 2043 type, sp->handle, sts->entry_status); 2044 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2045 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 2046 ql_log(ql_log_warn, fcport->vha, 0x5039, 2047 "Async-%s error - hdl=%x completion status(%x).\n", 2048 type, sp->handle, sts->comp_status); 2049 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2050 } else if ((le16_to_cpu(sts->scsi_status) & 2051 SS_RESPONSE_INFO_LEN_VALID)) { 2052 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2053 ql_log(ql_log_warn, fcport->vha, 0x503b, 2054 "Async-%s error - hdl=%x not enough response(%d).\n", 2055 type, sp->handle, sts->rsp_data_len); 2056 } else if (sts->data[3]) { 2057 ql_log(ql_log_warn, fcport->vha, 0x503c, 2058 "Async-%s error - hdl=%x response(%x).\n", 2059 type, sp->handle, sts->data[3]); 2060 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2061 } 2062 } 2063 2064 if (iocb->u.tmf.data != QLA_SUCCESS) 2065 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, 2066 sts, sizeof(*sts)); 2067 2068 sp->done(sp, 0); 2069 } 2070 2071 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2072 void *tsk, srb_t *sp) 2073 { 2074 fc_port_t *fcport; 2075 struct srb_iocb *iocb; 2076 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2077 uint16_t state_flags; 2078 struct nvmefc_fcp_req *fd; 2079 uint16_t ret = QLA_SUCCESS; 2080 __le16 comp_status = sts->comp_status; 2081 int logit = 0; 2082 2083 iocb = &sp->u.iocb_cmd; 2084 fcport = sp->fcport; 2085 iocb->u.nvme.comp_status = comp_status; 2086 state_flags = le16_to_cpu(sts->state_flags); 2087 fd = iocb->u.nvme.desc; 2088 2089 if (unlikely(iocb->u.nvme.aen_op)) 2090 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); 2091 2092 if (unlikely(comp_status != CS_COMPLETE)) 2093 logit = 1; 2094 2095 fd->transferred_length = fd->payload_length - 2096 le32_to_cpu(sts->residual_len); 2097 2098 /* 2099 * State flags: Bit 6 and 0. 2100 * If 0 is set, we don't care about 6. 2101 * both cases resp was dma'd to host buffer 2102 * if both are 0, that is good path case. 2103 * if six is set and 0 is clear, we need to 2104 * copy resp data from status iocb to resp buffer. 2105 */ 2106 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { 2107 iocb->u.nvme.rsp_pyld_len = 0; 2108 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == 2109 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { 2110 /* Response already DMA'd to fd->rspaddr. */ 2111 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2112 } else if ((state_flags & SF_FCP_RSP_DMA)) { 2113 /* 2114 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this 2115 * as an error. 2116 */ 2117 iocb->u.nvme.rsp_pyld_len = 0; 2118 fd->transferred_length = 0; 2119 ql_dbg(ql_dbg_io, fcport->vha, 0x307a, 2120 "Unexpected values in NVMe_RSP IU.\n"); 2121 logit = 1; 2122 } else if (state_flags & SF_NVME_ERSP) { 2123 uint32_t *inbuf, *outbuf; 2124 uint16_t iter; 2125 2126 inbuf = (uint32_t *)&sts->nvme_ersp_data; 2127 outbuf = (uint32_t *)fd->rspaddr; 2128 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2129 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > 2130 sizeof(struct nvme_fc_ersp_iu))) { 2131 if (ql_mask_match(ql_dbg_io)) { 2132 WARN_ONCE(1, "Unexpected response payload length %u.\n", 2133 iocb->u.nvme.rsp_pyld_len); 2134 ql_log(ql_log_warn, fcport->vha, 0x5100, 2135 "Unexpected response payload length %u.\n", 2136 iocb->u.nvme.rsp_pyld_len); 2137 } 2138 iocb->u.nvme.rsp_pyld_len = 2139 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); 2140 } 2141 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; 2142 for (; iter; iter--) 2143 *outbuf++ = swab32(*inbuf++); 2144 } 2145 2146 if (state_flags & SF_NVME_ERSP) { 2147 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; 2148 u32 tgt_xfer_len; 2149 2150 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); 2151 if (fd->transferred_length != tgt_xfer_len) { 2152 ql_dbg(ql_dbg_io, fcport->vha, 0x3079, 2153 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", 2154 tgt_xfer_len, fd->transferred_length); 2155 logit = 1; 2156 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { 2157 /* 2158 * Do not log if this is just an underflow and there 2159 * is no data loss. 2160 */ 2161 logit = 0; 2162 } 2163 } 2164 2165 if (unlikely(logit)) 2166 ql_log(ql_log_warn, fcport->vha, 0x5060, 2167 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", 2168 sp->name, sp->handle, comp_status, 2169 fd->transferred_length, le32_to_cpu(sts->residual_len), 2170 sts->ox_id); 2171 2172 /* 2173 * If transport error then Failure (HBA rejects request) 2174 * otherwise transport will handle. 2175 */ 2176 switch (le16_to_cpu(comp_status)) { 2177 case CS_COMPLETE: 2178 break; 2179 2180 case CS_RESET: 2181 case CS_PORT_UNAVAILABLE: 2182 case CS_PORT_LOGGED_OUT: 2183 fcport->nvme_flag |= NVME_FLAG_RESETTING; 2184 /* fall through */ 2185 case CS_ABORTED: 2186 case CS_PORT_BUSY: 2187 fd->transferred_length = 0; 2188 iocb->u.nvme.rsp_pyld_len = 0; 2189 ret = QLA_ABORTED; 2190 break; 2191 case CS_DATA_UNDERRUN: 2192 break; 2193 default: 2194 ret = QLA_FUNCTION_FAILED; 2195 break; 2196 } 2197 sp->done(sp, ret); 2198 } 2199 2200 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 2201 struct vp_ctrl_entry_24xx *vce) 2202 { 2203 const char func[] = "CTRLVP-IOCB"; 2204 srb_t *sp; 2205 int rval = QLA_SUCCESS; 2206 2207 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 2208 if (!sp) 2209 return; 2210 2211 if (vce->entry_status != 0) { 2212 ql_dbg(ql_dbg_vport, vha, 0x10c4, 2213 "%s: Failed to complete IOCB -- error status (%x)\n", 2214 sp->name, vce->entry_status); 2215 rval = QLA_FUNCTION_FAILED; 2216 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 2217 ql_dbg(ql_dbg_vport, vha, 0x10c5, 2218 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 2219 sp->name, le16_to_cpu(vce->comp_status), 2220 le16_to_cpu(vce->vp_idx_failed)); 2221 rval = QLA_FUNCTION_FAILED; 2222 } else { 2223 ql_dbg(ql_dbg_vport, vha, 0x10c6, 2224 "Done %s.\n", __func__); 2225 } 2226 2227 sp->rc = rval; 2228 sp->done(sp, rval); 2229 } 2230 2231 /* Process a single response queue entry. */ 2232 static void qla2x00_process_response_entry(struct scsi_qla_host *vha, 2233 struct rsp_que *rsp, 2234 sts_entry_t *pkt) 2235 { 2236 sts21_entry_t *sts21_entry; 2237 sts22_entry_t *sts22_entry; 2238 uint16_t handle_cnt; 2239 uint16_t cnt; 2240 2241 switch (pkt->entry_type) { 2242 case STATUS_TYPE: 2243 qla2x00_status_entry(vha, rsp, pkt); 2244 break; 2245 case STATUS_TYPE_21: 2246 sts21_entry = (sts21_entry_t *)pkt; 2247 handle_cnt = sts21_entry->handle_count; 2248 for (cnt = 0; cnt < handle_cnt; cnt++) 2249 qla2x00_process_completed_request(vha, rsp->req, 2250 sts21_entry->handle[cnt]); 2251 break; 2252 case STATUS_TYPE_22: 2253 sts22_entry = (sts22_entry_t *)pkt; 2254 handle_cnt = sts22_entry->handle_count; 2255 for (cnt = 0; cnt < handle_cnt; cnt++) 2256 qla2x00_process_completed_request(vha, rsp->req, 2257 sts22_entry->handle[cnt]); 2258 break; 2259 case STATUS_CONT_TYPE: 2260 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2261 break; 2262 case MBX_IOCB_TYPE: 2263 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); 2264 break; 2265 case CT_IOCB_TYPE: 2266 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2267 break; 2268 default: 2269 /* Type Not Supported. */ 2270 ql_log(ql_log_warn, vha, 0x504a, 2271 "Received unknown response pkt type %x entry status=%x.\n", 2272 pkt->entry_type, pkt->entry_status); 2273 break; 2274 } 2275 } 2276 2277 /** 2278 * qla2x00_process_response_queue() - Process response queue entries. 2279 * @rsp: response queue 2280 */ 2281 void 2282 qla2x00_process_response_queue(struct rsp_que *rsp) 2283 { 2284 struct scsi_qla_host *vha; 2285 struct qla_hw_data *ha = rsp->hw; 2286 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2287 sts_entry_t *pkt; 2288 2289 vha = pci_get_drvdata(ha->pdev); 2290 2291 if (!vha->flags.online) 2292 return; 2293 2294 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2295 pkt = (sts_entry_t *)rsp->ring_ptr; 2296 2297 rsp->ring_index++; 2298 if (rsp->ring_index == rsp->length) { 2299 rsp->ring_index = 0; 2300 rsp->ring_ptr = rsp->ring; 2301 } else { 2302 rsp->ring_ptr++; 2303 } 2304 2305 if (pkt->entry_status != 0) { 2306 qla2x00_error_entry(vha, rsp, pkt); 2307 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2308 wmb(); 2309 continue; 2310 } 2311 2312 qla2x00_process_response_entry(vha, rsp, pkt); 2313 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2314 wmb(); 2315 } 2316 2317 /* Adjust ring index */ 2318 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 2319 } 2320 2321 static inline void 2322 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2323 uint32_t sense_len, struct rsp_que *rsp, int res) 2324 { 2325 struct scsi_qla_host *vha = sp->vha; 2326 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2327 uint32_t track_sense_len; 2328 2329 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2330 sense_len = SCSI_SENSE_BUFFERSIZE; 2331 2332 SET_CMD_SENSE_LEN(sp, sense_len); 2333 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2334 track_sense_len = sense_len; 2335 2336 if (sense_len > par_sense_len) 2337 sense_len = par_sense_len; 2338 2339 memcpy(cp->sense_buffer, sense_data, sense_len); 2340 2341 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2342 track_sense_len -= sense_len; 2343 SET_CMD_SENSE_LEN(sp, track_sense_len); 2344 2345 if (track_sense_len != 0) { 2346 rsp->status_srb = sp; 2347 cp->result = res; 2348 } 2349 2350 if (sense_len) { 2351 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 2352 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 2353 sp->vha->host_no, cp->device->id, cp->device->lun, 2354 cp); 2355 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 2356 cp->sense_buffer, sense_len); 2357 } 2358 } 2359 2360 struct scsi_dif_tuple { 2361 __be16 guard; /* Checksum */ 2362 __be16 app_tag; /* APPL identifier */ 2363 __be32 ref_tag; /* Target LBA or indirect LBA */ 2364 }; 2365 2366 /* 2367 * Checks the guard or meta-data for the type of error 2368 * detected by the HBA. In case of errors, we set the 2369 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 2370 * to indicate to the kernel that the HBA detected error. 2371 */ 2372 static inline int 2373 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 2374 { 2375 struct scsi_qla_host *vha = sp->vha; 2376 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2377 uint8_t *ap = &sts24->data[12]; 2378 uint8_t *ep = &sts24->data[20]; 2379 uint32_t e_ref_tag, a_ref_tag; 2380 uint16_t e_app_tag, a_app_tag; 2381 uint16_t e_guard, a_guard; 2382 2383 /* 2384 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 2385 * would make guard field appear at offset 2 2386 */ 2387 a_guard = get_unaligned_le16(ap + 2); 2388 a_app_tag = get_unaligned_le16(ap + 0); 2389 a_ref_tag = get_unaligned_le32(ap + 4); 2390 e_guard = get_unaligned_le16(ep + 2); 2391 e_app_tag = get_unaligned_le16(ep + 0); 2392 e_ref_tag = get_unaligned_le32(ep + 4); 2393 2394 ql_dbg(ql_dbg_io, vha, 0x3023, 2395 "iocb(s) %p Returned STATUS.\n", sts24); 2396 2397 ql_dbg(ql_dbg_io, vha, 0x3024, 2398 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 2399 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 2400 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 2401 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 2402 a_app_tag, e_app_tag, a_guard, e_guard); 2403 2404 /* 2405 * Ignore sector if: 2406 * For type 3: ref & app tag is all 'f's 2407 * For type 0,1,2: app tag is all 'f's 2408 */ 2409 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && 2410 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || 2411 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { 2412 uint32_t blocks_done, resid; 2413 sector_t lba_s = scsi_get_lba(cmd); 2414 2415 /* 2TB boundary case covered automatically with this */ 2416 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 2417 2418 resid = scsi_bufflen(cmd) - (blocks_done * 2419 cmd->device->sector_size); 2420 2421 scsi_set_resid(cmd, resid); 2422 cmd->result = DID_OK << 16; 2423 2424 /* Update protection tag */ 2425 if (scsi_prot_sg_count(cmd)) { 2426 uint32_t i, j = 0, k = 0, num_ent; 2427 struct scatterlist *sg; 2428 struct t10_pi_tuple *spt; 2429 2430 /* Patch the corresponding protection tags */ 2431 scsi_for_each_prot_sg(cmd, sg, 2432 scsi_prot_sg_count(cmd), i) { 2433 num_ent = sg_dma_len(sg) / 8; 2434 if (k + num_ent < blocks_done) { 2435 k += num_ent; 2436 continue; 2437 } 2438 j = blocks_done - k - 1; 2439 k = blocks_done; 2440 break; 2441 } 2442 2443 if (k != blocks_done) { 2444 ql_log(ql_log_warn, vha, 0x302f, 2445 "unexpected tag values tag:lba=%x:%llx)\n", 2446 e_ref_tag, (unsigned long long)lba_s); 2447 return 1; 2448 } 2449 2450 spt = page_address(sg_page(sg)) + sg->offset; 2451 spt += j; 2452 2453 spt->app_tag = T10_PI_APP_ESCAPE; 2454 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 2455 spt->ref_tag = T10_PI_REF_ESCAPE; 2456 } 2457 2458 return 0; 2459 } 2460 2461 /* check guard */ 2462 if (e_guard != a_guard) { 2463 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2464 0x10, 0x1); 2465 set_driver_byte(cmd, DRIVER_SENSE); 2466 set_host_byte(cmd, DID_ABORT); 2467 cmd->result |= SAM_STAT_CHECK_CONDITION; 2468 return 1; 2469 } 2470 2471 /* check ref tag */ 2472 if (e_ref_tag != a_ref_tag) { 2473 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2474 0x10, 0x3); 2475 set_driver_byte(cmd, DRIVER_SENSE); 2476 set_host_byte(cmd, DID_ABORT); 2477 cmd->result |= SAM_STAT_CHECK_CONDITION; 2478 return 1; 2479 } 2480 2481 /* check appl tag */ 2482 if (e_app_tag != a_app_tag) { 2483 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2484 0x10, 0x2); 2485 set_driver_byte(cmd, DRIVER_SENSE); 2486 set_host_byte(cmd, DID_ABORT); 2487 cmd->result |= SAM_STAT_CHECK_CONDITION; 2488 return 1; 2489 } 2490 2491 return 1; 2492 } 2493 2494 static void 2495 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 2496 struct req_que *req, uint32_t index) 2497 { 2498 struct qla_hw_data *ha = vha->hw; 2499 srb_t *sp; 2500 uint16_t comp_status; 2501 uint16_t scsi_status; 2502 uint16_t thread_id; 2503 uint32_t rval = EXT_STATUS_OK; 2504 struct bsg_job *bsg_job = NULL; 2505 struct fc_bsg_request *bsg_request; 2506 struct fc_bsg_reply *bsg_reply; 2507 sts_entry_t *sts = pkt; 2508 struct sts_entry_24xx *sts24 = pkt; 2509 2510 /* Validate handle. */ 2511 if (index >= req->num_outstanding_cmds) { 2512 ql_log(ql_log_warn, vha, 0x70af, 2513 "Invalid SCSI completion handle 0x%x.\n", index); 2514 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2515 return; 2516 } 2517 2518 sp = req->outstanding_cmds[index]; 2519 if (!sp) { 2520 ql_log(ql_log_warn, vha, 0x70b0, 2521 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 2522 req->id, index); 2523 2524 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2525 return; 2526 } 2527 2528 /* Free outstanding command slot. */ 2529 req->outstanding_cmds[index] = NULL; 2530 bsg_job = sp->u.bsg_job; 2531 bsg_request = bsg_job->request; 2532 bsg_reply = bsg_job->reply; 2533 2534 if (IS_FWI2_CAPABLE(ha)) { 2535 comp_status = le16_to_cpu(sts24->comp_status); 2536 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2537 } else { 2538 comp_status = le16_to_cpu(sts->comp_status); 2539 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2540 } 2541 2542 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 2543 switch (comp_status) { 2544 case CS_COMPLETE: 2545 if (scsi_status == 0) { 2546 bsg_reply->reply_payload_rcv_len = 2547 bsg_job->reply_payload.payload_len; 2548 vha->qla_stats.input_bytes += 2549 bsg_reply->reply_payload_rcv_len; 2550 vha->qla_stats.input_requests++; 2551 rval = EXT_STATUS_OK; 2552 } 2553 goto done; 2554 2555 case CS_DATA_OVERRUN: 2556 ql_dbg(ql_dbg_user, vha, 0x70b1, 2557 "Command completed with data overrun thread_id=%d\n", 2558 thread_id); 2559 rval = EXT_STATUS_DATA_OVERRUN; 2560 break; 2561 2562 case CS_DATA_UNDERRUN: 2563 ql_dbg(ql_dbg_user, vha, 0x70b2, 2564 "Command completed with data underrun thread_id=%d\n", 2565 thread_id); 2566 rval = EXT_STATUS_DATA_UNDERRUN; 2567 break; 2568 case CS_BIDIR_RD_OVERRUN: 2569 ql_dbg(ql_dbg_user, vha, 0x70b3, 2570 "Command completed with read data overrun thread_id=%d\n", 2571 thread_id); 2572 rval = EXT_STATUS_DATA_OVERRUN; 2573 break; 2574 2575 case CS_BIDIR_RD_WR_OVERRUN: 2576 ql_dbg(ql_dbg_user, vha, 0x70b4, 2577 "Command completed with read and write data overrun " 2578 "thread_id=%d\n", thread_id); 2579 rval = EXT_STATUS_DATA_OVERRUN; 2580 break; 2581 2582 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 2583 ql_dbg(ql_dbg_user, vha, 0x70b5, 2584 "Command completed with read data over and write data " 2585 "underrun thread_id=%d\n", thread_id); 2586 rval = EXT_STATUS_DATA_OVERRUN; 2587 break; 2588 2589 case CS_BIDIR_RD_UNDERRUN: 2590 ql_dbg(ql_dbg_user, vha, 0x70b6, 2591 "Command completed with read data underrun " 2592 "thread_id=%d\n", thread_id); 2593 rval = EXT_STATUS_DATA_UNDERRUN; 2594 break; 2595 2596 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 2597 ql_dbg(ql_dbg_user, vha, 0x70b7, 2598 "Command completed with read data under and write data " 2599 "overrun thread_id=%d\n", thread_id); 2600 rval = EXT_STATUS_DATA_UNDERRUN; 2601 break; 2602 2603 case CS_BIDIR_RD_WR_UNDERRUN: 2604 ql_dbg(ql_dbg_user, vha, 0x70b8, 2605 "Command completed with read and write data underrun " 2606 "thread_id=%d\n", thread_id); 2607 rval = EXT_STATUS_DATA_UNDERRUN; 2608 break; 2609 2610 case CS_BIDIR_DMA: 2611 ql_dbg(ql_dbg_user, vha, 0x70b9, 2612 "Command completed with data DMA error thread_id=%d\n", 2613 thread_id); 2614 rval = EXT_STATUS_DMA_ERR; 2615 break; 2616 2617 case CS_TIMEOUT: 2618 ql_dbg(ql_dbg_user, vha, 0x70ba, 2619 "Command completed with timeout thread_id=%d\n", 2620 thread_id); 2621 rval = EXT_STATUS_TIMEOUT; 2622 break; 2623 default: 2624 ql_dbg(ql_dbg_user, vha, 0x70bb, 2625 "Command completed with completion status=0x%x " 2626 "thread_id=%d\n", comp_status, thread_id); 2627 rval = EXT_STATUS_ERR; 2628 break; 2629 } 2630 bsg_reply->reply_payload_rcv_len = 0; 2631 2632 done: 2633 /* Return the vendor specific reply to API */ 2634 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2635 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2636 /* Always return DID_OK, bsg will send the vendor specific response 2637 * in this case only */ 2638 sp->done(sp, DID_OK << 16); 2639 2640 } 2641 2642 /** 2643 * qla2x00_status_entry() - Process a Status IOCB entry. 2644 * @vha: SCSI driver HA context 2645 * @rsp: response queue 2646 * @pkt: Entry pointer 2647 */ 2648 static void 2649 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2650 { 2651 srb_t *sp; 2652 fc_port_t *fcport; 2653 struct scsi_cmnd *cp; 2654 sts_entry_t *sts = pkt; 2655 struct sts_entry_24xx *sts24 = pkt; 2656 uint16_t comp_status; 2657 uint16_t scsi_status; 2658 uint16_t ox_id; 2659 uint8_t lscsi_status; 2660 int32_t resid; 2661 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2662 fw_resid_len; 2663 uint8_t *rsp_info, *sense_data; 2664 struct qla_hw_data *ha = vha->hw; 2665 uint32_t handle; 2666 uint16_t que; 2667 struct req_que *req; 2668 int logit = 1; 2669 int res = 0; 2670 uint16_t state_flags = 0; 2671 uint16_t retry_delay = 0; 2672 2673 if (IS_FWI2_CAPABLE(ha)) { 2674 comp_status = le16_to_cpu(sts24->comp_status); 2675 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2676 state_flags = le16_to_cpu(sts24->state_flags); 2677 } else { 2678 comp_status = le16_to_cpu(sts->comp_status); 2679 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2680 } 2681 handle = (uint32_t) LSW(sts->handle); 2682 que = MSW(sts->handle); 2683 req = ha->req_q_map[que]; 2684 2685 /* Check for invalid queue pointer */ 2686 if (req == NULL || 2687 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2688 ql_dbg(ql_dbg_io, vha, 0x3059, 2689 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2690 "que=%u.\n", sts->handle, req, que); 2691 return; 2692 } 2693 2694 /* Validate handle. */ 2695 if (handle < req->num_outstanding_cmds) { 2696 sp = req->outstanding_cmds[handle]; 2697 if (!sp) { 2698 ql_dbg(ql_dbg_io, vha, 0x3075, 2699 "%s(%ld): Already returned command for status handle (0x%x).\n", 2700 __func__, vha->host_no, sts->handle); 2701 return; 2702 } 2703 } else { 2704 ql_dbg(ql_dbg_io, vha, 0x3017, 2705 "Invalid status handle, out of range (0x%x).\n", 2706 sts->handle); 2707 2708 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2709 if (IS_P3P_TYPE(ha)) 2710 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2711 else 2712 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2713 qla2xxx_wake_dpc(vha); 2714 } 2715 return; 2716 } 2717 2718 if (sp->cmd_type != TYPE_SRB) { 2719 req->outstanding_cmds[handle] = NULL; 2720 ql_dbg(ql_dbg_io, vha, 0x3015, 2721 "Unknown sp->cmd_type %x %p).\n", 2722 sp->cmd_type, sp); 2723 return; 2724 } 2725 2726 /* NVME completion. */ 2727 if (sp->type == SRB_NVME_CMD) { 2728 req->outstanding_cmds[handle] = NULL; 2729 qla24xx_nvme_iocb_entry(vha, req, pkt, sp); 2730 return; 2731 } 2732 2733 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2734 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2735 return; 2736 } 2737 2738 /* Task Management completion. */ 2739 if (sp->type == SRB_TM_CMD) { 2740 qla24xx_tm_iocb_entry(vha, req, pkt); 2741 return; 2742 } 2743 2744 /* Fast path completion. */ 2745 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2746 qla2x00_process_completed_request(vha, req, handle); 2747 2748 return; 2749 } 2750 2751 req->outstanding_cmds[handle] = NULL; 2752 cp = GET_CMD_SP(sp); 2753 if (cp == NULL) { 2754 ql_dbg(ql_dbg_io, vha, 0x3018, 2755 "Command already returned (0x%x/%p).\n", 2756 sts->handle, sp); 2757 2758 return; 2759 } 2760 2761 lscsi_status = scsi_status & STATUS_MASK; 2762 2763 fcport = sp->fcport; 2764 2765 ox_id = 0; 2766 sense_len = par_sense_len = rsp_info_len = resid_len = 2767 fw_resid_len = 0; 2768 if (IS_FWI2_CAPABLE(ha)) { 2769 u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay); 2770 2771 if (scsi_status & SS_SENSE_LEN_VALID) 2772 sense_len = le32_to_cpu(sts24->sense_len); 2773 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2774 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2775 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2776 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2777 if (comp_status == CS_DATA_UNDERRUN) 2778 fw_resid_len = le32_to_cpu(sts24->residual_len); 2779 rsp_info = sts24->data; 2780 sense_data = sts24->data; 2781 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2782 ox_id = le16_to_cpu(sts24->ox_id); 2783 par_sense_len = sizeof(sts24->data); 2784 /* Valid values of the retry delay timer are 0x1-0xffef */ 2785 if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) { 2786 retry_delay = sts24_retry_delay & 0x3fff; 2787 ql_dbg(ql_dbg_io, sp->vha, 0x3033, 2788 "%s: scope=%#x retry_delay=%#x\n", __func__, 2789 sts24_retry_delay >> 14, retry_delay); 2790 } 2791 } else { 2792 if (scsi_status & SS_SENSE_LEN_VALID) 2793 sense_len = le16_to_cpu(sts->req_sense_length); 2794 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2795 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2796 resid_len = le32_to_cpu(sts->residual_length); 2797 rsp_info = sts->rsp_info; 2798 sense_data = sts->req_sense_data; 2799 par_sense_len = sizeof(sts->req_sense_data); 2800 } 2801 2802 /* Check for any FCP transport errors. */ 2803 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2804 /* Sense data lies beyond any FCP RESPONSE data. */ 2805 if (IS_FWI2_CAPABLE(ha)) { 2806 sense_data += rsp_info_len; 2807 par_sense_len -= rsp_info_len; 2808 } 2809 if (rsp_info_len > 3 && rsp_info[3]) { 2810 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2811 "FCP I/O protocol failure (0x%x/0x%x).\n", 2812 rsp_info_len, rsp_info[3]); 2813 2814 res = DID_BUS_BUSY << 16; 2815 goto out; 2816 } 2817 } 2818 2819 /* Check for overrun. */ 2820 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 2821 scsi_status & SS_RESIDUAL_OVER) 2822 comp_status = CS_DATA_OVERRUN; 2823 2824 /* 2825 * Check retry_delay_timer value if we receive a busy or 2826 * queue full. 2827 */ 2828 if (lscsi_status == SAM_STAT_TASK_SET_FULL || 2829 lscsi_status == SAM_STAT_BUSY) 2830 qla2x00_set_retry_delay_timestamp(fcport, retry_delay); 2831 2832 /* 2833 * Based on Host and scsi status generate status code for Linux 2834 */ 2835 switch (comp_status) { 2836 case CS_COMPLETE: 2837 case CS_QUEUE_FULL: 2838 if (scsi_status == 0) { 2839 res = DID_OK << 16; 2840 break; 2841 } 2842 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 2843 resid = resid_len; 2844 scsi_set_resid(cp, resid); 2845 2846 if (!lscsi_status && 2847 ((unsigned)(scsi_bufflen(cp) - resid) < 2848 cp->underflow)) { 2849 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 2850 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 2851 resid, scsi_bufflen(cp)); 2852 2853 res = DID_ERROR << 16; 2854 break; 2855 } 2856 } 2857 res = DID_OK << 16 | lscsi_status; 2858 2859 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2860 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 2861 "QUEUE FULL detected.\n"); 2862 break; 2863 } 2864 logit = 0; 2865 if (lscsi_status != SS_CHECK_CONDITION) 2866 break; 2867 2868 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2869 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2870 break; 2871 2872 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2873 rsp, res); 2874 break; 2875 2876 case CS_DATA_UNDERRUN: 2877 /* Use F/W calculated residual length. */ 2878 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 2879 scsi_set_resid(cp, resid); 2880 if (scsi_status & SS_RESIDUAL_UNDER) { 2881 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 2882 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 2883 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 2884 resid, scsi_bufflen(cp)); 2885 2886 res = DID_ERROR << 16 | lscsi_status; 2887 goto check_scsi_status; 2888 } 2889 2890 if (!lscsi_status && 2891 ((unsigned)(scsi_bufflen(cp) - resid) < 2892 cp->underflow)) { 2893 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 2894 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 2895 resid, scsi_bufflen(cp)); 2896 2897 res = DID_ERROR << 16; 2898 break; 2899 } 2900 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 2901 lscsi_status != SAM_STAT_BUSY) { 2902 /* 2903 * scsi status of task set and busy are considered to be 2904 * task not completed. 2905 */ 2906 2907 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 2908 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 2909 resid, scsi_bufflen(cp)); 2910 2911 res = DID_ERROR << 16 | lscsi_status; 2912 goto check_scsi_status; 2913 } else { 2914 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 2915 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2916 scsi_status, lscsi_status); 2917 } 2918 2919 res = DID_OK << 16 | lscsi_status; 2920 logit = 0; 2921 2922 check_scsi_status: 2923 /* 2924 * Check to see if SCSI Status is non zero. If so report SCSI 2925 * Status. 2926 */ 2927 if (lscsi_status != 0) { 2928 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2929 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 2930 "QUEUE FULL detected.\n"); 2931 logit = 1; 2932 break; 2933 } 2934 if (lscsi_status != SS_CHECK_CONDITION) 2935 break; 2936 2937 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2938 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2939 break; 2940 2941 qla2x00_handle_sense(sp, sense_data, par_sense_len, 2942 sense_len, rsp, res); 2943 } 2944 break; 2945 2946 case CS_PORT_LOGGED_OUT: 2947 case CS_PORT_CONFIG_CHG: 2948 case CS_PORT_BUSY: 2949 case CS_INCOMPLETE: 2950 case CS_PORT_UNAVAILABLE: 2951 case CS_TIMEOUT: 2952 case CS_RESET: 2953 2954 /* 2955 * We are going to have the fc class block the rport 2956 * while we try to recover so instruct the mid layer 2957 * to requeue until the class decides how to handle this. 2958 */ 2959 res = DID_TRANSPORT_DISRUPTED << 16; 2960 2961 if (comp_status == CS_TIMEOUT) { 2962 if (IS_FWI2_CAPABLE(ha)) 2963 break; 2964 else if ((le16_to_cpu(sts->status_flags) & 2965 SF_LOGOUT_SENT) == 0) 2966 break; 2967 } 2968 2969 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2970 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2971 "Port to be marked lost on fcport=%02x%02x%02x, current " 2972 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 2973 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2974 port_state_str[FCS_ONLINE], 2975 comp_status); 2976 2977 qlt_schedule_sess_for_deletion(fcport); 2978 } 2979 2980 break; 2981 2982 case CS_ABORTED: 2983 res = DID_RESET << 16; 2984 break; 2985 2986 case CS_DIF_ERROR: 2987 logit = qla2x00_handle_dif_error(sp, sts24); 2988 res = cp->result; 2989 break; 2990 2991 case CS_TRANSPORT: 2992 res = DID_ERROR << 16; 2993 2994 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 2995 break; 2996 2997 if (state_flags & BIT_4) 2998 scmd_printk(KERN_WARNING, cp, 2999 "Unsupported device '%s' found.\n", 3000 cp->device->vendor); 3001 break; 3002 3003 case CS_DMA: 3004 ql_log(ql_log_info, fcport->vha, 0x3022, 3005 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3006 comp_status, scsi_status, res, vha->host_no, 3007 cp->device->id, cp->device->lun, fcport->d_id.b24, 3008 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3009 resid_len, fw_resid_len, sp, cp); 3010 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, 3011 pkt, sizeof(*sts24)); 3012 res = DID_ERROR << 16; 3013 break; 3014 default: 3015 res = DID_ERROR << 16; 3016 break; 3017 } 3018 3019 out: 3020 if (logit) 3021 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 3022 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " 3023 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " 3024 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3025 comp_status, scsi_status, res, vha->host_no, 3026 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 3027 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 3028 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3029 resid_len, fw_resid_len, sp, cp); 3030 3031 if (rsp->status_srb == NULL) 3032 sp->done(sp, res); 3033 } 3034 3035 /** 3036 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 3037 * @rsp: response queue 3038 * @pkt: Entry pointer 3039 * 3040 * Extended sense data. 3041 */ 3042 static void 3043 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 3044 { 3045 uint8_t sense_sz = 0; 3046 struct qla_hw_data *ha = rsp->hw; 3047 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 3048 srb_t *sp = rsp->status_srb; 3049 struct scsi_cmnd *cp; 3050 uint32_t sense_len; 3051 uint8_t *sense_ptr; 3052 3053 if (!sp || !GET_CMD_SENSE_LEN(sp)) 3054 return; 3055 3056 sense_len = GET_CMD_SENSE_LEN(sp); 3057 sense_ptr = GET_CMD_SENSE_PTR(sp); 3058 3059 cp = GET_CMD_SP(sp); 3060 if (cp == NULL) { 3061 ql_log(ql_log_warn, vha, 0x3025, 3062 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 3063 3064 rsp->status_srb = NULL; 3065 return; 3066 } 3067 3068 if (sense_len > sizeof(pkt->data)) 3069 sense_sz = sizeof(pkt->data); 3070 else 3071 sense_sz = sense_len; 3072 3073 /* Move sense data. */ 3074 if (IS_FWI2_CAPABLE(ha)) 3075 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 3076 memcpy(sense_ptr, pkt->data, sense_sz); 3077 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 3078 sense_ptr, sense_sz); 3079 3080 sense_len -= sense_sz; 3081 sense_ptr += sense_sz; 3082 3083 SET_CMD_SENSE_PTR(sp, sense_ptr); 3084 SET_CMD_SENSE_LEN(sp, sense_len); 3085 3086 /* Place command on done queue. */ 3087 if (sense_len == 0) { 3088 rsp->status_srb = NULL; 3089 sp->done(sp, cp->result); 3090 } 3091 } 3092 3093 /** 3094 * qla2x00_error_entry() - Process an error entry. 3095 * @vha: SCSI driver HA context 3096 * @rsp: response queue 3097 * @pkt: Entry pointer 3098 * return : 1=allow further error analysis. 0=no additional error analysis. 3099 */ 3100 static int 3101 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 3102 { 3103 srb_t *sp; 3104 struct qla_hw_data *ha = vha->hw; 3105 const char func[] = "ERROR-IOCB"; 3106 uint16_t que = MSW(pkt->handle); 3107 struct req_que *req = NULL; 3108 int res = DID_ERROR << 16; 3109 3110 ql_dbg(ql_dbg_async, vha, 0x502a, 3111 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", 3112 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); 3113 3114 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 3115 goto fatal; 3116 3117 req = ha->req_q_map[que]; 3118 3119 if (pkt->entry_status & RF_BUSY) 3120 res = DID_BUS_BUSY << 16; 3121 3122 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) 3123 return 0; 3124 3125 switch (pkt->entry_type) { 3126 case NOTIFY_ACK_TYPE: 3127 case STATUS_TYPE: 3128 case STATUS_CONT_TYPE: 3129 case LOGINOUT_PORT_IOCB_TYPE: 3130 case CT_IOCB_TYPE: 3131 case ELS_IOCB_TYPE: 3132 case ABORT_IOCB_TYPE: 3133 case MBX_IOCB_TYPE: 3134 default: 3135 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3136 if (sp) { 3137 sp->done(sp, res); 3138 return 0; 3139 } 3140 break; 3141 3142 case ABTS_RESP_24XX: 3143 case CTIO_TYPE7: 3144 case CTIO_CRC2: 3145 return 1; 3146 } 3147 fatal: 3148 ql_log(ql_log_warn, vha, 0x5030, 3149 "Error entry - invalid handle/queue (%04x).\n", que); 3150 return 0; 3151 } 3152 3153 /** 3154 * qla24xx_mbx_completion() - Process mailbox command completions. 3155 * @vha: SCSI driver HA context 3156 * @mb0: Mailbox0 register 3157 */ 3158 static void 3159 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 3160 { 3161 uint16_t cnt; 3162 uint32_t mboxes; 3163 __le16 __iomem *wptr; 3164 struct qla_hw_data *ha = vha->hw; 3165 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3166 3167 /* Read all mbox registers? */ 3168 WARN_ON_ONCE(ha->mbx_count > 32); 3169 mboxes = (1ULL << ha->mbx_count) - 1; 3170 if (!ha->mcp) 3171 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 3172 else 3173 mboxes = ha->mcp->in_mb; 3174 3175 /* Load return mailbox registers. */ 3176 ha->flags.mbox_int = 1; 3177 ha->mailbox_out[0] = mb0; 3178 mboxes >>= 1; 3179 wptr = ®->mailbox1; 3180 3181 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 3182 if (mboxes & BIT_0) 3183 ha->mailbox_out[cnt] = rd_reg_word(wptr); 3184 3185 mboxes >>= 1; 3186 wptr++; 3187 } 3188 } 3189 3190 static void 3191 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 3192 struct abort_entry_24xx *pkt) 3193 { 3194 const char func[] = "ABT_IOCB"; 3195 srb_t *sp; 3196 struct srb_iocb *abt; 3197 3198 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3199 if (!sp) 3200 return; 3201 3202 abt = &sp->u.iocb_cmd; 3203 abt->u.abt.comp_status = pkt->nport_handle; 3204 sp->done(sp, 0); 3205 } 3206 3207 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, 3208 struct pt_ls4_request *pkt, struct req_que *req) 3209 { 3210 srb_t *sp; 3211 const char func[] = "LS4_IOCB"; 3212 uint16_t comp_status; 3213 3214 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3215 if (!sp) 3216 return; 3217 3218 comp_status = le16_to_cpu(pkt->status); 3219 sp->done(sp, comp_status); 3220 } 3221 3222 /** 3223 * qla24xx_process_response_queue() - Process response queue entries. 3224 * @vha: SCSI driver HA context 3225 * @rsp: response queue 3226 */ 3227 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 3228 struct rsp_que *rsp) 3229 { 3230 struct sts_entry_24xx *pkt; 3231 struct qla_hw_data *ha = vha->hw; 3232 3233 if (!ha->flags.fw_started) 3234 return; 3235 3236 if (rsp->qpair->cpuid != smp_processor_id()) 3237 qla_cpu_update(rsp->qpair, smp_processor_id()); 3238 3239 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 3240 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 3241 3242 rsp->ring_index++; 3243 if (rsp->ring_index == rsp->length) { 3244 rsp->ring_index = 0; 3245 rsp->ring_ptr = rsp->ring; 3246 } else { 3247 rsp->ring_ptr++; 3248 } 3249 3250 if (pkt->entry_status != 0) { 3251 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) 3252 goto process_err; 3253 3254 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3255 wmb(); 3256 continue; 3257 } 3258 process_err: 3259 3260 switch (pkt->entry_type) { 3261 case STATUS_TYPE: 3262 qla2x00_status_entry(vha, rsp, pkt); 3263 break; 3264 case STATUS_CONT_TYPE: 3265 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 3266 break; 3267 case VP_RPT_ID_IOCB_TYPE: 3268 qla24xx_report_id_acquisition(vha, 3269 (struct vp_rpt_id_entry_24xx *)pkt); 3270 break; 3271 case LOGINOUT_PORT_IOCB_TYPE: 3272 qla24xx_logio_entry(vha, rsp->req, 3273 (struct logio_entry_24xx *)pkt); 3274 break; 3275 case CT_IOCB_TYPE: 3276 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 3277 break; 3278 case ELS_IOCB_TYPE: 3279 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 3280 break; 3281 case ABTS_RECV_24XX: 3282 if (qla_ini_mode_enabled(vha)) { 3283 qla24xx_purex_iocb(vha, pkt, 3284 qla24xx_process_abts); 3285 break; 3286 } 3287 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3288 IS_QLA28XX(ha)) { 3289 /* ensure that the ATIO queue is empty */ 3290 qlt_handle_abts_recv(vha, rsp, 3291 (response_t *)pkt); 3292 break; 3293 } else { 3294 qlt_24xx_process_atio_queue(vha, 1); 3295 } 3296 /* fall through */ 3297 case ABTS_RESP_24XX: 3298 case CTIO_TYPE7: 3299 case CTIO_CRC2: 3300 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); 3301 break; 3302 case PT_LS4_REQUEST: 3303 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, 3304 rsp->req); 3305 break; 3306 case NOTIFY_ACK_TYPE: 3307 if (pkt->handle == QLA_TGT_SKIP_HANDLE) 3308 qlt_response_pkt_all_vps(vha, rsp, 3309 (response_t *)pkt); 3310 else 3311 qla24xxx_nack_iocb_entry(vha, rsp->req, 3312 (struct nack_to_isp *)pkt); 3313 break; 3314 case MARKER_TYPE: 3315 /* Do nothing in this case, this check is to prevent it 3316 * from falling into default case 3317 */ 3318 break; 3319 case ABORT_IOCB_TYPE: 3320 qla24xx_abort_iocb_entry(vha, rsp->req, 3321 (struct abort_entry_24xx *)pkt); 3322 break; 3323 case MBX_IOCB_TYPE: 3324 qla24xx_mbx_iocb_entry(vha, rsp->req, 3325 (struct mbx_24xx_entry *)pkt); 3326 break; 3327 case VP_CTRL_IOCB_TYPE: 3328 qla_ctrlvp_completed(vha, rsp->req, 3329 (struct vp_ctrl_entry_24xx *)pkt); 3330 break; 3331 case PUREX_IOCB_TYPE: 3332 { 3333 struct purex_entry_24xx *purex = (void *)pkt; 3334 3335 if (purex->els_frame_payload[3] != ELS_COMMAND_RDP) { 3336 ql_dbg(ql_dbg_init, vha, 0x5091, 3337 "Discarding ELS Request opcode %#x...\n", 3338 purex->els_frame_payload[3]); 3339 break; 3340 } 3341 qla24xx_purex_iocb(vha, pkt, qla24xx_process_purex_rdp); 3342 break; 3343 } 3344 default: 3345 /* Type Not Supported. */ 3346 ql_dbg(ql_dbg_async, vha, 0x5042, 3347 "Received unknown response pkt type %x " 3348 "entry status=%x.\n", 3349 pkt->entry_type, pkt->entry_status); 3350 break; 3351 } 3352 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3353 wmb(); 3354 } 3355 3356 /* Adjust ring index */ 3357 if (IS_P3P_TYPE(ha)) { 3358 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 3359 3360 wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index); 3361 } else { 3362 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); 3363 } 3364 } 3365 3366 static void 3367 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 3368 { 3369 int rval; 3370 uint32_t cnt; 3371 struct qla_hw_data *ha = vha->hw; 3372 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3373 3374 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3375 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3376 return; 3377 3378 rval = QLA_SUCCESS; 3379 wrt_reg_dword(®->iobase_addr, 0x7C00); 3380 rd_reg_dword(®->iobase_addr); 3381 wrt_reg_dword(®->iobase_window, 0x0001); 3382 for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3383 rval == QLA_SUCCESS; cnt--) { 3384 if (cnt) { 3385 wrt_reg_dword(®->iobase_window, 0x0001); 3386 udelay(10); 3387 } else 3388 rval = QLA_FUNCTION_TIMEOUT; 3389 } 3390 if (rval == QLA_SUCCESS) 3391 goto next_test; 3392 3393 rval = QLA_SUCCESS; 3394 wrt_reg_dword(®->iobase_window, 0x0003); 3395 for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3396 rval == QLA_SUCCESS; cnt--) { 3397 if (cnt) { 3398 wrt_reg_dword(®->iobase_window, 0x0003); 3399 udelay(10); 3400 } else 3401 rval = QLA_FUNCTION_TIMEOUT; 3402 } 3403 if (rval != QLA_SUCCESS) 3404 goto done; 3405 3406 next_test: 3407 if (rd_reg_dword(®->iobase_c8) & BIT_3) 3408 ql_log(ql_log_info, vha, 0x504c, 3409 "Additional code -- 0x55AA.\n"); 3410 3411 done: 3412 wrt_reg_dword(®->iobase_window, 0x0000); 3413 rd_reg_dword(®->iobase_window); 3414 } 3415 3416 /** 3417 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 3418 * @irq: interrupt number 3419 * @dev_id: SCSI driver HA context 3420 * 3421 * Called by system whenever the host adapter generates an interrupt. 3422 * 3423 * Returns handled flag. 3424 */ 3425 irqreturn_t 3426 qla24xx_intr_handler(int irq, void *dev_id) 3427 { 3428 scsi_qla_host_t *vha; 3429 struct qla_hw_data *ha; 3430 struct device_reg_24xx __iomem *reg; 3431 int status; 3432 unsigned long iter; 3433 uint32_t stat; 3434 uint32_t hccr; 3435 uint16_t mb[8]; 3436 struct rsp_que *rsp; 3437 unsigned long flags; 3438 bool process_atio = false; 3439 3440 rsp = (struct rsp_que *) dev_id; 3441 if (!rsp) { 3442 ql_log(ql_log_info, NULL, 0x5059, 3443 "%s: NULL response queue pointer.\n", __func__); 3444 return IRQ_NONE; 3445 } 3446 3447 ha = rsp->hw; 3448 reg = &ha->iobase->isp24; 3449 status = 0; 3450 3451 if (unlikely(pci_channel_offline(ha->pdev))) 3452 return IRQ_HANDLED; 3453 3454 spin_lock_irqsave(&ha->hardware_lock, flags); 3455 vha = pci_get_drvdata(ha->pdev); 3456 for (iter = 50; iter--; ) { 3457 stat = rd_reg_dword(®->host_status); 3458 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3459 break; 3460 if (stat & HSRX_RISC_PAUSED) { 3461 if (unlikely(pci_channel_offline(ha->pdev))) 3462 break; 3463 3464 hccr = rd_reg_dword(®->hccr); 3465 3466 ql_log(ql_log_warn, vha, 0x504b, 3467 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3468 hccr); 3469 3470 qla2xxx_check_risc_status(vha); 3471 3472 ha->isp_ops->fw_dump(vha); 3473 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3474 break; 3475 } else if ((stat & HSRX_RISC_INT) == 0) 3476 break; 3477 3478 switch (stat & 0xff) { 3479 case INTR_ROM_MB_SUCCESS: 3480 case INTR_ROM_MB_FAILED: 3481 case INTR_MB_SUCCESS: 3482 case INTR_MB_FAILED: 3483 qla24xx_mbx_completion(vha, MSW(stat)); 3484 status |= MBX_INTERRUPT; 3485 3486 break; 3487 case INTR_ASYNC_EVENT: 3488 mb[0] = MSW(stat); 3489 mb[1] = rd_reg_word(®->mailbox1); 3490 mb[2] = rd_reg_word(®->mailbox2); 3491 mb[3] = rd_reg_word(®->mailbox3); 3492 qla2x00_async_event(vha, rsp, mb); 3493 break; 3494 case INTR_RSP_QUE_UPDATE: 3495 case INTR_RSP_QUE_UPDATE_83XX: 3496 qla24xx_process_response_queue(vha, rsp); 3497 break; 3498 case INTR_ATIO_QUE_UPDATE_27XX: 3499 case INTR_ATIO_QUE_UPDATE: 3500 process_atio = true; 3501 break; 3502 case INTR_ATIO_RSP_QUE_UPDATE: 3503 process_atio = true; 3504 qla24xx_process_response_queue(vha, rsp); 3505 break; 3506 default: 3507 ql_dbg(ql_dbg_async, vha, 0x504f, 3508 "Unrecognized interrupt type (%d).\n", stat * 0xff); 3509 break; 3510 } 3511 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3512 rd_reg_dword_relaxed(®->hccr); 3513 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 3514 ndelay(3500); 3515 } 3516 qla2x00_handle_mbx_completion(ha, status); 3517 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3518 3519 if (process_atio) { 3520 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3521 qlt_24xx_process_atio_queue(vha, 0); 3522 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3523 } 3524 3525 return IRQ_HANDLED; 3526 } 3527 3528 static irqreturn_t 3529 qla24xx_msix_rsp_q(int irq, void *dev_id) 3530 { 3531 struct qla_hw_data *ha; 3532 struct rsp_que *rsp; 3533 struct device_reg_24xx __iomem *reg; 3534 struct scsi_qla_host *vha; 3535 unsigned long flags; 3536 3537 rsp = (struct rsp_que *) dev_id; 3538 if (!rsp) { 3539 ql_log(ql_log_info, NULL, 0x505a, 3540 "%s: NULL response queue pointer.\n", __func__); 3541 return IRQ_NONE; 3542 } 3543 ha = rsp->hw; 3544 reg = &ha->iobase->isp24; 3545 3546 spin_lock_irqsave(&ha->hardware_lock, flags); 3547 3548 vha = pci_get_drvdata(ha->pdev); 3549 qla24xx_process_response_queue(vha, rsp); 3550 if (!ha->flags.disable_msix_handshake) { 3551 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3552 rd_reg_dword_relaxed(®->hccr); 3553 } 3554 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3555 3556 return IRQ_HANDLED; 3557 } 3558 3559 static irqreturn_t 3560 qla24xx_msix_default(int irq, void *dev_id) 3561 { 3562 scsi_qla_host_t *vha; 3563 struct qla_hw_data *ha; 3564 struct rsp_que *rsp; 3565 struct device_reg_24xx __iomem *reg; 3566 int status; 3567 uint32_t stat; 3568 uint32_t hccr; 3569 uint16_t mb[8]; 3570 unsigned long flags; 3571 bool process_atio = false; 3572 3573 rsp = (struct rsp_que *) dev_id; 3574 if (!rsp) { 3575 ql_log(ql_log_info, NULL, 0x505c, 3576 "%s: NULL response queue pointer.\n", __func__); 3577 return IRQ_NONE; 3578 } 3579 ha = rsp->hw; 3580 reg = &ha->iobase->isp24; 3581 status = 0; 3582 3583 spin_lock_irqsave(&ha->hardware_lock, flags); 3584 vha = pci_get_drvdata(ha->pdev); 3585 do { 3586 stat = rd_reg_dword(®->host_status); 3587 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3588 break; 3589 if (stat & HSRX_RISC_PAUSED) { 3590 if (unlikely(pci_channel_offline(ha->pdev))) 3591 break; 3592 3593 hccr = rd_reg_dword(®->hccr); 3594 3595 ql_log(ql_log_info, vha, 0x5050, 3596 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3597 hccr); 3598 3599 qla2xxx_check_risc_status(vha); 3600 3601 ha->isp_ops->fw_dump(vha); 3602 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3603 break; 3604 } else if ((stat & HSRX_RISC_INT) == 0) 3605 break; 3606 3607 switch (stat & 0xff) { 3608 case INTR_ROM_MB_SUCCESS: 3609 case INTR_ROM_MB_FAILED: 3610 case INTR_MB_SUCCESS: 3611 case INTR_MB_FAILED: 3612 qla24xx_mbx_completion(vha, MSW(stat)); 3613 status |= MBX_INTERRUPT; 3614 3615 break; 3616 case INTR_ASYNC_EVENT: 3617 mb[0] = MSW(stat); 3618 mb[1] = rd_reg_word(®->mailbox1); 3619 mb[2] = rd_reg_word(®->mailbox2); 3620 mb[3] = rd_reg_word(®->mailbox3); 3621 qla2x00_async_event(vha, rsp, mb); 3622 break; 3623 case INTR_RSP_QUE_UPDATE: 3624 case INTR_RSP_QUE_UPDATE_83XX: 3625 qla24xx_process_response_queue(vha, rsp); 3626 break; 3627 case INTR_ATIO_QUE_UPDATE_27XX: 3628 case INTR_ATIO_QUE_UPDATE: 3629 process_atio = true; 3630 break; 3631 case INTR_ATIO_RSP_QUE_UPDATE: 3632 process_atio = true; 3633 qla24xx_process_response_queue(vha, rsp); 3634 break; 3635 default: 3636 ql_dbg(ql_dbg_async, vha, 0x5051, 3637 "Unrecognized interrupt type (%d).\n", stat & 0xff); 3638 break; 3639 } 3640 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3641 } while (0); 3642 qla2x00_handle_mbx_completion(ha, status); 3643 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3644 3645 if (process_atio) { 3646 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3647 qlt_24xx_process_atio_queue(vha, 0); 3648 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3649 } 3650 3651 return IRQ_HANDLED; 3652 } 3653 3654 irqreturn_t 3655 qla2xxx_msix_rsp_q(int irq, void *dev_id) 3656 { 3657 struct qla_hw_data *ha; 3658 struct qla_qpair *qpair; 3659 3660 qpair = dev_id; 3661 if (!qpair) { 3662 ql_log(ql_log_info, NULL, 0x505b, 3663 "%s: NULL response queue pointer.\n", __func__); 3664 return IRQ_NONE; 3665 } 3666 ha = qpair->hw; 3667 3668 queue_work(ha->wq, &qpair->q_work); 3669 3670 return IRQ_HANDLED; 3671 } 3672 3673 irqreturn_t 3674 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) 3675 { 3676 struct qla_hw_data *ha; 3677 struct qla_qpair *qpair; 3678 struct device_reg_24xx __iomem *reg; 3679 unsigned long flags; 3680 3681 qpair = dev_id; 3682 if (!qpair) { 3683 ql_log(ql_log_info, NULL, 0x505b, 3684 "%s: NULL response queue pointer.\n", __func__); 3685 return IRQ_NONE; 3686 } 3687 ha = qpair->hw; 3688 3689 reg = &ha->iobase->isp24; 3690 spin_lock_irqsave(&ha->hardware_lock, flags); 3691 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3692 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3693 3694 queue_work(ha->wq, &qpair->q_work); 3695 3696 return IRQ_HANDLED; 3697 } 3698 3699 /* Interrupt handling helpers. */ 3700 3701 struct qla_init_msix_entry { 3702 const char *name; 3703 irq_handler_t handler; 3704 }; 3705 3706 static const struct qla_init_msix_entry msix_entries[] = { 3707 { "default", qla24xx_msix_default }, 3708 { "rsp_q", qla24xx_msix_rsp_q }, 3709 { "atio_q", qla83xx_msix_atio_q }, 3710 { "qpair_multiq", qla2xxx_msix_rsp_q }, 3711 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, 3712 }; 3713 3714 static const struct qla_init_msix_entry qla82xx_msix_entries[] = { 3715 { "qla2xxx (default)", qla82xx_msix_default }, 3716 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3717 }; 3718 3719 static int 3720 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3721 { 3722 int i, ret; 3723 struct qla_msix_entry *qentry; 3724 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3725 int min_vecs = QLA_BASE_VECTORS; 3726 struct irq_affinity desc = { 3727 .pre_vectors = QLA_BASE_VECTORS, 3728 }; 3729 3730 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 3731 IS_ATIO_MSIX_CAPABLE(ha)) { 3732 desc.pre_vectors++; 3733 min_vecs++; 3734 } 3735 3736 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { 3737 /* user wants to control IRQ setting for target mode */ 3738 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, 3739 ha->msix_count, PCI_IRQ_MSIX); 3740 } else 3741 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, 3742 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 3743 &desc); 3744 3745 if (ret < 0) { 3746 ql_log(ql_log_fatal, vha, 0x00c7, 3747 "MSI-X: Failed to enable support, " 3748 "giving up -- %d/%d.\n", 3749 ha->msix_count, ret); 3750 goto msix_out; 3751 } else if (ret < ha->msix_count) { 3752 ql_log(ql_log_info, vha, 0x00c6, 3753 "MSI-X: Using %d vectors\n", ret); 3754 ha->msix_count = ret; 3755 /* Recalculate queue values */ 3756 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { 3757 ha->max_req_queues = ha->msix_count - 1; 3758 3759 /* ATIOQ needs 1 vector. That's 1 less QPair */ 3760 if (QLA_TGT_MODE_ENABLED()) 3761 ha->max_req_queues--; 3762 3763 ha->max_rsp_queues = ha->max_req_queues; 3764 3765 ha->max_qpairs = ha->max_req_queues - 1; 3766 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, 3767 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 3768 } 3769 } 3770 vha->irq_offset = desc.pre_vectors; 3771 ha->msix_entries = kcalloc(ha->msix_count, 3772 sizeof(struct qla_msix_entry), 3773 GFP_KERNEL); 3774 if (!ha->msix_entries) { 3775 ql_log(ql_log_fatal, vha, 0x00c8, 3776 "Failed to allocate memory for ha->msix_entries.\n"); 3777 ret = -ENOMEM; 3778 goto free_irqs; 3779 } 3780 ha->flags.msix_enabled = 1; 3781 3782 for (i = 0; i < ha->msix_count; i++) { 3783 qentry = &ha->msix_entries[i]; 3784 qentry->vector = pci_irq_vector(ha->pdev, i); 3785 qentry->entry = i; 3786 qentry->have_irq = 0; 3787 qentry->in_use = 0; 3788 qentry->handle = NULL; 3789 } 3790 3791 /* Enable MSI-X vectors for the base queue */ 3792 for (i = 0; i < QLA_BASE_VECTORS; i++) { 3793 qentry = &ha->msix_entries[i]; 3794 qentry->handle = rsp; 3795 rsp->msix = qentry; 3796 scnprintf(qentry->name, sizeof(qentry->name), 3797 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); 3798 if (IS_P3P_TYPE(ha)) 3799 ret = request_irq(qentry->vector, 3800 qla82xx_msix_entries[i].handler, 3801 0, qla82xx_msix_entries[i].name, rsp); 3802 else 3803 ret = request_irq(qentry->vector, 3804 msix_entries[i].handler, 3805 0, qentry->name, rsp); 3806 if (ret) 3807 goto msix_register_fail; 3808 qentry->have_irq = 1; 3809 qentry->in_use = 1; 3810 } 3811 3812 /* 3813 * If target mode is enable, also request the vector for the ATIO 3814 * queue. 3815 */ 3816 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 3817 IS_ATIO_MSIX_CAPABLE(ha)) { 3818 qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; 3819 rsp->msix = qentry; 3820 qentry->handle = rsp; 3821 scnprintf(qentry->name, sizeof(qentry->name), 3822 "qla2xxx%lu_%s", vha->host_no, 3823 msix_entries[QLA_ATIO_VECTOR].name); 3824 qentry->in_use = 1; 3825 ret = request_irq(qentry->vector, 3826 msix_entries[QLA_ATIO_VECTOR].handler, 3827 0, qentry->name, rsp); 3828 qentry->have_irq = 1; 3829 } 3830 3831 msix_register_fail: 3832 if (ret) { 3833 ql_log(ql_log_fatal, vha, 0x00cb, 3834 "MSI-X: unable to register handler -- %x/%d.\n", 3835 qentry->vector, ret); 3836 qla2x00_free_irqs(vha); 3837 ha->mqenable = 0; 3838 goto msix_out; 3839 } 3840 3841 /* Enable MSI-X vector for response queue update for queue 0 */ 3842 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 3843 if (ha->msixbase && ha->mqiobase && 3844 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3845 ql2xmqsupport)) 3846 ha->mqenable = 1; 3847 } else 3848 if (ha->mqiobase && 3849 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3850 ql2xmqsupport)) 3851 ha->mqenable = 1; 3852 ql_dbg(ql_dbg_multiq, vha, 0xc005, 3853 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3854 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3855 ql_dbg(ql_dbg_init, vha, 0x0055, 3856 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3857 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3858 3859 msix_out: 3860 return ret; 3861 3862 free_irqs: 3863 pci_free_irq_vectors(ha->pdev); 3864 goto msix_out; 3865 } 3866 3867 int 3868 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 3869 { 3870 int ret = QLA_FUNCTION_FAILED; 3871 device_reg_t *reg = ha->iobase; 3872 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3873 3874 /* If possible, enable MSI-X. */ 3875 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 3876 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && 3877 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))) 3878 goto skip_msi; 3879 3880 if (ql2xenablemsix == 2) 3881 goto skip_msix; 3882 3883 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 3884 (ha->pdev->subsystem_device == 0x7040 || 3885 ha->pdev->subsystem_device == 0x7041 || 3886 ha->pdev->subsystem_device == 0x1705)) { 3887 ql_log(ql_log_warn, vha, 0x0034, 3888 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 3889 ha->pdev->subsystem_vendor, 3890 ha->pdev->subsystem_device); 3891 goto skip_msi; 3892 } 3893 3894 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 3895 ql_log(ql_log_warn, vha, 0x0035, 3896 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 3897 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 3898 goto skip_msix; 3899 } 3900 3901 ret = qla24xx_enable_msix(ha, rsp); 3902 if (!ret) { 3903 ql_dbg(ql_dbg_init, vha, 0x0036, 3904 "MSI-X: Enabled (0x%X, 0x%X).\n", 3905 ha->chip_revision, ha->fw_attributes); 3906 goto clear_risc_ints; 3907 } 3908 3909 skip_msix: 3910 3911 ql_log(ql_log_info, vha, 0x0037, 3912 "Falling back-to MSI mode -- ret=%d.\n", ret); 3913 3914 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3915 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 3916 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3917 goto skip_msi; 3918 3919 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); 3920 if (ret > 0) { 3921 ql_dbg(ql_dbg_init, vha, 0x0038, 3922 "MSI: Enabled.\n"); 3923 ha->flags.msi_enabled = 1; 3924 } else 3925 ql_log(ql_log_warn, vha, 0x0039, 3926 "Falling back-to INTa mode -- ret=%d.\n", ret); 3927 skip_msi: 3928 3929 /* Skip INTx on ISP82xx. */ 3930 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 3931 return QLA_FUNCTION_FAILED; 3932 3933 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 3934 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 3935 QLA2XXX_DRIVER_NAME, rsp); 3936 if (ret) { 3937 ql_log(ql_log_warn, vha, 0x003a, 3938 "Failed to reserve interrupt %d already in use.\n", 3939 ha->pdev->irq); 3940 goto fail; 3941 } else if (!ha->flags.msi_enabled) { 3942 ql_dbg(ql_dbg_init, vha, 0x0125, 3943 "INTa mode: Enabled.\n"); 3944 ha->flags.mr_intr_valid = 1; 3945 } 3946 3947 clear_risc_ints: 3948 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 3949 goto fail; 3950 3951 spin_lock_irq(&ha->hardware_lock); 3952 wrt_reg_word(®->isp.semaphore, 0); 3953 spin_unlock_irq(&ha->hardware_lock); 3954 3955 fail: 3956 return ret; 3957 } 3958 3959 void 3960 qla2x00_free_irqs(scsi_qla_host_t *vha) 3961 { 3962 struct qla_hw_data *ha = vha->hw; 3963 struct rsp_que *rsp; 3964 struct qla_msix_entry *qentry; 3965 int i; 3966 3967 /* 3968 * We need to check that ha->rsp_q_map is valid in case we are called 3969 * from a probe failure context. 3970 */ 3971 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3972 goto free_irqs; 3973 rsp = ha->rsp_q_map[0]; 3974 3975 if (ha->flags.msix_enabled) { 3976 for (i = 0; i < ha->msix_count; i++) { 3977 qentry = &ha->msix_entries[i]; 3978 if (qentry->have_irq) { 3979 irq_set_affinity_notifier(qentry->vector, NULL); 3980 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); 3981 } 3982 } 3983 kfree(ha->msix_entries); 3984 ha->msix_entries = NULL; 3985 ha->flags.msix_enabled = 0; 3986 ql_dbg(ql_dbg_init, vha, 0x0042, 3987 "Disabled MSI-X.\n"); 3988 } else { 3989 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 3990 } 3991 3992 free_irqs: 3993 pci_free_irq_vectors(ha->pdev); 3994 } 3995 3996 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 3997 struct qla_msix_entry *msix, int vector_type) 3998 { 3999 const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 4000 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4001 int ret; 4002 4003 scnprintf(msix->name, sizeof(msix->name), 4004 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); 4005 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); 4006 if (ret) { 4007 ql_log(ql_log_fatal, vha, 0x00e6, 4008 "MSI-X: Unable to register handler -- %x/%d.\n", 4009 msix->vector, ret); 4010 return ret; 4011 } 4012 msix->have_irq = 1; 4013 msix->handle = qpair; 4014 return ret; 4015 } 4016