1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic Fibre Channel HBA Driver 4 * Copyright (c) 2003-2014 QLogic Corporation 5 */ 6 #include "qla_def.h" 7 #include "qla_target.h" 8 #include "qla_gbl.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <linux/cpu.h> 13 #include <linux/t10-pi.h> 14 #include <scsi/scsi_tcq.h> 15 #include <scsi/scsi_bsg_fc.h> 16 #include <scsi/scsi_eh.h> 17 #include <scsi/fc/fc_fs.h> 18 #include <linux/nvme-fc-driver.h> 19 20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 24 sts_entry_t *); 25 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, 26 struct purex_item *item); 27 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha, 28 uint16_t size); 29 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, 30 void *pkt); 31 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, 32 void **pkt, struct rsp_que **rsp); 33 34 static void 35 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item) 36 { 37 void *pkt = &item->iocb; 38 uint16_t pkt_size = item->size; 39 40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d, 41 "%s: Enter\n", __func__); 42 43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e, 44 "-------- ELS REQ -------\n"); 45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f, 46 pkt, pkt_size); 47 48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt); 49 } 50 51 const char *const port_state_str[] = { 52 "Unknown", 53 "UNCONFIGURED", 54 "DEAD", 55 "LOST", 56 "ONLINE" 57 }; 58 59 static void 60 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) 61 { 62 struct abts_entry_24xx *abts = 63 (struct abts_entry_24xx *)&pkt->iocb; 64 struct qla_hw_data *ha = vha->hw; 65 struct els_entry_24xx *rsp_els; 66 struct abts_entry_24xx *abts_rsp; 67 dma_addr_t dma; 68 uint32_t fctl; 69 int rval; 70 71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); 72 73 ql_log(ql_log_warn, vha, 0x0287, 74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", 75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, 76 abts->seq_id, abts->seq_cnt); 77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 78 "-------- ABTS RCV -------\n"); 79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 80 (uint8_t *)abts, sizeof(*abts)); 81 82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, 83 GFP_KERNEL); 84 if (!rsp_els) { 85 ql_log(ql_log_warn, vha, 0x0287, 86 "Failed allocate dma buffer ABTS/ELS RSP.\n"); 87 return; 88 } 89 90 /* terminate exchange */ 91 rsp_els->entry_type = ELS_IOCB_TYPE; 92 rsp_els->entry_count = 1; 93 rsp_els->nport_handle = cpu_to_le16(~0); 94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; 95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); 96 ql_dbg(ql_dbg_init, vha, 0x0283, 97 "Sending ELS Response to terminate exchange %#x...\n", 98 abts->rx_xch_addr_to_abort); 99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 100 "-------- ELS RSP -------\n"); 101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 102 (uint8_t *)rsp_els, sizeof(*rsp_els)); 103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); 104 if (rval) { 105 ql_log(ql_log_warn, vha, 0x0288, 106 "%s: iocb failed to execute -> %x\n", __func__, rval); 107 } else if (rsp_els->comp_status) { 108 ql_log(ql_log_warn, vha, 0x0289, 109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 110 __func__, rsp_els->comp_status, 111 rsp_els->error_subcode_1, rsp_els->error_subcode_2); 112 } else { 113 ql_dbg(ql_dbg_init, vha, 0x028a, 114 "%s: abort exchange done.\n", __func__); 115 } 116 117 /* send ABTS response */ 118 abts_rsp = (void *)rsp_els; 119 memset(abts_rsp, 0, sizeof(*abts_rsp)); 120 abts_rsp->entry_type = ABTS_RSP_TYPE; 121 abts_rsp->entry_count = 1; 122 abts_rsp->nport_handle = abts->nport_handle; 123 abts_rsp->vp_idx = abts->vp_idx; 124 abts_rsp->sof_type = abts->sof_type & 0xf0; 125 abts_rsp->rx_xch_addr = abts->rx_xch_addr; 126 abts_rsp->d_id[0] = abts->s_id[0]; 127 abts_rsp->d_id[1] = abts->s_id[1]; 128 abts_rsp->d_id[2] = abts->s_id[2]; 129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; 130 abts_rsp->s_id[0] = abts->d_id[0]; 131 abts_rsp->s_id[1] = abts->d_id[1]; 132 abts_rsp->s_id[2] = abts->d_id[2]; 133 abts_rsp->cs_ctl = abts->cs_ctl; 134 /* include flipping bit23 in fctl */ 135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | 136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; 137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; 138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; 139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; 140 abts_rsp->type = FC_TYPE_BLD; 141 abts_rsp->rx_id = abts->rx_id; 142 abts_rsp->ox_id = abts->ox_id; 143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; 144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; 145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); 146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; 147 ql_dbg(ql_dbg_init, vha, 0x028b, 148 "Sending BA ACC response to ABTS %#x...\n", 149 abts->rx_xch_addr_to_abort); 150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 151 "-------- ELS RSP -------\n"); 152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 153 (uint8_t *)abts_rsp, sizeof(*abts_rsp)); 154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); 155 if (rval) { 156 ql_log(ql_log_warn, vha, 0x028c, 157 "%s: iocb failed to execute -> %x\n", __func__, rval); 158 } else if (abts_rsp->comp_status) { 159 ql_log(ql_log_warn, vha, 0x028d, 160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 161 __func__, abts_rsp->comp_status, 162 abts_rsp->payload.error.subcode1, 163 abts_rsp->payload.error.subcode2); 164 } else { 165 ql_dbg(ql_dbg_init, vha, 0x028ea, 166 "%s: done.\n", __func__); 167 } 168 169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); 170 } 171 172 /** 173 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 174 * @irq: interrupt number 175 * @dev_id: SCSI driver HA context 176 * 177 * Called by system whenever the host adapter generates an interrupt. 178 * 179 * Returns handled flag. 180 */ 181 irqreturn_t 182 qla2100_intr_handler(int irq, void *dev_id) 183 { 184 scsi_qla_host_t *vha; 185 struct qla_hw_data *ha; 186 struct device_reg_2xxx __iomem *reg; 187 int status; 188 unsigned long iter; 189 uint16_t hccr; 190 uint16_t mb[8]; 191 struct rsp_que *rsp; 192 unsigned long flags; 193 194 rsp = (struct rsp_que *) dev_id; 195 if (!rsp) { 196 ql_log(ql_log_info, NULL, 0x505d, 197 "%s: NULL response queue pointer.\n", __func__); 198 return (IRQ_NONE); 199 } 200 201 ha = rsp->hw; 202 reg = &ha->iobase->isp; 203 status = 0; 204 205 spin_lock_irqsave(&ha->hardware_lock, flags); 206 vha = pci_get_drvdata(ha->pdev); 207 for (iter = 50; iter--; ) { 208 hccr = rd_reg_word(®->hccr); 209 if (qla2x00_check_reg16_for_disconnect(vha, hccr)) 210 break; 211 if (hccr & HCCR_RISC_PAUSE) { 212 if (pci_channel_offline(ha->pdev)) 213 break; 214 215 /* 216 * Issue a "HARD" reset in order for the RISC interrupt 217 * bit to be cleared. Schedule a big hammer to get 218 * out of the RISC PAUSED state. 219 */ 220 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 221 rd_reg_word(®->hccr); 222 223 ha->isp_ops->fw_dump(vha); 224 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 225 break; 226 } else if ((rd_reg_word(®->istatus) & ISR_RISC_INT) == 0) 227 break; 228 229 if (rd_reg_word(®->semaphore) & BIT_0) { 230 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 231 rd_reg_word(®->hccr); 232 233 /* Get mailbox data. */ 234 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 235 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 236 qla2x00_mbx_completion(vha, mb[0]); 237 status |= MBX_INTERRUPT; 238 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 239 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 240 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 241 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 242 qla2x00_async_event(vha, rsp, mb); 243 } else { 244 /*EMPTY*/ 245 ql_dbg(ql_dbg_async, vha, 0x5025, 246 "Unrecognized interrupt type (%d).\n", 247 mb[0]); 248 } 249 /* Release mailbox registers. */ 250 wrt_reg_word(®->semaphore, 0); 251 rd_reg_word(®->semaphore); 252 } else { 253 qla2x00_process_response_queue(rsp); 254 255 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 256 rd_reg_word(®->hccr); 257 } 258 } 259 qla2x00_handle_mbx_completion(ha, status); 260 spin_unlock_irqrestore(&ha->hardware_lock, flags); 261 262 return (IRQ_HANDLED); 263 } 264 265 bool 266 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 267 { 268 /* Check for PCI disconnection */ 269 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { 270 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 271 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 272 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 273 qla_schedule_eeh_work(vha); 274 } 275 return true; 276 } else 277 return false; 278 } 279 280 bool 281 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) 282 { 283 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); 284 } 285 286 /** 287 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 288 * @irq: interrupt number 289 * @dev_id: SCSI driver HA context 290 * 291 * Called by system whenever the host adapter generates an interrupt. 292 * 293 * Returns handled flag. 294 */ 295 irqreturn_t 296 qla2300_intr_handler(int irq, void *dev_id) 297 { 298 scsi_qla_host_t *vha; 299 struct device_reg_2xxx __iomem *reg; 300 int status; 301 unsigned long iter; 302 uint32_t stat; 303 uint16_t hccr; 304 uint16_t mb[8]; 305 struct rsp_que *rsp; 306 struct qla_hw_data *ha; 307 unsigned long flags; 308 309 rsp = (struct rsp_que *) dev_id; 310 if (!rsp) { 311 ql_log(ql_log_info, NULL, 0x5058, 312 "%s: NULL response queue pointer.\n", __func__); 313 return (IRQ_NONE); 314 } 315 316 ha = rsp->hw; 317 reg = &ha->iobase->isp; 318 status = 0; 319 320 spin_lock_irqsave(&ha->hardware_lock, flags); 321 vha = pci_get_drvdata(ha->pdev); 322 for (iter = 50; iter--; ) { 323 stat = rd_reg_dword(®->u.isp2300.host_status); 324 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 325 break; 326 if (stat & HSR_RISC_PAUSED) { 327 if (unlikely(pci_channel_offline(ha->pdev))) 328 break; 329 330 hccr = rd_reg_word(®->hccr); 331 332 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 333 ql_log(ql_log_warn, vha, 0x5026, 334 "Parity error -- HCCR=%x, Dumping " 335 "firmware.\n", hccr); 336 else 337 ql_log(ql_log_warn, vha, 0x5027, 338 "RISC paused -- HCCR=%x, Dumping " 339 "firmware.\n", hccr); 340 341 /* 342 * Issue a "HARD" reset in order for the RISC 343 * interrupt bit to be cleared. Schedule a big 344 * hammer to get out of the RISC PAUSED state. 345 */ 346 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 347 rd_reg_word(®->hccr); 348 349 ha->isp_ops->fw_dump(vha); 350 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 351 break; 352 } else if ((stat & HSR_RISC_INT) == 0) 353 break; 354 355 switch (stat & 0xff) { 356 case 0x1: 357 case 0x2: 358 case 0x10: 359 case 0x11: 360 qla2x00_mbx_completion(vha, MSW(stat)); 361 status |= MBX_INTERRUPT; 362 363 /* Release mailbox registers. */ 364 wrt_reg_word(®->semaphore, 0); 365 break; 366 case 0x12: 367 mb[0] = MSW(stat); 368 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 369 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 370 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 371 qla2x00_async_event(vha, rsp, mb); 372 break; 373 case 0x13: 374 qla2x00_process_response_queue(rsp); 375 break; 376 case 0x15: 377 mb[0] = MBA_CMPLT_1_16BIT; 378 mb[1] = MSW(stat); 379 qla2x00_async_event(vha, rsp, mb); 380 break; 381 case 0x16: 382 mb[0] = MBA_SCSI_COMPLETION; 383 mb[1] = MSW(stat); 384 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 385 qla2x00_async_event(vha, rsp, mb); 386 break; 387 default: 388 ql_dbg(ql_dbg_async, vha, 0x5028, 389 "Unrecognized interrupt type (%d).\n", stat & 0xff); 390 break; 391 } 392 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 393 rd_reg_word_relaxed(®->hccr); 394 } 395 qla2x00_handle_mbx_completion(ha, status); 396 spin_unlock_irqrestore(&ha->hardware_lock, flags); 397 398 return (IRQ_HANDLED); 399 } 400 401 /** 402 * qla2x00_mbx_completion() - Process mailbox command completions. 403 * @vha: SCSI driver HA context 404 * @mb0: Mailbox0 register 405 */ 406 static void 407 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 408 { 409 uint16_t cnt; 410 uint32_t mboxes; 411 __le16 __iomem *wptr; 412 struct qla_hw_data *ha = vha->hw; 413 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 414 415 /* Read all mbox registers? */ 416 WARN_ON_ONCE(ha->mbx_count > 32); 417 mboxes = (1ULL << ha->mbx_count) - 1; 418 if (!ha->mcp) 419 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 420 else 421 mboxes = ha->mcp->in_mb; 422 423 /* Load return mailbox registers. */ 424 ha->flags.mbox_int = 1; 425 ha->mailbox_out[0] = mb0; 426 mboxes >>= 1; 427 wptr = MAILBOX_REG(ha, reg, 1); 428 429 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 430 if (IS_QLA2200(ha) && cnt == 8) 431 wptr = MAILBOX_REG(ha, reg, 8); 432 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 433 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 434 else if (mboxes & BIT_0) 435 ha->mailbox_out[cnt] = rd_reg_word(wptr); 436 437 wptr++; 438 mboxes >>= 1; 439 } 440 } 441 442 static void 443 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 444 { 445 static char *event[] = 446 { "Complete", "Request Notification", "Time Extension" }; 447 int rval; 448 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 449 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 450 __le16 __iomem *wptr; 451 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 452 453 /* Seed data -- mailbox1 -> mailbox7. */ 454 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 455 wptr = ®24->mailbox1; 456 else if (IS_QLA8044(vha->hw)) 457 wptr = ®82->mailbox_out[1]; 458 else 459 return; 460 461 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 462 mb[cnt] = rd_reg_word(wptr); 463 464 ql_dbg(ql_dbg_async, vha, 0x5021, 465 "Inter-Driver Communication %s -- " 466 "%04x %04x %04x %04x %04x %04x %04x.\n", 467 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 468 mb[4], mb[5], mb[6]); 469 switch (aen) { 470 /* Handle IDC Error completion case. */ 471 case MBA_IDC_COMPLETE: 472 if (mb[1] >> 15) { 473 vha->hw->flags.idc_compl_status = 1; 474 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 475 complete(&vha->hw->dcbx_comp); 476 } 477 break; 478 479 case MBA_IDC_NOTIFY: 480 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 481 timeout = (descr >> 8) & 0xf; 482 ql_dbg(ql_dbg_async, vha, 0x5022, 483 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 484 vha->host_no, event[aen & 0xff], timeout); 485 486 if (!timeout) 487 return; 488 rval = qla2x00_post_idc_ack_work(vha, mb); 489 if (rval != QLA_SUCCESS) 490 ql_log(ql_log_warn, vha, 0x5023, 491 "IDC failed to post ACK.\n"); 492 break; 493 case MBA_IDC_TIME_EXT: 494 vha->hw->idc_extend_tmo = descr; 495 ql_dbg(ql_dbg_async, vha, 0x5087, 496 "%lu Inter-Driver Communication %s -- " 497 "Extend timeout by=%d.\n", 498 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 499 break; 500 } 501 } 502 503 #define LS_UNKNOWN 2 504 const char * 505 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 506 { 507 static const char *const link_speeds[] = { 508 "1", "2", "?", "4", "8", "16", "32", "10" 509 }; 510 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) 511 512 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 513 return link_speeds[0]; 514 else if (speed == 0x13) 515 return link_speeds[QLA_LAST_SPEED]; 516 else if (speed < QLA_LAST_SPEED) 517 return link_speeds[speed]; 518 else 519 return link_speeds[LS_UNKNOWN]; 520 } 521 522 static void 523 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 524 { 525 struct qla_hw_data *ha = vha->hw; 526 527 /* 528 * 8200 AEN Interpretation: 529 * mb[0] = AEN code 530 * mb[1] = AEN Reason code 531 * mb[2] = LSW of Peg-Halt Status-1 Register 532 * mb[6] = MSW of Peg-Halt Status-1 Register 533 * mb[3] = LSW of Peg-Halt Status-2 register 534 * mb[7] = MSW of Peg-Halt Status-2 register 535 * mb[4] = IDC Device-State Register value 536 * mb[5] = IDC Driver-Presence Register value 537 */ 538 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 539 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 540 mb[0], mb[1], mb[2], mb[6]); 541 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 542 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 543 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 544 545 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 546 IDC_HEARTBEAT_FAILURE)) { 547 ha->flags.nic_core_hung = 1; 548 ql_log(ql_log_warn, vha, 0x5060, 549 "83XX: F/W Error Reported: Check if reset required.\n"); 550 551 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 552 uint32_t protocol_engine_id, fw_err_code, err_level; 553 554 /* 555 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 556 * - PEG-Halt Status-1 Register: 557 * (LSW = mb[2], MSW = mb[6]) 558 * Bits 0-7 = protocol-engine ID 559 * Bits 8-28 = f/w error code 560 * Bits 29-31 = Error-level 561 * Error-level 0x1 = Non-Fatal error 562 * Error-level 0x2 = Recoverable Fatal error 563 * Error-level 0x4 = UnRecoverable Fatal error 564 * - PEG-Halt Status-2 Register: 565 * (LSW = mb[3], MSW = mb[7]) 566 */ 567 protocol_engine_id = (mb[2] & 0xff); 568 fw_err_code = (((mb[2] & 0xff00) >> 8) | 569 ((mb[6] & 0x1fff) << 8)); 570 err_level = ((mb[6] & 0xe000) >> 13); 571 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 572 "Register: protocol_engine_id=0x%x " 573 "fw_err_code=0x%x err_level=0x%x.\n", 574 protocol_engine_id, fw_err_code, err_level); 575 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 576 "Register: 0x%x%x.\n", mb[7], mb[3]); 577 if (err_level == ERR_LEVEL_NON_FATAL) { 578 ql_log(ql_log_warn, vha, 0x5063, 579 "Not a fatal error, f/w has recovered itself.\n"); 580 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 581 ql_log(ql_log_fatal, vha, 0x5064, 582 "Recoverable Fatal error: Chip reset " 583 "required.\n"); 584 qla83xx_schedule_work(vha, 585 QLA83XX_NIC_CORE_RESET); 586 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 587 ql_log(ql_log_fatal, vha, 0x5065, 588 "Unrecoverable Fatal error: Set FAILED " 589 "state, reboot required.\n"); 590 qla83xx_schedule_work(vha, 591 QLA83XX_NIC_CORE_UNRECOVERABLE); 592 } 593 } 594 595 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 596 uint16_t peg_fw_state, nw_interface_link_up; 597 uint16_t nw_interface_signal_detect, sfp_status; 598 uint16_t htbt_counter, htbt_monitor_enable; 599 uint16_t sfp_additional_info, sfp_multirate; 600 uint16_t sfp_tx_fault, link_speed, dcbx_status; 601 602 /* 603 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 604 * - PEG-to-FC Status Register: 605 * (LSW = mb[2], MSW = mb[6]) 606 * Bits 0-7 = Peg-Firmware state 607 * Bit 8 = N/W Interface Link-up 608 * Bit 9 = N/W Interface signal detected 609 * Bits 10-11 = SFP Status 610 * SFP Status 0x0 = SFP+ transceiver not expected 611 * SFP Status 0x1 = SFP+ transceiver not present 612 * SFP Status 0x2 = SFP+ transceiver invalid 613 * SFP Status 0x3 = SFP+ transceiver present and 614 * valid 615 * Bits 12-14 = Heartbeat Counter 616 * Bit 15 = Heartbeat Monitor Enable 617 * Bits 16-17 = SFP Additional Info 618 * SFP info 0x0 = Unregocnized transceiver for 619 * Ethernet 620 * SFP info 0x1 = SFP+ brand validation failed 621 * SFP info 0x2 = SFP+ speed validation failed 622 * SFP info 0x3 = SFP+ access error 623 * Bit 18 = SFP Multirate 624 * Bit 19 = SFP Tx Fault 625 * Bits 20-22 = Link Speed 626 * Bits 23-27 = Reserved 627 * Bits 28-30 = DCBX Status 628 * DCBX Status 0x0 = DCBX Disabled 629 * DCBX Status 0x1 = DCBX Enabled 630 * DCBX Status 0x2 = DCBX Exchange error 631 * Bit 31 = Reserved 632 */ 633 peg_fw_state = (mb[2] & 0x00ff); 634 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 635 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 636 sfp_status = ((mb[2] & 0x0c00) >> 10); 637 htbt_counter = ((mb[2] & 0x7000) >> 12); 638 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 639 sfp_additional_info = (mb[6] & 0x0003); 640 sfp_multirate = ((mb[6] & 0x0004) >> 2); 641 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 642 link_speed = ((mb[6] & 0x0070) >> 4); 643 dcbx_status = ((mb[6] & 0x7000) >> 12); 644 645 ql_log(ql_log_warn, vha, 0x5066, 646 "Peg-to-Fc Status Register:\n" 647 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 648 "nw_interface_signal_detect=0x%x" 649 "\nsfp_statis=0x%x.\n ", peg_fw_state, 650 nw_interface_link_up, nw_interface_signal_detect, 651 sfp_status); 652 ql_log(ql_log_warn, vha, 0x5067, 653 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 654 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 655 htbt_counter, htbt_monitor_enable, 656 sfp_additional_info, sfp_multirate); 657 ql_log(ql_log_warn, vha, 0x5068, 658 "sfp_tx_fault=0x%x, link_state=0x%x, " 659 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 660 dcbx_status); 661 662 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 663 } 664 665 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 666 ql_log(ql_log_warn, vha, 0x5069, 667 "Heartbeat Failure encountered, chip reset " 668 "required.\n"); 669 670 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 671 } 672 } 673 674 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 675 ql_log(ql_log_info, vha, 0x506a, 676 "IDC Device-State changed = 0x%x.\n", mb[4]); 677 if (ha->flags.nic_core_reset_owner) 678 return; 679 qla83xx_schedule_work(vha, MBA_IDC_AEN); 680 } 681 } 682 683 int 684 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 685 { 686 struct qla_hw_data *ha = vha->hw; 687 scsi_qla_host_t *vp; 688 uint32_t vp_did; 689 unsigned long flags; 690 int ret = 0; 691 692 if (!ha->num_vhosts) 693 return ret; 694 695 spin_lock_irqsave(&ha->vport_slock, flags); 696 list_for_each_entry(vp, &ha->vp_list, list) { 697 vp_did = vp->d_id.b24; 698 if (vp_did == rscn_entry) { 699 ret = 1; 700 break; 701 } 702 } 703 spin_unlock_irqrestore(&ha->vport_slock, flags); 704 705 return ret; 706 } 707 708 fc_port_t * 709 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 710 { 711 fc_port_t *f, *tf; 712 713 f = tf = NULL; 714 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) 715 if (f->loop_id == loop_id) 716 return f; 717 return NULL; 718 } 719 720 fc_port_t * 721 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) 722 { 723 fc_port_t *f, *tf; 724 725 f = tf = NULL; 726 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 727 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { 728 if (incl_deleted) 729 return f; 730 else if (f->deleted == 0) 731 return f; 732 } 733 } 734 return NULL; 735 } 736 737 fc_port_t * 738 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, 739 u8 incl_deleted) 740 { 741 fc_port_t *f, *tf; 742 743 f = tf = NULL; 744 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 745 if (f->d_id.b24 == id->b24) { 746 if (incl_deleted) 747 return f; 748 else if (f->deleted == 0) 749 return f; 750 } 751 } 752 return NULL; 753 } 754 755 /* Shall be called only on supported adapters. */ 756 static void 757 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 758 { 759 struct qla_hw_data *ha = vha->hw; 760 bool reset_isp_needed = false; 761 762 ql_log(ql_log_warn, vha, 0x02f0, 763 "MPI Heartbeat stop. MPI reset is%s needed. " 764 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", 765 mb[1] & BIT_8 ? "" : " not", 766 mb[0], mb[1], mb[2], mb[3]); 767 768 if ((mb[1] & BIT_8) == 0) 769 return; 770 771 ql_log(ql_log_warn, vha, 0x02f1, 772 "MPI Heartbeat stop. FW dump needed\n"); 773 774 if (ql2xfulldump_on_mpifail) { 775 ha->isp_ops->fw_dump(vha); 776 reset_isp_needed = true; 777 } 778 779 ha->isp_ops->mpi_fw_dump(vha, 1); 780 781 if (reset_isp_needed) { 782 vha->hw->flags.fw_init_done = 0; 783 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 784 qla2xxx_wake_dpc(vha); 785 } 786 } 787 788 static struct purex_item * 789 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) 790 { 791 struct purex_item *item = NULL; 792 uint8_t item_hdr_size = sizeof(*item); 793 794 if (size > QLA_DEFAULT_PAYLOAD_SIZE) { 795 item = kzalloc(item_hdr_size + 796 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); 797 } else { 798 if (atomic_inc_return(&vha->default_item.in_use) == 1) { 799 item = &vha->default_item; 800 goto initialize_purex_header; 801 } else { 802 item = kzalloc(item_hdr_size, GFP_ATOMIC); 803 } 804 } 805 if (!item) { 806 ql_log(ql_log_warn, vha, 0x5092, 807 ">> Failed allocate purex list item.\n"); 808 809 return NULL; 810 } 811 812 initialize_purex_header: 813 item->vha = vha; 814 item->size = size; 815 return item; 816 } 817 818 static void 819 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, 820 void (*process_item)(struct scsi_qla_host *vha, 821 struct purex_item *pkt)) 822 { 823 struct purex_list *list = &vha->purex_list; 824 ulong flags; 825 826 pkt->process_item = process_item; 827 828 spin_lock_irqsave(&list->lock, flags); 829 list_add_tail(&pkt->list, &list->head); 830 spin_unlock_irqrestore(&list->lock, flags); 831 832 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); 833 } 834 835 /** 836 * qla24xx_copy_std_pkt() - Copy over purex ELS which is 837 * contained in a single IOCB. 838 * purex packet. 839 * @vha: SCSI driver HA context 840 * @pkt: ELS packet 841 */ 842 static struct purex_item 843 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) 844 { 845 struct purex_item *item; 846 847 item = qla24xx_alloc_purex_item(vha, 848 QLA_DEFAULT_PAYLOAD_SIZE); 849 if (!item) 850 return item; 851 852 memcpy(&item->iocb, pkt, sizeof(item->iocb)); 853 return item; 854 } 855 856 /** 857 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can 858 * span over multiple IOCBs. 859 * @vha: SCSI driver HA context 860 * @pkt: ELS packet 861 * @rsp: Response queue 862 */ 863 static struct purex_item * 864 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, 865 struct rsp_que **rsp) 866 { 867 struct purex_entry_24xx *purex = *pkt; 868 struct rsp_que *rsp_q = *rsp; 869 sts_cont_entry_t *new_pkt; 870 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; 871 uint16_t buffer_copy_offset = 0; 872 uint16_t entry_count, entry_count_remaining; 873 struct purex_item *item; 874 void *fpin_pkt = NULL; 875 876 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) 877 - PURX_ELS_HEADER_SIZE; 878 pending_bytes = total_bytes; 879 entry_count = entry_count_remaining = purex->entry_count; 880 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 881 sizeof(purex->els_frame_payload) : pending_bytes; 882 ql_log(ql_log_info, vha, 0x509a, 883 "FPIN ELS, frame_size 0x%x, entry count %d\n", 884 total_bytes, entry_count); 885 886 item = qla24xx_alloc_purex_item(vha, total_bytes); 887 if (!item) 888 return item; 889 890 fpin_pkt = &item->iocb; 891 892 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes); 893 buffer_copy_offset += no_bytes; 894 pending_bytes -= no_bytes; 895 --entry_count_remaining; 896 897 ((response_t *)purex)->signature = RESPONSE_PROCESSED; 898 wmb(); 899 900 do { 901 while ((total_bytes > 0) && (entry_count_remaining > 0)) { 902 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { 903 ql_dbg(ql_dbg_async, vha, 0x5084, 904 "Ran out of IOCBs, partial data 0x%x\n", 905 buffer_copy_offset); 906 cpu_relax(); 907 continue; 908 } 909 910 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; 911 *pkt = new_pkt; 912 913 if (new_pkt->entry_type != STATUS_CONT_TYPE) { 914 ql_log(ql_log_warn, vha, 0x507a, 915 "Unexpected IOCB type, partial data 0x%x\n", 916 buffer_copy_offset); 917 break; 918 } 919 920 rsp_q->ring_index++; 921 if (rsp_q->ring_index == rsp_q->length) { 922 rsp_q->ring_index = 0; 923 rsp_q->ring_ptr = rsp_q->ring; 924 } else { 925 rsp_q->ring_ptr++; 926 } 927 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? 928 sizeof(new_pkt->data) : pending_bytes; 929 if ((buffer_copy_offset + no_bytes) <= total_bytes) { 930 memcpy(((uint8_t *)fpin_pkt + 931 buffer_copy_offset), new_pkt->data, 932 no_bytes); 933 buffer_copy_offset += no_bytes; 934 pending_bytes -= no_bytes; 935 --entry_count_remaining; 936 } else { 937 ql_log(ql_log_warn, vha, 0x5044, 938 "Attempt to copy more that we got, optimizing..%x\n", 939 buffer_copy_offset); 940 memcpy(((uint8_t *)fpin_pkt + 941 buffer_copy_offset), new_pkt->data, 942 total_bytes - buffer_copy_offset); 943 } 944 945 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; 946 wmb(); 947 } 948 949 if (pending_bytes != 0 || entry_count_remaining != 0) { 950 ql_log(ql_log_fatal, vha, 0x508b, 951 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", 952 total_bytes, entry_count_remaining); 953 qla24xx_free_purex_item(item); 954 return NULL; 955 } 956 } while (entry_count_remaining > 0); 957 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); 958 return item; 959 } 960 961 /** 962 * qla2x00_async_event() - Process aynchronous events. 963 * @vha: SCSI driver HA context 964 * @rsp: response queue 965 * @mb: Mailbox registers (0 - 3) 966 */ 967 void 968 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 969 { 970 uint16_t handle_cnt; 971 uint16_t cnt, mbx; 972 uint32_t handles[5]; 973 struct qla_hw_data *ha = vha->hw; 974 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 975 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 976 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 977 uint32_t rscn_entry, host_pid; 978 unsigned long flags; 979 fc_port_t *fcport = NULL; 980 981 if (!vha->hw->flags.fw_started) 982 return; 983 984 /* Setup to process RIO completion. */ 985 handle_cnt = 0; 986 if (IS_CNA_CAPABLE(ha)) 987 goto skip_rio; 988 switch (mb[0]) { 989 case MBA_SCSI_COMPLETION: 990 handles[0] = make_handle(mb[2], mb[1]); 991 handle_cnt = 1; 992 break; 993 case MBA_CMPLT_1_16BIT: 994 handles[0] = mb[1]; 995 handle_cnt = 1; 996 mb[0] = MBA_SCSI_COMPLETION; 997 break; 998 case MBA_CMPLT_2_16BIT: 999 handles[0] = mb[1]; 1000 handles[1] = mb[2]; 1001 handle_cnt = 2; 1002 mb[0] = MBA_SCSI_COMPLETION; 1003 break; 1004 case MBA_CMPLT_3_16BIT: 1005 handles[0] = mb[1]; 1006 handles[1] = mb[2]; 1007 handles[2] = mb[3]; 1008 handle_cnt = 3; 1009 mb[0] = MBA_SCSI_COMPLETION; 1010 break; 1011 case MBA_CMPLT_4_16BIT: 1012 handles[0] = mb[1]; 1013 handles[1] = mb[2]; 1014 handles[2] = mb[3]; 1015 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1016 handle_cnt = 4; 1017 mb[0] = MBA_SCSI_COMPLETION; 1018 break; 1019 case MBA_CMPLT_5_16BIT: 1020 handles[0] = mb[1]; 1021 handles[1] = mb[2]; 1022 handles[2] = mb[3]; 1023 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1024 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 1025 handle_cnt = 5; 1026 mb[0] = MBA_SCSI_COMPLETION; 1027 break; 1028 case MBA_CMPLT_2_32BIT: 1029 handles[0] = make_handle(mb[2], mb[1]); 1030 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), 1031 RD_MAILBOX_REG(ha, reg, 6)); 1032 handle_cnt = 2; 1033 mb[0] = MBA_SCSI_COMPLETION; 1034 break; 1035 default: 1036 break; 1037 } 1038 skip_rio: 1039 switch (mb[0]) { 1040 case MBA_SCSI_COMPLETION: /* Fast Post */ 1041 if (!vha->flags.online) 1042 break; 1043 1044 for (cnt = 0; cnt < handle_cnt; cnt++) 1045 qla2x00_process_completed_request(vha, rsp->req, 1046 handles[cnt]); 1047 break; 1048 1049 case MBA_RESET: /* Reset */ 1050 ql_dbg(ql_dbg_async, vha, 0x5002, 1051 "Asynchronous RESET.\n"); 1052 1053 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1054 break; 1055 1056 case MBA_SYSTEM_ERR: /* System Error */ 1057 mbx = 0; 1058 1059 vha->hw_err_cnt++; 1060 1061 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 1062 IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1063 u16 m[4]; 1064 1065 m[0] = rd_reg_word(®24->mailbox4); 1066 m[1] = rd_reg_word(®24->mailbox5); 1067 m[2] = rd_reg_word(®24->mailbox6); 1068 mbx = m[3] = rd_reg_word(®24->mailbox7); 1069 1070 ql_log(ql_log_warn, vha, 0x5003, 1071 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", 1072 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); 1073 } else 1074 ql_log(ql_log_warn, vha, 0x5003, 1075 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", 1076 mb[1], mb[2], mb[3]); 1077 1078 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 1079 rd_reg_word(®24->mailbox7) & BIT_8) 1080 ha->isp_ops->mpi_fw_dump(vha, 1); 1081 ha->isp_ops->fw_dump(vha); 1082 ha->flags.fw_init_done = 0; 1083 QLA_FW_STOPPED(ha); 1084 1085 if (IS_FWI2_CAPABLE(ha)) { 1086 if (mb[1] == 0 && mb[2] == 0) { 1087 ql_log(ql_log_fatal, vha, 0x5004, 1088 "Unrecoverable Hardware Error: adapter " 1089 "marked OFFLINE!\n"); 1090 vha->flags.online = 0; 1091 vha->device_flags |= DFLG_DEV_FAILED; 1092 } else { 1093 /* Check to see if MPI timeout occurred */ 1094 if ((mbx & MBX_3) && (ha->port_no == 0)) 1095 set_bit(MPI_RESET_NEEDED, 1096 &vha->dpc_flags); 1097 1098 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1099 } 1100 } else if (mb[1] == 0) { 1101 ql_log(ql_log_fatal, vha, 0x5005, 1102 "Unrecoverable Hardware Error: adapter marked " 1103 "OFFLINE!\n"); 1104 vha->flags.online = 0; 1105 vha->device_flags |= DFLG_DEV_FAILED; 1106 } else 1107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1108 break; 1109 1110 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 1111 ql_log(ql_log_warn, vha, 0x5006, 1112 "ISP Request Transfer Error (%x).\n", mb[1]); 1113 1114 vha->hw_err_cnt++; 1115 1116 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1117 break; 1118 1119 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 1120 ql_log(ql_log_warn, vha, 0x5007, 1121 "ISP Response Transfer Error (%x).\n", mb[1]); 1122 1123 vha->hw_err_cnt++; 1124 1125 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1126 break; 1127 1128 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 1129 ql_dbg(ql_dbg_async, vha, 0x5008, 1130 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); 1131 break; 1132 1133 case MBA_LOOP_INIT_ERR: 1134 ql_log(ql_log_warn, vha, 0x5090, 1135 "LOOP INIT ERROR (%x).\n", mb[1]); 1136 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1137 break; 1138 1139 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 1140 ha->flags.lip_ae = 1; 1141 1142 ql_dbg(ql_dbg_async, vha, 0x5009, 1143 "LIP occurred (%x).\n", mb[1]); 1144 1145 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1146 atomic_set(&vha->loop_state, LOOP_DOWN); 1147 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1148 qla2x00_mark_all_devices_lost(vha); 1149 } 1150 1151 if (vha->vp_idx) { 1152 atomic_set(&vha->vp_state, VP_FAILED); 1153 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1154 } 1155 1156 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1157 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1158 1159 vha->flags.management_server_logged_in = 0; 1160 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 1161 break; 1162 1163 case MBA_LOOP_UP: /* Loop Up Event */ 1164 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1165 ha->link_data_rate = PORT_SPEED_1GB; 1166 else 1167 ha->link_data_rate = mb[1]; 1168 1169 ql_log(ql_log_info, vha, 0x500a, 1170 "LOOP UP detected (%s Gbps).\n", 1171 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 1172 1173 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1174 if (mb[2] & BIT_0) 1175 ql_log(ql_log_info, vha, 0x11a0, 1176 "FEC=enabled (link up).\n"); 1177 } 1178 1179 vha->flags.management_server_logged_in = 0; 1180 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 1181 1182 if (vha->link_down_time < vha->hw->port_down_retry_count) { 1183 vha->short_link_down_cnt++; 1184 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 1185 } 1186 1187 break; 1188 1189 case MBA_LOOP_DOWN: /* Loop Down Event */ 1190 SAVE_TOPO(ha); 1191 ha->flags.lip_ae = 0; 1192 ha->current_topology = 0; 1193 vha->link_down_time = 0; 1194 1195 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 1196 ? rd_reg_word(®24->mailbox4) : 0; 1197 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(®82->mailbox_out[4]) 1198 : mbx; 1199 ql_log(ql_log_info, vha, 0x500b, 1200 "LOOP DOWN detected (%x %x %x %x).\n", 1201 mb[1], mb[2], mb[3], mbx); 1202 1203 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1204 atomic_set(&vha->loop_state, LOOP_DOWN); 1205 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1206 /* 1207 * In case of loop down, restore WWPN from 1208 * NVRAM in case of FA-WWPN capable ISP 1209 * Restore for Physical Port only 1210 */ 1211 if (!vha->vp_idx) { 1212 if (ha->flags.fawwpn_enabled && 1213 (ha->current_topology == ISP_CFG_F)) { 1214 void *wwpn = ha->init_cb->port_name; 1215 1216 memcpy(vha->port_name, wwpn, WWN_SIZE); 1217 fc_host_port_name(vha->host) = 1218 wwn_to_u64(vha->port_name); 1219 ql_dbg(ql_dbg_init + ql_dbg_verbose, 1220 vha, 0x00d8, "LOOP DOWN detected," 1221 "restore WWPN %016llx\n", 1222 wwn_to_u64(vha->port_name)); 1223 } 1224 1225 clear_bit(VP_CONFIG_OK, &vha->vp_flags); 1226 } 1227 1228 vha->device_flags |= DFLG_NO_CABLE; 1229 qla2x00_mark_all_devices_lost(vha); 1230 } 1231 1232 if (vha->vp_idx) { 1233 atomic_set(&vha->vp_state, VP_FAILED); 1234 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1235 } 1236 1237 vha->flags.management_server_logged_in = 0; 1238 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1239 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 1240 break; 1241 1242 case MBA_LIP_RESET: /* LIP reset occurred */ 1243 ql_dbg(ql_dbg_async, vha, 0x500c, 1244 "LIP reset occurred (%x).\n", mb[1]); 1245 1246 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1247 atomic_set(&vha->loop_state, LOOP_DOWN); 1248 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1249 qla2x00_mark_all_devices_lost(vha); 1250 } 1251 1252 if (vha->vp_idx) { 1253 atomic_set(&vha->vp_state, VP_FAILED); 1254 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1255 } 1256 1257 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1258 1259 ha->operating_mode = LOOP; 1260 vha->flags.management_server_logged_in = 0; 1261 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 1262 break; 1263 1264 /* case MBA_DCBX_COMPLETE: */ 1265 case MBA_POINT_TO_POINT: /* Point-to-Point */ 1266 ha->flags.lip_ae = 0; 1267 1268 if (IS_QLA2100(ha)) 1269 break; 1270 1271 if (IS_CNA_CAPABLE(ha)) { 1272 ql_dbg(ql_dbg_async, vha, 0x500d, 1273 "DCBX Completed -- %04x %04x %04x.\n", 1274 mb[1], mb[2], mb[3]); 1275 if (ha->notify_dcbx_comp && !vha->vp_idx) 1276 complete(&ha->dcbx_comp); 1277 1278 } else 1279 ql_dbg(ql_dbg_async, vha, 0x500e, 1280 "Asynchronous P2P MODE received.\n"); 1281 1282 /* 1283 * Until there's a transition from loop down to loop up, treat 1284 * this as loop down only. 1285 */ 1286 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1287 atomic_set(&vha->loop_state, LOOP_DOWN); 1288 if (!atomic_read(&vha->loop_down_timer)) 1289 atomic_set(&vha->loop_down_timer, 1290 LOOP_DOWN_TIME); 1291 if (!N2N_TOPO(ha)) 1292 qla2x00_mark_all_devices_lost(vha); 1293 } 1294 1295 if (vha->vp_idx) { 1296 atomic_set(&vha->vp_state, VP_FAILED); 1297 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1298 } 1299 1300 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 1301 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1302 1303 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1304 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1305 1306 vha->flags.management_server_logged_in = 0; 1307 break; 1308 1309 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 1310 if (IS_QLA2100(ha)) 1311 break; 1312 1313 ql_dbg(ql_dbg_async, vha, 0x500f, 1314 "Configuration change detected: value=%x.\n", mb[1]); 1315 1316 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1317 atomic_set(&vha->loop_state, LOOP_DOWN); 1318 if (!atomic_read(&vha->loop_down_timer)) 1319 atomic_set(&vha->loop_down_timer, 1320 LOOP_DOWN_TIME); 1321 qla2x00_mark_all_devices_lost(vha); 1322 } 1323 1324 if (vha->vp_idx) { 1325 atomic_set(&vha->vp_state, VP_FAILED); 1326 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1327 } 1328 1329 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1330 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1331 break; 1332 1333 case MBA_PORT_UPDATE: /* Port database update */ 1334 /* 1335 * Handle only global and vn-port update events 1336 * 1337 * Relevant inputs: 1338 * mb[1] = N_Port handle of changed port 1339 * OR 0xffff for global event 1340 * mb[2] = New login state 1341 * 7 = Port logged out 1342 * mb[3] = LSB is vp_idx, 0xff = all vps 1343 * 1344 * Skip processing if: 1345 * Event is global, vp_idx is NOT all vps, 1346 * vp_idx does not match 1347 * Event is not global, vp_idx does not match 1348 */ 1349 if (IS_QLA2XXX_MIDTYPE(ha) && 1350 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 1351 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 1352 break; 1353 1354 if (mb[2] == 0x7) { 1355 ql_dbg(ql_dbg_async, vha, 0x5010, 1356 "Port %s %04x %04x %04x.\n", 1357 mb[1] == 0xffff ? "unavailable" : "logout", 1358 mb[1], mb[2], mb[3]); 1359 1360 if (mb[1] == 0xffff) 1361 goto global_port_update; 1362 1363 if (mb[1] == NPH_SNS_LID(ha)) { 1364 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1365 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1366 break; 1367 } 1368 1369 /* use handle_cnt for loop id/nport handle */ 1370 if (IS_FWI2_CAPABLE(ha)) 1371 handle_cnt = NPH_SNS; 1372 else 1373 handle_cnt = SIMPLE_NAME_SERVER; 1374 if (mb[1] == handle_cnt) { 1375 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1376 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1377 break; 1378 } 1379 1380 /* Port logout */ 1381 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1382 if (!fcport) 1383 break; 1384 if (atomic_read(&fcport->state) != FCS_ONLINE) 1385 break; 1386 ql_dbg(ql_dbg_async, vha, 0x508a, 1387 "Marking port lost loopid=%04x portid=%06x.\n", 1388 fcport->loop_id, fcport->d_id.b24); 1389 if (qla_ini_mode_enabled(vha)) { 1390 fcport->logout_on_delete = 0; 1391 qlt_schedule_sess_for_deletion(fcport); 1392 } 1393 break; 1394 1395 global_port_update: 1396 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1397 atomic_set(&vha->loop_state, LOOP_DOWN); 1398 atomic_set(&vha->loop_down_timer, 1399 LOOP_DOWN_TIME); 1400 vha->device_flags |= DFLG_NO_CABLE; 1401 qla2x00_mark_all_devices_lost(vha); 1402 } 1403 1404 if (vha->vp_idx) { 1405 atomic_set(&vha->vp_state, VP_FAILED); 1406 fc_vport_set_state(vha->fc_vport, 1407 FC_VPORT_FAILED); 1408 qla2x00_mark_all_devices_lost(vha); 1409 } 1410 1411 vha->flags.management_server_logged_in = 0; 1412 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1413 break; 1414 } 1415 1416 /* 1417 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1418 * event etc. earlier indicating loop is down) then process 1419 * it. Otherwise ignore it and Wait for RSCN to come in. 1420 */ 1421 atomic_set(&vha->loop_down_timer, 0); 1422 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1423 !ha->flags.n2n_ae && 1424 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1425 ql_dbg(ql_dbg_async, vha, 0x5011, 1426 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1427 mb[1], mb[2], mb[3]); 1428 break; 1429 } 1430 1431 ql_dbg(ql_dbg_async, vha, 0x5012, 1432 "Port database changed %04x %04x %04x.\n", 1433 mb[1], mb[2], mb[3]); 1434 1435 /* 1436 * Mark all devices as missing so we will login again. 1437 */ 1438 atomic_set(&vha->loop_state, LOOP_UP); 1439 vha->scan.scan_retry = 0; 1440 1441 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1442 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1443 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1444 break; 1445 1446 case MBA_RSCN_UPDATE: /* State Change Registration */ 1447 /* Check if the Vport has issued a SCR */ 1448 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1449 break; 1450 /* Only handle SCNs for our Vport index. */ 1451 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1452 break; 1453 1454 ql_log(ql_log_warn, vha, 0x5013, 1455 "RSCN database changed -- %04x %04x %04x.\n", 1456 mb[1], mb[2], mb[3]); 1457 1458 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1459 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1460 | vha->d_id.b.al_pa; 1461 if (rscn_entry == host_pid) { 1462 ql_dbg(ql_dbg_async, vha, 0x5014, 1463 "Ignoring RSCN update to local host " 1464 "port ID (%06x).\n", host_pid); 1465 break; 1466 } 1467 1468 /* Ignore reserved bits from RSCN-payload. */ 1469 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 1470 1471 /* Skip RSCNs for virtual ports on the same physical port */ 1472 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 1473 break; 1474 1475 atomic_set(&vha->loop_down_timer, 0); 1476 vha->flags.management_server_logged_in = 0; 1477 { 1478 struct event_arg ea; 1479 1480 memset(&ea, 0, sizeof(ea)); 1481 ea.id.b24 = rscn_entry; 1482 ea.id.b.rsvd_1 = rscn_entry >> 24; 1483 qla2x00_handle_rscn(vha, &ea); 1484 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1485 } 1486 break; 1487 case MBA_CONGN_NOTI_RECV: 1488 if (!ha->flags.scm_enabled || 1489 mb[1] != QLA_CON_PRIMITIVE_RECEIVED) 1490 break; 1491 1492 if (mb[2] == QLA_CONGESTION_ARB_WARNING) { 1493 ql_dbg(ql_dbg_async, vha, 0x509b, 1494 "Congestion Warning %04x %04x.\n", mb[1], mb[2]); 1495 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { 1496 ql_log(ql_log_warn, vha, 0x509b, 1497 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); 1498 } 1499 break; 1500 /* case MBA_RIO_RESPONSE: */ 1501 case MBA_ZIO_RESPONSE: 1502 ql_dbg(ql_dbg_async, vha, 0x5015, 1503 "[R|Z]IO update completion.\n"); 1504 1505 if (IS_FWI2_CAPABLE(ha)) 1506 qla24xx_process_response_queue(vha, rsp); 1507 else 1508 qla2x00_process_response_queue(rsp); 1509 break; 1510 1511 case MBA_DISCARD_RND_FRAME: 1512 ql_dbg(ql_dbg_async, vha, 0x5016, 1513 "Discard RND Frame -- %04x %04x %04x.\n", 1514 mb[1], mb[2], mb[3]); 1515 vha->interface_err_cnt++; 1516 break; 1517 1518 case MBA_TRACE_NOTIFICATION: 1519 ql_dbg(ql_dbg_async, vha, 0x5017, 1520 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1521 break; 1522 1523 case MBA_ISP84XX_ALERT: 1524 ql_dbg(ql_dbg_async, vha, 0x5018, 1525 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1526 mb[1], mb[2], mb[3]); 1527 1528 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1529 switch (mb[1]) { 1530 case A84_PANIC_RECOVERY: 1531 ql_log(ql_log_info, vha, 0x5019, 1532 "Alert 84XX: panic recovery %04x %04x.\n", 1533 mb[2], mb[3]); 1534 break; 1535 case A84_OP_LOGIN_COMPLETE: 1536 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1537 ql_log(ql_log_info, vha, 0x501a, 1538 "Alert 84XX: firmware version %x.\n", 1539 ha->cs84xx->op_fw_version); 1540 break; 1541 case A84_DIAG_LOGIN_COMPLETE: 1542 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1543 ql_log(ql_log_info, vha, 0x501b, 1544 "Alert 84XX: diagnostic firmware version %x.\n", 1545 ha->cs84xx->diag_fw_version); 1546 break; 1547 case A84_GOLD_LOGIN_COMPLETE: 1548 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1549 ha->cs84xx->fw_update = 1; 1550 ql_log(ql_log_info, vha, 0x501c, 1551 "Alert 84XX: gold firmware version %x.\n", 1552 ha->cs84xx->gold_fw_version); 1553 break; 1554 default: 1555 ql_log(ql_log_warn, vha, 0x501d, 1556 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1557 mb[1], mb[2], mb[3]); 1558 } 1559 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1560 break; 1561 case MBA_DCBX_START: 1562 ql_dbg(ql_dbg_async, vha, 0x501e, 1563 "DCBX Started -- %04x %04x %04x.\n", 1564 mb[1], mb[2], mb[3]); 1565 break; 1566 case MBA_DCBX_PARAM_UPDATE: 1567 ql_dbg(ql_dbg_async, vha, 0x501f, 1568 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1569 mb[1], mb[2], mb[3]); 1570 break; 1571 case MBA_FCF_CONF_ERR: 1572 ql_dbg(ql_dbg_async, vha, 0x5020, 1573 "FCF Configuration Error -- %04x %04x %04x.\n", 1574 mb[1], mb[2], mb[3]); 1575 break; 1576 case MBA_IDC_NOTIFY: 1577 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1578 mb[4] = rd_reg_word(®24->mailbox4); 1579 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1580 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1581 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1582 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1583 /* 1584 * Extend loop down timer since port is active. 1585 */ 1586 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1587 atomic_set(&vha->loop_down_timer, 1588 LOOP_DOWN_TIME); 1589 qla2xxx_wake_dpc(vha); 1590 } 1591 } 1592 fallthrough; 1593 case MBA_IDC_COMPLETE: 1594 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1595 complete(&ha->lb_portup_comp); 1596 fallthrough; 1597 case MBA_IDC_TIME_EXT: 1598 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1599 IS_QLA8044(ha)) 1600 qla81xx_idc_event(vha, mb[0], mb[1]); 1601 break; 1602 1603 case MBA_IDC_AEN: 1604 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1605 vha->hw_err_cnt++; 1606 qla27xx_handle_8200_aen(vha, mb); 1607 } else if (IS_QLA83XX(ha)) { 1608 mb[4] = rd_reg_word(®24->mailbox4); 1609 mb[5] = rd_reg_word(®24->mailbox5); 1610 mb[6] = rd_reg_word(®24->mailbox6); 1611 mb[7] = rd_reg_word(®24->mailbox7); 1612 qla83xx_handle_8200_aen(vha, mb); 1613 } else { 1614 ql_dbg(ql_dbg_async, vha, 0x5052, 1615 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", 1616 mb[0], mb[1], mb[2], mb[3]); 1617 } 1618 break; 1619 1620 case MBA_DPORT_DIAGNOSTICS: 1621 ql_dbg(ql_dbg_async, vha, 0x5052, 1622 "D-Port Diagnostics: %04x %04x %04x %04x\n", 1623 mb[0], mb[1], mb[2], mb[3]); 1624 memcpy(vha->dport_data, mb, sizeof(vha->dport_data)); 1625 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1626 static char *results[] = { 1627 "start", "done(pass)", "done(error)", "undefined" }; 1628 static char *types[] = { 1629 "none", "dynamic", "static", "other" }; 1630 uint result = mb[1] >> 0 & 0x3; 1631 uint type = mb[1] >> 6 & 0x3; 1632 uint sw = mb[1] >> 15 & 0x1; 1633 ql_dbg(ql_dbg_async, vha, 0x5052, 1634 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", 1635 results[result], types[type], sw); 1636 if (result == 2) { 1637 static char *reasons[] = { 1638 "reserved", "unexpected reject", 1639 "unexpected phase", "retry exceeded", 1640 "timed out", "not supported", 1641 "user stopped" }; 1642 uint reason = mb[2] >> 0 & 0xf; 1643 uint phase = mb[2] >> 12 & 0xf; 1644 ql_dbg(ql_dbg_async, vha, 0x5052, 1645 "D-Port Diagnostics: reason=%s phase=%u \n", 1646 reason < 7 ? reasons[reason] : "other", 1647 phase >> 1); 1648 } 1649 } 1650 break; 1651 1652 case MBA_TEMPERATURE_ALERT: 1653 ql_dbg(ql_dbg_async, vha, 0x505e, 1654 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1655 break; 1656 1657 case MBA_TRANS_INSERT: 1658 ql_dbg(ql_dbg_async, vha, 0x5091, 1659 "Transceiver Insertion: %04x\n", mb[1]); 1660 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); 1661 break; 1662 1663 case MBA_TRANS_REMOVE: 1664 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); 1665 break; 1666 1667 default: 1668 ql_dbg(ql_dbg_async, vha, 0x5057, 1669 "Unknown AEN:%04x %04x %04x %04x\n", 1670 mb[0], mb[1], mb[2], mb[3]); 1671 } 1672 1673 qlt_async_event(mb[0], vha, mb); 1674 1675 if (!vha->vp_idx && ha->num_vhosts) 1676 qla2x00_alert_all_vps(rsp, mb); 1677 } 1678 1679 /** 1680 * qla2x00_process_completed_request() - Process a Fast Post response. 1681 * @vha: SCSI driver HA context 1682 * @req: request queue 1683 * @index: SRB index 1684 */ 1685 void 1686 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1687 struct req_que *req, uint32_t index) 1688 { 1689 srb_t *sp; 1690 struct qla_hw_data *ha = vha->hw; 1691 1692 /* Validate handle. */ 1693 if (index >= req->num_outstanding_cmds) { 1694 ql_log(ql_log_warn, vha, 0x3014, 1695 "Invalid SCSI command index (%x).\n", index); 1696 1697 if (IS_P3P_TYPE(ha)) 1698 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1699 else 1700 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1701 return; 1702 } 1703 1704 sp = req->outstanding_cmds[index]; 1705 if (sp) { 1706 /* Free outstanding command slot. */ 1707 req->outstanding_cmds[index] = NULL; 1708 1709 /* Save ISP completion status */ 1710 sp->done(sp, DID_OK << 16); 1711 } else { 1712 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1713 1714 if (IS_P3P_TYPE(ha)) 1715 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1716 else 1717 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1718 } 1719 } 1720 1721 srb_t * 1722 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1723 struct req_que *req, void *iocb) 1724 { 1725 struct qla_hw_data *ha = vha->hw; 1726 sts_entry_t *pkt = iocb; 1727 srb_t *sp; 1728 uint16_t index; 1729 1730 index = LSW(pkt->handle); 1731 if (index >= req->num_outstanding_cmds) { 1732 ql_log(ql_log_warn, vha, 0x5031, 1733 "%s: Invalid command index (%x) type %8ph.\n", 1734 func, index, iocb); 1735 if (IS_P3P_TYPE(ha)) 1736 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1737 else 1738 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1739 return NULL; 1740 } 1741 sp = req->outstanding_cmds[index]; 1742 if (!sp) { 1743 ql_log(ql_log_warn, vha, 0x5032, 1744 "%s: Invalid completion handle (%x) -- timed-out.\n", 1745 func, index); 1746 return NULL; 1747 } 1748 if (sp->handle != index) { 1749 ql_log(ql_log_warn, vha, 0x5033, 1750 "%s: SRB handle (%x) mismatch %x.\n", func, 1751 sp->handle, index); 1752 return NULL; 1753 } 1754 1755 req->outstanding_cmds[index] = NULL; 1756 return sp; 1757 } 1758 1759 static void 1760 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1761 struct mbx_entry *mbx) 1762 { 1763 const char func[] = "MBX-IOCB"; 1764 const char *type; 1765 fc_port_t *fcport; 1766 srb_t *sp; 1767 struct srb_iocb *lio; 1768 uint16_t *data; 1769 uint16_t status; 1770 1771 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1772 if (!sp) 1773 return; 1774 1775 lio = &sp->u.iocb_cmd; 1776 type = sp->name; 1777 fcport = sp->fcport; 1778 data = lio->u.logio.data; 1779 1780 data[0] = MBS_COMMAND_ERROR; 1781 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1782 QLA_LOGIO_LOGIN_RETRIED : 0; 1783 if (mbx->entry_status) { 1784 ql_dbg(ql_dbg_async, vha, 0x5043, 1785 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1786 "entry-status=%x status=%x state-flag=%x " 1787 "status-flags=%x.\n", type, sp->handle, 1788 fcport->d_id.b.domain, fcport->d_id.b.area, 1789 fcport->d_id.b.al_pa, mbx->entry_status, 1790 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1791 le16_to_cpu(mbx->status_flags)); 1792 1793 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1794 mbx, sizeof(*mbx)); 1795 1796 goto logio_done; 1797 } 1798 1799 status = le16_to_cpu(mbx->status); 1800 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1801 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1802 status = 0; 1803 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1804 ql_dbg(ql_dbg_async, vha, 0x5045, 1805 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1806 type, sp->handle, fcport->d_id.b.domain, 1807 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1808 le16_to_cpu(mbx->mb1)); 1809 1810 data[0] = MBS_COMMAND_COMPLETE; 1811 if (sp->type == SRB_LOGIN_CMD) { 1812 fcport->port_type = FCT_TARGET; 1813 if (le16_to_cpu(mbx->mb1) & BIT_0) 1814 fcport->port_type = FCT_INITIATOR; 1815 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1816 fcport->flags |= FCF_FCP2_DEVICE; 1817 } 1818 goto logio_done; 1819 } 1820 1821 data[0] = le16_to_cpu(mbx->mb0); 1822 switch (data[0]) { 1823 case MBS_PORT_ID_USED: 1824 data[1] = le16_to_cpu(mbx->mb1); 1825 break; 1826 case MBS_LOOP_ID_USED: 1827 break; 1828 default: 1829 data[0] = MBS_COMMAND_ERROR; 1830 break; 1831 } 1832 1833 ql_log(ql_log_warn, vha, 0x5046, 1834 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1835 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1836 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1837 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1838 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1839 le16_to_cpu(mbx->mb7)); 1840 1841 logio_done: 1842 sp->done(sp, 0); 1843 } 1844 1845 static void 1846 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1847 struct mbx_24xx_entry *pkt) 1848 { 1849 const char func[] = "MBX-IOCB2"; 1850 struct qla_hw_data *ha = vha->hw; 1851 srb_t *sp; 1852 struct srb_iocb *si; 1853 u16 sz, i; 1854 int res; 1855 1856 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1857 if (!sp) 1858 return; 1859 1860 if (sp->type == SRB_SCSI_CMD || 1861 sp->type == SRB_NVME_CMD || 1862 sp->type == SRB_TM_CMD) { 1863 ql_log(ql_log_warn, vha, 0x509d, 1864 "Inconsistent event entry type %d\n", sp->type); 1865 if (IS_P3P_TYPE(ha)) 1866 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1867 else 1868 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1869 return; 1870 } 1871 1872 si = &sp->u.iocb_cmd; 1873 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); 1874 1875 for (i = 0; i < sz; i++) 1876 si->u.mbx.in_mb[i] = pkt->mb[i]; 1877 1878 res = (si->u.mbx.in_mb[0] & MBS_MASK); 1879 1880 sp->done(sp, res); 1881 } 1882 1883 static void 1884 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1885 struct nack_to_isp *pkt) 1886 { 1887 const char func[] = "nack"; 1888 srb_t *sp; 1889 int res = 0; 1890 1891 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1892 if (!sp) 1893 return; 1894 1895 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) 1896 res = QLA_FUNCTION_FAILED; 1897 1898 sp->done(sp, res); 1899 } 1900 1901 static void 1902 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1903 sts_entry_t *pkt, int iocb_type) 1904 { 1905 const char func[] = "CT_IOCB"; 1906 const char *type; 1907 srb_t *sp; 1908 struct bsg_job *bsg_job; 1909 struct fc_bsg_reply *bsg_reply; 1910 uint16_t comp_status; 1911 int res = 0; 1912 1913 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1914 if (!sp) 1915 return; 1916 1917 switch (sp->type) { 1918 case SRB_CT_CMD: 1919 bsg_job = sp->u.bsg_job; 1920 bsg_reply = bsg_job->reply; 1921 1922 type = "ct pass-through"; 1923 1924 comp_status = le16_to_cpu(pkt->comp_status); 1925 1926 /* 1927 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1928 * fc payload to the caller 1929 */ 1930 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1931 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1932 1933 if (comp_status != CS_COMPLETE) { 1934 if (comp_status == CS_DATA_UNDERRUN) { 1935 res = DID_OK << 16; 1936 bsg_reply->reply_payload_rcv_len = 1937 le16_to_cpu(pkt->rsp_info_len); 1938 1939 ql_log(ql_log_warn, vha, 0x5048, 1940 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", 1941 type, comp_status, 1942 bsg_reply->reply_payload_rcv_len); 1943 } else { 1944 ql_log(ql_log_warn, vha, 0x5049, 1945 "CT pass-through-%s error comp_status=0x%x.\n", 1946 type, comp_status); 1947 res = DID_ERROR << 16; 1948 bsg_reply->reply_payload_rcv_len = 0; 1949 } 1950 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1951 pkt, sizeof(*pkt)); 1952 } else { 1953 res = DID_OK << 16; 1954 bsg_reply->reply_payload_rcv_len = 1955 bsg_job->reply_payload.payload_len; 1956 bsg_job->reply_len = 0; 1957 } 1958 break; 1959 case SRB_CT_PTHRU_CMD: 1960 /* 1961 * borrowing sts_entry_24xx.comp_status. 1962 * same location as ct_entry_24xx.comp_status 1963 */ 1964 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 1965 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1966 sp->name); 1967 break; 1968 } 1969 1970 sp->done(sp, res); 1971 } 1972 1973 static void 1974 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1975 struct sts_entry_24xx *pkt, int iocb_type) 1976 { 1977 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; 1978 const char func[] = "ELS_CT_IOCB"; 1979 const char *type; 1980 srb_t *sp; 1981 struct bsg_job *bsg_job; 1982 struct fc_bsg_reply *bsg_reply; 1983 uint16_t comp_status; 1984 uint32_t fw_status[3]; 1985 int res; 1986 struct srb_iocb *els; 1987 1988 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1989 if (!sp) 1990 return; 1991 1992 type = NULL; 1993 switch (sp->type) { 1994 case SRB_ELS_CMD_RPT: 1995 case SRB_ELS_CMD_HST: 1996 type = "els"; 1997 break; 1998 case SRB_CT_CMD: 1999 type = "ct pass-through"; 2000 break; 2001 case SRB_ELS_DCMD: 2002 type = "Driver ELS logo"; 2003 if (iocb_type != ELS_IOCB_TYPE) { 2004 ql_dbg(ql_dbg_user, vha, 0x5047, 2005 "Completing %s: (%p) type=%d.\n", 2006 type, sp, sp->type); 2007 sp->done(sp, 0); 2008 return; 2009 } 2010 break; 2011 case SRB_CT_PTHRU_CMD: 2012 /* borrowing sts_entry_24xx.comp_status. 2013 same location as ct_entry_24xx.comp_status 2014 */ 2015 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, 2016 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 2017 sp->name); 2018 sp->done(sp, res); 2019 return; 2020 default: 2021 ql_dbg(ql_dbg_user, vha, 0x503e, 2022 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 2023 return; 2024 } 2025 2026 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 2027 fw_status[1] = le32_to_cpu(ese->error_subcode_1); 2028 fw_status[2] = le32_to_cpu(ese->error_subcode_2); 2029 2030 if (iocb_type == ELS_IOCB_TYPE) { 2031 els = &sp->u.iocb_cmd; 2032 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); 2033 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); 2034 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); 2035 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); 2036 if (comp_status == CS_COMPLETE) { 2037 res = DID_OK << 16; 2038 } else { 2039 if (comp_status == CS_DATA_UNDERRUN) { 2040 res = DID_OK << 16; 2041 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( 2042 ese->total_byte_count)); 2043 } else { 2044 els->u.els_plogi.len = 0; 2045 res = DID_ERROR << 16; 2046 } 2047 } 2048 ql_dbg(ql_dbg_disc, vha, 0x503f, 2049 "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", 2050 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2051 le32_to_cpu(ese->total_byte_count)); 2052 goto els_ct_done; 2053 } 2054 2055 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 2056 * fc payload to the caller 2057 */ 2058 bsg_job = sp->u.bsg_job; 2059 bsg_reply = bsg_job->reply; 2060 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 2061 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 2062 2063 if (comp_status != CS_COMPLETE) { 2064 if (comp_status == CS_DATA_UNDERRUN) { 2065 res = DID_OK << 16; 2066 bsg_reply->reply_payload_rcv_len = 2067 le32_to_cpu(ese->total_byte_count); 2068 2069 ql_dbg(ql_dbg_user, vha, 0x503f, 2070 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2071 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 2072 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2073 le32_to_cpu(ese->total_byte_count)); 2074 } else { 2075 ql_dbg(ql_dbg_user, vha, 0x5040, 2076 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2077 "error subcode 1=0x%x error subcode 2=0x%x.\n", 2078 type, sp->handle, comp_status, 2079 le32_to_cpu(ese->error_subcode_1), 2080 le32_to_cpu(ese->error_subcode_2)); 2081 res = DID_ERROR << 16; 2082 bsg_reply->reply_payload_rcv_len = 0; 2083 } 2084 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), 2085 fw_status, sizeof(fw_status)); 2086 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 2087 pkt, sizeof(*pkt)); 2088 } 2089 else { 2090 res = DID_OK << 16; 2091 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 2092 bsg_job->reply_len = 0; 2093 } 2094 els_ct_done: 2095 2096 sp->done(sp, res); 2097 } 2098 2099 static void 2100 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 2101 struct logio_entry_24xx *logio) 2102 { 2103 const char func[] = "LOGIO-IOCB"; 2104 const char *type; 2105 fc_port_t *fcport; 2106 srb_t *sp; 2107 struct srb_iocb *lio; 2108 uint16_t *data; 2109 uint32_t iop[2]; 2110 2111 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 2112 if (!sp) 2113 return; 2114 2115 lio = &sp->u.iocb_cmd; 2116 type = sp->name; 2117 fcport = sp->fcport; 2118 data = lio->u.logio.data; 2119 2120 data[0] = MBS_COMMAND_ERROR; 2121 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 2122 QLA_LOGIO_LOGIN_RETRIED : 0; 2123 if (logio->entry_status) { 2124 ql_log(ql_log_warn, fcport->vha, 0x5034, 2125 "Async-%s error entry - %8phC hdl=%x" 2126 "portid=%02x%02x%02x entry-status=%x.\n", 2127 type, fcport->port_name, sp->handle, fcport->d_id.b.domain, 2128 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2129 logio->entry_status); 2130 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 2131 logio, sizeof(*logio)); 2132 2133 goto logio_done; 2134 } 2135 2136 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 2137 ql_dbg(ql_dbg_async, sp->vha, 0x5036, 2138 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", 2139 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2140 le32_to_cpu(logio->io_parameter[0])); 2141 2142 vha->hw->exch_starvation = 0; 2143 data[0] = MBS_COMMAND_COMPLETE; 2144 2145 if (sp->type == SRB_PRLI_CMD) { 2146 lio->u.logio.iop[0] = 2147 le32_to_cpu(logio->io_parameter[0]); 2148 lio->u.logio.iop[1] = 2149 le32_to_cpu(logio->io_parameter[1]); 2150 goto logio_done; 2151 } 2152 2153 if (sp->type != SRB_LOGIN_CMD) 2154 goto logio_done; 2155 2156 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2157 if (iop[0] & BIT_4) { 2158 fcport->port_type = FCT_TARGET; 2159 if (iop[0] & BIT_8) 2160 fcport->flags |= FCF_FCP2_DEVICE; 2161 } else if (iop[0] & BIT_5) 2162 fcport->port_type = FCT_INITIATOR; 2163 2164 if (iop[0] & BIT_7) 2165 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2166 2167 if (logio->io_parameter[7] || logio->io_parameter[8]) 2168 fcport->supported_classes |= FC_COS_CLASS2; 2169 if (logio->io_parameter[9] || logio->io_parameter[10]) 2170 fcport->supported_classes |= FC_COS_CLASS3; 2171 2172 goto logio_done; 2173 } 2174 2175 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2176 iop[1] = le32_to_cpu(logio->io_parameter[1]); 2177 lio->u.logio.iop[0] = iop[0]; 2178 lio->u.logio.iop[1] = iop[1]; 2179 switch (iop[0]) { 2180 case LSC_SCODE_PORTID_USED: 2181 data[0] = MBS_PORT_ID_USED; 2182 data[1] = LSW(iop[1]); 2183 break; 2184 case LSC_SCODE_NPORT_USED: 2185 data[0] = MBS_LOOP_ID_USED; 2186 break; 2187 case LSC_SCODE_CMD_FAILED: 2188 if (iop[1] == 0x0606) { 2189 /* 2190 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, 2191 * Target side acked. 2192 */ 2193 data[0] = MBS_COMMAND_COMPLETE; 2194 goto logio_done; 2195 } 2196 data[0] = MBS_COMMAND_ERROR; 2197 break; 2198 case LSC_SCODE_NOXCB: 2199 vha->hw->exch_starvation++; 2200 if (vha->hw->exch_starvation > 5) { 2201 ql_log(ql_log_warn, vha, 0xd046, 2202 "Exchange starvation. Resetting RISC\n"); 2203 2204 vha->hw->exch_starvation = 0; 2205 2206 if (IS_P3P_TYPE(vha->hw)) 2207 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2208 else 2209 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2210 qla2xxx_wake_dpc(vha); 2211 } 2212 fallthrough; 2213 default: 2214 data[0] = MBS_COMMAND_ERROR; 2215 break; 2216 } 2217 2218 ql_log(ql_log_warn, sp->vha, 0x5037, 2219 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", 2220 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2221 le16_to_cpu(logio->comp_status), 2222 le32_to_cpu(logio->io_parameter[0]), 2223 le32_to_cpu(logio->io_parameter[1])); 2224 2225 logio_done: 2226 sp->done(sp, 0); 2227 } 2228 2229 static void 2230 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 2231 { 2232 const char func[] = "TMF-IOCB"; 2233 const char *type; 2234 fc_port_t *fcport; 2235 srb_t *sp; 2236 struct srb_iocb *iocb; 2237 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2238 u16 comp_status; 2239 2240 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 2241 if (!sp) 2242 return; 2243 2244 comp_status = le16_to_cpu(sts->comp_status); 2245 iocb = &sp->u.iocb_cmd; 2246 type = sp->name; 2247 fcport = sp->fcport; 2248 iocb->u.tmf.data = QLA_SUCCESS; 2249 2250 if (sts->entry_status) { 2251 ql_log(ql_log_warn, fcport->vha, 0x5038, 2252 "Async-%s error - hdl=%x entry-status(%x).\n", 2253 type, sp->handle, sts->entry_status); 2254 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2255 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 2256 ql_log(ql_log_warn, fcport->vha, 0x5039, 2257 "Async-%s error - hdl=%x completion status(%x).\n", 2258 type, sp->handle, comp_status); 2259 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2260 } else if ((le16_to_cpu(sts->scsi_status) & 2261 SS_RESPONSE_INFO_LEN_VALID)) { 2262 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2263 ql_log(ql_log_warn, fcport->vha, 0x503b, 2264 "Async-%s error - hdl=%x not enough response(%d).\n", 2265 type, sp->handle, sts->rsp_data_len); 2266 } else if (sts->data[3]) { 2267 ql_log(ql_log_warn, fcport->vha, 0x503c, 2268 "Async-%s error - hdl=%x response(%x).\n", 2269 type, sp->handle, sts->data[3]); 2270 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2271 } 2272 } 2273 2274 switch (comp_status) { 2275 case CS_PORT_LOGGED_OUT: 2276 case CS_PORT_CONFIG_CHG: 2277 case CS_PORT_BUSY: 2278 case CS_INCOMPLETE: 2279 case CS_PORT_UNAVAILABLE: 2280 case CS_TIMEOUT: 2281 case CS_RESET: 2282 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2283 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2284 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n", 2285 fcport->d_id.b.domain, fcport->d_id.b.area, 2286 fcport->d_id.b.al_pa, 2287 port_state_str[FCS_ONLINE], 2288 comp_status); 2289 2290 qlt_schedule_sess_for_deletion(fcport); 2291 } 2292 break; 2293 2294 default: 2295 break; 2296 } 2297 2298 if (iocb->u.tmf.data != QLA_SUCCESS) 2299 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, 2300 sts, sizeof(*sts)); 2301 2302 sp->done(sp, 0); 2303 } 2304 2305 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2306 void *tsk, srb_t *sp) 2307 { 2308 fc_port_t *fcport; 2309 struct srb_iocb *iocb; 2310 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2311 uint16_t state_flags; 2312 struct nvmefc_fcp_req *fd; 2313 uint16_t ret = QLA_SUCCESS; 2314 __le16 comp_status = sts->comp_status; 2315 int logit = 0; 2316 2317 iocb = &sp->u.iocb_cmd; 2318 fcport = sp->fcport; 2319 iocb->u.nvme.comp_status = comp_status; 2320 state_flags = le16_to_cpu(sts->state_flags); 2321 fd = iocb->u.nvme.desc; 2322 2323 if (unlikely(iocb->u.nvme.aen_op)) 2324 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); 2325 2326 if (unlikely(comp_status != CS_COMPLETE)) 2327 logit = 1; 2328 2329 fd->transferred_length = fd->payload_length - 2330 le32_to_cpu(sts->residual_len); 2331 2332 /* 2333 * State flags: Bit 6 and 0. 2334 * If 0 is set, we don't care about 6. 2335 * both cases resp was dma'd to host buffer 2336 * if both are 0, that is good path case. 2337 * if six is set and 0 is clear, we need to 2338 * copy resp data from status iocb to resp buffer. 2339 */ 2340 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { 2341 iocb->u.nvme.rsp_pyld_len = 0; 2342 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == 2343 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { 2344 /* Response already DMA'd to fd->rspaddr. */ 2345 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2346 } else if ((state_flags & SF_FCP_RSP_DMA)) { 2347 /* 2348 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this 2349 * as an error. 2350 */ 2351 iocb->u.nvme.rsp_pyld_len = 0; 2352 fd->transferred_length = 0; 2353 ql_dbg(ql_dbg_io, fcport->vha, 0x307a, 2354 "Unexpected values in NVMe_RSP IU.\n"); 2355 logit = 1; 2356 } else if (state_flags & SF_NVME_ERSP) { 2357 uint32_t *inbuf, *outbuf; 2358 uint16_t iter; 2359 2360 inbuf = (uint32_t *)&sts->nvme_ersp_data; 2361 outbuf = (uint32_t *)fd->rspaddr; 2362 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2363 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > 2364 sizeof(struct nvme_fc_ersp_iu))) { 2365 if (ql_mask_match(ql_dbg_io)) { 2366 WARN_ONCE(1, "Unexpected response payload length %u.\n", 2367 iocb->u.nvme.rsp_pyld_len); 2368 ql_log(ql_log_warn, fcport->vha, 0x5100, 2369 "Unexpected response payload length %u.\n", 2370 iocb->u.nvme.rsp_pyld_len); 2371 } 2372 iocb->u.nvme.rsp_pyld_len = 2373 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); 2374 } 2375 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; 2376 for (; iter; iter--) 2377 *outbuf++ = swab32(*inbuf++); 2378 } 2379 2380 if (state_flags & SF_NVME_ERSP) { 2381 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; 2382 u32 tgt_xfer_len; 2383 2384 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); 2385 if (fd->transferred_length != tgt_xfer_len) { 2386 ql_log(ql_log_warn, fcport->vha, 0x3079, 2387 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", 2388 tgt_xfer_len, fd->transferred_length); 2389 logit = 1; 2390 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { 2391 /* 2392 * Do not log if this is just an underflow and there 2393 * is no data loss. 2394 */ 2395 logit = 0; 2396 } 2397 } 2398 2399 if (unlikely(logit)) 2400 ql_log(ql_log_warn, fcport->vha, 0x5060, 2401 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", 2402 sp->name, sp->handle, comp_status, 2403 fd->transferred_length, le32_to_cpu(sts->residual_len), 2404 sts->ox_id); 2405 2406 /* 2407 * If transport error then Failure (HBA rejects request) 2408 * otherwise transport will handle. 2409 */ 2410 switch (le16_to_cpu(comp_status)) { 2411 case CS_COMPLETE: 2412 break; 2413 2414 case CS_RESET: 2415 case CS_PORT_UNAVAILABLE: 2416 case CS_PORT_LOGGED_OUT: 2417 fcport->nvme_flag |= NVME_FLAG_RESETTING; 2418 fallthrough; 2419 case CS_ABORTED: 2420 case CS_PORT_BUSY: 2421 fd->transferred_length = 0; 2422 iocb->u.nvme.rsp_pyld_len = 0; 2423 ret = QLA_ABORTED; 2424 break; 2425 case CS_DATA_UNDERRUN: 2426 break; 2427 default: 2428 ret = QLA_FUNCTION_FAILED; 2429 break; 2430 } 2431 sp->done(sp, ret); 2432 } 2433 2434 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 2435 struct vp_ctrl_entry_24xx *vce) 2436 { 2437 const char func[] = "CTRLVP-IOCB"; 2438 srb_t *sp; 2439 int rval = QLA_SUCCESS; 2440 2441 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 2442 if (!sp) 2443 return; 2444 2445 if (vce->entry_status != 0) { 2446 ql_dbg(ql_dbg_vport, vha, 0x10c4, 2447 "%s: Failed to complete IOCB -- error status (%x)\n", 2448 sp->name, vce->entry_status); 2449 rval = QLA_FUNCTION_FAILED; 2450 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 2451 ql_dbg(ql_dbg_vport, vha, 0x10c5, 2452 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 2453 sp->name, le16_to_cpu(vce->comp_status), 2454 le16_to_cpu(vce->vp_idx_failed)); 2455 rval = QLA_FUNCTION_FAILED; 2456 } else { 2457 ql_dbg(ql_dbg_vport, vha, 0x10c6, 2458 "Done %s.\n", __func__); 2459 } 2460 2461 sp->rc = rval; 2462 sp->done(sp, rval); 2463 } 2464 2465 /* Process a single response queue entry. */ 2466 static void qla2x00_process_response_entry(struct scsi_qla_host *vha, 2467 struct rsp_que *rsp, 2468 sts_entry_t *pkt) 2469 { 2470 sts21_entry_t *sts21_entry; 2471 sts22_entry_t *sts22_entry; 2472 uint16_t handle_cnt; 2473 uint16_t cnt; 2474 2475 switch (pkt->entry_type) { 2476 case STATUS_TYPE: 2477 qla2x00_status_entry(vha, rsp, pkt); 2478 break; 2479 case STATUS_TYPE_21: 2480 sts21_entry = (sts21_entry_t *)pkt; 2481 handle_cnt = sts21_entry->handle_count; 2482 for (cnt = 0; cnt < handle_cnt; cnt++) 2483 qla2x00_process_completed_request(vha, rsp->req, 2484 sts21_entry->handle[cnt]); 2485 break; 2486 case STATUS_TYPE_22: 2487 sts22_entry = (sts22_entry_t *)pkt; 2488 handle_cnt = sts22_entry->handle_count; 2489 for (cnt = 0; cnt < handle_cnt; cnt++) 2490 qla2x00_process_completed_request(vha, rsp->req, 2491 sts22_entry->handle[cnt]); 2492 break; 2493 case STATUS_CONT_TYPE: 2494 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2495 break; 2496 case MBX_IOCB_TYPE: 2497 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); 2498 break; 2499 case CT_IOCB_TYPE: 2500 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2501 break; 2502 default: 2503 /* Type Not Supported. */ 2504 ql_log(ql_log_warn, vha, 0x504a, 2505 "Received unknown response pkt type %x entry status=%x.\n", 2506 pkt->entry_type, pkt->entry_status); 2507 break; 2508 } 2509 } 2510 2511 /** 2512 * qla2x00_process_response_queue() - Process response queue entries. 2513 * @rsp: response queue 2514 */ 2515 void 2516 qla2x00_process_response_queue(struct rsp_que *rsp) 2517 { 2518 struct scsi_qla_host *vha; 2519 struct qla_hw_data *ha = rsp->hw; 2520 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2521 sts_entry_t *pkt; 2522 2523 vha = pci_get_drvdata(ha->pdev); 2524 2525 if (!vha->flags.online) 2526 return; 2527 2528 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2529 pkt = (sts_entry_t *)rsp->ring_ptr; 2530 2531 rsp->ring_index++; 2532 if (rsp->ring_index == rsp->length) { 2533 rsp->ring_index = 0; 2534 rsp->ring_ptr = rsp->ring; 2535 } else { 2536 rsp->ring_ptr++; 2537 } 2538 2539 if (pkt->entry_status != 0) { 2540 qla2x00_error_entry(vha, rsp, pkt); 2541 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2542 wmb(); 2543 continue; 2544 } 2545 2546 qla2x00_process_response_entry(vha, rsp, pkt); 2547 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2548 wmb(); 2549 } 2550 2551 /* Adjust ring index */ 2552 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 2553 } 2554 2555 static inline void 2556 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2557 uint32_t sense_len, struct rsp_que *rsp, int res) 2558 { 2559 struct scsi_qla_host *vha = sp->vha; 2560 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2561 uint32_t track_sense_len; 2562 2563 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2564 sense_len = SCSI_SENSE_BUFFERSIZE; 2565 2566 SET_CMD_SENSE_LEN(sp, sense_len); 2567 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2568 track_sense_len = sense_len; 2569 2570 if (sense_len > par_sense_len) 2571 sense_len = par_sense_len; 2572 2573 memcpy(cp->sense_buffer, sense_data, sense_len); 2574 2575 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2576 track_sense_len -= sense_len; 2577 SET_CMD_SENSE_LEN(sp, track_sense_len); 2578 2579 if (track_sense_len != 0) { 2580 rsp->status_srb = sp; 2581 cp->result = res; 2582 } 2583 2584 if (sense_len) { 2585 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 2586 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 2587 sp->vha->host_no, cp->device->id, cp->device->lun, 2588 cp); 2589 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 2590 cp->sense_buffer, sense_len); 2591 } 2592 } 2593 2594 struct scsi_dif_tuple { 2595 __be16 guard; /* Checksum */ 2596 __be16 app_tag; /* APPL identifier */ 2597 __be32 ref_tag; /* Target LBA or indirect LBA */ 2598 }; 2599 2600 /* 2601 * Checks the guard or meta-data for the type of error 2602 * detected by the HBA. In case of errors, we set the 2603 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 2604 * to indicate to the kernel that the HBA detected error. 2605 */ 2606 static inline int 2607 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 2608 { 2609 struct scsi_qla_host *vha = sp->vha; 2610 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2611 uint8_t *ap = &sts24->data[12]; 2612 uint8_t *ep = &sts24->data[20]; 2613 uint32_t e_ref_tag, a_ref_tag; 2614 uint16_t e_app_tag, a_app_tag; 2615 uint16_t e_guard, a_guard; 2616 2617 /* 2618 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 2619 * would make guard field appear at offset 2 2620 */ 2621 a_guard = get_unaligned_le16(ap + 2); 2622 a_app_tag = get_unaligned_le16(ap + 0); 2623 a_ref_tag = get_unaligned_le32(ap + 4); 2624 e_guard = get_unaligned_le16(ep + 2); 2625 e_app_tag = get_unaligned_le16(ep + 0); 2626 e_ref_tag = get_unaligned_le32(ep + 4); 2627 2628 ql_dbg(ql_dbg_io, vha, 0x3023, 2629 "iocb(s) %p Returned STATUS.\n", sts24); 2630 2631 ql_dbg(ql_dbg_io, vha, 0x3024, 2632 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 2633 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 2634 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 2635 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 2636 a_app_tag, e_app_tag, a_guard, e_guard); 2637 2638 /* 2639 * Ignore sector if: 2640 * For type 3: ref & app tag is all 'f's 2641 * For type 0,1,2: app tag is all 'f's 2642 */ 2643 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && 2644 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || 2645 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { 2646 uint32_t blocks_done, resid; 2647 sector_t lba_s = scsi_get_lba(cmd); 2648 2649 /* 2TB boundary case covered automatically with this */ 2650 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 2651 2652 resid = scsi_bufflen(cmd) - (blocks_done * 2653 cmd->device->sector_size); 2654 2655 scsi_set_resid(cmd, resid); 2656 cmd->result = DID_OK << 16; 2657 2658 /* Update protection tag */ 2659 if (scsi_prot_sg_count(cmd)) { 2660 uint32_t i, j = 0, k = 0, num_ent; 2661 struct scatterlist *sg; 2662 struct t10_pi_tuple *spt; 2663 2664 /* Patch the corresponding protection tags */ 2665 scsi_for_each_prot_sg(cmd, sg, 2666 scsi_prot_sg_count(cmd), i) { 2667 num_ent = sg_dma_len(sg) / 8; 2668 if (k + num_ent < blocks_done) { 2669 k += num_ent; 2670 continue; 2671 } 2672 j = blocks_done - k - 1; 2673 k = blocks_done; 2674 break; 2675 } 2676 2677 if (k != blocks_done) { 2678 ql_log(ql_log_warn, vha, 0x302f, 2679 "unexpected tag values tag:lba=%x:%llx)\n", 2680 e_ref_tag, (unsigned long long)lba_s); 2681 return 1; 2682 } 2683 2684 spt = page_address(sg_page(sg)) + sg->offset; 2685 spt += j; 2686 2687 spt->app_tag = T10_PI_APP_ESCAPE; 2688 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 2689 spt->ref_tag = T10_PI_REF_ESCAPE; 2690 } 2691 2692 return 0; 2693 } 2694 2695 /* check guard */ 2696 if (e_guard != a_guard) { 2697 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2698 set_host_byte(cmd, DID_ABORT); 2699 return 1; 2700 } 2701 2702 /* check ref tag */ 2703 if (e_ref_tag != a_ref_tag) { 2704 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2705 set_host_byte(cmd, DID_ABORT); 2706 return 1; 2707 } 2708 2709 /* check appl tag */ 2710 if (e_app_tag != a_app_tag) { 2711 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2712 set_host_byte(cmd, DID_ABORT); 2713 return 1; 2714 } 2715 2716 return 1; 2717 } 2718 2719 static void 2720 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 2721 struct req_que *req, uint32_t index) 2722 { 2723 struct qla_hw_data *ha = vha->hw; 2724 srb_t *sp; 2725 uint16_t comp_status; 2726 uint16_t scsi_status; 2727 uint16_t thread_id; 2728 uint32_t rval = EXT_STATUS_OK; 2729 struct bsg_job *bsg_job = NULL; 2730 struct fc_bsg_request *bsg_request; 2731 struct fc_bsg_reply *bsg_reply; 2732 sts_entry_t *sts = pkt; 2733 struct sts_entry_24xx *sts24 = pkt; 2734 2735 /* Validate handle. */ 2736 if (index >= req->num_outstanding_cmds) { 2737 ql_log(ql_log_warn, vha, 0x70af, 2738 "Invalid SCSI completion handle 0x%x.\n", index); 2739 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2740 return; 2741 } 2742 2743 sp = req->outstanding_cmds[index]; 2744 if (!sp) { 2745 ql_log(ql_log_warn, vha, 0x70b0, 2746 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 2747 req->id, index); 2748 2749 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2750 return; 2751 } 2752 2753 /* Free outstanding command slot. */ 2754 req->outstanding_cmds[index] = NULL; 2755 bsg_job = sp->u.bsg_job; 2756 bsg_request = bsg_job->request; 2757 bsg_reply = bsg_job->reply; 2758 2759 if (IS_FWI2_CAPABLE(ha)) { 2760 comp_status = le16_to_cpu(sts24->comp_status); 2761 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2762 } else { 2763 comp_status = le16_to_cpu(sts->comp_status); 2764 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2765 } 2766 2767 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 2768 switch (comp_status) { 2769 case CS_COMPLETE: 2770 if (scsi_status == 0) { 2771 bsg_reply->reply_payload_rcv_len = 2772 bsg_job->reply_payload.payload_len; 2773 vha->qla_stats.input_bytes += 2774 bsg_reply->reply_payload_rcv_len; 2775 vha->qla_stats.input_requests++; 2776 rval = EXT_STATUS_OK; 2777 } 2778 goto done; 2779 2780 case CS_DATA_OVERRUN: 2781 ql_dbg(ql_dbg_user, vha, 0x70b1, 2782 "Command completed with data overrun thread_id=%d\n", 2783 thread_id); 2784 rval = EXT_STATUS_DATA_OVERRUN; 2785 break; 2786 2787 case CS_DATA_UNDERRUN: 2788 ql_dbg(ql_dbg_user, vha, 0x70b2, 2789 "Command completed with data underrun thread_id=%d\n", 2790 thread_id); 2791 rval = EXT_STATUS_DATA_UNDERRUN; 2792 break; 2793 case CS_BIDIR_RD_OVERRUN: 2794 ql_dbg(ql_dbg_user, vha, 0x70b3, 2795 "Command completed with read data overrun thread_id=%d\n", 2796 thread_id); 2797 rval = EXT_STATUS_DATA_OVERRUN; 2798 break; 2799 2800 case CS_BIDIR_RD_WR_OVERRUN: 2801 ql_dbg(ql_dbg_user, vha, 0x70b4, 2802 "Command completed with read and write data overrun " 2803 "thread_id=%d\n", thread_id); 2804 rval = EXT_STATUS_DATA_OVERRUN; 2805 break; 2806 2807 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 2808 ql_dbg(ql_dbg_user, vha, 0x70b5, 2809 "Command completed with read data over and write data " 2810 "underrun thread_id=%d\n", thread_id); 2811 rval = EXT_STATUS_DATA_OVERRUN; 2812 break; 2813 2814 case CS_BIDIR_RD_UNDERRUN: 2815 ql_dbg(ql_dbg_user, vha, 0x70b6, 2816 "Command completed with read data underrun " 2817 "thread_id=%d\n", thread_id); 2818 rval = EXT_STATUS_DATA_UNDERRUN; 2819 break; 2820 2821 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 2822 ql_dbg(ql_dbg_user, vha, 0x70b7, 2823 "Command completed with read data under and write data " 2824 "overrun thread_id=%d\n", thread_id); 2825 rval = EXT_STATUS_DATA_UNDERRUN; 2826 break; 2827 2828 case CS_BIDIR_RD_WR_UNDERRUN: 2829 ql_dbg(ql_dbg_user, vha, 0x70b8, 2830 "Command completed with read and write data underrun " 2831 "thread_id=%d\n", thread_id); 2832 rval = EXT_STATUS_DATA_UNDERRUN; 2833 break; 2834 2835 case CS_BIDIR_DMA: 2836 ql_dbg(ql_dbg_user, vha, 0x70b9, 2837 "Command completed with data DMA error thread_id=%d\n", 2838 thread_id); 2839 rval = EXT_STATUS_DMA_ERR; 2840 break; 2841 2842 case CS_TIMEOUT: 2843 ql_dbg(ql_dbg_user, vha, 0x70ba, 2844 "Command completed with timeout thread_id=%d\n", 2845 thread_id); 2846 rval = EXT_STATUS_TIMEOUT; 2847 break; 2848 default: 2849 ql_dbg(ql_dbg_user, vha, 0x70bb, 2850 "Command completed with completion status=0x%x " 2851 "thread_id=%d\n", comp_status, thread_id); 2852 rval = EXT_STATUS_ERR; 2853 break; 2854 } 2855 bsg_reply->reply_payload_rcv_len = 0; 2856 2857 done: 2858 /* Return the vendor specific reply to API */ 2859 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2860 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2861 /* Always return DID_OK, bsg will send the vendor specific response 2862 * in this case only */ 2863 sp->done(sp, DID_OK << 16); 2864 2865 } 2866 2867 /** 2868 * qla2x00_status_entry() - Process a Status IOCB entry. 2869 * @vha: SCSI driver HA context 2870 * @rsp: response queue 2871 * @pkt: Entry pointer 2872 */ 2873 static void 2874 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2875 { 2876 srb_t *sp; 2877 fc_port_t *fcport; 2878 struct scsi_cmnd *cp; 2879 sts_entry_t *sts = pkt; 2880 struct sts_entry_24xx *sts24 = pkt; 2881 uint16_t comp_status; 2882 uint16_t scsi_status; 2883 uint16_t ox_id; 2884 uint8_t lscsi_status; 2885 int32_t resid; 2886 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2887 fw_resid_len; 2888 uint8_t *rsp_info, *sense_data; 2889 struct qla_hw_data *ha = vha->hw; 2890 uint32_t handle; 2891 uint16_t que; 2892 struct req_que *req; 2893 int logit = 1; 2894 int res = 0; 2895 uint16_t state_flags = 0; 2896 uint16_t sts_qual = 0; 2897 2898 if (IS_FWI2_CAPABLE(ha)) { 2899 comp_status = le16_to_cpu(sts24->comp_status); 2900 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2901 state_flags = le16_to_cpu(sts24->state_flags); 2902 } else { 2903 comp_status = le16_to_cpu(sts->comp_status); 2904 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2905 } 2906 handle = (uint32_t) LSW(sts->handle); 2907 que = MSW(sts->handle); 2908 req = ha->req_q_map[que]; 2909 2910 /* Check for invalid queue pointer */ 2911 if (req == NULL || 2912 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2913 ql_dbg(ql_dbg_io, vha, 0x3059, 2914 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2915 "que=%u.\n", sts->handle, req, que); 2916 return; 2917 } 2918 2919 /* Validate handle. */ 2920 if (handle < req->num_outstanding_cmds) { 2921 sp = req->outstanding_cmds[handle]; 2922 if (!sp) { 2923 ql_dbg(ql_dbg_io, vha, 0x3075, 2924 "%s(%ld): Already returned command for status handle (0x%x).\n", 2925 __func__, vha->host_no, sts->handle); 2926 return; 2927 } 2928 } else { 2929 ql_dbg(ql_dbg_io, vha, 0x3017, 2930 "Invalid status handle, out of range (0x%x).\n", 2931 sts->handle); 2932 2933 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2934 if (IS_P3P_TYPE(ha)) 2935 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2936 else 2937 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2938 qla2xxx_wake_dpc(vha); 2939 } 2940 return; 2941 } 2942 qla_put_iocbs(sp->qpair, &sp->iores); 2943 2944 if (sp->cmd_type != TYPE_SRB) { 2945 req->outstanding_cmds[handle] = NULL; 2946 ql_dbg(ql_dbg_io, vha, 0x3015, 2947 "Unknown sp->cmd_type %x %p).\n", 2948 sp->cmd_type, sp); 2949 return; 2950 } 2951 2952 /* NVME completion. */ 2953 if (sp->type == SRB_NVME_CMD) { 2954 req->outstanding_cmds[handle] = NULL; 2955 qla24xx_nvme_iocb_entry(vha, req, pkt, sp); 2956 return; 2957 } 2958 2959 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2960 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2961 return; 2962 } 2963 2964 /* Task Management completion. */ 2965 if (sp->type == SRB_TM_CMD) { 2966 qla24xx_tm_iocb_entry(vha, req, pkt); 2967 return; 2968 } 2969 2970 /* Fast path completion. */ 2971 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2972 qla2x00_process_completed_request(vha, req, handle); 2973 2974 return; 2975 } 2976 2977 req->outstanding_cmds[handle] = NULL; 2978 cp = GET_CMD_SP(sp); 2979 if (cp == NULL) { 2980 ql_dbg(ql_dbg_io, vha, 0x3018, 2981 "Command already returned (0x%x/%p).\n", 2982 sts->handle, sp); 2983 2984 return; 2985 } 2986 2987 lscsi_status = scsi_status & STATUS_MASK; 2988 2989 fcport = sp->fcport; 2990 2991 ox_id = 0; 2992 sense_len = par_sense_len = rsp_info_len = resid_len = 2993 fw_resid_len = 0; 2994 if (IS_FWI2_CAPABLE(ha)) { 2995 if (scsi_status & SS_SENSE_LEN_VALID) 2996 sense_len = le32_to_cpu(sts24->sense_len); 2997 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2998 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2999 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 3000 resid_len = le32_to_cpu(sts24->rsp_residual_count); 3001 if (comp_status == CS_DATA_UNDERRUN) 3002 fw_resid_len = le32_to_cpu(sts24->residual_len); 3003 rsp_info = sts24->data; 3004 sense_data = sts24->data; 3005 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 3006 ox_id = le16_to_cpu(sts24->ox_id); 3007 par_sense_len = sizeof(sts24->data); 3008 sts_qual = le16_to_cpu(sts24->status_qualifier); 3009 } else { 3010 if (scsi_status & SS_SENSE_LEN_VALID) 3011 sense_len = le16_to_cpu(sts->req_sense_length); 3012 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3013 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 3014 resid_len = le32_to_cpu(sts->residual_length); 3015 rsp_info = sts->rsp_info; 3016 sense_data = sts->req_sense_data; 3017 par_sense_len = sizeof(sts->req_sense_data); 3018 } 3019 3020 /* Check for any FCP transport errors. */ 3021 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 3022 /* Sense data lies beyond any FCP RESPONSE data. */ 3023 if (IS_FWI2_CAPABLE(ha)) { 3024 sense_data += rsp_info_len; 3025 par_sense_len -= rsp_info_len; 3026 } 3027 if (rsp_info_len > 3 && rsp_info[3]) { 3028 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 3029 "FCP I/O protocol failure (0x%x/0x%x).\n", 3030 rsp_info_len, rsp_info[3]); 3031 3032 res = DID_BUS_BUSY << 16; 3033 goto out; 3034 } 3035 } 3036 3037 /* Check for overrun. */ 3038 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 3039 scsi_status & SS_RESIDUAL_OVER) 3040 comp_status = CS_DATA_OVERRUN; 3041 3042 /* 3043 * Check retry_delay_timer value if we receive a busy or 3044 * queue full. 3045 */ 3046 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL || 3047 lscsi_status == SAM_STAT_BUSY)) 3048 qla2x00_set_retry_delay_timestamp(fcport, sts_qual); 3049 3050 /* 3051 * Based on Host and scsi status generate status code for Linux 3052 */ 3053 switch (comp_status) { 3054 case CS_COMPLETE: 3055 case CS_QUEUE_FULL: 3056 if (scsi_status == 0) { 3057 res = DID_OK << 16; 3058 break; 3059 } 3060 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 3061 resid = resid_len; 3062 scsi_set_resid(cp, resid); 3063 3064 if (!lscsi_status && 3065 ((unsigned)(scsi_bufflen(cp) - resid) < 3066 cp->underflow)) { 3067 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 3068 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3069 resid, scsi_bufflen(cp)); 3070 3071 res = DID_ERROR << 16; 3072 break; 3073 } 3074 } 3075 res = DID_OK << 16 | lscsi_status; 3076 3077 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3078 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 3079 "QUEUE FULL detected.\n"); 3080 break; 3081 } 3082 logit = 0; 3083 if (lscsi_status != SS_CHECK_CONDITION) 3084 break; 3085 3086 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3087 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3088 break; 3089 3090 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 3091 rsp, res); 3092 break; 3093 3094 case CS_DATA_UNDERRUN: 3095 /* Use F/W calculated residual length. */ 3096 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 3097 scsi_set_resid(cp, resid); 3098 if (scsi_status & SS_RESIDUAL_UNDER) { 3099 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 3100 ql_log(ql_log_warn, fcport->vha, 0x301d, 3101 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3102 resid, scsi_bufflen(cp)); 3103 3104 vha->interface_err_cnt++; 3105 3106 res = DID_ERROR << 16 | lscsi_status; 3107 goto check_scsi_status; 3108 } 3109 3110 if (!lscsi_status && 3111 ((unsigned)(scsi_bufflen(cp) - resid) < 3112 cp->underflow)) { 3113 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 3114 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3115 resid, scsi_bufflen(cp)); 3116 3117 res = DID_ERROR << 16; 3118 break; 3119 } 3120 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 3121 lscsi_status != SAM_STAT_BUSY) { 3122 /* 3123 * scsi status of task set and busy are considered to be 3124 * task not completed. 3125 */ 3126 3127 ql_log(ql_log_warn, fcport->vha, 0x301f, 3128 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3129 resid, scsi_bufflen(cp)); 3130 3131 vha->interface_err_cnt++; 3132 3133 res = DID_ERROR << 16 | lscsi_status; 3134 goto check_scsi_status; 3135 } else { 3136 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 3137 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 3138 scsi_status, lscsi_status); 3139 } 3140 3141 res = DID_OK << 16 | lscsi_status; 3142 logit = 0; 3143 3144 check_scsi_status: 3145 /* 3146 * Check to see if SCSI Status is non zero. If so report SCSI 3147 * Status. 3148 */ 3149 if (lscsi_status != 0) { 3150 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3151 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 3152 "QUEUE FULL detected.\n"); 3153 logit = 1; 3154 break; 3155 } 3156 if (lscsi_status != SS_CHECK_CONDITION) 3157 break; 3158 3159 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3160 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3161 break; 3162 3163 qla2x00_handle_sense(sp, sense_data, par_sense_len, 3164 sense_len, rsp, res); 3165 } 3166 break; 3167 3168 case CS_PORT_LOGGED_OUT: 3169 case CS_PORT_CONFIG_CHG: 3170 case CS_PORT_BUSY: 3171 case CS_INCOMPLETE: 3172 case CS_PORT_UNAVAILABLE: 3173 case CS_TIMEOUT: 3174 case CS_RESET: 3175 3176 /* 3177 * We are going to have the fc class block the rport 3178 * while we try to recover so instruct the mid layer 3179 * to requeue until the class decides how to handle this. 3180 */ 3181 res = DID_TRANSPORT_DISRUPTED << 16; 3182 3183 if (comp_status == CS_TIMEOUT) { 3184 if (IS_FWI2_CAPABLE(ha)) 3185 break; 3186 else if ((le16_to_cpu(sts->status_flags) & 3187 SF_LOGOUT_SENT) == 0) 3188 break; 3189 } 3190 3191 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3192 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 3193 "Port to be marked lost on fcport=%02x%02x%02x, current " 3194 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 3195 fcport->d_id.b.area, fcport->d_id.b.al_pa, 3196 port_state_str[FCS_ONLINE], 3197 comp_status); 3198 3199 qlt_schedule_sess_for_deletion(fcport); 3200 } 3201 3202 break; 3203 3204 case CS_ABORTED: 3205 res = DID_RESET << 16; 3206 break; 3207 3208 case CS_DIF_ERROR: 3209 logit = qla2x00_handle_dif_error(sp, sts24); 3210 res = cp->result; 3211 break; 3212 3213 case CS_TRANSPORT: 3214 res = DID_ERROR << 16; 3215 vha->hw_err_cnt++; 3216 3217 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 3218 break; 3219 3220 if (state_flags & BIT_4) 3221 scmd_printk(KERN_WARNING, cp, 3222 "Unsupported device '%s' found.\n", 3223 cp->device->vendor); 3224 break; 3225 3226 case CS_DMA: 3227 ql_log(ql_log_info, fcport->vha, 0x3022, 3228 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3229 comp_status, scsi_status, res, vha->host_no, 3230 cp->device->id, cp->device->lun, fcport->d_id.b24, 3231 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3232 resid_len, fw_resid_len, sp, cp); 3233 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, 3234 pkt, sizeof(*sts24)); 3235 res = DID_ERROR << 16; 3236 vha->hw_err_cnt++; 3237 break; 3238 default: 3239 res = DID_ERROR << 16; 3240 break; 3241 } 3242 3243 out: 3244 if (logit) 3245 ql_log(ql_log_warn, fcport->vha, 0x3022, 3246 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3247 comp_status, scsi_status, res, vha->host_no, 3248 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 3249 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 3250 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3251 resid_len, fw_resid_len, sp, cp); 3252 3253 if (rsp->status_srb == NULL) 3254 sp->done(sp, res); 3255 } 3256 3257 /** 3258 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 3259 * @rsp: response queue 3260 * @pkt: Entry pointer 3261 * 3262 * Extended sense data. 3263 */ 3264 static void 3265 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 3266 { 3267 uint8_t sense_sz = 0; 3268 struct qla_hw_data *ha = rsp->hw; 3269 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 3270 srb_t *sp = rsp->status_srb; 3271 struct scsi_cmnd *cp; 3272 uint32_t sense_len; 3273 uint8_t *sense_ptr; 3274 3275 if (!sp || !GET_CMD_SENSE_LEN(sp)) 3276 return; 3277 3278 sense_len = GET_CMD_SENSE_LEN(sp); 3279 sense_ptr = GET_CMD_SENSE_PTR(sp); 3280 3281 cp = GET_CMD_SP(sp); 3282 if (cp == NULL) { 3283 ql_log(ql_log_warn, vha, 0x3025, 3284 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 3285 3286 rsp->status_srb = NULL; 3287 return; 3288 } 3289 3290 if (sense_len > sizeof(pkt->data)) 3291 sense_sz = sizeof(pkt->data); 3292 else 3293 sense_sz = sense_len; 3294 3295 /* Move sense data. */ 3296 if (IS_FWI2_CAPABLE(ha)) 3297 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 3298 memcpy(sense_ptr, pkt->data, sense_sz); 3299 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 3300 sense_ptr, sense_sz); 3301 3302 sense_len -= sense_sz; 3303 sense_ptr += sense_sz; 3304 3305 SET_CMD_SENSE_PTR(sp, sense_ptr); 3306 SET_CMD_SENSE_LEN(sp, sense_len); 3307 3308 /* Place command on done queue. */ 3309 if (sense_len == 0) { 3310 rsp->status_srb = NULL; 3311 sp->done(sp, cp->result); 3312 } 3313 } 3314 3315 /** 3316 * qla2x00_error_entry() - Process an error entry. 3317 * @vha: SCSI driver HA context 3318 * @rsp: response queue 3319 * @pkt: Entry pointer 3320 * return : 1=allow further error analysis. 0=no additional error analysis. 3321 */ 3322 static int 3323 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 3324 { 3325 srb_t *sp; 3326 struct qla_hw_data *ha = vha->hw; 3327 const char func[] = "ERROR-IOCB"; 3328 uint16_t que = MSW(pkt->handle); 3329 struct req_que *req = NULL; 3330 int res = DID_ERROR << 16; 3331 3332 ql_dbg(ql_dbg_async, vha, 0x502a, 3333 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", 3334 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); 3335 3336 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 3337 goto fatal; 3338 3339 req = ha->req_q_map[que]; 3340 3341 if (pkt->entry_status & RF_BUSY) 3342 res = DID_BUS_BUSY << 16; 3343 3344 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) 3345 return 0; 3346 3347 switch (pkt->entry_type) { 3348 case NOTIFY_ACK_TYPE: 3349 case STATUS_TYPE: 3350 case STATUS_CONT_TYPE: 3351 case LOGINOUT_PORT_IOCB_TYPE: 3352 case CT_IOCB_TYPE: 3353 case ELS_IOCB_TYPE: 3354 case ABORT_IOCB_TYPE: 3355 case MBX_IOCB_TYPE: 3356 default: 3357 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3358 if (sp) { 3359 qla_put_iocbs(sp->qpair, &sp->iores); 3360 sp->done(sp, res); 3361 return 0; 3362 } 3363 break; 3364 3365 case ABTS_RESP_24XX: 3366 case CTIO_TYPE7: 3367 case CTIO_CRC2: 3368 return 1; 3369 } 3370 fatal: 3371 ql_log(ql_log_warn, vha, 0x5030, 3372 "Error entry - invalid handle/queue (%04x).\n", que); 3373 return 0; 3374 } 3375 3376 /** 3377 * qla24xx_mbx_completion() - Process mailbox command completions. 3378 * @vha: SCSI driver HA context 3379 * @mb0: Mailbox0 register 3380 */ 3381 static void 3382 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 3383 { 3384 uint16_t cnt; 3385 uint32_t mboxes; 3386 __le16 __iomem *wptr; 3387 struct qla_hw_data *ha = vha->hw; 3388 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3389 3390 /* Read all mbox registers? */ 3391 WARN_ON_ONCE(ha->mbx_count > 32); 3392 mboxes = (1ULL << ha->mbx_count) - 1; 3393 if (!ha->mcp) 3394 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 3395 else 3396 mboxes = ha->mcp->in_mb; 3397 3398 /* Load return mailbox registers. */ 3399 ha->flags.mbox_int = 1; 3400 ha->mailbox_out[0] = mb0; 3401 mboxes >>= 1; 3402 wptr = ®->mailbox1; 3403 3404 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 3405 if (mboxes & BIT_0) 3406 ha->mailbox_out[cnt] = rd_reg_word(wptr); 3407 3408 mboxes >>= 1; 3409 wptr++; 3410 } 3411 } 3412 3413 static void 3414 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 3415 struct abort_entry_24xx *pkt) 3416 { 3417 const char func[] = "ABT_IOCB"; 3418 srb_t *sp; 3419 srb_t *orig_sp = NULL; 3420 struct srb_iocb *abt; 3421 3422 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3423 if (!sp) 3424 return; 3425 3426 abt = &sp->u.iocb_cmd; 3427 abt->u.abt.comp_status = pkt->comp_status; 3428 orig_sp = sp->cmd_sp; 3429 /* Need to pass original sp */ 3430 if (orig_sp) 3431 qla_nvme_abort_process_comp_status(pkt, orig_sp); 3432 3433 sp->done(sp, 0); 3434 } 3435 3436 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, 3437 struct pt_ls4_request *pkt, struct req_que *req) 3438 { 3439 srb_t *sp; 3440 const char func[] = "LS4_IOCB"; 3441 uint16_t comp_status; 3442 3443 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3444 if (!sp) 3445 return; 3446 3447 comp_status = le16_to_cpu(pkt->status); 3448 sp->done(sp, comp_status); 3449 } 3450 3451 /** 3452 * qla24xx_process_response_queue() - Process response queue entries. 3453 * @vha: SCSI driver HA context 3454 * @rsp: response queue 3455 */ 3456 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 3457 struct rsp_que *rsp) 3458 { 3459 struct sts_entry_24xx *pkt; 3460 struct qla_hw_data *ha = vha->hw; 3461 struct purex_entry_24xx *purex_entry; 3462 struct purex_item *pure_item; 3463 3464 if (!ha->flags.fw_started) 3465 return; 3466 3467 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { 3468 rsp->qpair->rcv_intr = 1; 3469 qla_cpu_update(rsp->qpair, smp_processor_id()); 3470 } 3471 3472 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 3473 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 3474 3475 rsp->ring_index++; 3476 if (rsp->ring_index == rsp->length) { 3477 rsp->ring_index = 0; 3478 rsp->ring_ptr = rsp->ring; 3479 } else { 3480 rsp->ring_ptr++; 3481 } 3482 3483 if (pkt->entry_status != 0) { 3484 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) 3485 goto process_err; 3486 3487 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3488 wmb(); 3489 continue; 3490 } 3491 process_err: 3492 3493 switch (pkt->entry_type) { 3494 case STATUS_TYPE: 3495 qla2x00_status_entry(vha, rsp, pkt); 3496 break; 3497 case STATUS_CONT_TYPE: 3498 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 3499 break; 3500 case VP_RPT_ID_IOCB_TYPE: 3501 qla24xx_report_id_acquisition(vha, 3502 (struct vp_rpt_id_entry_24xx *)pkt); 3503 break; 3504 case LOGINOUT_PORT_IOCB_TYPE: 3505 qla24xx_logio_entry(vha, rsp->req, 3506 (struct logio_entry_24xx *)pkt); 3507 break; 3508 case CT_IOCB_TYPE: 3509 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 3510 break; 3511 case ELS_IOCB_TYPE: 3512 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 3513 break; 3514 case ABTS_RECV_24XX: 3515 if (qla_ini_mode_enabled(vha)) { 3516 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3517 if (!pure_item) 3518 break; 3519 qla24xx_queue_purex_item(vha, pure_item, 3520 qla24xx_process_abts); 3521 break; 3522 } 3523 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3524 IS_QLA28XX(ha)) { 3525 /* ensure that the ATIO queue is empty */ 3526 qlt_handle_abts_recv(vha, rsp, 3527 (response_t *)pkt); 3528 break; 3529 } else { 3530 qlt_24xx_process_atio_queue(vha, 1); 3531 } 3532 fallthrough; 3533 case ABTS_RESP_24XX: 3534 case CTIO_TYPE7: 3535 case CTIO_CRC2: 3536 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); 3537 break; 3538 case PT_LS4_REQUEST: 3539 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, 3540 rsp->req); 3541 break; 3542 case NOTIFY_ACK_TYPE: 3543 if (pkt->handle == QLA_TGT_SKIP_HANDLE) 3544 qlt_response_pkt_all_vps(vha, rsp, 3545 (response_t *)pkt); 3546 else 3547 qla24xxx_nack_iocb_entry(vha, rsp->req, 3548 (struct nack_to_isp *)pkt); 3549 break; 3550 case MARKER_TYPE: 3551 /* Do nothing in this case, this check is to prevent it 3552 * from falling into default case 3553 */ 3554 break; 3555 case ABORT_IOCB_TYPE: 3556 qla24xx_abort_iocb_entry(vha, rsp->req, 3557 (struct abort_entry_24xx *)pkt); 3558 break; 3559 case MBX_IOCB_TYPE: 3560 qla24xx_mbx_iocb_entry(vha, rsp->req, 3561 (struct mbx_24xx_entry *)pkt); 3562 break; 3563 case VP_CTRL_IOCB_TYPE: 3564 qla_ctrlvp_completed(vha, rsp->req, 3565 (struct vp_ctrl_entry_24xx *)pkt); 3566 break; 3567 case PUREX_IOCB_TYPE: 3568 purex_entry = (void *)pkt; 3569 switch (purex_entry->els_frame_payload[3]) { 3570 case ELS_RDP: 3571 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3572 if (!pure_item) 3573 break; 3574 qla24xx_queue_purex_item(vha, pure_item, 3575 qla24xx_process_purex_rdp); 3576 break; 3577 case ELS_FPIN: 3578 if (!vha->hw->flags.scm_enabled) { 3579 ql_log(ql_log_warn, vha, 0x5094, 3580 "SCM not active for this port\n"); 3581 break; 3582 } 3583 pure_item = qla27xx_copy_fpin_pkt(vha, 3584 (void **)&pkt, &rsp); 3585 if (!pure_item) 3586 break; 3587 qla24xx_queue_purex_item(vha, pure_item, 3588 qla27xx_process_purex_fpin); 3589 break; 3590 3591 default: 3592 ql_log(ql_log_warn, vha, 0x509c, 3593 "Discarding ELS Request opcode 0x%x\n", 3594 purex_entry->els_frame_payload[3]); 3595 } 3596 break; 3597 default: 3598 /* Type Not Supported. */ 3599 ql_dbg(ql_dbg_async, vha, 0x5042, 3600 "Received unknown response pkt type 0x%x entry status=%x.\n", 3601 pkt->entry_type, pkt->entry_status); 3602 break; 3603 } 3604 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3605 wmb(); 3606 } 3607 3608 /* Adjust ring index */ 3609 if (IS_P3P_TYPE(ha)) { 3610 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 3611 3612 wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index); 3613 } else { 3614 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); 3615 } 3616 } 3617 3618 static void 3619 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 3620 { 3621 int rval; 3622 uint32_t cnt; 3623 struct qla_hw_data *ha = vha->hw; 3624 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3625 3626 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3627 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3628 return; 3629 3630 rval = QLA_SUCCESS; 3631 wrt_reg_dword(®->iobase_addr, 0x7C00); 3632 rd_reg_dword(®->iobase_addr); 3633 wrt_reg_dword(®->iobase_window, 0x0001); 3634 for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3635 rval == QLA_SUCCESS; cnt--) { 3636 if (cnt) { 3637 wrt_reg_dword(®->iobase_window, 0x0001); 3638 udelay(10); 3639 } else 3640 rval = QLA_FUNCTION_TIMEOUT; 3641 } 3642 if (rval == QLA_SUCCESS) 3643 goto next_test; 3644 3645 rval = QLA_SUCCESS; 3646 wrt_reg_dword(®->iobase_window, 0x0003); 3647 for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3648 rval == QLA_SUCCESS; cnt--) { 3649 if (cnt) { 3650 wrt_reg_dword(®->iobase_window, 0x0003); 3651 udelay(10); 3652 } else 3653 rval = QLA_FUNCTION_TIMEOUT; 3654 } 3655 if (rval != QLA_SUCCESS) 3656 goto done; 3657 3658 next_test: 3659 if (rd_reg_dword(®->iobase_c8) & BIT_3) 3660 ql_log(ql_log_info, vha, 0x504c, 3661 "Additional code -- 0x55AA.\n"); 3662 3663 done: 3664 wrt_reg_dword(®->iobase_window, 0x0000); 3665 rd_reg_dword(®->iobase_window); 3666 } 3667 3668 /** 3669 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 3670 * @irq: interrupt number 3671 * @dev_id: SCSI driver HA context 3672 * 3673 * Called by system whenever the host adapter generates an interrupt. 3674 * 3675 * Returns handled flag. 3676 */ 3677 irqreturn_t 3678 qla24xx_intr_handler(int irq, void *dev_id) 3679 { 3680 scsi_qla_host_t *vha; 3681 struct qla_hw_data *ha; 3682 struct device_reg_24xx __iomem *reg; 3683 int status; 3684 unsigned long iter; 3685 uint32_t stat; 3686 uint32_t hccr; 3687 uint16_t mb[8]; 3688 struct rsp_que *rsp; 3689 unsigned long flags; 3690 bool process_atio = false; 3691 3692 rsp = (struct rsp_que *) dev_id; 3693 if (!rsp) { 3694 ql_log(ql_log_info, NULL, 0x5059, 3695 "%s: NULL response queue pointer.\n", __func__); 3696 return IRQ_NONE; 3697 } 3698 3699 ha = rsp->hw; 3700 reg = &ha->iobase->isp24; 3701 status = 0; 3702 3703 if (unlikely(pci_channel_offline(ha->pdev))) 3704 return IRQ_HANDLED; 3705 3706 spin_lock_irqsave(&ha->hardware_lock, flags); 3707 vha = pci_get_drvdata(ha->pdev); 3708 for (iter = 50; iter--; ) { 3709 stat = rd_reg_dword(®->host_status); 3710 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3711 break; 3712 if (stat & HSRX_RISC_PAUSED) { 3713 if (unlikely(pci_channel_offline(ha->pdev))) 3714 break; 3715 3716 hccr = rd_reg_dword(®->hccr); 3717 3718 ql_log(ql_log_warn, vha, 0x504b, 3719 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3720 hccr); 3721 3722 qla2xxx_check_risc_status(vha); 3723 3724 ha->isp_ops->fw_dump(vha); 3725 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3726 break; 3727 } else if ((stat & HSRX_RISC_INT) == 0) 3728 break; 3729 3730 switch (stat & 0xff) { 3731 case INTR_ROM_MB_SUCCESS: 3732 case INTR_ROM_MB_FAILED: 3733 case INTR_MB_SUCCESS: 3734 case INTR_MB_FAILED: 3735 qla24xx_mbx_completion(vha, MSW(stat)); 3736 status |= MBX_INTERRUPT; 3737 3738 break; 3739 case INTR_ASYNC_EVENT: 3740 mb[0] = MSW(stat); 3741 mb[1] = rd_reg_word(®->mailbox1); 3742 mb[2] = rd_reg_word(®->mailbox2); 3743 mb[3] = rd_reg_word(®->mailbox3); 3744 qla2x00_async_event(vha, rsp, mb); 3745 break; 3746 case INTR_RSP_QUE_UPDATE: 3747 case INTR_RSP_QUE_UPDATE_83XX: 3748 qla24xx_process_response_queue(vha, rsp); 3749 break; 3750 case INTR_ATIO_QUE_UPDATE_27XX: 3751 case INTR_ATIO_QUE_UPDATE: 3752 process_atio = true; 3753 break; 3754 case INTR_ATIO_RSP_QUE_UPDATE: 3755 process_atio = true; 3756 qla24xx_process_response_queue(vha, rsp); 3757 break; 3758 default: 3759 ql_dbg(ql_dbg_async, vha, 0x504f, 3760 "Unrecognized interrupt type (%d).\n", stat * 0xff); 3761 break; 3762 } 3763 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3764 rd_reg_dword_relaxed(®->hccr); 3765 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 3766 ndelay(3500); 3767 } 3768 qla2x00_handle_mbx_completion(ha, status); 3769 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3770 3771 if (process_atio) { 3772 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3773 qlt_24xx_process_atio_queue(vha, 0); 3774 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3775 } 3776 3777 return IRQ_HANDLED; 3778 } 3779 3780 static irqreturn_t 3781 qla24xx_msix_rsp_q(int irq, void *dev_id) 3782 { 3783 struct qla_hw_data *ha; 3784 struct rsp_que *rsp; 3785 struct device_reg_24xx __iomem *reg; 3786 struct scsi_qla_host *vha; 3787 unsigned long flags; 3788 3789 rsp = (struct rsp_que *) dev_id; 3790 if (!rsp) { 3791 ql_log(ql_log_info, NULL, 0x505a, 3792 "%s: NULL response queue pointer.\n", __func__); 3793 return IRQ_NONE; 3794 } 3795 ha = rsp->hw; 3796 reg = &ha->iobase->isp24; 3797 3798 spin_lock_irqsave(&ha->hardware_lock, flags); 3799 3800 vha = pci_get_drvdata(ha->pdev); 3801 qla24xx_process_response_queue(vha, rsp); 3802 if (!ha->flags.disable_msix_handshake) { 3803 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3804 rd_reg_dword_relaxed(®->hccr); 3805 } 3806 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3807 3808 return IRQ_HANDLED; 3809 } 3810 3811 static irqreturn_t 3812 qla24xx_msix_default(int irq, void *dev_id) 3813 { 3814 scsi_qla_host_t *vha; 3815 struct qla_hw_data *ha; 3816 struct rsp_que *rsp; 3817 struct device_reg_24xx __iomem *reg; 3818 int status; 3819 uint32_t stat; 3820 uint32_t hccr; 3821 uint16_t mb[8]; 3822 unsigned long flags; 3823 bool process_atio = false; 3824 3825 rsp = (struct rsp_que *) dev_id; 3826 if (!rsp) { 3827 ql_log(ql_log_info, NULL, 0x505c, 3828 "%s: NULL response queue pointer.\n", __func__); 3829 return IRQ_NONE; 3830 } 3831 ha = rsp->hw; 3832 reg = &ha->iobase->isp24; 3833 status = 0; 3834 3835 spin_lock_irqsave(&ha->hardware_lock, flags); 3836 vha = pci_get_drvdata(ha->pdev); 3837 do { 3838 stat = rd_reg_dword(®->host_status); 3839 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3840 break; 3841 if (stat & HSRX_RISC_PAUSED) { 3842 if (unlikely(pci_channel_offline(ha->pdev))) 3843 break; 3844 3845 hccr = rd_reg_dword(®->hccr); 3846 3847 ql_log(ql_log_info, vha, 0x5050, 3848 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3849 hccr); 3850 3851 qla2xxx_check_risc_status(vha); 3852 vha->hw_err_cnt++; 3853 3854 ha->isp_ops->fw_dump(vha); 3855 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3856 break; 3857 } else if ((stat & HSRX_RISC_INT) == 0) 3858 break; 3859 3860 switch (stat & 0xff) { 3861 case INTR_ROM_MB_SUCCESS: 3862 case INTR_ROM_MB_FAILED: 3863 case INTR_MB_SUCCESS: 3864 case INTR_MB_FAILED: 3865 qla24xx_mbx_completion(vha, MSW(stat)); 3866 status |= MBX_INTERRUPT; 3867 3868 break; 3869 case INTR_ASYNC_EVENT: 3870 mb[0] = MSW(stat); 3871 mb[1] = rd_reg_word(®->mailbox1); 3872 mb[2] = rd_reg_word(®->mailbox2); 3873 mb[3] = rd_reg_word(®->mailbox3); 3874 qla2x00_async_event(vha, rsp, mb); 3875 break; 3876 case INTR_RSP_QUE_UPDATE: 3877 case INTR_RSP_QUE_UPDATE_83XX: 3878 qla24xx_process_response_queue(vha, rsp); 3879 break; 3880 case INTR_ATIO_QUE_UPDATE_27XX: 3881 case INTR_ATIO_QUE_UPDATE: 3882 process_atio = true; 3883 break; 3884 case INTR_ATIO_RSP_QUE_UPDATE: 3885 process_atio = true; 3886 qla24xx_process_response_queue(vha, rsp); 3887 break; 3888 default: 3889 ql_dbg(ql_dbg_async, vha, 0x5051, 3890 "Unrecognized interrupt type (%d).\n", stat & 0xff); 3891 break; 3892 } 3893 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3894 } while (0); 3895 qla2x00_handle_mbx_completion(ha, status); 3896 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3897 3898 if (process_atio) { 3899 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3900 qlt_24xx_process_atio_queue(vha, 0); 3901 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3902 } 3903 3904 return IRQ_HANDLED; 3905 } 3906 3907 irqreturn_t 3908 qla2xxx_msix_rsp_q(int irq, void *dev_id) 3909 { 3910 struct qla_hw_data *ha; 3911 struct qla_qpair *qpair; 3912 3913 qpair = dev_id; 3914 if (!qpair) { 3915 ql_log(ql_log_info, NULL, 0x505b, 3916 "%s: NULL response queue pointer.\n", __func__); 3917 return IRQ_NONE; 3918 } 3919 ha = qpair->hw; 3920 3921 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); 3922 3923 return IRQ_HANDLED; 3924 } 3925 3926 irqreturn_t 3927 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) 3928 { 3929 struct qla_hw_data *ha; 3930 struct qla_qpair *qpair; 3931 struct device_reg_24xx __iomem *reg; 3932 unsigned long flags; 3933 3934 qpair = dev_id; 3935 if (!qpair) { 3936 ql_log(ql_log_info, NULL, 0x505b, 3937 "%s: NULL response queue pointer.\n", __func__); 3938 return IRQ_NONE; 3939 } 3940 ha = qpair->hw; 3941 3942 reg = &ha->iobase->isp24; 3943 spin_lock_irqsave(&ha->hardware_lock, flags); 3944 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3945 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3946 3947 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); 3948 3949 return IRQ_HANDLED; 3950 } 3951 3952 /* Interrupt handling helpers. */ 3953 3954 struct qla_init_msix_entry { 3955 const char *name; 3956 irq_handler_t handler; 3957 }; 3958 3959 static const struct qla_init_msix_entry msix_entries[] = { 3960 { "default", qla24xx_msix_default }, 3961 { "rsp_q", qla24xx_msix_rsp_q }, 3962 { "atio_q", qla83xx_msix_atio_q }, 3963 { "qpair_multiq", qla2xxx_msix_rsp_q }, 3964 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, 3965 }; 3966 3967 static const struct qla_init_msix_entry qla82xx_msix_entries[] = { 3968 { "qla2xxx (default)", qla82xx_msix_default }, 3969 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3970 }; 3971 3972 static int 3973 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3974 { 3975 int i, ret; 3976 struct qla_msix_entry *qentry; 3977 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3978 int min_vecs = QLA_BASE_VECTORS; 3979 struct irq_affinity desc = { 3980 .pre_vectors = QLA_BASE_VECTORS, 3981 }; 3982 3983 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 3984 IS_ATIO_MSIX_CAPABLE(ha)) { 3985 desc.pre_vectors++; 3986 min_vecs++; 3987 } 3988 3989 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { 3990 /* user wants to control IRQ setting for target mode */ 3991 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, 3992 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), 3993 PCI_IRQ_MSIX); 3994 } else 3995 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, 3996 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), 3997 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 3998 &desc); 3999 4000 if (ret < 0) { 4001 ql_log(ql_log_fatal, vha, 0x00c7, 4002 "MSI-X: Failed to enable support, " 4003 "giving up -- %d/%d.\n", 4004 ha->msix_count, ret); 4005 goto msix_out; 4006 } else if (ret < ha->msix_count) { 4007 ql_log(ql_log_info, vha, 0x00c6, 4008 "MSI-X: Using %d vectors\n", ret); 4009 ha->msix_count = ret; 4010 /* Recalculate queue values */ 4011 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { 4012 ha->max_req_queues = ha->msix_count - 1; 4013 4014 /* ATIOQ needs 1 vector. That's 1 less QPair */ 4015 if (QLA_TGT_MODE_ENABLED()) 4016 ha->max_req_queues--; 4017 4018 ha->max_rsp_queues = ha->max_req_queues; 4019 4020 ha->max_qpairs = ha->max_req_queues - 1; 4021 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, 4022 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 4023 } 4024 } 4025 vha->irq_offset = desc.pre_vectors; 4026 ha->msix_entries = kcalloc(ha->msix_count, 4027 sizeof(struct qla_msix_entry), 4028 GFP_KERNEL); 4029 if (!ha->msix_entries) { 4030 ql_log(ql_log_fatal, vha, 0x00c8, 4031 "Failed to allocate memory for ha->msix_entries.\n"); 4032 ret = -ENOMEM; 4033 goto free_irqs; 4034 } 4035 ha->flags.msix_enabled = 1; 4036 4037 for (i = 0; i < ha->msix_count; i++) { 4038 qentry = &ha->msix_entries[i]; 4039 qentry->vector = pci_irq_vector(ha->pdev, i); 4040 qentry->entry = i; 4041 qentry->have_irq = 0; 4042 qentry->in_use = 0; 4043 qentry->handle = NULL; 4044 } 4045 4046 /* Enable MSI-X vectors for the base queue */ 4047 for (i = 0; i < QLA_BASE_VECTORS; i++) { 4048 qentry = &ha->msix_entries[i]; 4049 qentry->handle = rsp; 4050 rsp->msix = qentry; 4051 scnprintf(qentry->name, sizeof(qentry->name), 4052 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); 4053 if (IS_P3P_TYPE(ha)) 4054 ret = request_irq(qentry->vector, 4055 qla82xx_msix_entries[i].handler, 4056 0, qla82xx_msix_entries[i].name, rsp); 4057 else 4058 ret = request_irq(qentry->vector, 4059 msix_entries[i].handler, 4060 0, qentry->name, rsp); 4061 if (ret) 4062 goto msix_register_fail; 4063 qentry->have_irq = 1; 4064 qentry->in_use = 1; 4065 } 4066 4067 /* 4068 * If target mode is enable, also request the vector for the ATIO 4069 * queue. 4070 */ 4071 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 4072 IS_ATIO_MSIX_CAPABLE(ha)) { 4073 qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; 4074 rsp->msix = qentry; 4075 qentry->handle = rsp; 4076 scnprintf(qentry->name, sizeof(qentry->name), 4077 "qla2xxx%lu_%s", vha->host_no, 4078 msix_entries[QLA_ATIO_VECTOR].name); 4079 qentry->in_use = 1; 4080 ret = request_irq(qentry->vector, 4081 msix_entries[QLA_ATIO_VECTOR].handler, 4082 0, qentry->name, rsp); 4083 qentry->have_irq = 1; 4084 } 4085 4086 msix_register_fail: 4087 if (ret) { 4088 ql_log(ql_log_fatal, vha, 0x00cb, 4089 "MSI-X: unable to register handler -- %x/%d.\n", 4090 qentry->vector, ret); 4091 qla2x00_free_irqs(vha); 4092 ha->mqenable = 0; 4093 goto msix_out; 4094 } 4095 4096 /* Enable MSI-X vector for response queue update for queue 0 */ 4097 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4098 if (ha->msixbase && ha->mqiobase && 4099 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 4100 ql2xmqsupport)) 4101 ha->mqenable = 1; 4102 } else 4103 if (ha->mqiobase && 4104 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 4105 ql2xmqsupport)) 4106 ha->mqenable = 1; 4107 ql_dbg(ql_dbg_multiq, vha, 0xc005, 4108 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 4109 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 4110 ql_dbg(ql_dbg_init, vha, 0x0055, 4111 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 4112 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 4113 4114 msix_out: 4115 return ret; 4116 4117 free_irqs: 4118 pci_free_irq_vectors(ha->pdev); 4119 goto msix_out; 4120 } 4121 4122 int 4123 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 4124 { 4125 int ret = QLA_FUNCTION_FAILED; 4126 device_reg_t *reg = ha->iobase; 4127 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4128 4129 /* If possible, enable MSI-X. */ 4130 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 4131 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && 4132 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))) 4133 goto skip_msi; 4134 4135 if (ql2xenablemsix == 2) 4136 goto skip_msix; 4137 4138 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 4139 (ha->pdev->subsystem_device == 0x7040 || 4140 ha->pdev->subsystem_device == 0x7041 || 4141 ha->pdev->subsystem_device == 0x1705)) { 4142 ql_log(ql_log_warn, vha, 0x0034, 4143 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 4144 ha->pdev->subsystem_vendor, 4145 ha->pdev->subsystem_device); 4146 goto skip_msi; 4147 } 4148 4149 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 4150 ql_log(ql_log_warn, vha, 0x0035, 4151 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 4152 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 4153 goto skip_msix; 4154 } 4155 4156 ret = qla24xx_enable_msix(ha, rsp); 4157 if (!ret) { 4158 ql_dbg(ql_dbg_init, vha, 0x0036, 4159 "MSI-X: Enabled (0x%X, 0x%X).\n", 4160 ha->chip_revision, ha->fw_attributes); 4161 goto clear_risc_ints; 4162 } 4163 4164 skip_msix: 4165 4166 ql_log(ql_log_info, vha, 0x0037, 4167 "Falling back-to MSI mode -- ret=%d.\n", ret); 4168 4169 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 4170 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 4171 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4172 goto skip_msi; 4173 4174 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); 4175 if (ret > 0) { 4176 ql_dbg(ql_dbg_init, vha, 0x0038, 4177 "MSI: Enabled.\n"); 4178 ha->flags.msi_enabled = 1; 4179 } else 4180 ql_log(ql_log_warn, vha, 0x0039, 4181 "Falling back-to INTa mode -- ret=%d.\n", ret); 4182 skip_msi: 4183 4184 /* Skip INTx on ISP82xx. */ 4185 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 4186 return QLA_FUNCTION_FAILED; 4187 4188 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 4189 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 4190 QLA2XXX_DRIVER_NAME, rsp); 4191 if (ret) { 4192 ql_log(ql_log_warn, vha, 0x003a, 4193 "Failed to reserve interrupt %d already in use.\n", 4194 ha->pdev->irq); 4195 goto fail; 4196 } else if (!ha->flags.msi_enabled) { 4197 ql_dbg(ql_dbg_init, vha, 0x0125, 4198 "INTa mode: Enabled.\n"); 4199 ha->flags.mr_intr_valid = 1; 4200 } 4201 4202 clear_risc_ints: 4203 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 4204 goto fail; 4205 4206 spin_lock_irq(&ha->hardware_lock); 4207 wrt_reg_word(®->isp.semaphore, 0); 4208 spin_unlock_irq(&ha->hardware_lock); 4209 4210 fail: 4211 return ret; 4212 } 4213 4214 void 4215 qla2x00_free_irqs(scsi_qla_host_t *vha) 4216 { 4217 struct qla_hw_data *ha = vha->hw; 4218 struct rsp_que *rsp; 4219 struct qla_msix_entry *qentry; 4220 int i; 4221 4222 /* 4223 * We need to check that ha->rsp_q_map is valid in case we are called 4224 * from a probe failure context. 4225 */ 4226 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 4227 goto free_irqs; 4228 rsp = ha->rsp_q_map[0]; 4229 4230 if (ha->flags.msix_enabled) { 4231 for (i = 0; i < ha->msix_count; i++) { 4232 qentry = &ha->msix_entries[i]; 4233 if (qentry->have_irq) { 4234 irq_set_affinity_notifier(qentry->vector, NULL); 4235 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); 4236 } 4237 } 4238 kfree(ha->msix_entries); 4239 ha->msix_entries = NULL; 4240 ha->flags.msix_enabled = 0; 4241 ql_dbg(ql_dbg_init, vha, 0x0042, 4242 "Disabled MSI-X.\n"); 4243 } else { 4244 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 4245 } 4246 4247 free_irqs: 4248 pci_free_irq_vectors(ha->pdev); 4249 } 4250 4251 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 4252 struct qla_msix_entry *msix, int vector_type) 4253 { 4254 const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 4255 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4256 int ret; 4257 4258 scnprintf(msix->name, sizeof(msix->name), 4259 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); 4260 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); 4261 if (ret) { 4262 ql_log(ql_log_fatal, vha, 0x00e6, 4263 "MSI-X: Unable to register handler -- %x/%d.\n", 4264 msix->vector, ret); 4265 return ret; 4266 } 4267 msix->have_irq = 1; 4268 msix->handle = qpair; 4269 return ret; 4270 } 4271