1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic Fibre Channel HBA Driver 4 * Copyright (c) 2003-2014 QLogic Corporation 5 */ 6 #include "qla_def.h" 7 #include "qla_target.h" 8 #include "qla_gbl.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <linux/cpu.h> 13 #include <linux/t10-pi.h> 14 #include <scsi/scsi_tcq.h> 15 #include <scsi/scsi_bsg_fc.h> 16 #include <scsi/scsi_eh.h> 17 #include <scsi/fc/fc_fs.h> 18 #include <linux/nvme-fc-driver.h> 19 20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 24 sts_entry_t *); 25 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, 26 struct purex_item *item); 27 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha, 28 uint16_t size); 29 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, 30 void *pkt); 31 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, 32 void **pkt, struct rsp_que **rsp); 33 34 static void 35 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item) 36 { 37 void *pkt = &item->iocb; 38 uint16_t pkt_size = item->size; 39 40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d, 41 "%s: Enter\n", __func__); 42 43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e, 44 "-------- ELS REQ -------\n"); 45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f, 46 pkt, pkt_size); 47 48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt); 49 } 50 51 const char *const port_state_str[] = { 52 "Unknown", 53 "UNCONFIGURED", 54 "DEAD", 55 "LOST", 56 "ONLINE" 57 }; 58 59 static void 60 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) 61 { 62 struct abts_entry_24xx *abts = 63 (struct abts_entry_24xx *)&pkt->iocb; 64 struct qla_hw_data *ha = vha->hw; 65 struct els_entry_24xx *rsp_els; 66 struct abts_entry_24xx *abts_rsp; 67 dma_addr_t dma; 68 uint32_t fctl; 69 int rval; 70 71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); 72 73 ql_log(ql_log_warn, vha, 0x0287, 74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", 75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, 76 abts->seq_id, abts->seq_cnt); 77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 78 "-------- ABTS RCV -------\n"); 79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 80 (uint8_t *)abts, sizeof(*abts)); 81 82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, 83 GFP_KERNEL); 84 if (!rsp_els) { 85 ql_log(ql_log_warn, vha, 0x0287, 86 "Failed allocate dma buffer ABTS/ELS RSP.\n"); 87 return; 88 } 89 90 /* terminate exchange */ 91 rsp_els->entry_type = ELS_IOCB_TYPE; 92 rsp_els->entry_count = 1; 93 rsp_els->nport_handle = cpu_to_le16(~0); 94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; 95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); 96 ql_dbg(ql_dbg_init, vha, 0x0283, 97 "Sending ELS Response to terminate exchange %#x...\n", 98 abts->rx_xch_addr_to_abort); 99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 100 "-------- ELS RSP -------\n"); 101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 102 (uint8_t *)rsp_els, sizeof(*rsp_els)); 103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); 104 if (rval) { 105 ql_log(ql_log_warn, vha, 0x0288, 106 "%s: iocb failed to execute -> %x\n", __func__, rval); 107 } else if (rsp_els->comp_status) { 108 ql_log(ql_log_warn, vha, 0x0289, 109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 110 __func__, rsp_els->comp_status, 111 rsp_els->error_subcode_1, rsp_els->error_subcode_2); 112 } else { 113 ql_dbg(ql_dbg_init, vha, 0x028a, 114 "%s: abort exchange done.\n", __func__); 115 } 116 117 /* send ABTS response */ 118 abts_rsp = (void *)rsp_els; 119 memset(abts_rsp, 0, sizeof(*abts_rsp)); 120 abts_rsp->entry_type = ABTS_RSP_TYPE; 121 abts_rsp->entry_count = 1; 122 abts_rsp->nport_handle = abts->nport_handle; 123 abts_rsp->vp_idx = abts->vp_idx; 124 abts_rsp->sof_type = abts->sof_type & 0xf0; 125 abts_rsp->rx_xch_addr = abts->rx_xch_addr; 126 abts_rsp->d_id[0] = abts->s_id[0]; 127 abts_rsp->d_id[1] = abts->s_id[1]; 128 abts_rsp->d_id[2] = abts->s_id[2]; 129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; 130 abts_rsp->s_id[0] = abts->d_id[0]; 131 abts_rsp->s_id[1] = abts->d_id[1]; 132 abts_rsp->s_id[2] = abts->d_id[2]; 133 abts_rsp->cs_ctl = abts->cs_ctl; 134 /* include flipping bit23 in fctl */ 135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | 136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; 137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; 138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; 139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; 140 abts_rsp->type = FC_TYPE_BLD; 141 abts_rsp->rx_id = abts->rx_id; 142 abts_rsp->ox_id = abts->ox_id; 143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; 144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; 145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); 146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; 147 ql_dbg(ql_dbg_init, vha, 0x028b, 148 "Sending BA ACC response to ABTS %#x...\n", 149 abts->rx_xch_addr_to_abort); 150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 151 "-------- ELS RSP -------\n"); 152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 153 (uint8_t *)abts_rsp, sizeof(*abts_rsp)); 154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); 155 if (rval) { 156 ql_log(ql_log_warn, vha, 0x028c, 157 "%s: iocb failed to execute -> %x\n", __func__, rval); 158 } else if (abts_rsp->comp_status) { 159 ql_log(ql_log_warn, vha, 0x028d, 160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 161 __func__, abts_rsp->comp_status, 162 abts_rsp->payload.error.subcode1, 163 abts_rsp->payload.error.subcode2); 164 } else { 165 ql_dbg(ql_dbg_init, vha, 0x028ea, 166 "%s: done.\n", __func__); 167 } 168 169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); 170 } 171 172 /** 173 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 174 * @irq: interrupt number 175 * @dev_id: SCSI driver HA context 176 * 177 * Called by system whenever the host adapter generates an interrupt. 178 * 179 * Returns handled flag. 180 */ 181 irqreturn_t 182 qla2100_intr_handler(int irq, void *dev_id) 183 { 184 scsi_qla_host_t *vha; 185 struct qla_hw_data *ha; 186 struct device_reg_2xxx __iomem *reg; 187 int status; 188 unsigned long iter; 189 uint16_t hccr; 190 uint16_t mb[8]; 191 struct rsp_que *rsp; 192 unsigned long flags; 193 194 rsp = (struct rsp_que *) dev_id; 195 if (!rsp) { 196 ql_log(ql_log_info, NULL, 0x505d, 197 "%s: NULL response queue pointer.\n", __func__); 198 return (IRQ_NONE); 199 } 200 201 ha = rsp->hw; 202 reg = &ha->iobase->isp; 203 status = 0; 204 205 spin_lock_irqsave(&ha->hardware_lock, flags); 206 vha = pci_get_drvdata(ha->pdev); 207 for (iter = 50; iter--; ) { 208 hccr = rd_reg_word(®->hccr); 209 if (qla2x00_check_reg16_for_disconnect(vha, hccr)) 210 break; 211 if (hccr & HCCR_RISC_PAUSE) { 212 if (pci_channel_offline(ha->pdev)) 213 break; 214 215 /* 216 * Issue a "HARD" reset in order for the RISC interrupt 217 * bit to be cleared. Schedule a big hammer to get 218 * out of the RISC PAUSED state. 219 */ 220 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 221 rd_reg_word(®->hccr); 222 223 ha->isp_ops->fw_dump(vha); 224 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 225 break; 226 } else if ((rd_reg_word(®->istatus) & ISR_RISC_INT) == 0) 227 break; 228 229 if (rd_reg_word(®->semaphore) & BIT_0) { 230 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 231 rd_reg_word(®->hccr); 232 233 /* Get mailbox data. */ 234 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 235 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 236 qla2x00_mbx_completion(vha, mb[0]); 237 status |= MBX_INTERRUPT; 238 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 239 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 240 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 241 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 242 qla2x00_async_event(vha, rsp, mb); 243 } else { 244 /*EMPTY*/ 245 ql_dbg(ql_dbg_async, vha, 0x5025, 246 "Unrecognized interrupt type (%d).\n", 247 mb[0]); 248 } 249 /* Release mailbox registers. */ 250 wrt_reg_word(®->semaphore, 0); 251 rd_reg_word(®->semaphore); 252 } else { 253 qla2x00_process_response_queue(rsp); 254 255 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 256 rd_reg_word(®->hccr); 257 } 258 } 259 qla2x00_handle_mbx_completion(ha, status); 260 spin_unlock_irqrestore(&ha->hardware_lock, flags); 261 262 return (IRQ_HANDLED); 263 } 264 265 bool 266 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 267 { 268 /* Check for PCI disconnection */ 269 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { 270 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 271 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 272 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 273 qla_schedule_eeh_work(vha); 274 } 275 return true; 276 } else 277 return false; 278 } 279 280 bool 281 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) 282 { 283 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); 284 } 285 286 /** 287 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 288 * @irq: interrupt number 289 * @dev_id: SCSI driver HA context 290 * 291 * Called by system whenever the host adapter generates an interrupt. 292 * 293 * Returns handled flag. 294 */ 295 irqreturn_t 296 qla2300_intr_handler(int irq, void *dev_id) 297 { 298 scsi_qla_host_t *vha; 299 struct device_reg_2xxx __iomem *reg; 300 int status; 301 unsigned long iter; 302 uint32_t stat; 303 uint16_t hccr; 304 uint16_t mb[8]; 305 struct rsp_que *rsp; 306 struct qla_hw_data *ha; 307 unsigned long flags; 308 309 rsp = (struct rsp_que *) dev_id; 310 if (!rsp) { 311 ql_log(ql_log_info, NULL, 0x5058, 312 "%s: NULL response queue pointer.\n", __func__); 313 return (IRQ_NONE); 314 } 315 316 ha = rsp->hw; 317 reg = &ha->iobase->isp; 318 status = 0; 319 320 spin_lock_irqsave(&ha->hardware_lock, flags); 321 vha = pci_get_drvdata(ha->pdev); 322 for (iter = 50; iter--; ) { 323 stat = rd_reg_dword(®->u.isp2300.host_status); 324 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 325 break; 326 if (stat & HSR_RISC_PAUSED) { 327 if (unlikely(pci_channel_offline(ha->pdev))) 328 break; 329 330 hccr = rd_reg_word(®->hccr); 331 332 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 333 ql_log(ql_log_warn, vha, 0x5026, 334 "Parity error -- HCCR=%x, Dumping " 335 "firmware.\n", hccr); 336 else 337 ql_log(ql_log_warn, vha, 0x5027, 338 "RISC paused -- HCCR=%x, Dumping " 339 "firmware.\n", hccr); 340 341 /* 342 * Issue a "HARD" reset in order for the RISC 343 * interrupt bit to be cleared. Schedule a big 344 * hammer to get out of the RISC PAUSED state. 345 */ 346 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 347 rd_reg_word(®->hccr); 348 349 ha->isp_ops->fw_dump(vha); 350 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 351 break; 352 } else if ((stat & HSR_RISC_INT) == 0) 353 break; 354 355 switch (stat & 0xff) { 356 case 0x1: 357 case 0x2: 358 case 0x10: 359 case 0x11: 360 qla2x00_mbx_completion(vha, MSW(stat)); 361 status |= MBX_INTERRUPT; 362 363 /* Release mailbox registers. */ 364 wrt_reg_word(®->semaphore, 0); 365 break; 366 case 0x12: 367 mb[0] = MSW(stat); 368 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 369 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 370 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 371 qla2x00_async_event(vha, rsp, mb); 372 break; 373 case 0x13: 374 qla2x00_process_response_queue(rsp); 375 break; 376 case 0x15: 377 mb[0] = MBA_CMPLT_1_16BIT; 378 mb[1] = MSW(stat); 379 qla2x00_async_event(vha, rsp, mb); 380 break; 381 case 0x16: 382 mb[0] = MBA_SCSI_COMPLETION; 383 mb[1] = MSW(stat); 384 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 385 qla2x00_async_event(vha, rsp, mb); 386 break; 387 default: 388 ql_dbg(ql_dbg_async, vha, 0x5028, 389 "Unrecognized interrupt type (%d).\n", stat & 0xff); 390 break; 391 } 392 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 393 rd_reg_word_relaxed(®->hccr); 394 } 395 qla2x00_handle_mbx_completion(ha, status); 396 spin_unlock_irqrestore(&ha->hardware_lock, flags); 397 398 return (IRQ_HANDLED); 399 } 400 401 /** 402 * qla2x00_mbx_completion() - Process mailbox command completions. 403 * @vha: SCSI driver HA context 404 * @mb0: Mailbox0 register 405 */ 406 static void 407 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 408 { 409 uint16_t cnt; 410 uint32_t mboxes; 411 __le16 __iomem *wptr; 412 struct qla_hw_data *ha = vha->hw; 413 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 414 415 /* Read all mbox registers? */ 416 WARN_ON_ONCE(ha->mbx_count > 32); 417 mboxes = (1ULL << ha->mbx_count) - 1; 418 if (!ha->mcp) 419 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 420 else 421 mboxes = ha->mcp->in_mb; 422 423 /* Load return mailbox registers. */ 424 ha->flags.mbox_int = 1; 425 ha->mailbox_out[0] = mb0; 426 mboxes >>= 1; 427 wptr = MAILBOX_REG(ha, reg, 1); 428 429 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 430 if (IS_QLA2200(ha) && cnt == 8) 431 wptr = MAILBOX_REG(ha, reg, 8); 432 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 433 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 434 else if (mboxes & BIT_0) 435 ha->mailbox_out[cnt] = rd_reg_word(wptr); 436 437 wptr++; 438 mboxes >>= 1; 439 } 440 } 441 442 static void 443 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 444 { 445 static char *event[] = 446 { "Complete", "Request Notification", "Time Extension" }; 447 int rval; 448 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 449 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 450 __le16 __iomem *wptr; 451 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 452 453 /* Seed data -- mailbox1 -> mailbox7. */ 454 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 455 wptr = ®24->mailbox1; 456 else if (IS_QLA8044(vha->hw)) 457 wptr = ®82->mailbox_out[1]; 458 else 459 return; 460 461 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 462 mb[cnt] = rd_reg_word(wptr); 463 464 ql_dbg(ql_dbg_async, vha, 0x5021, 465 "Inter-Driver Communication %s -- " 466 "%04x %04x %04x %04x %04x %04x %04x.\n", 467 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 468 mb[4], mb[5], mb[6]); 469 switch (aen) { 470 /* Handle IDC Error completion case. */ 471 case MBA_IDC_COMPLETE: 472 if (mb[1] >> 15) { 473 vha->hw->flags.idc_compl_status = 1; 474 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 475 complete(&vha->hw->dcbx_comp); 476 } 477 break; 478 479 case MBA_IDC_NOTIFY: 480 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 481 timeout = (descr >> 8) & 0xf; 482 ql_dbg(ql_dbg_async, vha, 0x5022, 483 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 484 vha->host_no, event[aen & 0xff], timeout); 485 486 if (!timeout) 487 return; 488 rval = qla2x00_post_idc_ack_work(vha, mb); 489 if (rval != QLA_SUCCESS) 490 ql_log(ql_log_warn, vha, 0x5023, 491 "IDC failed to post ACK.\n"); 492 break; 493 case MBA_IDC_TIME_EXT: 494 vha->hw->idc_extend_tmo = descr; 495 ql_dbg(ql_dbg_async, vha, 0x5087, 496 "%lu Inter-Driver Communication %s -- " 497 "Extend timeout by=%d.\n", 498 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 499 break; 500 } 501 } 502 503 #define LS_UNKNOWN 2 504 const char * 505 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 506 { 507 static const char *const link_speeds[] = { 508 "1", "2", "?", "4", "8", "16", "32", "10" 509 }; 510 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) 511 512 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 513 return link_speeds[0]; 514 else if (speed == 0x13) 515 return link_speeds[QLA_LAST_SPEED]; 516 else if (speed < QLA_LAST_SPEED) 517 return link_speeds[speed]; 518 else 519 return link_speeds[LS_UNKNOWN]; 520 } 521 522 static void 523 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 524 { 525 struct qla_hw_data *ha = vha->hw; 526 527 /* 528 * 8200 AEN Interpretation: 529 * mb[0] = AEN code 530 * mb[1] = AEN Reason code 531 * mb[2] = LSW of Peg-Halt Status-1 Register 532 * mb[6] = MSW of Peg-Halt Status-1 Register 533 * mb[3] = LSW of Peg-Halt Status-2 register 534 * mb[7] = MSW of Peg-Halt Status-2 register 535 * mb[4] = IDC Device-State Register value 536 * mb[5] = IDC Driver-Presence Register value 537 */ 538 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 539 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 540 mb[0], mb[1], mb[2], mb[6]); 541 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 542 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 543 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 544 545 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 546 IDC_HEARTBEAT_FAILURE)) { 547 ha->flags.nic_core_hung = 1; 548 ql_log(ql_log_warn, vha, 0x5060, 549 "83XX: F/W Error Reported: Check if reset required.\n"); 550 551 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 552 uint32_t protocol_engine_id, fw_err_code, err_level; 553 554 /* 555 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 556 * - PEG-Halt Status-1 Register: 557 * (LSW = mb[2], MSW = mb[6]) 558 * Bits 0-7 = protocol-engine ID 559 * Bits 8-28 = f/w error code 560 * Bits 29-31 = Error-level 561 * Error-level 0x1 = Non-Fatal error 562 * Error-level 0x2 = Recoverable Fatal error 563 * Error-level 0x4 = UnRecoverable Fatal error 564 * - PEG-Halt Status-2 Register: 565 * (LSW = mb[3], MSW = mb[7]) 566 */ 567 protocol_engine_id = (mb[2] & 0xff); 568 fw_err_code = (((mb[2] & 0xff00) >> 8) | 569 ((mb[6] & 0x1fff) << 8)); 570 err_level = ((mb[6] & 0xe000) >> 13); 571 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 572 "Register: protocol_engine_id=0x%x " 573 "fw_err_code=0x%x err_level=0x%x.\n", 574 protocol_engine_id, fw_err_code, err_level); 575 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 576 "Register: 0x%x%x.\n", mb[7], mb[3]); 577 if (err_level == ERR_LEVEL_NON_FATAL) { 578 ql_log(ql_log_warn, vha, 0x5063, 579 "Not a fatal error, f/w has recovered itself.\n"); 580 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 581 ql_log(ql_log_fatal, vha, 0x5064, 582 "Recoverable Fatal error: Chip reset " 583 "required.\n"); 584 qla83xx_schedule_work(vha, 585 QLA83XX_NIC_CORE_RESET); 586 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 587 ql_log(ql_log_fatal, vha, 0x5065, 588 "Unrecoverable Fatal error: Set FAILED " 589 "state, reboot required.\n"); 590 qla83xx_schedule_work(vha, 591 QLA83XX_NIC_CORE_UNRECOVERABLE); 592 } 593 } 594 595 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 596 uint16_t peg_fw_state, nw_interface_link_up; 597 uint16_t nw_interface_signal_detect, sfp_status; 598 uint16_t htbt_counter, htbt_monitor_enable; 599 uint16_t sfp_additional_info, sfp_multirate; 600 uint16_t sfp_tx_fault, link_speed, dcbx_status; 601 602 /* 603 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 604 * - PEG-to-FC Status Register: 605 * (LSW = mb[2], MSW = mb[6]) 606 * Bits 0-7 = Peg-Firmware state 607 * Bit 8 = N/W Interface Link-up 608 * Bit 9 = N/W Interface signal detected 609 * Bits 10-11 = SFP Status 610 * SFP Status 0x0 = SFP+ transceiver not expected 611 * SFP Status 0x1 = SFP+ transceiver not present 612 * SFP Status 0x2 = SFP+ transceiver invalid 613 * SFP Status 0x3 = SFP+ transceiver present and 614 * valid 615 * Bits 12-14 = Heartbeat Counter 616 * Bit 15 = Heartbeat Monitor Enable 617 * Bits 16-17 = SFP Additional Info 618 * SFP info 0x0 = Unregocnized transceiver for 619 * Ethernet 620 * SFP info 0x1 = SFP+ brand validation failed 621 * SFP info 0x2 = SFP+ speed validation failed 622 * SFP info 0x3 = SFP+ access error 623 * Bit 18 = SFP Multirate 624 * Bit 19 = SFP Tx Fault 625 * Bits 20-22 = Link Speed 626 * Bits 23-27 = Reserved 627 * Bits 28-30 = DCBX Status 628 * DCBX Status 0x0 = DCBX Disabled 629 * DCBX Status 0x1 = DCBX Enabled 630 * DCBX Status 0x2 = DCBX Exchange error 631 * Bit 31 = Reserved 632 */ 633 peg_fw_state = (mb[2] & 0x00ff); 634 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 635 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 636 sfp_status = ((mb[2] & 0x0c00) >> 10); 637 htbt_counter = ((mb[2] & 0x7000) >> 12); 638 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 639 sfp_additional_info = (mb[6] & 0x0003); 640 sfp_multirate = ((mb[6] & 0x0004) >> 2); 641 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 642 link_speed = ((mb[6] & 0x0070) >> 4); 643 dcbx_status = ((mb[6] & 0x7000) >> 12); 644 645 ql_log(ql_log_warn, vha, 0x5066, 646 "Peg-to-Fc Status Register:\n" 647 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 648 "nw_interface_signal_detect=0x%x" 649 "\nsfp_statis=0x%x.\n ", peg_fw_state, 650 nw_interface_link_up, nw_interface_signal_detect, 651 sfp_status); 652 ql_log(ql_log_warn, vha, 0x5067, 653 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 654 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 655 htbt_counter, htbt_monitor_enable, 656 sfp_additional_info, sfp_multirate); 657 ql_log(ql_log_warn, vha, 0x5068, 658 "sfp_tx_fault=0x%x, link_state=0x%x, " 659 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 660 dcbx_status); 661 662 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 663 } 664 665 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 666 ql_log(ql_log_warn, vha, 0x5069, 667 "Heartbeat Failure encountered, chip reset " 668 "required.\n"); 669 670 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 671 } 672 } 673 674 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 675 ql_log(ql_log_info, vha, 0x506a, 676 "IDC Device-State changed = 0x%x.\n", mb[4]); 677 if (ha->flags.nic_core_reset_owner) 678 return; 679 qla83xx_schedule_work(vha, MBA_IDC_AEN); 680 } 681 } 682 683 int 684 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 685 { 686 struct qla_hw_data *ha = vha->hw; 687 scsi_qla_host_t *vp; 688 uint32_t vp_did; 689 unsigned long flags; 690 int ret = 0; 691 692 if (!ha->num_vhosts) 693 return ret; 694 695 spin_lock_irqsave(&ha->vport_slock, flags); 696 list_for_each_entry(vp, &ha->vp_list, list) { 697 vp_did = vp->d_id.b24; 698 if (vp_did == rscn_entry) { 699 ret = 1; 700 break; 701 } 702 } 703 spin_unlock_irqrestore(&ha->vport_slock, flags); 704 705 return ret; 706 } 707 708 fc_port_t * 709 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 710 { 711 fc_port_t *f, *tf; 712 713 f = tf = NULL; 714 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) 715 if (f->loop_id == loop_id) 716 return f; 717 return NULL; 718 } 719 720 fc_port_t * 721 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) 722 { 723 fc_port_t *f, *tf; 724 725 f = tf = NULL; 726 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 727 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { 728 if (incl_deleted) 729 return f; 730 else if (f->deleted == 0) 731 return f; 732 } 733 } 734 return NULL; 735 } 736 737 fc_port_t * 738 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, 739 u8 incl_deleted) 740 { 741 fc_port_t *f, *tf; 742 743 f = tf = NULL; 744 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 745 if (f->d_id.b24 == id->b24) { 746 if (incl_deleted) 747 return f; 748 else if (f->deleted == 0) 749 return f; 750 } 751 } 752 return NULL; 753 } 754 755 /* Shall be called only on supported adapters. */ 756 static void 757 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 758 { 759 struct qla_hw_data *ha = vha->hw; 760 bool reset_isp_needed = false; 761 762 ql_log(ql_log_warn, vha, 0x02f0, 763 "MPI Heartbeat stop. MPI reset is%s needed. " 764 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", 765 mb[1] & BIT_8 ? "" : " not", 766 mb[0], mb[1], mb[2], mb[3]); 767 768 if ((mb[1] & BIT_8) == 0) 769 return; 770 771 ql_log(ql_log_warn, vha, 0x02f1, 772 "MPI Heartbeat stop. FW dump needed\n"); 773 774 if (ql2xfulldump_on_mpifail) { 775 ha->isp_ops->fw_dump(vha); 776 reset_isp_needed = true; 777 } 778 779 ha->isp_ops->mpi_fw_dump(vha, 1); 780 781 if (reset_isp_needed) { 782 vha->hw->flags.fw_init_done = 0; 783 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 784 qla2xxx_wake_dpc(vha); 785 } 786 } 787 788 static struct purex_item * 789 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) 790 { 791 struct purex_item *item = NULL; 792 uint8_t item_hdr_size = sizeof(*item); 793 794 if (size > QLA_DEFAULT_PAYLOAD_SIZE) { 795 item = kzalloc(item_hdr_size + 796 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); 797 } else { 798 if (atomic_inc_return(&vha->default_item.in_use) == 1) { 799 item = &vha->default_item; 800 goto initialize_purex_header; 801 } else { 802 item = kzalloc(item_hdr_size, GFP_ATOMIC); 803 } 804 } 805 if (!item) { 806 ql_log(ql_log_warn, vha, 0x5092, 807 ">> Failed allocate purex list item.\n"); 808 809 return NULL; 810 } 811 812 initialize_purex_header: 813 item->vha = vha; 814 item->size = size; 815 return item; 816 } 817 818 static void 819 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, 820 void (*process_item)(struct scsi_qla_host *vha, 821 struct purex_item *pkt)) 822 { 823 struct purex_list *list = &vha->purex_list; 824 ulong flags; 825 826 pkt->process_item = process_item; 827 828 spin_lock_irqsave(&list->lock, flags); 829 list_add_tail(&pkt->list, &list->head); 830 spin_unlock_irqrestore(&list->lock, flags); 831 832 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); 833 } 834 835 /** 836 * qla24xx_copy_std_pkt() - Copy over purex ELS which is 837 * contained in a single IOCB. 838 * purex packet. 839 * @vha: SCSI driver HA context 840 * @pkt: ELS packet 841 */ 842 static struct purex_item 843 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) 844 { 845 struct purex_item *item; 846 847 item = qla24xx_alloc_purex_item(vha, 848 QLA_DEFAULT_PAYLOAD_SIZE); 849 if (!item) 850 return item; 851 852 memcpy(&item->iocb, pkt, sizeof(item->iocb)); 853 return item; 854 } 855 856 /** 857 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can 858 * span over multiple IOCBs. 859 * @vha: SCSI driver HA context 860 * @pkt: ELS packet 861 * @rsp: Response queue 862 */ 863 static struct purex_item * 864 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, 865 struct rsp_que **rsp) 866 { 867 struct purex_entry_24xx *purex = *pkt; 868 struct rsp_que *rsp_q = *rsp; 869 sts_cont_entry_t *new_pkt; 870 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; 871 uint16_t buffer_copy_offset = 0; 872 uint16_t entry_count, entry_count_remaining; 873 struct purex_item *item; 874 void *fpin_pkt = NULL; 875 876 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) 877 - PURX_ELS_HEADER_SIZE; 878 pending_bytes = total_bytes; 879 entry_count = entry_count_remaining = purex->entry_count; 880 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 881 sizeof(purex->els_frame_payload) : pending_bytes; 882 ql_log(ql_log_info, vha, 0x509a, 883 "FPIN ELS, frame_size 0x%x, entry count %d\n", 884 total_bytes, entry_count); 885 886 item = qla24xx_alloc_purex_item(vha, total_bytes); 887 if (!item) 888 return item; 889 890 fpin_pkt = &item->iocb; 891 892 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes); 893 buffer_copy_offset += no_bytes; 894 pending_bytes -= no_bytes; 895 --entry_count_remaining; 896 897 ((response_t *)purex)->signature = RESPONSE_PROCESSED; 898 wmb(); 899 900 do { 901 while ((total_bytes > 0) && (entry_count_remaining > 0)) { 902 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { 903 ql_dbg(ql_dbg_async, vha, 0x5084, 904 "Ran out of IOCBs, partial data 0x%x\n", 905 buffer_copy_offset); 906 cpu_relax(); 907 continue; 908 } 909 910 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; 911 *pkt = new_pkt; 912 913 if (new_pkt->entry_type != STATUS_CONT_TYPE) { 914 ql_log(ql_log_warn, vha, 0x507a, 915 "Unexpected IOCB type, partial data 0x%x\n", 916 buffer_copy_offset); 917 break; 918 } 919 920 rsp_q->ring_index++; 921 if (rsp_q->ring_index == rsp_q->length) { 922 rsp_q->ring_index = 0; 923 rsp_q->ring_ptr = rsp_q->ring; 924 } else { 925 rsp_q->ring_ptr++; 926 } 927 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? 928 sizeof(new_pkt->data) : pending_bytes; 929 if ((buffer_copy_offset + no_bytes) <= total_bytes) { 930 memcpy(((uint8_t *)fpin_pkt + 931 buffer_copy_offset), new_pkt->data, 932 no_bytes); 933 buffer_copy_offset += no_bytes; 934 pending_bytes -= no_bytes; 935 --entry_count_remaining; 936 } else { 937 ql_log(ql_log_warn, vha, 0x5044, 938 "Attempt to copy more that we got, optimizing..%x\n", 939 buffer_copy_offset); 940 memcpy(((uint8_t *)fpin_pkt + 941 buffer_copy_offset), new_pkt->data, 942 total_bytes - buffer_copy_offset); 943 } 944 945 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; 946 wmb(); 947 } 948 949 if (pending_bytes != 0 || entry_count_remaining != 0) { 950 ql_log(ql_log_fatal, vha, 0x508b, 951 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", 952 total_bytes, entry_count_remaining); 953 qla24xx_free_purex_item(item); 954 return NULL; 955 } 956 } while (entry_count_remaining > 0); 957 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); 958 return item; 959 } 960 961 /** 962 * qla2x00_async_event() - Process aynchronous events. 963 * @vha: SCSI driver HA context 964 * @rsp: response queue 965 * @mb: Mailbox registers (0 - 3) 966 */ 967 void 968 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 969 { 970 uint16_t handle_cnt; 971 uint16_t cnt, mbx; 972 uint32_t handles[5]; 973 struct qla_hw_data *ha = vha->hw; 974 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 975 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 976 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 977 uint32_t rscn_entry, host_pid; 978 unsigned long flags; 979 fc_port_t *fcport = NULL; 980 981 if (!vha->hw->flags.fw_started) 982 return; 983 984 /* Setup to process RIO completion. */ 985 handle_cnt = 0; 986 if (IS_CNA_CAPABLE(ha)) 987 goto skip_rio; 988 switch (mb[0]) { 989 case MBA_SCSI_COMPLETION: 990 handles[0] = make_handle(mb[2], mb[1]); 991 handle_cnt = 1; 992 break; 993 case MBA_CMPLT_1_16BIT: 994 handles[0] = mb[1]; 995 handle_cnt = 1; 996 mb[0] = MBA_SCSI_COMPLETION; 997 break; 998 case MBA_CMPLT_2_16BIT: 999 handles[0] = mb[1]; 1000 handles[1] = mb[2]; 1001 handle_cnt = 2; 1002 mb[0] = MBA_SCSI_COMPLETION; 1003 break; 1004 case MBA_CMPLT_3_16BIT: 1005 handles[0] = mb[1]; 1006 handles[1] = mb[2]; 1007 handles[2] = mb[3]; 1008 handle_cnt = 3; 1009 mb[0] = MBA_SCSI_COMPLETION; 1010 break; 1011 case MBA_CMPLT_4_16BIT: 1012 handles[0] = mb[1]; 1013 handles[1] = mb[2]; 1014 handles[2] = mb[3]; 1015 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1016 handle_cnt = 4; 1017 mb[0] = MBA_SCSI_COMPLETION; 1018 break; 1019 case MBA_CMPLT_5_16BIT: 1020 handles[0] = mb[1]; 1021 handles[1] = mb[2]; 1022 handles[2] = mb[3]; 1023 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1024 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 1025 handle_cnt = 5; 1026 mb[0] = MBA_SCSI_COMPLETION; 1027 break; 1028 case MBA_CMPLT_2_32BIT: 1029 handles[0] = make_handle(mb[2], mb[1]); 1030 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), 1031 RD_MAILBOX_REG(ha, reg, 6)); 1032 handle_cnt = 2; 1033 mb[0] = MBA_SCSI_COMPLETION; 1034 break; 1035 default: 1036 break; 1037 } 1038 skip_rio: 1039 switch (mb[0]) { 1040 case MBA_SCSI_COMPLETION: /* Fast Post */ 1041 if (!vha->flags.online) 1042 break; 1043 1044 for (cnt = 0; cnt < handle_cnt; cnt++) 1045 qla2x00_process_completed_request(vha, rsp->req, 1046 handles[cnt]); 1047 break; 1048 1049 case MBA_RESET: /* Reset */ 1050 ql_dbg(ql_dbg_async, vha, 0x5002, 1051 "Asynchronous RESET.\n"); 1052 1053 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1054 break; 1055 1056 case MBA_SYSTEM_ERR: /* System Error */ 1057 mbx = 0; 1058 1059 vha->hw_err_cnt++; 1060 1061 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 1062 IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1063 u16 m[4]; 1064 1065 m[0] = rd_reg_word(®24->mailbox4); 1066 m[1] = rd_reg_word(®24->mailbox5); 1067 m[2] = rd_reg_word(®24->mailbox6); 1068 mbx = m[3] = rd_reg_word(®24->mailbox7); 1069 1070 ql_log(ql_log_warn, vha, 0x5003, 1071 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", 1072 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); 1073 } else 1074 ql_log(ql_log_warn, vha, 0x5003, 1075 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", 1076 mb[1], mb[2], mb[3]); 1077 1078 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 1079 rd_reg_word(®24->mailbox7) & BIT_8) 1080 ha->isp_ops->mpi_fw_dump(vha, 1); 1081 ha->isp_ops->fw_dump(vha); 1082 ha->flags.fw_init_done = 0; 1083 QLA_FW_STOPPED(ha); 1084 1085 if (IS_FWI2_CAPABLE(ha)) { 1086 if (mb[1] == 0 && mb[2] == 0) { 1087 ql_log(ql_log_fatal, vha, 0x5004, 1088 "Unrecoverable Hardware Error: adapter " 1089 "marked OFFLINE!\n"); 1090 vha->flags.online = 0; 1091 vha->device_flags |= DFLG_DEV_FAILED; 1092 } else { 1093 /* Check to see if MPI timeout occurred */ 1094 if ((mbx & MBX_3) && (ha->port_no == 0)) 1095 set_bit(MPI_RESET_NEEDED, 1096 &vha->dpc_flags); 1097 1098 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1099 } 1100 } else if (mb[1] == 0) { 1101 ql_log(ql_log_fatal, vha, 0x5005, 1102 "Unrecoverable Hardware Error: adapter marked " 1103 "OFFLINE!\n"); 1104 vha->flags.online = 0; 1105 vha->device_flags |= DFLG_DEV_FAILED; 1106 } else 1107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1108 break; 1109 1110 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 1111 ql_log(ql_log_warn, vha, 0x5006, 1112 "ISP Request Transfer Error (%x).\n", mb[1]); 1113 1114 vha->hw_err_cnt++; 1115 1116 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1117 break; 1118 1119 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 1120 ql_log(ql_log_warn, vha, 0x5007, 1121 "ISP Response Transfer Error (%x).\n", mb[1]); 1122 1123 vha->hw_err_cnt++; 1124 1125 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1126 break; 1127 1128 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 1129 ql_dbg(ql_dbg_async, vha, 0x5008, 1130 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); 1131 break; 1132 1133 case MBA_LOOP_INIT_ERR: 1134 ql_log(ql_log_warn, vha, 0x5090, 1135 "LOOP INIT ERROR (%x).\n", mb[1]); 1136 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1137 break; 1138 1139 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 1140 ha->flags.lip_ae = 1; 1141 1142 ql_dbg(ql_dbg_async, vha, 0x5009, 1143 "LIP occurred (%x).\n", mb[1]); 1144 1145 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1146 atomic_set(&vha->loop_state, LOOP_DOWN); 1147 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1148 qla2x00_mark_all_devices_lost(vha); 1149 } 1150 1151 if (vha->vp_idx) { 1152 atomic_set(&vha->vp_state, VP_FAILED); 1153 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1154 } 1155 1156 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1157 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1158 1159 vha->flags.management_server_logged_in = 0; 1160 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 1161 break; 1162 1163 case MBA_LOOP_UP: /* Loop Up Event */ 1164 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1165 ha->link_data_rate = PORT_SPEED_1GB; 1166 else 1167 ha->link_data_rate = mb[1]; 1168 1169 ql_log(ql_log_info, vha, 0x500a, 1170 "LOOP UP detected (%s Gbps).\n", 1171 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 1172 1173 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1174 if (mb[2] & BIT_0) 1175 ql_log(ql_log_info, vha, 0x11a0, 1176 "FEC=enabled (link up).\n"); 1177 } 1178 1179 vha->flags.management_server_logged_in = 0; 1180 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 1181 1182 if (vha->link_down_time < vha->hw->port_down_retry_count) { 1183 vha->short_link_down_cnt++; 1184 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 1185 } 1186 1187 break; 1188 1189 case MBA_LOOP_DOWN: /* Loop Down Event */ 1190 SAVE_TOPO(ha); 1191 ha->flags.lip_ae = 0; 1192 ha->current_topology = 0; 1193 vha->link_down_time = 0; 1194 1195 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 1196 ? rd_reg_word(®24->mailbox4) : 0; 1197 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(®82->mailbox_out[4]) 1198 : mbx; 1199 ql_log(ql_log_info, vha, 0x500b, 1200 "LOOP DOWN detected (%x %x %x %x).\n", 1201 mb[1], mb[2], mb[3], mbx); 1202 1203 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1204 atomic_set(&vha->loop_state, LOOP_DOWN); 1205 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1206 /* 1207 * In case of loop down, restore WWPN from 1208 * NVRAM in case of FA-WWPN capable ISP 1209 * Restore for Physical Port only 1210 */ 1211 if (!vha->vp_idx) { 1212 if (ha->flags.fawwpn_enabled && 1213 (ha->current_topology == ISP_CFG_F)) { 1214 void *wwpn = ha->init_cb->port_name; 1215 1216 memcpy(vha->port_name, wwpn, WWN_SIZE); 1217 fc_host_port_name(vha->host) = 1218 wwn_to_u64(vha->port_name); 1219 ql_dbg(ql_dbg_init + ql_dbg_verbose, 1220 vha, 0x00d8, "LOOP DOWN detected," 1221 "restore WWPN %016llx\n", 1222 wwn_to_u64(vha->port_name)); 1223 } 1224 1225 clear_bit(VP_CONFIG_OK, &vha->vp_flags); 1226 } 1227 1228 vha->device_flags |= DFLG_NO_CABLE; 1229 qla2x00_mark_all_devices_lost(vha); 1230 } 1231 1232 if (vha->vp_idx) { 1233 atomic_set(&vha->vp_state, VP_FAILED); 1234 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1235 } 1236 1237 vha->flags.management_server_logged_in = 0; 1238 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1239 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 1240 break; 1241 1242 case MBA_LIP_RESET: /* LIP reset occurred */ 1243 ql_dbg(ql_dbg_async, vha, 0x500c, 1244 "LIP reset occurred (%x).\n", mb[1]); 1245 1246 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1247 atomic_set(&vha->loop_state, LOOP_DOWN); 1248 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1249 qla2x00_mark_all_devices_lost(vha); 1250 } 1251 1252 if (vha->vp_idx) { 1253 atomic_set(&vha->vp_state, VP_FAILED); 1254 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1255 } 1256 1257 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1258 1259 ha->operating_mode = LOOP; 1260 vha->flags.management_server_logged_in = 0; 1261 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 1262 break; 1263 1264 /* case MBA_DCBX_COMPLETE: */ 1265 case MBA_POINT_TO_POINT: /* Point-to-Point */ 1266 ha->flags.lip_ae = 0; 1267 1268 if (IS_QLA2100(ha)) 1269 break; 1270 1271 if (IS_CNA_CAPABLE(ha)) { 1272 ql_dbg(ql_dbg_async, vha, 0x500d, 1273 "DCBX Completed -- %04x %04x %04x.\n", 1274 mb[1], mb[2], mb[3]); 1275 if (ha->notify_dcbx_comp && !vha->vp_idx) 1276 complete(&ha->dcbx_comp); 1277 1278 } else 1279 ql_dbg(ql_dbg_async, vha, 0x500e, 1280 "Asynchronous P2P MODE received.\n"); 1281 1282 /* 1283 * Until there's a transition from loop down to loop up, treat 1284 * this as loop down only. 1285 */ 1286 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1287 atomic_set(&vha->loop_state, LOOP_DOWN); 1288 if (!atomic_read(&vha->loop_down_timer)) 1289 atomic_set(&vha->loop_down_timer, 1290 LOOP_DOWN_TIME); 1291 if (!N2N_TOPO(ha)) 1292 qla2x00_mark_all_devices_lost(vha); 1293 } 1294 1295 if (vha->vp_idx) { 1296 atomic_set(&vha->vp_state, VP_FAILED); 1297 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1298 } 1299 1300 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 1301 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1302 1303 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1304 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1305 1306 vha->flags.management_server_logged_in = 0; 1307 break; 1308 1309 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 1310 if (IS_QLA2100(ha)) 1311 break; 1312 1313 ql_dbg(ql_dbg_async, vha, 0x500f, 1314 "Configuration change detected: value=%x.\n", mb[1]); 1315 1316 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1317 atomic_set(&vha->loop_state, LOOP_DOWN); 1318 if (!atomic_read(&vha->loop_down_timer)) 1319 atomic_set(&vha->loop_down_timer, 1320 LOOP_DOWN_TIME); 1321 qla2x00_mark_all_devices_lost(vha); 1322 } 1323 1324 if (vha->vp_idx) { 1325 atomic_set(&vha->vp_state, VP_FAILED); 1326 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1327 } 1328 1329 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1330 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1331 break; 1332 1333 case MBA_PORT_UPDATE: /* Port database update */ 1334 /* 1335 * Handle only global and vn-port update events 1336 * 1337 * Relevant inputs: 1338 * mb[1] = N_Port handle of changed port 1339 * OR 0xffff for global event 1340 * mb[2] = New login state 1341 * 7 = Port logged out 1342 * mb[3] = LSB is vp_idx, 0xff = all vps 1343 * 1344 * Skip processing if: 1345 * Event is global, vp_idx is NOT all vps, 1346 * vp_idx does not match 1347 * Event is not global, vp_idx does not match 1348 */ 1349 if (IS_QLA2XXX_MIDTYPE(ha) && 1350 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 1351 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 1352 break; 1353 1354 if (mb[2] == 0x7) { 1355 ql_dbg(ql_dbg_async, vha, 0x5010, 1356 "Port %s %04x %04x %04x.\n", 1357 mb[1] == 0xffff ? "unavailable" : "logout", 1358 mb[1], mb[2], mb[3]); 1359 1360 if (mb[1] == 0xffff) 1361 goto global_port_update; 1362 1363 if (mb[1] == NPH_SNS_LID(ha)) { 1364 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1365 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1366 break; 1367 } 1368 1369 /* use handle_cnt for loop id/nport handle */ 1370 if (IS_FWI2_CAPABLE(ha)) 1371 handle_cnt = NPH_SNS; 1372 else 1373 handle_cnt = SIMPLE_NAME_SERVER; 1374 if (mb[1] == handle_cnt) { 1375 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1376 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1377 break; 1378 } 1379 1380 /* Port logout */ 1381 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1382 if (!fcport) 1383 break; 1384 if (atomic_read(&fcport->state) != FCS_ONLINE) 1385 break; 1386 ql_dbg(ql_dbg_async, vha, 0x508a, 1387 "Marking port lost loopid=%04x portid=%06x.\n", 1388 fcport->loop_id, fcport->d_id.b24); 1389 if (qla_ini_mode_enabled(vha)) { 1390 fcport->logout_on_delete = 0; 1391 qlt_schedule_sess_for_deletion(fcport); 1392 } 1393 break; 1394 1395 global_port_update: 1396 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1397 atomic_set(&vha->loop_state, LOOP_DOWN); 1398 atomic_set(&vha->loop_down_timer, 1399 LOOP_DOWN_TIME); 1400 vha->device_flags |= DFLG_NO_CABLE; 1401 qla2x00_mark_all_devices_lost(vha); 1402 } 1403 1404 if (vha->vp_idx) { 1405 atomic_set(&vha->vp_state, VP_FAILED); 1406 fc_vport_set_state(vha->fc_vport, 1407 FC_VPORT_FAILED); 1408 qla2x00_mark_all_devices_lost(vha); 1409 } 1410 1411 vha->flags.management_server_logged_in = 0; 1412 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1413 break; 1414 } 1415 1416 /* 1417 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1418 * event etc. earlier indicating loop is down) then process 1419 * it. Otherwise ignore it and Wait for RSCN to come in. 1420 */ 1421 atomic_set(&vha->loop_down_timer, 0); 1422 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1423 !ha->flags.n2n_ae && 1424 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1425 ql_dbg(ql_dbg_async, vha, 0x5011, 1426 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1427 mb[1], mb[2], mb[3]); 1428 break; 1429 } 1430 1431 ql_dbg(ql_dbg_async, vha, 0x5012, 1432 "Port database changed %04x %04x %04x.\n", 1433 mb[1], mb[2], mb[3]); 1434 1435 /* 1436 * Mark all devices as missing so we will login again. 1437 */ 1438 atomic_set(&vha->loop_state, LOOP_UP); 1439 vha->scan.scan_retry = 0; 1440 1441 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1442 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1443 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1444 break; 1445 1446 case MBA_RSCN_UPDATE: /* State Change Registration */ 1447 /* Check if the Vport has issued a SCR */ 1448 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1449 break; 1450 /* Only handle SCNs for our Vport index. */ 1451 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1452 break; 1453 1454 ql_log(ql_log_warn, vha, 0x5013, 1455 "RSCN database changed -- %04x %04x %04x.\n", 1456 mb[1], mb[2], mb[3]); 1457 1458 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1459 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1460 | vha->d_id.b.al_pa; 1461 if (rscn_entry == host_pid) { 1462 ql_dbg(ql_dbg_async, vha, 0x5014, 1463 "Ignoring RSCN update to local host " 1464 "port ID (%06x).\n", host_pid); 1465 break; 1466 } 1467 1468 /* Ignore reserved bits from RSCN-payload. */ 1469 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 1470 1471 /* Skip RSCNs for virtual ports on the same physical port */ 1472 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 1473 break; 1474 1475 atomic_set(&vha->loop_down_timer, 0); 1476 vha->flags.management_server_logged_in = 0; 1477 { 1478 struct event_arg ea; 1479 1480 memset(&ea, 0, sizeof(ea)); 1481 ea.id.b24 = rscn_entry; 1482 ea.id.b.rsvd_1 = rscn_entry >> 24; 1483 qla2x00_handle_rscn(vha, &ea); 1484 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1485 } 1486 break; 1487 case MBA_CONGN_NOTI_RECV: 1488 if (!ha->flags.scm_enabled || 1489 mb[1] != QLA_CON_PRIMITIVE_RECEIVED) 1490 break; 1491 1492 if (mb[2] == QLA_CONGESTION_ARB_WARNING) { 1493 ql_dbg(ql_dbg_async, vha, 0x509b, 1494 "Congestion Warning %04x %04x.\n", mb[1], mb[2]); 1495 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { 1496 ql_log(ql_log_warn, vha, 0x509b, 1497 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); 1498 } 1499 break; 1500 /* case MBA_RIO_RESPONSE: */ 1501 case MBA_ZIO_RESPONSE: 1502 ql_dbg(ql_dbg_async, vha, 0x5015, 1503 "[R|Z]IO update completion.\n"); 1504 1505 if (IS_FWI2_CAPABLE(ha)) 1506 qla24xx_process_response_queue(vha, rsp); 1507 else 1508 qla2x00_process_response_queue(rsp); 1509 break; 1510 1511 case MBA_DISCARD_RND_FRAME: 1512 ql_dbg(ql_dbg_async, vha, 0x5016, 1513 "Discard RND Frame -- %04x %04x %04x.\n", 1514 mb[1], mb[2], mb[3]); 1515 vha->interface_err_cnt++; 1516 break; 1517 1518 case MBA_TRACE_NOTIFICATION: 1519 ql_dbg(ql_dbg_async, vha, 0x5017, 1520 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1521 break; 1522 1523 case MBA_ISP84XX_ALERT: 1524 ql_dbg(ql_dbg_async, vha, 0x5018, 1525 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1526 mb[1], mb[2], mb[3]); 1527 1528 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1529 switch (mb[1]) { 1530 case A84_PANIC_RECOVERY: 1531 ql_log(ql_log_info, vha, 0x5019, 1532 "Alert 84XX: panic recovery %04x %04x.\n", 1533 mb[2], mb[3]); 1534 break; 1535 case A84_OP_LOGIN_COMPLETE: 1536 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1537 ql_log(ql_log_info, vha, 0x501a, 1538 "Alert 84XX: firmware version %x.\n", 1539 ha->cs84xx->op_fw_version); 1540 break; 1541 case A84_DIAG_LOGIN_COMPLETE: 1542 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1543 ql_log(ql_log_info, vha, 0x501b, 1544 "Alert 84XX: diagnostic firmware version %x.\n", 1545 ha->cs84xx->diag_fw_version); 1546 break; 1547 case A84_GOLD_LOGIN_COMPLETE: 1548 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1549 ha->cs84xx->fw_update = 1; 1550 ql_log(ql_log_info, vha, 0x501c, 1551 "Alert 84XX: gold firmware version %x.\n", 1552 ha->cs84xx->gold_fw_version); 1553 break; 1554 default: 1555 ql_log(ql_log_warn, vha, 0x501d, 1556 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1557 mb[1], mb[2], mb[3]); 1558 } 1559 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1560 break; 1561 case MBA_DCBX_START: 1562 ql_dbg(ql_dbg_async, vha, 0x501e, 1563 "DCBX Started -- %04x %04x %04x.\n", 1564 mb[1], mb[2], mb[3]); 1565 break; 1566 case MBA_DCBX_PARAM_UPDATE: 1567 ql_dbg(ql_dbg_async, vha, 0x501f, 1568 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1569 mb[1], mb[2], mb[3]); 1570 break; 1571 case MBA_FCF_CONF_ERR: 1572 ql_dbg(ql_dbg_async, vha, 0x5020, 1573 "FCF Configuration Error -- %04x %04x %04x.\n", 1574 mb[1], mb[2], mb[3]); 1575 break; 1576 case MBA_IDC_NOTIFY: 1577 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1578 mb[4] = rd_reg_word(®24->mailbox4); 1579 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1580 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1581 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1582 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1583 /* 1584 * Extend loop down timer since port is active. 1585 */ 1586 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1587 atomic_set(&vha->loop_down_timer, 1588 LOOP_DOWN_TIME); 1589 qla2xxx_wake_dpc(vha); 1590 } 1591 } 1592 fallthrough; 1593 case MBA_IDC_COMPLETE: 1594 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1595 complete(&ha->lb_portup_comp); 1596 fallthrough; 1597 case MBA_IDC_TIME_EXT: 1598 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1599 IS_QLA8044(ha)) 1600 qla81xx_idc_event(vha, mb[0], mb[1]); 1601 break; 1602 1603 case MBA_IDC_AEN: 1604 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1605 vha->hw_err_cnt++; 1606 qla27xx_handle_8200_aen(vha, mb); 1607 } else if (IS_QLA83XX(ha)) { 1608 mb[4] = rd_reg_word(®24->mailbox4); 1609 mb[5] = rd_reg_word(®24->mailbox5); 1610 mb[6] = rd_reg_word(®24->mailbox6); 1611 mb[7] = rd_reg_word(®24->mailbox7); 1612 qla83xx_handle_8200_aen(vha, mb); 1613 } else { 1614 ql_dbg(ql_dbg_async, vha, 0x5052, 1615 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", 1616 mb[0], mb[1], mb[2], mb[3]); 1617 } 1618 break; 1619 1620 case MBA_DPORT_DIAGNOSTICS: 1621 ql_dbg(ql_dbg_async, vha, 0x5052, 1622 "D-Port Diagnostics: %04x %04x %04x %04x\n", 1623 mb[0], mb[1], mb[2], mb[3]); 1624 memcpy(vha->dport_data, mb, sizeof(vha->dport_data)); 1625 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1626 static char *results[] = { 1627 "start", "done(pass)", "done(error)", "undefined" }; 1628 static char *types[] = { 1629 "none", "dynamic", "static", "other" }; 1630 uint result = mb[1] >> 0 & 0x3; 1631 uint type = mb[1] >> 6 & 0x3; 1632 uint sw = mb[1] >> 15 & 0x1; 1633 ql_dbg(ql_dbg_async, vha, 0x5052, 1634 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", 1635 results[result], types[type], sw); 1636 if (result == 2) { 1637 static char *reasons[] = { 1638 "reserved", "unexpected reject", 1639 "unexpected phase", "retry exceeded", 1640 "timed out", "not supported", 1641 "user stopped" }; 1642 uint reason = mb[2] >> 0 & 0xf; 1643 uint phase = mb[2] >> 12 & 0xf; 1644 ql_dbg(ql_dbg_async, vha, 0x5052, 1645 "D-Port Diagnostics: reason=%s phase=%u \n", 1646 reason < 7 ? reasons[reason] : "other", 1647 phase >> 1); 1648 } 1649 } 1650 break; 1651 1652 case MBA_TEMPERATURE_ALERT: 1653 ql_dbg(ql_dbg_async, vha, 0x505e, 1654 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1655 break; 1656 1657 case MBA_TRANS_INSERT: 1658 ql_dbg(ql_dbg_async, vha, 0x5091, 1659 "Transceiver Insertion: %04x\n", mb[1]); 1660 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); 1661 break; 1662 1663 case MBA_TRANS_REMOVE: 1664 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); 1665 break; 1666 1667 default: 1668 ql_dbg(ql_dbg_async, vha, 0x5057, 1669 "Unknown AEN:%04x %04x %04x %04x\n", 1670 mb[0], mb[1], mb[2], mb[3]); 1671 } 1672 1673 qlt_async_event(mb[0], vha, mb); 1674 1675 if (!vha->vp_idx && ha->num_vhosts) 1676 qla2x00_alert_all_vps(rsp, mb); 1677 } 1678 1679 /** 1680 * qla2x00_process_completed_request() - Process a Fast Post response. 1681 * @vha: SCSI driver HA context 1682 * @req: request queue 1683 * @index: SRB index 1684 */ 1685 void 1686 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1687 struct req_que *req, uint32_t index) 1688 { 1689 srb_t *sp; 1690 struct qla_hw_data *ha = vha->hw; 1691 1692 /* Validate handle. */ 1693 if (index >= req->num_outstanding_cmds) { 1694 ql_log(ql_log_warn, vha, 0x3014, 1695 "Invalid SCSI command index (%x).\n", index); 1696 1697 if (IS_P3P_TYPE(ha)) 1698 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1699 else 1700 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1701 return; 1702 } 1703 1704 sp = req->outstanding_cmds[index]; 1705 if (sp) { 1706 /* Free outstanding command slot. */ 1707 req->outstanding_cmds[index] = NULL; 1708 1709 /* Save ISP completion status */ 1710 sp->done(sp, DID_OK << 16); 1711 } else { 1712 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1713 1714 if (IS_P3P_TYPE(ha)) 1715 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1716 else 1717 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1718 } 1719 } 1720 1721 srb_t * 1722 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1723 struct req_que *req, void *iocb) 1724 { 1725 struct qla_hw_data *ha = vha->hw; 1726 sts_entry_t *pkt = iocb; 1727 srb_t *sp; 1728 uint16_t index; 1729 1730 index = LSW(pkt->handle); 1731 if (index >= req->num_outstanding_cmds) { 1732 ql_log(ql_log_warn, vha, 0x5031, 1733 "%s: Invalid command index (%x) type %8ph.\n", 1734 func, index, iocb); 1735 if (IS_P3P_TYPE(ha)) 1736 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1737 else 1738 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1739 return NULL; 1740 } 1741 sp = req->outstanding_cmds[index]; 1742 if (!sp) { 1743 ql_log(ql_log_warn, vha, 0x5032, 1744 "%s: Invalid completion handle (%x) -- timed-out.\n", 1745 func, index); 1746 return NULL; 1747 } 1748 if (sp->handle != index) { 1749 ql_log(ql_log_warn, vha, 0x5033, 1750 "%s: SRB handle (%x) mismatch %x.\n", func, 1751 sp->handle, index); 1752 return NULL; 1753 } 1754 1755 req->outstanding_cmds[index] = NULL; 1756 return sp; 1757 } 1758 1759 static void 1760 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1761 struct mbx_entry *mbx) 1762 { 1763 const char func[] = "MBX-IOCB"; 1764 const char *type; 1765 fc_port_t *fcport; 1766 srb_t *sp; 1767 struct srb_iocb *lio; 1768 uint16_t *data; 1769 uint16_t status; 1770 1771 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1772 if (!sp) 1773 return; 1774 1775 lio = &sp->u.iocb_cmd; 1776 type = sp->name; 1777 fcport = sp->fcport; 1778 data = lio->u.logio.data; 1779 1780 data[0] = MBS_COMMAND_ERROR; 1781 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1782 QLA_LOGIO_LOGIN_RETRIED : 0; 1783 if (mbx->entry_status) { 1784 ql_dbg(ql_dbg_async, vha, 0x5043, 1785 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1786 "entry-status=%x status=%x state-flag=%x " 1787 "status-flags=%x.\n", type, sp->handle, 1788 fcport->d_id.b.domain, fcport->d_id.b.area, 1789 fcport->d_id.b.al_pa, mbx->entry_status, 1790 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1791 le16_to_cpu(mbx->status_flags)); 1792 1793 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1794 mbx, sizeof(*mbx)); 1795 1796 goto logio_done; 1797 } 1798 1799 status = le16_to_cpu(mbx->status); 1800 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1801 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1802 status = 0; 1803 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1804 ql_dbg(ql_dbg_async, vha, 0x5045, 1805 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1806 type, sp->handle, fcport->d_id.b.domain, 1807 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1808 le16_to_cpu(mbx->mb1)); 1809 1810 data[0] = MBS_COMMAND_COMPLETE; 1811 if (sp->type == SRB_LOGIN_CMD) { 1812 fcport->port_type = FCT_TARGET; 1813 if (le16_to_cpu(mbx->mb1) & BIT_0) 1814 fcport->port_type = FCT_INITIATOR; 1815 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1816 fcport->flags |= FCF_FCP2_DEVICE; 1817 } 1818 goto logio_done; 1819 } 1820 1821 data[0] = le16_to_cpu(mbx->mb0); 1822 switch (data[0]) { 1823 case MBS_PORT_ID_USED: 1824 data[1] = le16_to_cpu(mbx->mb1); 1825 break; 1826 case MBS_LOOP_ID_USED: 1827 break; 1828 default: 1829 data[0] = MBS_COMMAND_ERROR; 1830 break; 1831 } 1832 1833 ql_log(ql_log_warn, vha, 0x5046, 1834 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1835 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1836 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1837 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1838 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1839 le16_to_cpu(mbx->mb7)); 1840 1841 logio_done: 1842 sp->done(sp, 0); 1843 } 1844 1845 static void 1846 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1847 struct mbx_24xx_entry *pkt) 1848 { 1849 const char func[] = "MBX-IOCB2"; 1850 struct qla_hw_data *ha = vha->hw; 1851 srb_t *sp; 1852 struct srb_iocb *si; 1853 u16 sz, i; 1854 int res; 1855 1856 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1857 if (!sp) 1858 return; 1859 1860 if (sp->type == SRB_SCSI_CMD || 1861 sp->type == SRB_NVME_CMD || 1862 sp->type == SRB_TM_CMD) { 1863 ql_log(ql_log_warn, vha, 0x509d, 1864 "Inconsistent event entry type %d\n", sp->type); 1865 if (IS_P3P_TYPE(ha)) 1866 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1867 else 1868 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1869 return; 1870 } 1871 1872 si = &sp->u.iocb_cmd; 1873 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); 1874 1875 for (i = 0; i < sz; i++) 1876 si->u.mbx.in_mb[i] = pkt->mb[i]; 1877 1878 res = (si->u.mbx.in_mb[0] & MBS_MASK); 1879 1880 sp->done(sp, res); 1881 } 1882 1883 static void 1884 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1885 struct nack_to_isp *pkt) 1886 { 1887 const char func[] = "nack"; 1888 srb_t *sp; 1889 int res = 0; 1890 1891 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1892 if (!sp) 1893 return; 1894 1895 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) 1896 res = QLA_FUNCTION_FAILED; 1897 1898 sp->done(sp, res); 1899 } 1900 1901 static void 1902 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1903 sts_entry_t *pkt, int iocb_type) 1904 { 1905 const char func[] = "CT_IOCB"; 1906 const char *type; 1907 srb_t *sp; 1908 struct bsg_job *bsg_job; 1909 struct fc_bsg_reply *bsg_reply; 1910 uint16_t comp_status; 1911 int res = 0; 1912 1913 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1914 if (!sp) 1915 return; 1916 1917 switch (sp->type) { 1918 case SRB_CT_CMD: 1919 bsg_job = sp->u.bsg_job; 1920 bsg_reply = bsg_job->reply; 1921 1922 type = "ct pass-through"; 1923 1924 comp_status = le16_to_cpu(pkt->comp_status); 1925 1926 /* 1927 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1928 * fc payload to the caller 1929 */ 1930 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1931 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1932 1933 if (comp_status != CS_COMPLETE) { 1934 if (comp_status == CS_DATA_UNDERRUN) { 1935 res = DID_OK << 16; 1936 bsg_reply->reply_payload_rcv_len = 1937 le16_to_cpu(pkt->rsp_info_len); 1938 1939 ql_log(ql_log_warn, vha, 0x5048, 1940 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", 1941 type, comp_status, 1942 bsg_reply->reply_payload_rcv_len); 1943 } else { 1944 ql_log(ql_log_warn, vha, 0x5049, 1945 "CT pass-through-%s error comp_status=0x%x.\n", 1946 type, comp_status); 1947 res = DID_ERROR << 16; 1948 bsg_reply->reply_payload_rcv_len = 0; 1949 } 1950 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1951 pkt, sizeof(*pkt)); 1952 } else { 1953 res = DID_OK << 16; 1954 bsg_reply->reply_payload_rcv_len = 1955 bsg_job->reply_payload.payload_len; 1956 bsg_job->reply_len = 0; 1957 } 1958 break; 1959 case SRB_CT_PTHRU_CMD: 1960 /* 1961 * borrowing sts_entry_24xx.comp_status. 1962 * same location as ct_entry_24xx.comp_status 1963 */ 1964 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 1965 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1966 sp->name); 1967 break; 1968 } 1969 1970 sp->done(sp, res); 1971 } 1972 1973 static void 1974 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1975 struct sts_entry_24xx *pkt, int iocb_type) 1976 { 1977 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; 1978 const char func[] = "ELS_CT_IOCB"; 1979 const char *type; 1980 srb_t *sp; 1981 struct bsg_job *bsg_job; 1982 struct fc_bsg_reply *bsg_reply; 1983 uint16_t comp_status; 1984 uint32_t fw_status[3]; 1985 int res; 1986 struct srb_iocb *els; 1987 1988 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1989 if (!sp) 1990 return; 1991 1992 type = NULL; 1993 switch (sp->type) { 1994 case SRB_ELS_CMD_RPT: 1995 case SRB_ELS_CMD_HST: 1996 type = "els"; 1997 break; 1998 case SRB_CT_CMD: 1999 type = "ct pass-through"; 2000 break; 2001 case SRB_ELS_DCMD: 2002 type = "Driver ELS logo"; 2003 if (iocb_type != ELS_IOCB_TYPE) { 2004 ql_dbg(ql_dbg_user, vha, 0x5047, 2005 "Completing %s: (%p) type=%d.\n", 2006 type, sp, sp->type); 2007 sp->done(sp, 0); 2008 return; 2009 } 2010 break; 2011 case SRB_CT_PTHRU_CMD: 2012 /* borrowing sts_entry_24xx.comp_status. 2013 same location as ct_entry_24xx.comp_status 2014 */ 2015 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, 2016 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 2017 sp->name); 2018 sp->done(sp, res); 2019 return; 2020 default: 2021 ql_dbg(ql_dbg_user, vha, 0x503e, 2022 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 2023 return; 2024 } 2025 2026 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 2027 fw_status[1] = le32_to_cpu(ese->error_subcode_1); 2028 fw_status[2] = le32_to_cpu(ese->error_subcode_2); 2029 2030 if (iocb_type == ELS_IOCB_TYPE) { 2031 els = &sp->u.iocb_cmd; 2032 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); 2033 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); 2034 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); 2035 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); 2036 if (comp_status == CS_COMPLETE) { 2037 res = DID_OK << 16; 2038 } else { 2039 if (comp_status == CS_DATA_UNDERRUN) { 2040 res = DID_OK << 16; 2041 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( 2042 ese->total_byte_count)); 2043 } else { 2044 els->u.els_plogi.len = 0; 2045 res = DID_ERROR << 16; 2046 } 2047 } 2048 ql_dbg(ql_dbg_disc, vha, 0x503f, 2049 "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", 2050 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2051 le32_to_cpu(ese->total_byte_count)); 2052 goto els_ct_done; 2053 } 2054 2055 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 2056 * fc payload to the caller 2057 */ 2058 bsg_job = sp->u.bsg_job; 2059 bsg_reply = bsg_job->reply; 2060 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 2061 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 2062 2063 if (comp_status != CS_COMPLETE) { 2064 if (comp_status == CS_DATA_UNDERRUN) { 2065 res = DID_OK << 16; 2066 bsg_reply->reply_payload_rcv_len = 2067 le32_to_cpu(ese->total_byte_count); 2068 2069 ql_dbg(ql_dbg_user, vha, 0x503f, 2070 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2071 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 2072 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2073 le32_to_cpu(ese->total_byte_count)); 2074 } else { 2075 ql_dbg(ql_dbg_user, vha, 0x5040, 2076 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2077 "error subcode 1=0x%x error subcode 2=0x%x.\n", 2078 type, sp->handle, comp_status, 2079 le32_to_cpu(ese->error_subcode_1), 2080 le32_to_cpu(ese->error_subcode_2)); 2081 res = DID_ERROR << 16; 2082 bsg_reply->reply_payload_rcv_len = 0; 2083 } 2084 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), 2085 fw_status, sizeof(fw_status)); 2086 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 2087 pkt, sizeof(*pkt)); 2088 } 2089 else { 2090 res = DID_OK << 16; 2091 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 2092 bsg_job->reply_len = 0; 2093 } 2094 els_ct_done: 2095 2096 sp->done(sp, res); 2097 } 2098 2099 static void 2100 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 2101 struct logio_entry_24xx *logio) 2102 { 2103 const char func[] = "LOGIO-IOCB"; 2104 const char *type; 2105 fc_port_t *fcport; 2106 srb_t *sp; 2107 struct srb_iocb *lio; 2108 uint16_t *data; 2109 uint32_t iop[2]; 2110 2111 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 2112 if (!sp) 2113 return; 2114 2115 lio = &sp->u.iocb_cmd; 2116 type = sp->name; 2117 fcport = sp->fcport; 2118 data = lio->u.logio.data; 2119 2120 data[0] = MBS_COMMAND_ERROR; 2121 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 2122 QLA_LOGIO_LOGIN_RETRIED : 0; 2123 if (logio->entry_status) { 2124 ql_log(ql_log_warn, fcport->vha, 0x5034, 2125 "Async-%s error entry - %8phC hdl=%x" 2126 "portid=%02x%02x%02x entry-status=%x.\n", 2127 type, fcport->port_name, sp->handle, fcport->d_id.b.domain, 2128 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2129 logio->entry_status); 2130 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 2131 logio, sizeof(*logio)); 2132 2133 goto logio_done; 2134 } 2135 2136 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 2137 ql_dbg(ql_dbg_async, sp->vha, 0x5036, 2138 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", 2139 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2140 le32_to_cpu(logio->io_parameter[0])); 2141 2142 vha->hw->exch_starvation = 0; 2143 data[0] = MBS_COMMAND_COMPLETE; 2144 2145 if (sp->type == SRB_PRLI_CMD) { 2146 lio->u.logio.iop[0] = 2147 le32_to_cpu(logio->io_parameter[0]); 2148 lio->u.logio.iop[1] = 2149 le32_to_cpu(logio->io_parameter[1]); 2150 goto logio_done; 2151 } 2152 2153 if (sp->type != SRB_LOGIN_CMD) 2154 goto logio_done; 2155 2156 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2157 if (iop[0] & BIT_4) { 2158 fcport->port_type = FCT_TARGET; 2159 if (iop[0] & BIT_8) 2160 fcport->flags |= FCF_FCP2_DEVICE; 2161 } else if (iop[0] & BIT_5) 2162 fcport->port_type = FCT_INITIATOR; 2163 2164 if (iop[0] & BIT_7) 2165 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2166 2167 if (logio->io_parameter[7] || logio->io_parameter[8]) 2168 fcport->supported_classes |= FC_COS_CLASS2; 2169 if (logio->io_parameter[9] || logio->io_parameter[10]) 2170 fcport->supported_classes |= FC_COS_CLASS3; 2171 2172 goto logio_done; 2173 } 2174 2175 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2176 iop[1] = le32_to_cpu(logio->io_parameter[1]); 2177 lio->u.logio.iop[0] = iop[0]; 2178 lio->u.logio.iop[1] = iop[1]; 2179 switch (iop[0]) { 2180 case LSC_SCODE_PORTID_USED: 2181 data[0] = MBS_PORT_ID_USED; 2182 data[1] = LSW(iop[1]); 2183 break; 2184 case LSC_SCODE_NPORT_USED: 2185 data[0] = MBS_LOOP_ID_USED; 2186 break; 2187 case LSC_SCODE_CMD_FAILED: 2188 if (iop[1] == 0x0606) { 2189 /* 2190 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, 2191 * Target side acked. 2192 */ 2193 data[0] = MBS_COMMAND_COMPLETE; 2194 goto logio_done; 2195 } 2196 data[0] = MBS_COMMAND_ERROR; 2197 break; 2198 case LSC_SCODE_NOXCB: 2199 vha->hw->exch_starvation++; 2200 if (vha->hw->exch_starvation > 5) { 2201 ql_log(ql_log_warn, vha, 0xd046, 2202 "Exchange starvation. Resetting RISC\n"); 2203 2204 vha->hw->exch_starvation = 0; 2205 2206 if (IS_P3P_TYPE(vha->hw)) 2207 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2208 else 2209 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2210 qla2xxx_wake_dpc(vha); 2211 } 2212 fallthrough; 2213 default: 2214 data[0] = MBS_COMMAND_ERROR; 2215 break; 2216 } 2217 2218 ql_log(ql_log_warn, sp->vha, 0x5037, 2219 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", 2220 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2221 le16_to_cpu(logio->comp_status), 2222 le32_to_cpu(logio->io_parameter[0]), 2223 le32_to_cpu(logio->io_parameter[1])); 2224 2225 logio_done: 2226 sp->done(sp, 0); 2227 } 2228 2229 static void 2230 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 2231 { 2232 const char func[] = "TMF-IOCB"; 2233 const char *type; 2234 fc_port_t *fcport; 2235 srb_t *sp; 2236 struct srb_iocb *iocb; 2237 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2238 u16 comp_status; 2239 2240 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 2241 if (!sp) 2242 return; 2243 2244 comp_status = le16_to_cpu(sts->comp_status); 2245 iocb = &sp->u.iocb_cmd; 2246 type = sp->name; 2247 fcport = sp->fcport; 2248 iocb->u.tmf.data = QLA_SUCCESS; 2249 2250 if (sts->entry_status) { 2251 ql_log(ql_log_warn, fcport->vha, 0x5038, 2252 "Async-%s error - hdl=%x entry-status(%x).\n", 2253 type, sp->handle, sts->entry_status); 2254 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2255 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 2256 ql_log(ql_log_warn, fcport->vha, 0x5039, 2257 "Async-%s error - hdl=%x completion status(%x).\n", 2258 type, sp->handle, comp_status); 2259 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2260 } else if ((le16_to_cpu(sts->scsi_status) & 2261 SS_RESPONSE_INFO_LEN_VALID)) { 2262 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2263 ql_log(ql_log_warn, fcport->vha, 0x503b, 2264 "Async-%s error - hdl=%x not enough response(%d).\n", 2265 type, sp->handle, sts->rsp_data_len); 2266 } else if (sts->data[3]) { 2267 ql_log(ql_log_warn, fcport->vha, 0x503c, 2268 "Async-%s error - hdl=%x response(%x).\n", 2269 type, sp->handle, sts->data[3]); 2270 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2271 } 2272 } 2273 2274 switch (comp_status) { 2275 case CS_PORT_LOGGED_OUT: 2276 case CS_PORT_CONFIG_CHG: 2277 case CS_PORT_BUSY: 2278 case CS_INCOMPLETE: 2279 case CS_PORT_UNAVAILABLE: 2280 case CS_TIMEOUT: 2281 case CS_RESET: 2282 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2283 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2284 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n", 2285 fcport->d_id.b.domain, fcport->d_id.b.area, 2286 fcport->d_id.b.al_pa, 2287 port_state_str[FCS_ONLINE], 2288 comp_status); 2289 2290 qlt_schedule_sess_for_deletion(fcport); 2291 } 2292 break; 2293 2294 default: 2295 break; 2296 } 2297 2298 if (iocb->u.tmf.data != QLA_SUCCESS) 2299 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, 2300 sts, sizeof(*sts)); 2301 2302 sp->done(sp, 0); 2303 } 2304 2305 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2306 void *tsk, srb_t *sp) 2307 { 2308 fc_port_t *fcport; 2309 struct srb_iocb *iocb; 2310 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2311 uint16_t state_flags; 2312 struct nvmefc_fcp_req *fd; 2313 uint16_t ret = QLA_SUCCESS; 2314 __le16 comp_status = sts->comp_status; 2315 int logit = 0; 2316 2317 iocb = &sp->u.iocb_cmd; 2318 fcport = sp->fcport; 2319 iocb->u.nvme.comp_status = comp_status; 2320 state_flags = le16_to_cpu(sts->state_flags); 2321 fd = iocb->u.nvme.desc; 2322 2323 if (unlikely(iocb->u.nvme.aen_op)) 2324 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); 2325 else 2326 sp->qpair->cmd_completion_cnt++; 2327 2328 if (unlikely(comp_status != CS_COMPLETE)) 2329 logit = 1; 2330 2331 fd->transferred_length = fd->payload_length - 2332 le32_to_cpu(sts->residual_len); 2333 2334 /* 2335 * State flags: Bit 6 and 0. 2336 * If 0 is set, we don't care about 6. 2337 * both cases resp was dma'd to host buffer 2338 * if both are 0, that is good path case. 2339 * if six is set and 0 is clear, we need to 2340 * copy resp data from status iocb to resp buffer. 2341 */ 2342 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { 2343 iocb->u.nvme.rsp_pyld_len = 0; 2344 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == 2345 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { 2346 /* Response already DMA'd to fd->rspaddr. */ 2347 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2348 } else if ((state_flags & SF_FCP_RSP_DMA)) { 2349 /* 2350 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this 2351 * as an error. 2352 */ 2353 iocb->u.nvme.rsp_pyld_len = 0; 2354 fd->transferred_length = 0; 2355 ql_dbg(ql_dbg_io, fcport->vha, 0x307a, 2356 "Unexpected values in NVMe_RSP IU.\n"); 2357 logit = 1; 2358 } else if (state_flags & SF_NVME_ERSP) { 2359 uint32_t *inbuf, *outbuf; 2360 uint16_t iter; 2361 2362 inbuf = (uint32_t *)&sts->nvme_ersp_data; 2363 outbuf = (uint32_t *)fd->rspaddr; 2364 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2365 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > 2366 sizeof(struct nvme_fc_ersp_iu))) { 2367 if (ql_mask_match(ql_dbg_io)) { 2368 WARN_ONCE(1, "Unexpected response payload length %u.\n", 2369 iocb->u.nvme.rsp_pyld_len); 2370 ql_log(ql_log_warn, fcport->vha, 0x5100, 2371 "Unexpected response payload length %u.\n", 2372 iocb->u.nvme.rsp_pyld_len); 2373 } 2374 iocb->u.nvme.rsp_pyld_len = 2375 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); 2376 } 2377 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; 2378 for (; iter; iter--) 2379 *outbuf++ = swab32(*inbuf++); 2380 } 2381 2382 if (state_flags & SF_NVME_ERSP) { 2383 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; 2384 u32 tgt_xfer_len; 2385 2386 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); 2387 if (fd->transferred_length != tgt_xfer_len) { 2388 ql_log(ql_log_warn, fcport->vha, 0x3079, 2389 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", 2390 tgt_xfer_len, fd->transferred_length); 2391 logit = 1; 2392 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { 2393 /* 2394 * Do not log if this is just an underflow and there 2395 * is no data loss. 2396 */ 2397 logit = 0; 2398 } 2399 } 2400 2401 if (unlikely(logit)) 2402 ql_log(ql_log_warn, fcport->vha, 0x5060, 2403 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", 2404 sp->name, sp->handle, comp_status, 2405 fd->transferred_length, le32_to_cpu(sts->residual_len), 2406 sts->ox_id); 2407 2408 /* 2409 * If transport error then Failure (HBA rejects request) 2410 * otherwise transport will handle. 2411 */ 2412 switch (le16_to_cpu(comp_status)) { 2413 case CS_COMPLETE: 2414 break; 2415 2416 case CS_RESET: 2417 case CS_PORT_UNAVAILABLE: 2418 case CS_PORT_LOGGED_OUT: 2419 fcport->nvme_flag |= NVME_FLAG_RESETTING; 2420 fallthrough; 2421 case CS_ABORTED: 2422 case CS_PORT_BUSY: 2423 fd->transferred_length = 0; 2424 iocb->u.nvme.rsp_pyld_len = 0; 2425 ret = QLA_ABORTED; 2426 break; 2427 case CS_DATA_UNDERRUN: 2428 break; 2429 default: 2430 ret = QLA_FUNCTION_FAILED; 2431 break; 2432 } 2433 sp->done(sp, ret); 2434 } 2435 2436 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 2437 struct vp_ctrl_entry_24xx *vce) 2438 { 2439 const char func[] = "CTRLVP-IOCB"; 2440 srb_t *sp; 2441 int rval = QLA_SUCCESS; 2442 2443 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 2444 if (!sp) 2445 return; 2446 2447 if (vce->entry_status != 0) { 2448 ql_dbg(ql_dbg_vport, vha, 0x10c4, 2449 "%s: Failed to complete IOCB -- error status (%x)\n", 2450 sp->name, vce->entry_status); 2451 rval = QLA_FUNCTION_FAILED; 2452 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 2453 ql_dbg(ql_dbg_vport, vha, 0x10c5, 2454 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 2455 sp->name, le16_to_cpu(vce->comp_status), 2456 le16_to_cpu(vce->vp_idx_failed)); 2457 rval = QLA_FUNCTION_FAILED; 2458 } else { 2459 ql_dbg(ql_dbg_vport, vha, 0x10c6, 2460 "Done %s.\n", __func__); 2461 } 2462 2463 sp->rc = rval; 2464 sp->done(sp, rval); 2465 } 2466 2467 /* Process a single response queue entry. */ 2468 static void qla2x00_process_response_entry(struct scsi_qla_host *vha, 2469 struct rsp_que *rsp, 2470 sts_entry_t *pkt) 2471 { 2472 sts21_entry_t *sts21_entry; 2473 sts22_entry_t *sts22_entry; 2474 uint16_t handle_cnt; 2475 uint16_t cnt; 2476 2477 switch (pkt->entry_type) { 2478 case STATUS_TYPE: 2479 qla2x00_status_entry(vha, rsp, pkt); 2480 break; 2481 case STATUS_TYPE_21: 2482 sts21_entry = (sts21_entry_t *)pkt; 2483 handle_cnt = sts21_entry->handle_count; 2484 for (cnt = 0; cnt < handle_cnt; cnt++) 2485 qla2x00_process_completed_request(vha, rsp->req, 2486 sts21_entry->handle[cnt]); 2487 break; 2488 case STATUS_TYPE_22: 2489 sts22_entry = (sts22_entry_t *)pkt; 2490 handle_cnt = sts22_entry->handle_count; 2491 for (cnt = 0; cnt < handle_cnt; cnt++) 2492 qla2x00_process_completed_request(vha, rsp->req, 2493 sts22_entry->handle[cnt]); 2494 break; 2495 case STATUS_CONT_TYPE: 2496 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2497 break; 2498 case MBX_IOCB_TYPE: 2499 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); 2500 break; 2501 case CT_IOCB_TYPE: 2502 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2503 break; 2504 default: 2505 /* Type Not Supported. */ 2506 ql_log(ql_log_warn, vha, 0x504a, 2507 "Received unknown response pkt type %x entry status=%x.\n", 2508 pkt->entry_type, pkt->entry_status); 2509 break; 2510 } 2511 } 2512 2513 /** 2514 * qla2x00_process_response_queue() - Process response queue entries. 2515 * @rsp: response queue 2516 */ 2517 void 2518 qla2x00_process_response_queue(struct rsp_que *rsp) 2519 { 2520 struct scsi_qla_host *vha; 2521 struct qla_hw_data *ha = rsp->hw; 2522 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2523 sts_entry_t *pkt; 2524 2525 vha = pci_get_drvdata(ha->pdev); 2526 2527 if (!vha->flags.online) 2528 return; 2529 2530 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2531 pkt = (sts_entry_t *)rsp->ring_ptr; 2532 2533 rsp->ring_index++; 2534 if (rsp->ring_index == rsp->length) { 2535 rsp->ring_index = 0; 2536 rsp->ring_ptr = rsp->ring; 2537 } else { 2538 rsp->ring_ptr++; 2539 } 2540 2541 if (pkt->entry_status != 0) { 2542 qla2x00_error_entry(vha, rsp, pkt); 2543 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2544 wmb(); 2545 continue; 2546 } 2547 2548 qla2x00_process_response_entry(vha, rsp, pkt); 2549 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2550 wmb(); 2551 } 2552 2553 /* Adjust ring index */ 2554 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 2555 } 2556 2557 static inline void 2558 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2559 uint32_t sense_len, struct rsp_que *rsp, int res) 2560 { 2561 struct scsi_qla_host *vha = sp->vha; 2562 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2563 uint32_t track_sense_len; 2564 2565 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2566 sense_len = SCSI_SENSE_BUFFERSIZE; 2567 2568 SET_CMD_SENSE_LEN(sp, sense_len); 2569 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2570 track_sense_len = sense_len; 2571 2572 if (sense_len > par_sense_len) 2573 sense_len = par_sense_len; 2574 2575 memcpy(cp->sense_buffer, sense_data, sense_len); 2576 2577 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2578 track_sense_len -= sense_len; 2579 SET_CMD_SENSE_LEN(sp, track_sense_len); 2580 2581 if (track_sense_len != 0) { 2582 rsp->status_srb = sp; 2583 cp->result = res; 2584 } 2585 2586 if (sense_len) { 2587 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 2588 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 2589 sp->vha->host_no, cp->device->id, cp->device->lun, 2590 cp); 2591 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 2592 cp->sense_buffer, sense_len); 2593 } 2594 } 2595 2596 struct scsi_dif_tuple { 2597 __be16 guard; /* Checksum */ 2598 __be16 app_tag; /* APPL identifier */ 2599 __be32 ref_tag; /* Target LBA or indirect LBA */ 2600 }; 2601 2602 /* 2603 * Checks the guard or meta-data for the type of error 2604 * detected by the HBA. In case of errors, we set the 2605 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 2606 * to indicate to the kernel that the HBA detected error. 2607 */ 2608 static inline int 2609 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 2610 { 2611 struct scsi_qla_host *vha = sp->vha; 2612 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2613 uint8_t *ap = &sts24->data[12]; 2614 uint8_t *ep = &sts24->data[20]; 2615 uint32_t e_ref_tag, a_ref_tag; 2616 uint16_t e_app_tag, a_app_tag; 2617 uint16_t e_guard, a_guard; 2618 2619 /* 2620 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 2621 * would make guard field appear at offset 2 2622 */ 2623 a_guard = get_unaligned_le16(ap + 2); 2624 a_app_tag = get_unaligned_le16(ap + 0); 2625 a_ref_tag = get_unaligned_le32(ap + 4); 2626 e_guard = get_unaligned_le16(ep + 2); 2627 e_app_tag = get_unaligned_le16(ep + 0); 2628 e_ref_tag = get_unaligned_le32(ep + 4); 2629 2630 ql_dbg(ql_dbg_io, vha, 0x3023, 2631 "iocb(s) %p Returned STATUS.\n", sts24); 2632 2633 ql_dbg(ql_dbg_io, vha, 0x3024, 2634 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 2635 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 2636 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 2637 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 2638 a_app_tag, e_app_tag, a_guard, e_guard); 2639 2640 /* 2641 * Ignore sector if: 2642 * For type 3: ref & app tag is all 'f's 2643 * For type 0,1,2: app tag is all 'f's 2644 */ 2645 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && 2646 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || 2647 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { 2648 uint32_t blocks_done, resid; 2649 sector_t lba_s = scsi_get_lba(cmd); 2650 2651 /* 2TB boundary case covered automatically with this */ 2652 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 2653 2654 resid = scsi_bufflen(cmd) - (blocks_done * 2655 cmd->device->sector_size); 2656 2657 scsi_set_resid(cmd, resid); 2658 cmd->result = DID_OK << 16; 2659 2660 /* Update protection tag */ 2661 if (scsi_prot_sg_count(cmd)) { 2662 uint32_t i, j = 0, k = 0, num_ent; 2663 struct scatterlist *sg; 2664 struct t10_pi_tuple *spt; 2665 2666 /* Patch the corresponding protection tags */ 2667 scsi_for_each_prot_sg(cmd, sg, 2668 scsi_prot_sg_count(cmd), i) { 2669 num_ent = sg_dma_len(sg) / 8; 2670 if (k + num_ent < blocks_done) { 2671 k += num_ent; 2672 continue; 2673 } 2674 j = blocks_done - k - 1; 2675 k = blocks_done; 2676 break; 2677 } 2678 2679 if (k != blocks_done) { 2680 ql_log(ql_log_warn, vha, 0x302f, 2681 "unexpected tag values tag:lba=%x:%llx)\n", 2682 e_ref_tag, (unsigned long long)lba_s); 2683 return 1; 2684 } 2685 2686 spt = page_address(sg_page(sg)) + sg->offset; 2687 spt += j; 2688 2689 spt->app_tag = T10_PI_APP_ESCAPE; 2690 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 2691 spt->ref_tag = T10_PI_REF_ESCAPE; 2692 } 2693 2694 return 0; 2695 } 2696 2697 /* check guard */ 2698 if (e_guard != a_guard) { 2699 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2700 set_host_byte(cmd, DID_ABORT); 2701 return 1; 2702 } 2703 2704 /* check ref tag */ 2705 if (e_ref_tag != a_ref_tag) { 2706 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2707 set_host_byte(cmd, DID_ABORT); 2708 return 1; 2709 } 2710 2711 /* check appl tag */ 2712 if (e_app_tag != a_app_tag) { 2713 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2714 set_host_byte(cmd, DID_ABORT); 2715 return 1; 2716 } 2717 2718 return 1; 2719 } 2720 2721 static void 2722 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 2723 struct req_que *req, uint32_t index) 2724 { 2725 struct qla_hw_data *ha = vha->hw; 2726 srb_t *sp; 2727 uint16_t comp_status; 2728 uint16_t scsi_status; 2729 uint16_t thread_id; 2730 uint32_t rval = EXT_STATUS_OK; 2731 struct bsg_job *bsg_job = NULL; 2732 struct fc_bsg_request *bsg_request; 2733 struct fc_bsg_reply *bsg_reply; 2734 sts_entry_t *sts = pkt; 2735 struct sts_entry_24xx *sts24 = pkt; 2736 2737 /* Validate handle. */ 2738 if (index >= req->num_outstanding_cmds) { 2739 ql_log(ql_log_warn, vha, 0x70af, 2740 "Invalid SCSI completion handle 0x%x.\n", index); 2741 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2742 return; 2743 } 2744 2745 sp = req->outstanding_cmds[index]; 2746 if (!sp) { 2747 ql_log(ql_log_warn, vha, 0x70b0, 2748 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 2749 req->id, index); 2750 2751 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2752 return; 2753 } 2754 2755 /* Free outstanding command slot. */ 2756 req->outstanding_cmds[index] = NULL; 2757 bsg_job = sp->u.bsg_job; 2758 bsg_request = bsg_job->request; 2759 bsg_reply = bsg_job->reply; 2760 2761 if (IS_FWI2_CAPABLE(ha)) { 2762 comp_status = le16_to_cpu(sts24->comp_status); 2763 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2764 } else { 2765 comp_status = le16_to_cpu(sts->comp_status); 2766 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2767 } 2768 2769 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 2770 switch (comp_status) { 2771 case CS_COMPLETE: 2772 if (scsi_status == 0) { 2773 bsg_reply->reply_payload_rcv_len = 2774 bsg_job->reply_payload.payload_len; 2775 vha->qla_stats.input_bytes += 2776 bsg_reply->reply_payload_rcv_len; 2777 vha->qla_stats.input_requests++; 2778 rval = EXT_STATUS_OK; 2779 } 2780 goto done; 2781 2782 case CS_DATA_OVERRUN: 2783 ql_dbg(ql_dbg_user, vha, 0x70b1, 2784 "Command completed with data overrun thread_id=%d\n", 2785 thread_id); 2786 rval = EXT_STATUS_DATA_OVERRUN; 2787 break; 2788 2789 case CS_DATA_UNDERRUN: 2790 ql_dbg(ql_dbg_user, vha, 0x70b2, 2791 "Command completed with data underrun thread_id=%d\n", 2792 thread_id); 2793 rval = EXT_STATUS_DATA_UNDERRUN; 2794 break; 2795 case CS_BIDIR_RD_OVERRUN: 2796 ql_dbg(ql_dbg_user, vha, 0x70b3, 2797 "Command completed with read data overrun thread_id=%d\n", 2798 thread_id); 2799 rval = EXT_STATUS_DATA_OVERRUN; 2800 break; 2801 2802 case CS_BIDIR_RD_WR_OVERRUN: 2803 ql_dbg(ql_dbg_user, vha, 0x70b4, 2804 "Command completed with read and write data overrun " 2805 "thread_id=%d\n", thread_id); 2806 rval = EXT_STATUS_DATA_OVERRUN; 2807 break; 2808 2809 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 2810 ql_dbg(ql_dbg_user, vha, 0x70b5, 2811 "Command completed with read data over and write data " 2812 "underrun thread_id=%d\n", thread_id); 2813 rval = EXT_STATUS_DATA_OVERRUN; 2814 break; 2815 2816 case CS_BIDIR_RD_UNDERRUN: 2817 ql_dbg(ql_dbg_user, vha, 0x70b6, 2818 "Command completed with read data underrun " 2819 "thread_id=%d\n", thread_id); 2820 rval = EXT_STATUS_DATA_UNDERRUN; 2821 break; 2822 2823 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 2824 ql_dbg(ql_dbg_user, vha, 0x70b7, 2825 "Command completed with read data under and write data " 2826 "overrun thread_id=%d\n", thread_id); 2827 rval = EXT_STATUS_DATA_UNDERRUN; 2828 break; 2829 2830 case CS_BIDIR_RD_WR_UNDERRUN: 2831 ql_dbg(ql_dbg_user, vha, 0x70b8, 2832 "Command completed with read and write data underrun " 2833 "thread_id=%d\n", thread_id); 2834 rval = EXT_STATUS_DATA_UNDERRUN; 2835 break; 2836 2837 case CS_BIDIR_DMA: 2838 ql_dbg(ql_dbg_user, vha, 0x70b9, 2839 "Command completed with data DMA error thread_id=%d\n", 2840 thread_id); 2841 rval = EXT_STATUS_DMA_ERR; 2842 break; 2843 2844 case CS_TIMEOUT: 2845 ql_dbg(ql_dbg_user, vha, 0x70ba, 2846 "Command completed with timeout thread_id=%d\n", 2847 thread_id); 2848 rval = EXT_STATUS_TIMEOUT; 2849 break; 2850 default: 2851 ql_dbg(ql_dbg_user, vha, 0x70bb, 2852 "Command completed with completion status=0x%x " 2853 "thread_id=%d\n", comp_status, thread_id); 2854 rval = EXT_STATUS_ERR; 2855 break; 2856 } 2857 bsg_reply->reply_payload_rcv_len = 0; 2858 2859 done: 2860 /* Return the vendor specific reply to API */ 2861 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2862 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2863 /* Always return DID_OK, bsg will send the vendor specific response 2864 * in this case only */ 2865 sp->done(sp, DID_OK << 16); 2866 2867 } 2868 2869 /** 2870 * qla2x00_status_entry() - Process a Status IOCB entry. 2871 * @vha: SCSI driver HA context 2872 * @rsp: response queue 2873 * @pkt: Entry pointer 2874 */ 2875 static void 2876 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2877 { 2878 srb_t *sp; 2879 fc_port_t *fcport; 2880 struct scsi_cmnd *cp; 2881 sts_entry_t *sts = pkt; 2882 struct sts_entry_24xx *sts24 = pkt; 2883 uint16_t comp_status; 2884 uint16_t scsi_status; 2885 uint16_t ox_id; 2886 uint8_t lscsi_status; 2887 int32_t resid; 2888 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2889 fw_resid_len; 2890 uint8_t *rsp_info, *sense_data; 2891 struct qla_hw_data *ha = vha->hw; 2892 uint32_t handle; 2893 uint16_t que; 2894 struct req_que *req; 2895 int logit = 1; 2896 int res = 0; 2897 uint16_t state_flags = 0; 2898 uint16_t sts_qual = 0; 2899 2900 if (IS_FWI2_CAPABLE(ha)) { 2901 comp_status = le16_to_cpu(sts24->comp_status); 2902 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2903 state_flags = le16_to_cpu(sts24->state_flags); 2904 } else { 2905 comp_status = le16_to_cpu(sts->comp_status); 2906 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2907 } 2908 handle = (uint32_t) LSW(sts->handle); 2909 que = MSW(sts->handle); 2910 req = ha->req_q_map[que]; 2911 2912 /* Check for invalid queue pointer */ 2913 if (req == NULL || 2914 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2915 ql_dbg(ql_dbg_io, vha, 0x3059, 2916 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2917 "que=%u.\n", sts->handle, req, que); 2918 return; 2919 } 2920 2921 /* Validate handle. */ 2922 if (handle < req->num_outstanding_cmds) { 2923 sp = req->outstanding_cmds[handle]; 2924 if (!sp) { 2925 ql_dbg(ql_dbg_io, vha, 0x3075, 2926 "%s(%ld): Already returned command for status handle (0x%x).\n", 2927 __func__, vha->host_no, sts->handle); 2928 return; 2929 } 2930 } else { 2931 ql_dbg(ql_dbg_io, vha, 0x3017, 2932 "Invalid status handle, out of range (0x%x).\n", 2933 sts->handle); 2934 2935 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2936 if (IS_P3P_TYPE(ha)) 2937 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2938 else 2939 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2940 qla2xxx_wake_dpc(vha); 2941 } 2942 return; 2943 } 2944 qla_put_iocbs(sp->qpair, &sp->iores); 2945 2946 if (sp->cmd_type != TYPE_SRB) { 2947 req->outstanding_cmds[handle] = NULL; 2948 ql_dbg(ql_dbg_io, vha, 0x3015, 2949 "Unknown sp->cmd_type %x %p).\n", 2950 sp->cmd_type, sp); 2951 return; 2952 } 2953 2954 /* NVME completion. */ 2955 if (sp->type == SRB_NVME_CMD) { 2956 req->outstanding_cmds[handle] = NULL; 2957 qla24xx_nvme_iocb_entry(vha, req, pkt, sp); 2958 return; 2959 } 2960 2961 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2962 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2963 return; 2964 } 2965 2966 /* Task Management completion. */ 2967 if (sp->type == SRB_TM_CMD) { 2968 qla24xx_tm_iocb_entry(vha, req, pkt); 2969 return; 2970 } 2971 2972 sp->qpair->cmd_completion_cnt++; 2973 2974 /* Fast path completion. */ 2975 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2976 qla2x00_process_completed_request(vha, req, handle); 2977 2978 return; 2979 } 2980 2981 req->outstanding_cmds[handle] = NULL; 2982 cp = GET_CMD_SP(sp); 2983 if (cp == NULL) { 2984 ql_dbg(ql_dbg_io, vha, 0x3018, 2985 "Command already returned (0x%x/%p).\n", 2986 sts->handle, sp); 2987 2988 return; 2989 } 2990 2991 lscsi_status = scsi_status & STATUS_MASK; 2992 2993 fcport = sp->fcport; 2994 2995 ox_id = 0; 2996 sense_len = par_sense_len = rsp_info_len = resid_len = 2997 fw_resid_len = 0; 2998 if (IS_FWI2_CAPABLE(ha)) { 2999 if (scsi_status & SS_SENSE_LEN_VALID) 3000 sense_len = le32_to_cpu(sts24->sense_len); 3001 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3002 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 3003 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 3004 resid_len = le32_to_cpu(sts24->rsp_residual_count); 3005 if (comp_status == CS_DATA_UNDERRUN) 3006 fw_resid_len = le32_to_cpu(sts24->residual_len); 3007 rsp_info = sts24->data; 3008 sense_data = sts24->data; 3009 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 3010 ox_id = le16_to_cpu(sts24->ox_id); 3011 par_sense_len = sizeof(sts24->data); 3012 sts_qual = le16_to_cpu(sts24->status_qualifier); 3013 } else { 3014 if (scsi_status & SS_SENSE_LEN_VALID) 3015 sense_len = le16_to_cpu(sts->req_sense_length); 3016 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3017 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 3018 resid_len = le32_to_cpu(sts->residual_length); 3019 rsp_info = sts->rsp_info; 3020 sense_data = sts->req_sense_data; 3021 par_sense_len = sizeof(sts->req_sense_data); 3022 } 3023 3024 /* Check for any FCP transport errors. */ 3025 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 3026 /* Sense data lies beyond any FCP RESPONSE data. */ 3027 if (IS_FWI2_CAPABLE(ha)) { 3028 sense_data += rsp_info_len; 3029 par_sense_len -= rsp_info_len; 3030 } 3031 if (rsp_info_len > 3 && rsp_info[3]) { 3032 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 3033 "FCP I/O protocol failure (0x%x/0x%x).\n", 3034 rsp_info_len, rsp_info[3]); 3035 3036 res = DID_BUS_BUSY << 16; 3037 goto out; 3038 } 3039 } 3040 3041 /* Check for overrun. */ 3042 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 3043 scsi_status & SS_RESIDUAL_OVER) 3044 comp_status = CS_DATA_OVERRUN; 3045 3046 /* 3047 * Check retry_delay_timer value if we receive a busy or 3048 * queue full. 3049 */ 3050 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL || 3051 lscsi_status == SAM_STAT_BUSY)) 3052 qla2x00_set_retry_delay_timestamp(fcport, sts_qual); 3053 3054 /* 3055 * Based on Host and scsi status generate status code for Linux 3056 */ 3057 switch (comp_status) { 3058 case CS_COMPLETE: 3059 case CS_QUEUE_FULL: 3060 if (scsi_status == 0) { 3061 res = DID_OK << 16; 3062 break; 3063 } 3064 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 3065 resid = resid_len; 3066 scsi_set_resid(cp, resid); 3067 3068 if (!lscsi_status && 3069 ((unsigned)(scsi_bufflen(cp) - resid) < 3070 cp->underflow)) { 3071 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 3072 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3073 resid, scsi_bufflen(cp)); 3074 3075 res = DID_ERROR << 16; 3076 break; 3077 } 3078 } 3079 res = DID_OK << 16 | lscsi_status; 3080 3081 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3082 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 3083 "QUEUE FULL detected.\n"); 3084 break; 3085 } 3086 logit = 0; 3087 if (lscsi_status != SS_CHECK_CONDITION) 3088 break; 3089 3090 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3091 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3092 break; 3093 3094 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 3095 rsp, res); 3096 break; 3097 3098 case CS_DATA_UNDERRUN: 3099 /* Use F/W calculated residual length. */ 3100 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 3101 scsi_set_resid(cp, resid); 3102 if (scsi_status & SS_RESIDUAL_UNDER) { 3103 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 3104 ql_log(ql_log_warn, fcport->vha, 0x301d, 3105 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3106 resid, scsi_bufflen(cp)); 3107 3108 vha->interface_err_cnt++; 3109 3110 res = DID_ERROR << 16 | lscsi_status; 3111 goto check_scsi_status; 3112 } 3113 3114 if (!lscsi_status && 3115 ((unsigned)(scsi_bufflen(cp) - resid) < 3116 cp->underflow)) { 3117 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 3118 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3119 resid, scsi_bufflen(cp)); 3120 3121 res = DID_ERROR << 16; 3122 break; 3123 } 3124 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 3125 lscsi_status != SAM_STAT_BUSY) { 3126 /* 3127 * scsi status of task set and busy are considered to be 3128 * task not completed. 3129 */ 3130 3131 ql_log(ql_log_warn, fcport->vha, 0x301f, 3132 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3133 resid, scsi_bufflen(cp)); 3134 3135 vha->interface_err_cnt++; 3136 3137 res = DID_ERROR << 16 | lscsi_status; 3138 goto check_scsi_status; 3139 } else { 3140 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 3141 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 3142 scsi_status, lscsi_status); 3143 } 3144 3145 res = DID_OK << 16 | lscsi_status; 3146 logit = 0; 3147 3148 check_scsi_status: 3149 /* 3150 * Check to see if SCSI Status is non zero. If so report SCSI 3151 * Status. 3152 */ 3153 if (lscsi_status != 0) { 3154 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3155 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 3156 "QUEUE FULL detected.\n"); 3157 logit = 1; 3158 break; 3159 } 3160 if (lscsi_status != SS_CHECK_CONDITION) 3161 break; 3162 3163 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3164 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3165 break; 3166 3167 qla2x00_handle_sense(sp, sense_data, par_sense_len, 3168 sense_len, rsp, res); 3169 } 3170 break; 3171 3172 case CS_PORT_LOGGED_OUT: 3173 case CS_PORT_CONFIG_CHG: 3174 case CS_PORT_BUSY: 3175 case CS_INCOMPLETE: 3176 case CS_PORT_UNAVAILABLE: 3177 case CS_TIMEOUT: 3178 case CS_RESET: 3179 3180 /* 3181 * We are going to have the fc class block the rport 3182 * while we try to recover so instruct the mid layer 3183 * to requeue until the class decides how to handle this. 3184 */ 3185 res = DID_TRANSPORT_DISRUPTED << 16; 3186 3187 if (comp_status == CS_TIMEOUT) { 3188 if (IS_FWI2_CAPABLE(ha)) 3189 break; 3190 else if ((le16_to_cpu(sts->status_flags) & 3191 SF_LOGOUT_SENT) == 0) 3192 break; 3193 } 3194 3195 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3196 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 3197 "Port to be marked lost on fcport=%02x%02x%02x, current " 3198 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 3199 fcport->d_id.b.area, fcport->d_id.b.al_pa, 3200 port_state_str[FCS_ONLINE], 3201 comp_status); 3202 3203 qlt_schedule_sess_for_deletion(fcport); 3204 } 3205 3206 break; 3207 3208 case CS_ABORTED: 3209 res = DID_RESET << 16; 3210 break; 3211 3212 case CS_DIF_ERROR: 3213 logit = qla2x00_handle_dif_error(sp, sts24); 3214 res = cp->result; 3215 break; 3216 3217 case CS_TRANSPORT: 3218 res = DID_ERROR << 16; 3219 vha->hw_err_cnt++; 3220 3221 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 3222 break; 3223 3224 if (state_flags & BIT_4) 3225 scmd_printk(KERN_WARNING, cp, 3226 "Unsupported device '%s' found.\n", 3227 cp->device->vendor); 3228 break; 3229 3230 case CS_DMA: 3231 ql_log(ql_log_info, fcport->vha, 0x3022, 3232 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3233 comp_status, scsi_status, res, vha->host_no, 3234 cp->device->id, cp->device->lun, fcport->d_id.b24, 3235 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3236 resid_len, fw_resid_len, sp, cp); 3237 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, 3238 pkt, sizeof(*sts24)); 3239 res = DID_ERROR << 16; 3240 vha->hw_err_cnt++; 3241 break; 3242 default: 3243 res = DID_ERROR << 16; 3244 break; 3245 } 3246 3247 out: 3248 if (logit) 3249 ql_log(ql_log_warn, fcport->vha, 0x3022, 3250 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3251 comp_status, scsi_status, res, vha->host_no, 3252 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 3253 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 3254 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3255 resid_len, fw_resid_len, sp, cp); 3256 3257 if (rsp->status_srb == NULL) 3258 sp->done(sp, res); 3259 } 3260 3261 /** 3262 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 3263 * @rsp: response queue 3264 * @pkt: Entry pointer 3265 * 3266 * Extended sense data. 3267 */ 3268 static void 3269 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 3270 { 3271 uint8_t sense_sz = 0; 3272 struct qla_hw_data *ha = rsp->hw; 3273 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 3274 srb_t *sp = rsp->status_srb; 3275 struct scsi_cmnd *cp; 3276 uint32_t sense_len; 3277 uint8_t *sense_ptr; 3278 3279 if (!sp || !GET_CMD_SENSE_LEN(sp)) 3280 return; 3281 3282 sense_len = GET_CMD_SENSE_LEN(sp); 3283 sense_ptr = GET_CMD_SENSE_PTR(sp); 3284 3285 cp = GET_CMD_SP(sp); 3286 if (cp == NULL) { 3287 ql_log(ql_log_warn, vha, 0x3025, 3288 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 3289 3290 rsp->status_srb = NULL; 3291 return; 3292 } 3293 3294 if (sense_len > sizeof(pkt->data)) 3295 sense_sz = sizeof(pkt->data); 3296 else 3297 sense_sz = sense_len; 3298 3299 /* Move sense data. */ 3300 if (IS_FWI2_CAPABLE(ha)) 3301 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 3302 memcpy(sense_ptr, pkt->data, sense_sz); 3303 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 3304 sense_ptr, sense_sz); 3305 3306 sense_len -= sense_sz; 3307 sense_ptr += sense_sz; 3308 3309 SET_CMD_SENSE_PTR(sp, sense_ptr); 3310 SET_CMD_SENSE_LEN(sp, sense_len); 3311 3312 /* Place command on done queue. */ 3313 if (sense_len == 0) { 3314 rsp->status_srb = NULL; 3315 sp->done(sp, cp->result); 3316 } 3317 } 3318 3319 /** 3320 * qla2x00_error_entry() - Process an error entry. 3321 * @vha: SCSI driver HA context 3322 * @rsp: response queue 3323 * @pkt: Entry pointer 3324 * return : 1=allow further error analysis. 0=no additional error analysis. 3325 */ 3326 static int 3327 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 3328 { 3329 srb_t *sp; 3330 struct qla_hw_data *ha = vha->hw; 3331 const char func[] = "ERROR-IOCB"; 3332 uint16_t que = MSW(pkt->handle); 3333 struct req_que *req = NULL; 3334 int res = DID_ERROR << 16; 3335 3336 ql_dbg(ql_dbg_async, vha, 0x502a, 3337 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", 3338 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); 3339 3340 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 3341 goto fatal; 3342 3343 req = ha->req_q_map[que]; 3344 3345 if (pkt->entry_status & RF_BUSY) 3346 res = DID_BUS_BUSY << 16; 3347 3348 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) 3349 return 0; 3350 3351 switch (pkt->entry_type) { 3352 case NOTIFY_ACK_TYPE: 3353 case STATUS_TYPE: 3354 case STATUS_CONT_TYPE: 3355 case LOGINOUT_PORT_IOCB_TYPE: 3356 case CT_IOCB_TYPE: 3357 case ELS_IOCB_TYPE: 3358 case ABORT_IOCB_TYPE: 3359 case MBX_IOCB_TYPE: 3360 default: 3361 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3362 if (sp) { 3363 qla_put_iocbs(sp->qpair, &sp->iores); 3364 sp->done(sp, res); 3365 return 0; 3366 } 3367 break; 3368 3369 case ABTS_RESP_24XX: 3370 case CTIO_TYPE7: 3371 case CTIO_CRC2: 3372 return 1; 3373 } 3374 fatal: 3375 ql_log(ql_log_warn, vha, 0x5030, 3376 "Error entry - invalid handle/queue (%04x).\n", que); 3377 return 0; 3378 } 3379 3380 /** 3381 * qla24xx_mbx_completion() - Process mailbox command completions. 3382 * @vha: SCSI driver HA context 3383 * @mb0: Mailbox0 register 3384 */ 3385 static void 3386 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 3387 { 3388 uint16_t cnt; 3389 uint32_t mboxes; 3390 __le16 __iomem *wptr; 3391 struct qla_hw_data *ha = vha->hw; 3392 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3393 3394 /* Read all mbox registers? */ 3395 WARN_ON_ONCE(ha->mbx_count > 32); 3396 mboxes = (1ULL << ha->mbx_count) - 1; 3397 if (!ha->mcp) 3398 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 3399 else 3400 mboxes = ha->mcp->in_mb; 3401 3402 /* Load return mailbox registers. */ 3403 ha->flags.mbox_int = 1; 3404 ha->mailbox_out[0] = mb0; 3405 mboxes >>= 1; 3406 wptr = ®->mailbox1; 3407 3408 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 3409 if (mboxes & BIT_0) 3410 ha->mailbox_out[cnt] = rd_reg_word(wptr); 3411 3412 mboxes >>= 1; 3413 wptr++; 3414 } 3415 } 3416 3417 static void 3418 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 3419 struct abort_entry_24xx *pkt) 3420 { 3421 const char func[] = "ABT_IOCB"; 3422 srb_t *sp; 3423 srb_t *orig_sp = NULL; 3424 struct srb_iocb *abt; 3425 3426 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3427 if (!sp) 3428 return; 3429 3430 abt = &sp->u.iocb_cmd; 3431 abt->u.abt.comp_status = pkt->comp_status; 3432 orig_sp = sp->cmd_sp; 3433 /* Need to pass original sp */ 3434 if (orig_sp) 3435 qla_nvme_abort_process_comp_status(pkt, orig_sp); 3436 3437 sp->done(sp, 0); 3438 } 3439 3440 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, 3441 struct pt_ls4_request *pkt, struct req_que *req) 3442 { 3443 srb_t *sp; 3444 const char func[] = "LS4_IOCB"; 3445 uint16_t comp_status; 3446 3447 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3448 if (!sp) 3449 return; 3450 3451 comp_status = le16_to_cpu(pkt->status); 3452 sp->done(sp, comp_status); 3453 } 3454 3455 /** 3456 * qla24xx_process_response_queue() - Process response queue entries. 3457 * @vha: SCSI driver HA context 3458 * @rsp: response queue 3459 */ 3460 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 3461 struct rsp_que *rsp) 3462 { 3463 struct sts_entry_24xx *pkt; 3464 struct qla_hw_data *ha = vha->hw; 3465 struct purex_entry_24xx *purex_entry; 3466 struct purex_item *pure_item; 3467 3468 if (!ha->flags.fw_started) 3469 return; 3470 3471 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { 3472 rsp->qpair->rcv_intr = 1; 3473 qla_cpu_update(rsp->qpair, smp_processor_id()); 3474 } 3475 3476 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 3477 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 3478 3479 rsp->ring_index++; 3480 if (rsp->ring_index == rsp->length) { 3481 rsp->ring_index = 0; 3482 rsp->ring_ptr = rsp->ring; 3483 } else { 3484 rsp->ring_ptr++; 3485 } 3486 3487 if (pkt->entry_status != 0) { 3488 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) 3489 goto process_err; 3490 3491 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3492 wmb(); 3493 continue; 3494 } 3495 process_err: 3496 3497 switch (pkt->entry_type) { 3498 case STATUS_TYPE: 3499 qla2x00_status_entry(vha, rsp, pkt); 3500 break; 3501 case STATUS_CONT_TYPE: 3502 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 3503 break; 3504 case VP_RPT_ID_IOCB_TYPE: 3505 qla24xx_report_id_acquisition(vha, 3506 (struct vp_rpt_id_entry_24xx *)pkt); 3507 break; 3508 case LOGINOUT_PORT_IOCB_TYPE: 3509 qla24xx_logio_entry(vha, rsp->req, 3510 (struct logio_entry_24xx *)pkt); 3511 break; 3512 case CT_IOCB_TYPE: 3513 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 3514 break; 3515 case ELS_IOCB_TYPE: 3516 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 3517 break; 3518 case ABTS_RECV_24XX: 3519 if (qla_ini_mode_enabled(vha)) { 3520 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3521 if (!pure_item) 3522 break; 3523 qla24xx_queue_purex_item(vha, pure_item, 3524 qla24xx_process_abts); 3525 break; 3526 } 3527 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3528 IS_QLA28XX(ha)) { 3529 /* ensure that the ATIO queue is empty */ 3530 qlt_handle_abts_recv(vha, rsp, 3531 (response_t *)pkt); 3532 break; 3533 } else { 3534 qlt_24xx_process_atio_queue(vha, 1); 3535 } 3536 fallthrough; 3537 case ABTS_RESP_24XX: 3538 case CTIO_TYPE7: 3539 case CTIO_CRC2: 3540 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); 3541 break; 3542 case PT_LS4_REQUEST: 3543 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, 3544 rsp->req); 3545 break; 3546 case NOTIFY_ACK_TYPE: 3547 if (pkt->handle == QLA_TGT_SKIP_HANDLE) 3548 qlt_response_pkt_all_vps(vha, rsp, 3549 (response_t *)pkt); 3550 else 3551 qla24xxx_nack_iocb_entry(vha, rsp->req, 3552 (struct nack_to_isp *)pkt); 3553 break; 3554 case MARKER_TYPE: 3555 /* Do nothing in this case, this check is to prevent it 3556 * from falling into default case 3557 */ 3558 break; 3559 case ABORT_IOCB_TYPE: 3560 qla24xx_abort_iocb_entry(vha, rsp->req, 3561 (struct abort_entry_24xx *)pkt); 3562 break; 3563 case MBX_IOCB_TYPE: 3564 qla24xx_mbx_iocb_entry(vha, rsp->req, 3565 (struct mbx_24xx_entry *)pkt); 3566 break; 3567 case VP_CTRL_IOCB_TYPE: 3568 qla_ctrlvp_completed(vha, rsp->req, 3569 (struct vp_ctrl_entry_24xx *)pkt); 3570 break; 3571 case PUREX_IOCB_TYPE: 3572 purex_entry = (void *)pkt; 3573 switch (purex_entry->els_frame_payload[3]) { 3574 case ELS_RDP: 3575 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3576 if (!pure_item) 3577 break; 3578 qla24xx_queue_purex_item(vha, pure_item, 3579 qla24xx_process_purex_rdp); 3580 break; 3581 case ELS_FPIN: 3582 if (!vha->hw->flags.scm_enabled) { 3583 ql_log(ql_log_warn, vha, 0x5094, 3584 "SCM not active for this port\n"); 3585 break; 3586 } 3587 pure_item = qla27xx_copy_fpin_pkt(vha, 3588 (void **)&pkt, &rsp); 3589 if (!pure_item) 3590 break; 3591 qla24xx_queue_purex_item(vha, pure_item, 3592 qla27xx_process_purex_fpin); 3593 break; 3594 3595 default: 3596 ql_log(ql_log_warn, vha, 0x509c, 3597 "Discarding ELS Request opcode 0x%x\n", 3598 purex_entry->els_frame_payload[3]); 3599 } 3600 break; 3601 default: 3602 /* Type Not Supported. */ 3603 ql_dbg(ql_dbg_async, vha, 0x5042, 3604 "Received unknown response pkt type 0x%x entry status=%x.\n", 3605 pkt->entry_type, pkt->entry_status); 3606 break; 3607 } 3608 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3609 wmb(); 3610 } 3611 3612 /* Adjust ring index */ 3613 if (IS_P3P_TYPE(ha)) { 3614 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 3615 3616 wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index); 3617 } else { 3618 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); 3619 } 3620 } 3621 3622 static void 3623 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 3624 { 3625 int rval; 3626 uint32_t cnt; 3627 struct qla_hw_data *ha = vha->hw; 3628 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3629 3630 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3631 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3632 return; 3633 3634 rval = QLA_SUCCESS; 3635 wrt_reg_dword(®->iobase_addr, 0x7C00); 3636 rd_reg_dword(®->iobase_addr); 3637 wrt_reg_dword(®->iobase_window, 0x0001); 3638 for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3639 rval == QLA_SUCCESS; cnt--) { 3640 if (cnt) { 3641 wrt_reg_dword(®->iobase_window, 0x0001); 3642 udelay(10); 3643 } else 3644 rval = QLA_FUNCTION_TIMEOUT; 3645 } 3646 if (rval == QLA_SUCCESS) 3647 goto next_test; 3648 3649 rval = QLA_SUCCESS; 3650 wrt_reg_dword(®->iobase_window, 0x0003); 3651 for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3652 rval == QLA_SUCCESS; cnt--) { 3653 if (cnt) { 3654 wrt_reg_dword(®->iobase_window, 0x0003); 3655 udelay(10); 3656 } else 3657 rval = QLA_FUNCTION_TIMEOUT; 3658 } 3659 if (rval != QLA_SUCCESS) 3660 goto done; 3661 3662 next_test: 3663 if (rd_reg_dword(®->iobase_c8) & BIT_3) 3664 ql_log(ql_log_info, vha, 0x504c, 3665 "Additional code -- 0x55AA.\n"); 3666 3667 done: 3668 wrt_reg_dword(®->iobase_window, 0x0000); 3669 rd_reg_dword(®->iobase_window); 3670 } 3671 3672 /** 3673 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 3674 * @irq: interrupt number 3675 * @dev_id: SCSI driver HA context 3676 * 3677 * Called by system whenever the host adapter generates an interrupt. 3678 * 3679 * Returns handled flag. 3680 */ 3681 irqreturn_t 3682 qla24xx_intr_handler(int irq, void *dev_id) 3683 { 3684 scsi_qla_host_t *vha; 3685 struct qla_hw_data *ha; 3686 struct device_reg_24xx __iomem *reg; 3687 int status; 3688 unsigned long iter; 3689 uint32_t stat; 3690 uint32_t hccr; 3691 uint16_t mb[8]; 3692 struct rsp_que *rsp; 3693 unsigned long flags; 3694 bool process_atio = false; 3695 3696 rsp = (struct rsp_que *) dev_id; 3697 if (!rsp) { 3698 ql_log(ql_log_info, NULL, 0x5059, 3699 "%s: NULL response queue pointer.\n", __func__); 3700 return IRQ_NONE; 3701 } 3702 3703 ha = rsp->hw; 3704 reg = &ha->iobase->isp24; 3705 status = 0; 3706 3707 if (unlikely(pci_channel_offline(ha->pdev))) 3708 return IRQ_HANDLED; 3709 3710 spin_lock_irqsave(&ha->hardware_lock, flags); 3711 vha = pci_get_drvdata(ha->pdev); 3712 for (iter = 50; iter--; ) { 3713 stat = rd_reg_dword(®->host_status); 3714 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3715 break; 3716 if (stat & HSRX_RISC_PAUSED) { 3717 if (unlikely(pci_channel_offline(ha->pdev))) 3718 break; 3719 3720 hccr = rd_reg_dword(®->hccr); 3721 3722 ql_log(ql_log_warn, vha, 0x504b, 3723 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3724 hccr); 3725 3726 qla2xxx_check_risc_status(vha); 3727 3728 ha->isp_ops->fw_dump(vha); 3729 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3730 break; 3731 } else if ((stat & HSRX_RISC_INT) == 0) 3732 break; 3733 3734 switch (stat & 0xff) { 3735 case INTR_ROM_MB_SUCCESS: 3736 case INTR_ROM_MB_FAILED: 3737 case INTR_MB_SUCCESS: 3738 case INTR_MB_FAILED: 3739 qla24xx_mbx_completion(vha, MSW(stat)); 3740 status |= MBX_INTERRUPT; 3741 3742 break; 3743 case INTR_ASYNC_EVENT: 3744 mb[0] = MSW(stat); 3745 mb[1] = rd_reg_word(®->mailbox1); 3746 mb[2] = rd_reg_word(®->mailbox2); 3747 mb[3] = rd_reg_word(®->mailbox3); 3748 qla2x00_async_event(vha, rsp, mb); 3749 break; 3750 case INTR_RSP_QUE_UPDATE: 3751 case INTR_RSP_QUE_UPDATE_83XX: 3752 qla24xx_process_response_queue(vha, rsp); 3753 break; 3754 case INTR_ATIO_QUE_UPDATE_27XX: 3755 case INTR_ATIO_QUE_UPDATE: 3756 process_atio = true; 3757 break; 3758 case INTR_ATIO_RSP_QUE_UPDATE: 3759 process_atio = true; 3760 qla24xx_process_response_queue(vha, rsp); 3761 break; 3762 default: 3763 ql_dbg(ql_dbg_async, vha, 0x504f, 3764 "Unrecognized interrupt type (%d).\n", stat * 0xff); 3765 break; 3766 } 3767 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3768 rd_reg_dword_relaxed(®->hccr); 3769 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 3770 ndelay(3500); 3771 } 3772 qla2x00_handle_mbx_completion(ha, status); 3773 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3774 3775 if (process_atio) { 3776 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3777 qlt_24xx_process_atio_queue(vha, 0); 3778 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3779 } 3780 3781 return IRQ_HANDLED; 3782 } 3783 3784 static irqreturn_t 3785 qla24xx_msix_rsp_q(int irq, void *dev_id) 3786 { 3787 struct qla_hw_data *ha; 3788 struct rsp_que *rsp; 3789 struct device_reg_24xx __iomem *reg; 3790 struct scsi_qla_host *vha; 3791 unsigned long flags; 3792 3793 rsp = (struct rsp_que *) dev_id; 3794 if (!rsp) { 3795 ql_log(ql_log_info, NULL, 0x505a, 3796 "%s: NULL response queue pointer.\n", __func__); 3797 return IRQ_NONE; 3798 } 3799 ha = rsp->hw; 3800 reg = &ha->iobase->isp24; 3801 3802 spin_lock_irqsave(&ha->hardware_lock, flags); 3803 3804 vha = pci_get_drvdata(ha->pdev); 3805 qla24xx_process_response_queue(vha, rsp); 3806 if (!ha->flags.disable_msix_handshake) { 3807 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3808 rd_reg_dword_relaxed(®->hccr); 3809 } 3810 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3811 3812 return IRQ_HANDLED; 3813 } 3814 3815 static irqreturn_t 3816 qla24xx_msix_default(int irq, void *dev_id) 3817 { 3818 scsi_qla_host_t *vha; 3819 struct qla_hw_data *ha; 3820 struct rsp_que *rsp; 3821 struct device_reg_24xx __iomem *reg; 3822 int status; 3823 uint32_t stat; 3824 uint32_t hccr; 3825 uint16_t mb[8]; 3826 unsigned long flags; 3827 bool process_atio = false; 3828 3829 rsp = (struct rsp_que *) dev_id; 3830 if (!rsp) { 3831 ql_log(ql_log_info, NULL, 0x505c, 3832 "%s: NULL response queue pointer.\n", __func__); 3833 return IRQ_NONE; 3834 } 3835 ha = rsp->hw; 3836 reg = &ha->iobase->isp24; 3837 status = 0; 3838 3839 spin_lock_irqsave(&ha->hardware_lock, flags); 3840 vha = pci_get_drvdata(ha->pdev); 3841 do { 3842 stat = rd_reg_dword(®->host_status); 3843 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3844 break; 3845 if (stat & HSRX_RISC_PAUSED) { 3846 if (unlikely(pci_channel_offline(ha->pdev))) 3847 break; 3848 3849 hccr = rd_reg_dword(®->hccr); 3850 3851 ql_log(ql_log_info, vha, 0x5050, 3852 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3853 hccr); 3854 3855 qla2xxx_check_risc_status(vha); 3856 vha->hw_err_cnt++; 3857 3858 ha->isp_ops->fw_dump(vha); 3859 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3860 break; 3861 } else if ((stat & HSRX_RISC_INT) == 0) 3862 break; 3863 3864 switch (stat & 0xff) { 3865 case INTR_ROM_MB_SUCCESS: 3866 case INTR_ROM_MB_FAILED: 3867 case INTR_MB_SUCCESS: 3868 case INTR_MB_FAILED: 3869 qla24xx_mbx_completion(vha, MSW(stat)); 3870 status |= MBX_INTERRUPT; 3871 3872 break; 3873 case INTR_ASYNC_EVENT: 3874 mb[0] = MSW(stat); 3875 mb[1] = rd_reg_word(®->mailbox1); 3876 mb[2] = rd_reg_word(®->mailbox2); 3877 mb[3] = rd_reg_word(®->mailbox3); 3878 qla2x00_async_event(vha, rsp, mb); 3879 break; 3880 case INTR_RSP_QUE_UPDATE: 3881 case INTR_RSP_QUE_UPDATE_83XX: 3882 qla24xx_process_response_queue(vha, rsp); 3883 break; 3884 case INTR_ATIO_QUE_UPDATE_27XX: 3885 case INTR_ATIO_QUE_UPDATE: 3886 process_atio = true; 3887 break; 3888 case INTR_ATIO_RSP_QUE_UPDATE: 3889 process_atio = true; 3890 qla24xx_process_response_queue(vha, rsp); 3891 break; 3892 default: 3893 ql_dbg(ql_dbg_async, vha, 0x5051, 3894 "Unrecognized interrupt type (%d).\n", stat & 0xff); 3895 break; 3896 } 3897 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3898 } while (0); 3899 qla2x00_handle_mbx_completion(ha, status); 3900 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3901 3902 if (process_atio) { 3903 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3904 qlt_24xx_process_atio_queue(vha, 0); 3905 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3906 } 3907 3908 return IRQ_HANDLED; 3909 } 3910 3911 irqreturn_t 3912 qla2xxx_msix_rsp_q(int irq, void *dev_id) 3913 { 3914 struct qla_hw_data *ha; 3915 struct qla_qpair *qpair; 3916 3917 qpair = dev_id; 3918 if (!qpair) { 3919 ql_log(ql_log_info, NULL, 0x505b, 3920 "%s: NULL response queue pointer.\n", __func__); 3921 return IRQ_NONE; 3922 } 3923 ha = qpair->hw; 3924 3925 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); 3926 3927 return IRQ_HANDLED; 3928 } 3929 3930 irqreturn_t 3931 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) 3932 { 3933 struct qla_hw_data *ha; 3934 struct qla_qpair *qpair; 3935 struct device_reg_24xx __iomem *reg; 3936 unsigned long flags; 3937 3938 qpair = dev_id; 3939 if (!qpair) { 3940 ql_log(ql_log_info, NULL, 0x505b, 3941 "%s: NULL response queue pointer.\n", __func__); 3942 return IRQ_NONE; 3943 } 3944 ha = qpair->hw; 3945 3946 reg = &ha->iobase->isp24; 3947 spin_lock_irqsave(&ha->hardware_lock, flags); 3948 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3949 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3950 3951 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); 3952 3953 return IRQ_HANDLED; 3954 } 3955 3956 /* Interrupt handling helpers. */ 3957 3958 struct qla_init_msix_entry { 3959 const char *name; 3960 irq_handler_t handler; 3961 }; 3962 3963 static const struct qla_init_msix_entry msix_entries[] = { 3964 { "default", qla24xx_msix_default }, 3965 { "rsp_q", qla24xx_msix_rsp_q }, 3966 { "atio_q", qla83xx_msix_atio_q }, 3967 { "qpair_multiq", qla2xxx_msix_rsp_q }, 3968 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, 3969 }; 3970 3971 static const struct qla_init_msix_entry qla82xx_msix_entries[] = { 3972 { "qla2xxx (default)", qla82xx_msix_default }, 3973 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3974 }; 3975 3976 static int 3977 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3978 { 3979 int i, ret; 3980 struct qla_msix_entry *qentry; 3981 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3982 int min_vecs = QLA_BASE_VECTORS; 3983 struct irq_affinity desc = { 3984 .pre_vectors = QLA_BASE_VECTORS, 3985 }; 3986 3987 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 3988 IS_ATIO_MSIX_CAPABLE(ha)) { 3989 desc.pre_vectors++; 3990 min_vecs++; 3991 } 3992 3993 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { 3994 /* user wants to control IRQ setting for target mode */ 3995 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, 3996 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), 3997 PCI_IRQ_MSIX); 3998 } else 3999 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, 4000 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), 4001 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 4002 &desc); 4003 4004 if (ret < 0) { 4005 ql_log(ql_log_fatal, vha, 0x00c7, 4006 "MSI-X: Failed to enable support, " 4007 "giving up -- %d/%d.\n", 4008 ha->msix_count, ret); 4009 goto msix_out; 4010 } else if (ret < ha->msix_count) { 4011 ql_log(ql_log_info, vha, 0x00c6, 4012 "MSI-X: Using %d vectors\n", ret); 4013 ha->msix_count = ret; 4014 /* Recalculate queue values */ 4015 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { 4016 ha->max_req_queues = ha->msix_count - 1; 4017 4018 /* ATIOQ needs 1 vector. That's 1 less QPair */ 4019 if (QLA_TGT_MODE_ENABLED()) 4020 ha->max_req_queues--; 4021 4022 ha->max_rsp_queues = ha->max_req_queues; 4023 4024 ha->max_qpairs = ha->max_req_queues - 1; 4025 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, 4026 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 4027 } 4028 } 4029 vha->irq_offset = desc.pre_vectors; 4030 ha->msix_entries = kcalloc(ha->msix_count, 4031 sizeof(struct qla_msix_entry), 4032 GFP_KERNEL); 4033 if (!ha->msix_entries) { 4034 ql_log(ql_log_fatal, vha, 0x00c8, 4035 "Failed to allocate memory for ha->msix_entries.\n"); 4036 ret = -ENOMEM; 4037 goto free_irqs; 4038 } 4039 ha->flags.msix_enabled = 1; 4040 4041 for (i = 0; i < ha->msix_count; i++) { 4042 qentry = &ha->msix_entries[i]; 4043 qentry->vector = pci_irq_vector(ha->pdev, i); 4044 qentry->entry = i; 4045 qentry->have_irq = 0; 4046 qentry->in_use = 0; 4047 qentry->handle = NULL; 4048 } 4049 4050 /* Enable MSI-X vectors for the base queue */ 4051 for (i = 0; i < QLA_BASE_VECTORS; i++) { 4052 qentry = &ha->msix_entries[i]; 4053 qentry->handle = rsp; 4054 rsp->msix = qentry; 4055 scnprintf(qentry->name, sizeof(qentry->name), 4056 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); 4057 if (IS_P3P_TYPE(ha)) 4058 ret = request_irq(qentry->vector, 4059 qla82xx_msix_entries[i].handler, 4060 0, qla82xx_msix_entries[i].name, rsp); 4061 else 4062 ret = request_irq(qentry->vector, 4063 msix_entries[i].handler, 4064 0, qentry->name, rsp); 4065 if (ret) 4066 goto msix_register_fail; 4067 qentry->have_irq = 1; 4068 qentry->in_use = 1; 4069 } 4070 4071 /* 4072 * If target mode is enable, also request the vector for the ATIO 4073 * queue. 4074 */ 4075 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 4076 IS_ATIO_MSIX_CAPABLE(ha)) { 4077 qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; 4078 rsp->msix = qentry; 4079 qentry->handle = rsp; 4080 scnprintf(qentry->name, sizeof(qentry->name), 4081 "qla2xxx%lu_%s", vha->host_no, 4082 msix_entries[QLA_ATIO_VECTOR].name); 4083 qentry->in_use = 1; 4084 ret = request_irq(qentry->vector, 4085 msix_entries[QLA_ATIO_VECTOR].handler, 4086 0, qentry->name, rsp); 4087 qentry->have_irq = 1; 4088 } 4089 4090 msix_register_fail: 4091 if (ret) { 4092 ql_log(ql_log_fatal, vha, 0x00cb, 4093 "MSI-X: unable to register handler -- %x/%d.\n", 4094 qentry->vector, ret); 4095 qla2x00_free_irqs(vha); 4096 ha->mqenable = 0; 4097 goto msix_out; 4098 } 4099 4100 /* Enable MSI-X vector for response queue update for queue 0 */ 4101 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4102 if (ha->msixbase && ha->mqiobase && 4103 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 4104 ql2xmqsupport)) 4105 ha->mqenable = 1; 4106 } else 4107 if (ha->mqiobase && 4108 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 4109 ql2xmqsupport)) 4110 ha->mqenable = 1; 4111 ql_dbg(ql_dbg_multiq, vha, 0xc005, 4112 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 4113 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 4114 ql_dbg(ql_dbg_init, vha, 0x0055, 4115 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 4116 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 4117 4118 msix_out: 4119 return ret; 4120 4121 free_irqs: 4122 pci_free_irq_vectors(ha->pdev); 4123 goto msix_out; 4124 } 4125 4126 int 4127 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 4128 { 4129 int ret = QLA_FUNCTION_FAILED; 4130 device_reg_t *reg = ha->iobase; 4131 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4132 4133 /* If possible, enable MSI-X. */ 4134 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 4135 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && 4136 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))) 4137 goto skip_msi; 4138 4139 if (ql2xenablemsix == 2) 4140 goto skip_msix; 4141 4142 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 4143 (ha->pdev->subsystem_device == 0x7040 || 4144 ha->pdev->subsystem_device == 0x7041 || 4145 ha->pdev->subsystem_device == 0x1705)) { 4146 ql_log(ql_log_warn, vha, 0x0034, 4147 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 4148 ha->pdev->subsystem_vendor, 4149 ha->pdev->subsystem_device); 4150 goto skip_msi; 4151 } 4152 4153 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 4154 ql_log(ql_log_warn, vha, 0x0035, 4155 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 4156 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 4157 goto skip_msix; 4158 } 4159 4160 ret = qla24xx_enable_msix(ha, rsp); 4161 if (!ret) { 4162 ql_dbg(ql_dbg_init, vha, 0x0036, 4163 "MSI-X: Enabled (0x%X, 0x%X).\n", 4164 ha->chip_revision, ha->fw_attributes); 4165 goto clear_risc_ints; 4166 } 4167 4168 skip_msix: 4169 4170 ql_log(ql_log_info, vha, 0x0037, 4171 "Falling back-to MSI mode -- ret=%d.\n", ret); 4172 4173 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 4174 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 4175 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4176 goto skip_msi; 4177 4178 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); 4179 if (ret > 0) { 4180 ql_dbg(ql_dbg_init, vha, 0x0038, 4181 "MSI: Enabled.\n"); 4182 ha->flags.msi_enabled = 1; 4183 } else 4184 ql_log(ql_log_warn, vha, 0x0039, 4185 "Falling back-to INTa mode -- ret=%d.\n", ret); 4186 skip_msi: 4187 4188 /* Skip INTx on ISP82xx. */ 4189 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 4190 return QLA_FUNCTION_FAILED; 4191 4192 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 4193 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 4194 QLA2XXX_DRIVER_NAME, rsp); 4195 if (ret) { 4196 ql_log(ql_log_warn, vha, 0x003a, 4197 "Failed to reserve interrupt %d already in use.\n", 4198 ha->pdev->irq); 4199 goto fail; 4200 } else if (!ha->flags.msi_enabled) { 4201 ql_dbg(ql_dbg_init, vha, 0x0125, 4202 "INTa mode: Enabled.\n"); 4203 ha->flags.mr_intr_valid = 1; 4204 } 4205 4206 clear_risc_ints: 4207 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 4208 goto fail; 4209 4210 spin_lock_irq(&ha->hardware_lock); 4211 wrt_reg_word(®->isp.semaphore, 0); 4212 spin_unlock_irq(&ha->hardware_lock); 4213 4214 fail: 4215 return ret; 4216 } 4217 4218 void 4219 qla2x00_free_irqs(scsi_qla_host_t *vha) 4220 { 4221 struct qla_hw_data *ha = vha->hw; 4222 struct rsp_que *rsp; 4223 struct qla_msix_entry *qentry; 4224 int i; 4225 4226 /* 4227 * We need to check that ha->rsp_q_map is valid in case we are called 4228 * from a probe failure context. 4229 */ 4230 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 4231 goto free_irqs; 4232 rsp = ha->rsp_q_map[0]; 4233 4234 if (ha->flags.msix_enabled) { 4235 for (i = 0; i < ha->msix_count; i++) { 4236 qentry = &ha->msix_entries[i]; 4237 if (qentry->have_irq) { 4238 irq_set_affinity_notifier(qentry->vector, NULL); 4239 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); 4240 } 4241 } 4242 kfree(ha->msix_entries); 4243 ha->msix_entries = NULL; 4244 ha->flags.msix_enabled = 0; 4245 ql_dbg(ql_dbg_init, vha, 0x0042, 4246 "Disabled MSI-X.\n"); 4247 } else { 4248 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 4249 } 4250 4251 free_irqs: 4252 pci_free_irq_vectors(ha->pdev); 4253 } 4254 4255 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 4256 struct qla_msix_entry *msix, int vector_type) 4257 { 4258 const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 4259 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4260 int ret; 4261 4262 scnprintf(msix->name, sizeof(msix->name), 4263 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); 4264 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); 4265 if (ret) { 4266 ql_log(ql_log_fatal, vha, 0x00e6, 4267 "MSI-X: Unable to register handler -- %x/%d.\n", 4268 msix->vector, ret); 4269 return ret; 4270 } 4271 msix->have_irq = 1; 4272 msix->handle = qpair; 4273 return ret; 4274 } 4275