1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic Fibre Channel HBA Driver 4 * Copyright (c) 2003-2014 QLogic Corporation 5 */ 6 #include "qla_def.h" 7 #include "qla_target.h" 8 #include "qla_gbl.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <linux/cpu.h> 13 #include <linux/t10-pi.h> 14 #include <scsi/scsi_tcq.h> 15 #include <scsi/scsi_bsg_fc.h> 16 #include <scsi/scsi_eh.h> 17 #include <scsi/fc/fc_fs.h> 18 #include <linux/nvme-fc-driver.h> 19 20 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 21 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 22 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 23 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 24 sts_entry_t *); 25 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, 26 struct purex_item *item); 27 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha, 28 uint16_t size); 29 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, 30 void *pkt); 31 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, 32 void **pkt, struct rsp_que **rsp); 33 34 static void 35 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item) 36 { 37 void *pkt = &item->iocb; 38 uint16_t pkt_size = item->size; 39 40 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d, 41 "%s: Enter\n", __func__); 42 43 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e, 44 "-------- ELS REQ -------\n"); 45 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f, 46 pkt, pkt_size); 47 48 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt); 49 } 50 51 const char *const port_state_str[] = { 52 "Unknown", 53 "UNCONFIGURED", 54 "DEAD", 55 "LOST", 56 "ONLINE" 57 }; 58 59 static void 60 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) 61 { 62 struct abts_entry_24xx *abts = 63 (struct abts_entry_24xx *)&pkt->iocb; 64 struct qla_hw_data *ha = vha->hw; 65 struct els_entry_24xx *rsp_els; 66 struct abts_entry_24xx *abts_rsp; 67 dma_addr_t dma; 68 uint32_t fctl; 69 int rval; 70 71 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); 72 73 ql_log(ql_log_warn, vha, 0x0287, 74 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", 75 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, 76 abts->seq_id, abts->seq_cnt); 77 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 78 "-------- ABTS RCV -------\n"); 79 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 80 (uint8_t *)abts, sizeof(*abts)); 81 82 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, 83 GFP_KERNEL); 84 if (!rsp_els) { 85 ql_log(ql_log_warn, vha, 0x0287, 86 "Failed allocate dma buffer ABTS/ELS RSP.\n"); 87 return; 88 } 89 90 /* terminate exchange */ 91 rsp_els->entry_type = ELS_IOCB_TYPE; 92 rsp_els->entry_count = 1; 93 rsp_els->nport_handle = cpu_to_le16(~0); 94 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; 95 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); 96 ql_dbg(ql_dbg_init, vha, 0x0283, 97 "Sending ELS Response to terminate exchange %#x...\n", 98 abts->rx_xch_addr_to_abort); 99 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 100 "-------- ELS RSP -------\n"); 101 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 102 (uint8_t *)rsp_els, sizeof(*rsp_els)); 103 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); 104 if (rval) { 105 ql_log(ql_log_warn, vha, 0x0288, 106 "%s: iocb failed to execute -> %x\n", __func__, rval); 107 } else if (rsp_els->comp_status) { 108 ql_log(ql_log_warn, vha, 0x0289, 109 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 110 __func__, rsp_els->comp_status, 111 rsp_els->error_subcode_1, rsp_els->error_subcode_2); 112 } else { 113 ql_dbg(ql_dbg_init, vha, 0x028a, 114 "%s: abort exchange done.\n", __func__); 115 } 116 117 /* send ABTS response */ 118 abts_rsp = (void *)rsp_els; 119 memset(abts_rsp, 0, sizeof(*abts_rsp)); 120 abts_rsp->entry_type = ABTS_RSP_TYPE; 121 abts_rsp->entry_count = 1; 122 abts_rsp->nport_handle = abts->nport_handle; 123 abts_rsp->vp_idx = abts->vp_idx; 124 abts_rsp->sof_type = abts->sof_type & 0xf0; 125 abts_rsp->rx_xch_addr = abts->rx_xch_addr; 126 abts_rsp->d_id[0] = abts->s_id[0]; 127 abts_rsp->d_id[1] = abts->s_id[1]; 128 abts_rsp->d_id[2] = abts->s_id[2]; 129 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; 130 abts_rsp->s_id[0] = abts->d_id[0]; 131 abts_rsp->s_id[1] = abts->d_id[1]; 132 abts_rsp->s_id[2] = abts->d_id[2]; 133 abts_rsp->cs_ctl = abts->cs_ctl; 134 /* include flipping bit23 in fctl */ 135 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | 136 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; 137 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; 138 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; 139 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; 140 abts_rsp->type = FC_TYPE_BLD; 141 abts_rsp->rx_id = abts->rx_id; 142 abts_rsp->ox_id = abts->ox_id; 143 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; 144 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; 145 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); 146 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; 147 ql_dbg(ql_dbg_init, vha, 0x028b, 148 "Sending BA ACC response to ABTS %#x...\n", 149 abts->rx_xch_addr_to_abort); 150 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 151 "-------- ELS RSP -------\n"); 152 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 153 (uint8_t *)abts_rsp, sizeof(*abts_rsp)); 154 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); 155 if (rval) { 156 ql_log(ql_log_warn, vha, 0x028c, 157 "%s: iocb failed to execute -> %x\n", __func__, rval); 158 } else if (abts_rsp->comp_status) { 159 ql_log(ql_log_warn, vha, 0x028d, 160 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 161 __func__, abts_rsp->comp_status, 162 abts_rsp->payload.error.subcode1, 163 abts_rsp->payload.error.subcode2); 164 } else { 165 ql_dbg(ql_dbg_init, vha, 0x028ea, 166 "%s: done.\n", __func__); 167 } 168 169 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); 170 } 171 172 /** 173 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 174 * @irq: interrupt number 175 * @dev_id: SCSI driver HA context 176 * 177 * Called by system whenever the host adapter generates an interrupt. 178 * 179 * Returns handled flag. 180 */ 181 irqreturn_t 182 qla2100_intr_handler(int irq, void *dev_id) 183 { 184 scsi_qla_host_t *vha; 185 struct qla_hw_data *ha; 186 struct device_reg_2xxx __iomem *reg; 187 int status; 188 unsigned long iter; 189 uint16_t hccr; 190 uint16_t mb[8]; 191 struct rsp_que *rsp; 192 unsigned long flags; 193 194 rsp = (struct rsp_que *) dev_id; 195 if (!rsp) { 196 ql_log(ql_log_info, NULL, 0x505d, 197 "%s: NULL response queue pointer.\n", __func__); 198 return (IRQ_NONE); 199 } 200 201 ha = rsp->hw; 202 reg = &ha->iobase->isp; 203 status = 0; 204 205 spin_lock_irqsave(&ha->hardware_lock, flags); 206 vha = pci_get_drvdata(ha->pdev); 207 for (iter = 50; iter--; ) { 208 hccr = rd_reg_word(®->hccr); 209 if (qla2x00_check_reg16_for_disconnect(vha, hccr)) 210 break; 211 if (hccr & HCCR_RISC_PAUSE) { 212 if (pci_channel_offline(ha->pdev)) 213 break; 214 215 /* 216 * Issue a "HARD" reset in order for the RISC interrupt 217 * bit to be cleared. Schedule a big hammer to get 218 * out of the RISC PAUSED state. 219 */ 220 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 221 rd_reg_word(®->hccr); 222 223 ha->isp_ops->fw_dump(vha); 224 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 225 break; 226 } else if ((rd_reg_word(®->istatus) & ISR_RISC_INT) == 0) 227 break; 228 229 if (rd_reg_word(®->semaphore) & BIT_0) { 230 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 231 rd_reg_word(®->hccr); 232 233 /* Get mailbox data. */ 234 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 235 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 236 qla2x00_mbx_completion(vha, mb[0]); 237 status |= MBX_INTERRUPT; 238 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 239 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 240 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 241 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 242 qla2x00_async_event(vha, rsp, mb); 243 } else { 244 /*EMPTY*/ 245 ql_dbg(ql_dbg_async, vha, 0x5025, 246 "Unrecognized interrupt type (%d).\n", 247 mb[0]); 248 } 249 /* Release mailbox registers. */ 250 wrt_reg_word(®->semaphore, 0); 251 rd_reg_word(®->semaphore); 252 } else { 253 qla2x00_process_response_queue(rsp); 254 255 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 256 rd_reg_word(®->hccr); 257 } 258 } 259 qla2x00_handle_mbx_completion(ha, status); 260 spin_unlock_irqrestore(&ha->hardware_lock, flags); 261 262 return (IRQ_HANDLED); 263 } 264 265 bool 266 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 267 { 268 /* Check for PCI disconnection */ 269 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { 270 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 271 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 272 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 273 qla_schedule_eeh_work(vha); 274 } 275 return true; 276 } else 277 return false; 278 } 279 280 bool 281 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) 282 { 283 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); 284 } 285 286 /** 287 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 288 * @irq: interrupt number 289 * @dev_id: SCSI driver HA context 290 * 291 * Called by system whenever the host adapter generates an interrupt. 292 * 293 * Returns handled flag. 294 */ 295 irqreturn_t 296 qla2300_intr_handler(int irq, void *dev_id) 297 { 298 scsi_qla_host_t *vha; 299 struct device_reg_2xxx __iomem *reg; 300 int status; 301 unsigned long iter; 302 uint32_t stat; 303 uint16_t hccr; 304 uint16_t mb[8]; 305 struct rsp_que *rsp; 306 struct qla_hw_data *ha; 307 unsigned long flags; 308 309 rsp = (struct rsp_que *) dev_id; 310 if (!rsp) { 311 ql_log(ql_log_info, NULL, 0x5058, 312 "%s: NULL response queue pointer.\n", __func__); 313 return (IRQ_NONE); 314 } 315 316 ha = rsp->hw; 317 reg = &ha->iobase->isp; 318 status = 0; 319 320 spin_lock_irqsave(&ha->hardware_lock, flags); 321 vha = pci_get_drvdata(ha->pdev); 322 for (iter = 50; iter--; ) { 323 stat = rd_reg_dword(®->u.isp2300.host_status); 324 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 325 break; 326 if (stat & HSR_RISC_PAUSED) { 327 if (unlikely(pci_channel_offline(ha->pdev))) 328 break; 329 330 hccr = rd_reg_word(®->hccr); 331 332 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 333 ql_log(ql_log_warn, vha, 0x5026, 334 "Parity error -- HCCR=%x, Dumping " 335 "firmware.\n", hccr); 336 else 337 ql_log(ql_log_warn, vha, 0x5027, 338 "RISC paused -- HCCR=%x, Dumping " 339 "firmware.\n", hccr); 340 341 /* 342 * Issue a "HARD" reset in order for the RISC 343 * interrupt bit to be cleared. Schedule a big 344 * hammer to get out of the RISC PAUSED state. 345 */ 346 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 347 rd_reg_word(®->hccr); 348 349 ha->isp_ops->fw_dump(vha); 350 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 351 break; 352 } else if ((stat & HSR_RISC_INT) == 0) 353 break; 354 355 switch (stat & 0xff) { 356 case 0x1: 357 case 0x2: 358 case 0x10: 359 case 0x11: 360 qla2x00_mbx_completion(vha, MSW(stat)); 361 status |= MBX_INTERRUPT; 362 363 /* Release mailbox registers. */ 364 wrt_reg_word(®->semaphore, 0); 365 break; 366 case 0x12: 367 mb[0] = MSW(stat); 368 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 369 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 370 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 371 qla2x00_async_event(vha, rsp, mb); 372 break; 373 case 0x13: 374 qla2x00_process_response_queue(rsp); 375 break; 376 case 0x15: 377 mb[0] = MBA_CMPLT_1_16BIT; 378 mb[1] = MSW(stat); 379 qla2x00_async_event(vha, rsp, mb); 380 break; 381 case 0x16: 382 mb[0] = MBA_SCSI_COMPLETION; 383 mb[1] = MSW(stat); 384 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 385 qla2x00_async_event(vha, rsp, mb); 386 break; 387 default: 388 ql_dbg(ql_dbg_async, vha, 0x5028, 389 "Unrecognized interrupt type (%d).\n", stat & 0xff); 390 break; 391 } 392 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 393 rd_reg_word_relaxed(®->hccr); 394 } 395 qla2x00_handle_mbx_completion(ha, status); 396 spin_unlock_irqrestore(&ha->hardware_lock, flags); 397 398 return (IRQ_HANDLED); 399 } 400 401 /** 402 * qla2x00_mbx_completion() - Process mailbox command completions. 403 * @vha: SCSI driver HA context 404 * @mb0: Mailbox0 register 405 */ 406 static void 407 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 408 { 409 uint16_t cnt; 410 uint32_t mboxes; 411 __le16 __iomem *wptr; 412 struct qla_hw_data *ha = vha->hw; 413 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 414 415 /* Read all mbox registers? */ 416 WARN_ON_ONCE(ha->mbx_count > 32); 417 mboxes = (1ULL << ha->mbx_count) - 1; 418 if (!ha->mcp) 419 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 420 else 421 mboxes = ha->mcp->in_mb; 422 423 /* Load return mailbox registers. */ 424 ha->flags.mbox_int = 1; 425 ha->mailbox_out[0] = mb0; 426 mboxes >>= 1; 427 wptr = MAILBOX_REG(ha, reg, 1); 428 429 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 430 if (IS_QLA2200(ha) && cnt == 8) 431 wptr = MAILBOX_REG(ha, reg, 8); 432 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 433 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 434 else if (mboxes & BIT_0) 435 ha->mailbox_out[cnt] = rd_reg_word(wptr); 436 437 wptr++; 438 mboxes >>= 1; 439 } 440 } 441 442 static void 443 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 444 { 445 static char *event[] = 446 { "Complete", "Request Notification", "Time Extension" }; 447 int rval; 448 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 449 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 450 __le16 __iomem *wptr; 451 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 452 453 /* Seed data -- mailbox1 -> mailbox7. */ 454 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 455 wptr = ®24->mailbox1; 456 else if (IS_QLA8044(vha->hw)) 457 wptr = ®82->mailbox_out[1]; 458 else 459 return; 460 461 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 462 mb[cnt] = rd_reg_word(wptr); 463 464 ql_dbg(ql_dbg_async, vha, 0x5021, 465 "Inter-Driver Communication %s -- " 466 "%04x %04x %04x %04x %04x %04x %04x.\n", 467 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 468 mb[4], mb[5], mb[6]); 469 switch (aen) { 470 /* Handle IDC Error completion case. */ 471 case MBA_IDC_COMPLETE: 472 if (mb[1] >> 15) { 473 vha->hw->flags.idc_compl_status = 1; 474 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 475 complete(&vha->hw->dcbx_comp); 476 } 477 break; 478 479 case MBA_IDC_NOTIFY: 480 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 481 timeout = (descr >> 8) & 0xf; 482 ql_dbg(ql_dbg_async, vha, 0x5022, 483 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 484 vha->host_no, event[aen & 0xff], timeout); 485 486 if (!timeout) 487 return; 488 rval = qla2x00_post_idc_ack_work(vha, mb); 489 if (rval != QLA_SUCCESS) 490 ql_log(ql_log_warn, vha, 0x5023, 491 "IDC failed to post ACK.\n"); 492 break; 493 case MBA_IDC_TIME_EXT: 494 vha->hw->idc_extend_tmo = descr; 495 ql_dbg(ql_dbg_async, vha, 0x5087, 496 "%lu Inter-Driver Communication %s -- " 497 "Extend timeout by=%d.\n", 498 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 499 break; 500 } 501 } 502 503 #define LS_UNKNOWN 2 504 const char * 505 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 506 { 507 static const char *const link_speeds[] = { 508 "1", "2", "?", "4", "8", "16", "32", "10" 509 }; 510 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) 511 512 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 513 return link_speeds[0]; 514 else if (speed == 0x13) 515 return link_speeds[QLA_LAST_SPEED]; 516 else if (speed < QLA_LAST_SPEED) 517 return link_speeds[speed]; 518 else 519 return link_speeds[LS_UNKNOWN]; 520 } 521 522 static void 523 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 524 { 525 struct qla_hw_data *ha = vha->hw; 526 527 /* 528 * 8200 AEN Interpretation: 529 * mb[0] = AEN code 530 * mb[1] = AEN Reason code 531 * mb[2] = LSW of Peg-Halt Status-1 Register 532 * mb[6] = MSW of Peg-Halt Status-1 Register 533 * mb[3] = LSW of Peg-Halt Status-2 register 534 * mb[7] = MSW of Peg-Halt Status-2 register 535 * mb[4] = IDC Device-State Register value 536 * mb[5] = IDC Driver-Presence Register value 537 */ 538 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 539 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 540 mb[0], mb[1], mb[2], mb[6]); 541 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 542 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 543 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 544 545 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 546 IDC_HEARTBEAT_FAILURE)) { 547 ha->flags.nic_core_hung = 1; 548 ql_log(ql_log_warn, vha, 0x5060, 549 "83XX: F/W Error Reported: Check if reset required.\n"); 550 551 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 552 uint32_t protocol_engine_id, fw_err_code, err_level; 553 554 /* 555 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 556 * - PEG-Halt Status-1 Register: 557 * (LSW = mb[2], MSW = mb[6]) 558 * Bits 0-7 = protocol-engine ID 559 * Bits 8-28 = f/w error code 560 * Bits 29-31 = Error-level 561 * Error-level 0x1 = Non-Fatal error 562 * Error-level 0x2 = Recoverable Fatal error 563 * Error-level 0x4 = UnRecoverable Fatal error 564 * - PEG-Halt Status-2 Register: 565 * (LSW = mb[3], MSW = mb[7]) 566 */ 567 protocol_engine_id = (mb[2] & 0xff); 568 fw_err_code = (((mb[2] & 0xff00) >> 8) | 569 ((mb[6] & 0x1fff) << 8)); 570 err_level = ((mb[6] & 0xe000) >> 13); 571 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 572 "Register: protocol_engine_id=0x%x " 573 "fw_err_code=0x%x err_level=0x%x.\n", 574 protocol_engine_id, fw_err_code, err_level); 575 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 576 "Register: 0x%x%x.\n", mb[7], mb[3]); 577 if (err_level == ERR_LEVEL_NON_FATAL) { 578 ql_log(ql_log_warn, vha, 0x5063, 579 "Not a fatal error, f/w has recovered itself.\n"); 580 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 581 ql_log(ql_log_fatal, vha, 0x5064, 582 "Recoverable Fatal error: Chip reset " 583 "required.\n"); 584 qla83xx_schedule_work(vha, 585 QLA83XX_NIC_CORE_RESET); 586 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 587 ql_log(ql_log_fatal, vha, 0x5065, 588 "Unrecoverable Fatal error: Set FAILED " 589 "state, reboot required.\n"); 590 qla83xx_schedule_work(vha, 591 QLA83XX_NIC_CORE_UNRECOVERABLE); 592 } 593 } 594 595 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 596 uint16_t peg_fw_state, nw_interface_link_up; 597 uint16_t nw_interface_signal_detect, sfp_status; 598 uint16_t htbt_counter, htbt_monitor_enable; 599 uint16_t sfp_additional_info, sfp_multirate; 600 uint16_t sfp_tx_fault, link_speed, dcbx_status; 601 602 /* 603 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 604 * - PEG-to-FC Status Register: 605 * (LSW = mb[2], MSW = mb[6]) 606 * Bits 0-7 = Peg-Firmware state 607 * Bit 8 = N/W Interface Link-up 608 * Bit 9 = N/W Interface signal detected 609 * Bits 10-11 = SFP Status 610 * SFP Status 0x0 = SFP+ transceiver not expected 611 * SFP Status 0x1 = SFP+ transceiver not present 612 * SFP Status 0x2 = SFP+ transceiver invalid 613 * SFP Status 0x3 = SFP+ transceiver present and 614 * valid 615 * Bits 12-14 = Heartbeat Counter 616 * Bit 15 = Heartbeat Monitor Enable 617 * Bits 16-17 = SFP Additional Info 618 * SFP info 0x0 = Unregocnized transceiver for 619 * Ethernet 620 * SFP info 0x1 = SFP+ brand validation failed 621 * SFP info 0x2 = SFP+ speed validation failed 622 * SFP info 0x3 = SFP+ access error 623 * Bit 18 = SFP Multirate 624 * Bit 19 = SFP Tx Fault 625 * Bits 20-22 = Link Speed 626 * Bits 23-27 = Reserved 627 * Bits 28-30 = DCBX Status 628 * DCBX Status 0x0 = DCBX Disabled 629 * DCBX Status 0x1 = DCBX Enabled 630 * DCBX Status 0x2 = DCBX Exchange error 631 * Bit 31 = Reserved 632 */ 633 peg_fw_state = (mb[2] & 0x00ff); 634 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 635 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 636 sfp_status = ((mb[2] & 0x0c00) >> 10); 637 htbt_counter = ((mb[2] & 0x7000) >> 12); 638 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 639 sfp_additional_info = (mb[6] & 0x0003); 640 sfp_multirate = ((mb[6] & 0x0004) >> 2); 641 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 642 link_speed = ((mb[6] & 0x0070) >> 4); 643 dcbx_status = ((mb[6] & 0x7000) >> 12); 644 645 ql_log(ql_log_warn, vha, 0x5066, 646 "Peg-to-Fc Status Register:\n" 647 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 648 "nw_interface_signal_detect=0x%x" 649 "\nsfp_statis=0x%x.\n ", peg_fw_state, 650 nw_interface_link_up, nw_interface_signal_detect, 651 sfp_status); 652 ql_log(ql_log_warn, vha, 0x5067, 653 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 654 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 655 htbt_counter, htbt_monitor_enable, 656 sfp_additional_info, sfp_multirate); 657 ql_log(ql_log_warn, vha, 0x5068, 658 "sfp_tx_fault=0x%x, link_state=0x%x, " 659 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 660 dcbx_status); 661 662 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 663 } 664 665 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 666 ql_log(ql_log_warn, vha, 0x5069, 667 "Heartbeat Failure encountered, chip reset " 668 "required.\n"); 669 670 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 671 } 672 } 673 674 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 675 ql_log(ql_log_info, vha, 0x506a, 676 "IDC Device-State changed = 0x%x.\n", mb[4]); 677 if (ha->flags.nic_core_reset_owner) 678 return; 679 qla83xx_schedule_work(vha, MBA_IDC_AEN); 680 } 681 } 682 683 int 684 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 685 { 686 struct qla_hw_data *ha = vha->hw; 687 scsi_qla_host_t *vp; 688 uint32_t vp_did; 689 unsigned long flags; 690 int ret = 0; 691 692 if (!ha->num_vhosts) 693 return ret; 694 695 spin_lock_irqsave(&ha->vport_slock, flags); 696 list_for_each_entry(vp, &ha->vp_list, list) { 697 vp_did = vp->d_id.b24; 698 if (vp_did == rscn_entry) { 699 ret = 1; 700 break; 701 } 702 } 703 spin_unlock_irqrestore(&ha->vport_slock, flags); 704 705 return ret; 706 } 707 708 fc_port_t * 709 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 710 { 711 fc_port_t *f, *tf; 712 713 f = tf = NULL; 714 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) 715 if (f->loop_id == loop_id) 716 return f; 717 return NULL; 718 } 719 720 fc_port_t * 721 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) 722 { 723 fc_port_t *f, *tf; 724 725 f = tf = NULL; 726 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 727 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { 728 if (incl_deleted) 729 return f; 730 else if (f->deleted == 0) 731 return f; 732 } 733 } 734 return NULL; 735 } 736 737 fc_port_t * 738 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, 739 u8 incl_deleted) 740 { 741 fc_port_t *f, *tf; 742 743 f = tf = NULL; 744 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 745 if (f->d_id.b24 == id->b24) { 746 if (incl_deleted) 747 return f; 748 else if (f->deleted == 0) 749 return f; 750 } 751 } 752 return NULL; 753 } 754 755 /* Shall be called only on supported adapters. */ 756 static void 757 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 758 { 759 struct qla_hw_data *ha = vha->hw; 760 bool reset_isp_needed = false; 761 762 ql_log(ql_log_warn, vha, 0x02f0, 763 "MPI Heartbeat stop. MPI reset is%s needed. " 764 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", 765 mb[1] & BIT_8 ? "" : " not", 766 mb[0], mb[1], mb[2], mb[3]); 767 768 if ((mb[1] & BIT_8) == 0) 769 return; 770 771 ql_log(ql_log_warn, vha, 0x02f1, 772 "MPI Heartbeat stop. FW dump needed\n"); 773 774 if (ql2xfulldump_on_mpifail) { 775 ha->isp_ops->fw_dump(vha); 776 reset_isp_needed = true; 777 } 778 779 ha->isp_ops->mpi_fw_dump(vha, 1); 780 781 if (reset_isp_needed) { 782 vha->hw->flags.fw_init_done = 0; 783 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 784 qla2xxx_wake_dpc(vha); 785 } 786 } 787 788 static struct purex_item * 789 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) 790 { 791 struct purex_item *item = NULL; 792 uint8_t item_hdr_size = sizeof(*item); 793 794 if (size > QLA_DEFAULT_PAYLOAD_SIZE) { 795 item = kzalloc(item_hdr_size + 796 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); 797 } else { 798 if (atomic_inc_return(&vha->default_item.in_use) == 1) { 799 item = &vha->default_item; 800 goto initialize_purex_header; 801 } else { 802 item = kzalloc(item_hdr_size, GFP_ATOMIC); 803 } 804 } 805 if (!item) { 806 ql_log(ql_log_warn, vha, 0x5092, 807 ">> Failed allocate purex list item.\n"); 808 809 return NULL; 810 } 811 812 initialize_purex_header: 813 item->vha = vha; 814 item->size = size; 815 return item; 816 } 817 818 static void 819 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, 820 void (*process_item)(struct scsi_qla_host *vha, 821 struct purex_item *pkt)) 822 { 823 struct purex_list *list = &vha->purex_list; 824 ulong flags; 825 826 pkt->process_item = process_item; 827 828 spin_lock_irqsave(&list->lock, flags); 829 list_add_tail(&pkt->list, &list->head); 830 spin_unlock_irqrestore(&list->lock, flags); 831 832 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); 833 } 834 835 /** 836 * qla24xx_copy_std_pkt() - Copy over purex ELS which is 837 * contained in a single IOCB. 838 * purex packet. 839 * @vha: SCSI driver HA context 840 * @pkt: ELS packet 841 */ 842 static struct purex_item 843 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) 844 { 845 struct purex_item *item; 846 847 item = qla24xx_alloc_purex_item(vha, 848 QLA_DEFAULT_PAYLOAD_SIZE); 849 if (!item) 850 return item; 851 852 memcpy(&item->iocb, pkt, sizeof(item->iocb)); 853 return item; 854 } 855 856 /** 857 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can 858 * span over multiple IOCBs. 859 * @vha: SCSI driver HA context 860 * @pkt: ELS packet 861 * @rsp: Response queue 862 */ 863 static struct purex_item * 864 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, 865 struct rsp_que **rsp) 866 { 867 struct purex_entry_24xx *purex = *pkt; 868 struct rsp_que *rsp_q = *rsp; 869 sts_cont_entry_t *new_pkt; 870 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; 871 uint16_t buffer_copy_offset = 0; 872 uint16_t entry_count, entry_count_remaining; 873 struct purex_item *item; 874 void *fpin_pkt = NULL; 875 876 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) 877 - PURX_ELS_HEADER_SIZE; 878 pending_bytes = total_bytes; 879 entry_count = entry_count_remaining = purex->entry_count; 880 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 881 sizeof(purex->els_frame_payload) : pending_bytes; 882 ql_log(ql_log_info, vha, 0x509a, 883 "FPIN ELS, frame_size 0x%x, entry count %d\n", 884 total_bytes, entry_count); 885 886 item = qla24xx_alloc_purex_item(vha, total_bytes); 887 if (!item) 888 return item; 889 890 fpin_pkt = &item->iocb; 891 892 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes); 893 buffer_copy_offset += no_bytes; 894 pending_bytes -= no_bytes; 895 --entry_count_remaining; 896 897 ((response_t *)purex)->signature = RESPONSE_PROCESSED; 898 wmb(); 899 900 do { 901 while ((total_bytes > 0) && (entry_count_remaining > 0)) { 902 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { 903 ql_dbg(ql_dbg_async, vha, 0x5084, 904 "Ran out of IOCBs, partial data 0x%x\n", 905 buffer_copy_offset); 906 cpu_relax(); 907 continue; 908 } 909 910 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; 911 *pkt = new_pkt; 912 913 if (new_pkt->entry_type != STATUS_CONT_TYPE) { 914 ql_log(ql_log_warn, vha, 0x507a, 915 "Unexpected IOCB type, partial data 0x%x\n", 916 buffer_copy_offset); 917 break; 918 } 919 920 rsp_q->ring_index++; 921 if (rsp_q->ring_index == rsp_q->length) { 922 rsp_q->ring_index = 0; 923 rsp_q->ring_ptr = rsp_q->ring; 924 } else { 925 rsp_q->ring_ptr++; 926 } 927 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? 928 sizeof(new_pkt->data) : pending_bytes; 929 if ((buffer_copy_offset + no_bytes) <= total_bytes) { 930 memcpy(((uint8_t *)fpin_pkt + 931 buffer_copy_offset), new_pkt->data, 932 no_bytes); 933 buffer_copy_offset += no_bytes; 934 pending_bytes -= no_bytes; 935 --entry_count_remaining; 936 } else { 937 ql_log(ql_log_warn, vha, 0x5044, 938 "Attempt to copy more that we got, optimizing..%x\n", 939 buffer_copy_offset); 940 memcpy(((uint8_t *)fpin_pkt + 941 buffer_copy_offset), new_pkt->data, 942 total_bytes - buffer_copy_offset); 943 } 944 945 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; 946 wmb(); 947 } 948 949 if (pending_bytes != 0 || entry_count_remaining != 0) { 950 ql_log(ql_log_fatal, vha, 0x508b, 951 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", 952 total_bytes, entry_count_remaining); 953 qla24xx_free_purex_item(item); 954 return NULL; 955 } 956 } while (entry_count_remaining > 0); 957 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); 958 return item; 959 } 960 961 /** 962 * qla2x00_async_event() - Process aynchronous events. 963 * @vha: SCSI driver HA context 964 * @rsp: response queue 965 * @mb: Mailbox registers (0 - 3) 966 */ 967 void 968 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 969 { 970 uint16_t handle_cnt; 971 uint16_t cnt, mbx; 972 uint32_t handles[5]; 973 struct qla_hw_data *ha = vha->hw; 974 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 975 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 976 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 977 uint32_t rscn_entry, host_pid; 978 unsigned long flags; 979 fc_port_t *fcport = NULL; 980 981 if (!vha->hw->flags.fw_started) 982 return; 983 984 /* Setup to process RIO completion. */ 985 handle_cnt = 0; 986 if (IS_CNA_CAPABLE(ha)) 987 goto skip_rio; 988 switch (mb[0]) { 989 case MBA_SCSI_COMPLETION: 990 handles[0] = make_handle(mb[2], mb[1]); 991 handle_cnt = 1; 992 break; 993 case MBA_CMPLT_1_16BIT: 994 handles[0] = mb[1]; 995 handle_cnt = 1; 996 mb[0] = MBA_SCSI_COMPLETION; 997 break; 998 case MBA_CMPLT_2_16BIT: 999 handles[0] = mb[1]; 1000 handles[1] = mb[2]; 1001 handle_cnt = 2; 1002 mb[0] = MBA_SCSI_COMPLETION; 1003 break; 1004 case MBA_CMPLT_3_16BIT: 1005 handles[0] = mb[1]; 1006 handles[1] = mb[2]; 1007 handles[2] = mb[3]; 1008 handle_cnt = 3; 1009 mb[0] = MBA_SCSI_COMPLETION; 1010 break; 1011 case MBA_CMPLT_4_16BIT: 1012 handles[0] = mb[1]; 1013 handles[1] = mb[2]; 1014 handles[2] = mb[3]; 1015 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1016 handle_cnt = 4; 1017 mb[0] = MBA_SCSI_COMPLETION; 1018 break; 1019 case MBA_CMPLT_5_16BIT: 1020 handles[0] = mb[1]; 1021 handles[1] = mb[2]; 1022 handles[2] = mb[3]; 1023 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1024 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 1025 handle_cnt = 5; 1026 mb[0] = MBA_SCSI_COMPLETION; 1027 break; 1028 case MBA_CMPLT_2_32BIT: 1029 handles[0] = make_handle(mb[2], mb[1]); 1030 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), 1031 RD_MAILBOX_REG(ha, reg, 6)); 1032 handle_cnt = 2; 1033 mb[0] = MBA_SCSI_COMPLETION; 1034 break; 1035 default: 1036 break; 1037 } 1038 skip_rio: 1039 switch (mb[0]) { 1040 case MBA_SCSI_COMPLETION: /* Fast Post */ 1041 if (!vha->flags.online) 1042 break; 1043 1044 for (cnt = 0; cnt < handle_cnt; cnt++) 1045 qla2x00_process_completed_request(vha, rsp->req, 1046 handles[cnt]); 1047 break; 1048 1049 case MBA_RESET: /* Reset */ 1050 ql_dbg(ql_dbg_async, vha, 0x5002, 1051 "Asynchronous RESET.\n"); 1052 1053 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1054 break; 1055 1056 case MBA_SYSTEM_ERR: /* System Error */ 1057 mbx = 0; 1058 1059 vha->hw_err_cnt++; 1060 1061 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 1062 IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1063 u16 m[4]; 1064 1065 m[0] = rd_reg_word(®24->mailbox4); 1066 m[1] = rd_reg_word(®24->mailbox5); 1067 m[2] = rd_reg_word(®24->mailbox6); 1068 mbx = m[3] = rd_reg_word(®24->mailbox7); 1069 1070 ql_log(ql_log_warn, vha, 0x5003, 1071 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", 1072 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); 1073 } else 1074 ql_log(ql_log_warn, vha, 0x5003, 1075 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", 1076 mb[1], mb[2], mb[3]); 1077 1078 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 1079 rd_reg_word(®24->mailbox7) & BIT_8) 1080 ha->isp_ops->mpi_fw_dump(vha, 1); 1081 ha->isp_ops->fw_dump(vha); 1082 ha->flags.fw_init_done = 0; 1083 QLA_FW_STOPPED(ha); 1084 1085 if (IS_FWI2_CAPABLE(ha)) { 1086 if (mb[1] == 0 && mb[2] == 0) { 1087 ql_log(ql_log_fatal, vha, 0x5004, 1088 "Unrecoverable Hardware Error: adapter " 1089 "marked OFFLINE!\n"); 1090 vha->flags.online = 0; 1091 vha->device_flags |= DFLG_DEV_FAILED; 1092 } else { 1093 /* Check to see if MPI timeout occurred */ 1094 if ((mbx & MBX_3) && (ha->port_no == 0)) 1095 set_bit(MPI_RESET_NEEDED, 1096 &vha->dpc_flags); 1097 1098 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1099 } 1100 } else if (mb[1] == 0) { 1101 ql_log(ql_log_fatal, vha, 0x5005, 1102 "Unrecoverable Hardware Error: adapter marked " 1103 "OFFLINE!\n"); 1104 vha->flags.online = 0; 1105 vha->device_flags |= DFLG_DEV_FAILED; 1106 } else 1107 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1108 break; 1109 1110 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 1111 ql_log(ql_log_warn, vha, 0x5006, 1112 "ISP Request Transfer Error (%x).\n", mb[1]); 1113 1114 vha->hw_err_cnt++; 1115 1116 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1117 break; 1118 1119 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 1120 ql_log(ql_log_warn, vha, 0x5007, 1121 "ISP Response Transfer Error (%x).\n", mb[1]); 1122 1123 vha->hw_err_cnt++; 1124 1125 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1126 break; 1127 1128 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 1129 ql_dbg(ql_dbg_async, vha, 0x5008, 1130 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); 1131 break; 1132 1133 case MBA_LOOP_INIT_ERR: 1134 ql_log(ql_log_warn, vha, 0x5090, 1135 "LOOP INIT ERROR (%x).\n", mb[1]); 1136 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1137 break; 1138 1139 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 1140 ha->flags.lip_ae = 1; 1141 1142 ql_dbg(ql_dbg_async, vha, 0x5009, 1143 "LIP occurred (%x).\n", mb[1]); 1144 1145 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1146 atomic_set(&vha->loop_state, LOOP_DOWN); 1147 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1148 qla2x00_mark_all_devices_lost(vha); 1149 } 1150 1151 if (vha->vp_idx) { 1152 atomic_set(&vha->vp_state, VP_FAILED); 1153 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1154 } 1155 1156 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1157 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1158 1159 vha->flags.management_server_logged_in = 0; 1160 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 1161 break; 1162 1163 case MBA_LOOP_UP: /* Loop Up Event */ 1164 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1165 ha->link_data_rate = PORT_SPEED_1GB; 1166 else 1167 ha->link_data_rate = mb[1]; 1168 1169 ql_log(ql_log_info, vha, 0x500a, 1170 "LOOP UP detected (%s Gbps).\n", 1171 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 1172 1173 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1174 if (mb[2] & BIT_0) 1175 ql_log(ql_log_info, vha, 0x11a0, 1176 "FEC=enabled (link up).\n"); 1177 } 1178 1179 vha->flags.management_server_logged_in = 0; 1180 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 1181 1182 if (vha->link_down_time < vha->hw->port_down_retry_count) { 1183 vha->short_link_down_cnt++; 1184 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 1185 } 1186 1187 break; 1188 1189 case MBA_LOOP_DOWN: /* Loop Down Event */ 1190 SAVE_TOPO(ha); 1191 ha->flags.lip_ae = 0; 1192 ha->current_topology = 0; 1193 vha->link_down_time = 0; 1194 1195 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 1196 ? rd_reg_word(®24->mailbox4) : 0; 1197 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(®82->mailbox_out[4]) 1198 : mbx; 1199 ql_log(ql_log_info, vha, 0x500b, 1200 "LOOP DOWN detected (%x %x %x %x).\n", 1201 mb[1], mb[2], mb[3], mbx); 1202 1203 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1204 atomic_set(&vha->loop_state, LOOP_DOWN); 1205 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1206 /* 1207 * In case of loop down, restore WWPN from 1208 * NVRAM in case of FA-WWPN capable ISP 1209 * Restore for Physical Port only 1210 */ 1211 if (!vha->vp_idx) { 1212 if (ha->flags.fawwpn_enabled && 1213 (ha->current_topology == ISP_CFG_F)) { 1214 void *wwpn = ha->init_cb->port_name; 1215 1216 memcpy(vha->port_name, wwpn, WWN_SIZE); 1217 fc_host_port_name(vha->host) = 1218 wwn_to_u64(vha->port_name); 1219 ql_dbg(ql_dbg_init + ql_dbg_verbose, 1220 vha, 0x00d8, "LOOP DOWN detected," 1221 "restore WWPN %016llx\n", 1222 wwn_to_u64(vha->port_name)); 1223 } 1224 1225 clear_bit(VP_CONFIG_OK, &vha->vp_flags); 1226 } 1227 1228 vha->device_flags |= DFLG_NO_CABLE; 1229 qla2x00_mark_all_devices_lost(vha); 1230 } 1231 1232 if (vha->vp_idx) { 1233 atomic_set(&vha->vp_state, VP_FAILED); 1234 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1235 } 1236 1237 vha->flags.management_server_logged_in = 0; 1238 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1239 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 1240 break; 1241 1242 case MBA_LIP_RESET: /* LIP reset occurred */ 1243 ql_dbg(ql_dbg_async, vha, 0x500c, 1244 "LIP reset occurred (%x).\n", mb[1]); 1245 1246 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1247 atomic_set(&vha->loop_state, LOOP_DOWN); 1248 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1249 qla2x00_mark_all_devices_lost(vha); 1250 } 1251 1252 if (vha->vp_idx) { 1253 atomic_set(&vha->vp_state, VP_FAILED); 1254 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1255 } 1256 1257 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1258 1259 ha->operating_mode = LOOP; 1260 vha->flags.management_server_logged_in = 0; 1261 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 1262 break; 1263 1264 /* case MBA_DCBX_COMPLETE: */ 1265 case MBA_POINT_TO_POINT: /* Point-to-Point */ 1266 ha->flags.lip_ae = 0; 1267 1268 if (IS_QLA2100(ha)) 1269 break; 1270 1271 if (IS_CNA_CAPABLE(ha)) { 1272 ql_dbg(ql_dbg_async, vha, 0x500d, 1273 "DCBX Completed -- %04x %04x %04x.\n", 1274 mb[1], mb[2], mb[3]); 1275 if (ha->notify_dcbx_comp && !vha->vp_idx) 1276 complete(&ha->dcbx_comp); 1277 1278 } else 1279 ql_dbg(ql_dbg_async, vha, 0x500e, 1280 "Asynchronous P2P MODE received.\n"); 1281 1282 /* 1283 * Until there's a transition from loop down to loop up, treat 1284 * this as loop down only. 1285 */ 1286 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1287 atomic_set(&vha->loop_state, LOOP_DOWN); 1288 if (!atomic_read(&vha->loop_down_timer)) 1289 atomic_set(&vha->loop_down_timer, 1290 LOOP_DOWN_TIME); 1291 if (!N2N_TOPO(ha)) 1292 qla2x00_mark_all_devices_lost(vha); 1293 } 1294 1295 if (vha->vp_idx) { 1296 atomic_set(&vha->vp_state, VP_FAILED); 1297 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1298 } 1299 1300 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 1301 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1302 1303 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1304 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1305 1306 vha->flags.management_server_logged_in = 0; 1307 break; 1308 1309 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 1310 if (IS_QLA2100(ha)) 1311 break; 1312 1313 ql_dbg(ql_dbg_async, vha, 0x500f, 1314 "Configuration change detected: value=%x.\n", mb[1]); 1315 1316 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1317 atomic_set(&vha->loop_state, LOOP_DOWN); 1318 if (!atomic_read(&vha->loop_down_timer)) 1319 atomic_set(&vha->loop_down_timer, 1320 LOOP_DOWN_TIME); 1321 qla2x00_mark_all_devices_lost(vha); 1322 } 1323 1324 if (vha->vp_idx) { 1325 atomic_set(&vha->vp_state, VP_FAILED); 1326 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1327 } 1328 1329 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1330 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1331 break; 1332 1333 case MBA_PORT_UPDATE: /* Port database update */ 1334 /* 1335 * Handle only global and vn-port update events 1336 * 1337 * Relevant inputs: 1338 * mb[1] = N_Port handle of changed port 1339 * OR 0xffff for global event 1340 * mb[2] = New login state 1341 * 7 = Port logged out 1342 * mb[3] = LSB is vp_idx, 0xff = all vps 1343 * 1344 * Skip processing if: 1345 * Event is global, vp_idx is NOT all vps, 1346 * vp_idx does not match 1347 * Event is not global, vp_idx does not match 1348 */ 1349 if (IS_QLA2XXX_MIDTYPE(ha) && 1350 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 1351 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 1352 break; 1353 1354 if (mb[2] == 0x7) { 1355 ql_dbg(ql_dbg_async, vha, 0x5010, 1356 "Port %s %04x %04x %04x.\n", 1357 mb[1] == 0xffff ? "unavailable" : "logout", 1358 mb[1], mb[2], mb[3]); 1359 1360 if (mb[1] == 0xffff) 1361 goto global_port_update; 1362 1363 if (mb[1] == NPH_SNS_LID(ha)) { 1364 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1365 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1366 break; 1367 } 1368 1369 /* use handle_cnt for loop id/nport handle */ 1370 if (IS_FWI2_CAPABLE(ha)) 1371 handle_cnt = NPH_SNS; 1372 else 1373 handle_cnt = SIMPLE_NAME_SERVER; 1374 if (mb[1] == handle_cnt) { 1375 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1376 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1377 break; 1378 } 1379 1380 /* Port logout */ 1381 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1382 if (!fcport) 1383 break; 1384 if (atomic_read(&fcport->state) != FCS_ONLINE) 1385 break; 1386 ql_dbg(ql_dbg_async, vha, 0x508a, 1387 "Marking port lost loopid=%04x portid=%06x.\n", 1388 fcport->loop_id, fcport->d_id.b24); 1389 if (qla_ini_mode_enabled(vha)) { 1390 fcport->logout_on_delete = 0; 1391 qlt_schedule_sess_for_deletion(fcport); 1392 } 1393 break; 1394 1395 global_port_update: 1396 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1397 atomic_set(&vha->loop_state, LOOP_DOWN); 1398 atomic_set(&vha->loop_down_timer, 1399 LOOP_DOWN_TIME); 1400 vha->device_flags |= DFLG_NO_CABLE; 1401 qla2x00_mark_all_devices_lost(vha); 1402 } 1403 1404 if (vha->vp_idx) { 1405 atomic_set(&vha->vp_state, VP_FAILED); 1406 fc_vport_set_state(vha->fc_vport, 1407 FC_VPORT_FAILED); 1408 qla2x00_mark_all_devices_lost(vha); 1409 } 1410 1411 vha->flags.management_server_logged_in = 0; 1412 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1413 break; 1414 } 1415 1416 /* 1417 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1418 * event etc. earlier indicating loop is down) then process 1419 * it. Otherwise ignore it and Wait for RSCN to come in. 1420 */ 1421 atomic_set(&vha->loop_down_timer, 0); 1422 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1423 !ha->flags.n2n_ae && 1424 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1425 ql_dbg(ql_dbg_async, vha, 0x5011, 1426 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1427 mb[1], mb[2], mb[3]); 1428 break; 1429 } 1430 1431 ql_dbg(ql_dbg_async, vha, 0x5012, 1432 "Port database changed %04x %04x %04x.\n", 1433 mb[1], mb[2], mb[3]); 1434 1435 /* 1436 * Mark all devices as missing so we will login again. 1437 */ 1438 atomic_set(&vha->loop_state, LOOP_UP); 1439 vha->scan.scan_retry = 0; 1440 1441 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1442 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1443 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1444 break; 1445 1446 case MBA_RSCN_UPDATE: /* State Change Registration */ 1447 /* Check if the Vport has issued a SCR */ 1448 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1449 break; 1450 /* Only handle SCNs for our Vport index. */ 1451 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1452 break; 1453 1454 ql_log(ql_log_warn, vha, 0x5013, 1455 "RSCN database changed -- %04x %04x %04x.\n", 1456 mb[1], mb[2], mb[3]); 1457 1458 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1459 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1460 | vha->d_id.b.al_pa; 1461 if (rscn_entry == host_pid) { 1462 ql_dbg(ql_dbg_async, vha, 0x5014, 1463 "Ignoring RSCN update to local host " 1464 "port ID (%06x).\n", host_pid); 1465 break; 1466 } 1467 1468 /* Ignore reserved bits from RSCN-payload. */ 1469 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 1470 1471 /* Skip RSCNs for virtual ports on the same physical port */ 1472 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 1473 break; 1474 1475 atomic_set(&vha->loop_down_timer, 0); 1476 vha->flags.management_server_logged_in = 0; 1477 { 1478 struct event_arg ea; 1479 1480 memset(&ea, 0, sizeof(ea)); 1481 ea.id.b24 = rscn_entry; 1482 ea.id.b.rsvd_1 = rscn_entry >> 24; 1483 qla2x00_handle_rscn(vha, &ea); 1484 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1485 } 1486 break; 1487 case MBA_CONGN_NOTI_RECV: 1488 if (!ha->flags.scm_enabled || 1489 mb[1] != QLA_CON_PRIMITIVE_RECEIVED) 1490 break; 1491 1492 if (mb[2] == QLA_CONGESTION_ARB_WARNING) { 1493 ql_dbg(ql_dbg_async, vha, 0x509b, 1494 "Congestion Warning %04x %04x.\n", mb[1], mb[2]); 1495 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { 1496 ql_log(ql_log_warn, vha, 0x509b, 1497 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); 1498 } 1499 break; 1500 /* case MBA_RIO_RESPONSE: */ 1501 case MBA_ZIO_RESPONSE: 1502 ql_dbg(ql_dbg_async, vha, 0x5015, 1503 "[R|Z]IO update completion.\n"); 1504 1505 if (IS_FWI2_CAPABLE(ha)) 1506 qla24xx_process_response_queue(vha, rsp); 1507 else 1508 qla2x00_process_response_queue(rsp); 1509 break; 1510 1511 case MBA_DISCARD_RND_FRAME: 1512 ql_dbg(ql_dbg_async, vha, 0x5016, 1513 "Discard RND Frame -- %04x %04x %04x.\n", 1514 mb[1], mb[2], mb[3]); 1515 vha->interface_err_cnt++; 1516 break; 1517 1518 case MBA_TRACE_NOTIFICATION: 1519 ql_dbg(ql_dbg_async, vha, 0x5017, 1520 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1521 break; 1522 1523 case MBA_ISP84XX_ALERT: 1524 ql_dbg(ql_dbg_async, vha, 0x5018, 1525 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1526 mb[1], mb[2], mb[3]); 1527 1528 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1529 switch (mb[1]) { 1530 case A84_PANIC_RECOVERY: 1531 ql_log(ql_log_info, vha, 0x5019, 1532 "Alert 84XX: panic recovery %04x %04x.\n", 1533 mb[2], mb[3]); 1534 break; 1535 case A84_OP_LOGIN_COMPLETE: 1536 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1537 ql_log(ql_log_info, vha, 0x501a, 1538 "Alert 84XX: firmware version %x.\n", 1539 ha->cs84xx->op_fw_version); 1540 break; 1541 case A84_DIAG_LOGIN_COMPLETE: 1542 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1543 ql_log(ql_log_info, vha, 0x501b, 1544 "Alert 84XX: diagnostic firmware version %x.\n", 1545 ha->cs84xx->diag_fw_version); 1546 break; 1547 case A84_GOLD_LOGIN_COMPLETE: 1548 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1549 ha->cs84xx->fw_update = 1; 1550 ql_log(ql_log_info, vha, 0x501c, 1551 "Alert 84XX: gold firmware version %x.\n", 1552 ha->cs84xx->gold_fw_version); 1553 break; 1554 default: 1555 ql_log(ql_log_warn, vha, 0x501d, 1556 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1557 mb[1], mb[2], mb[3]); 1558 } 1559 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1560 break; 1561 case MBA_DCBX_START: 1562 ql_dbg(ql_dbg_async, vha, 0x501e, 1563 "DCBX Started -- %04x %04x %04x.\n", 1564 mb[1], mb[2], mb[3]); 1565 break; 1566 case MBA_DCBX_PARAM_UPDATE: 1567 ql_dbg(ql_dbg_async, vha, 0x501f, 1568 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1569 mb[1], mb[2], mb[3]); 1570 break; 1571 case MBA_FCF_CONF_ERR: 1572 ql_dbg(ql_dbg_async, vha, 0x5020, 1573 "FCF Configuration Error -- %04x %04x %04x.\n", 1574 mb[1], mb[2], mb[3]); 1575 break; 1576 case MBA_IDC_NOTIFY: 1577 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1578 mb[4] = rd_reg_word(®24->mailbox4); 1579 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1580 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1581 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1582 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1583 /* 1584 * Extend loop down timer since port is active. 1585 */ 1586 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1587 atomic_set(&vha->loop_down_timer, 1588 LOOP_DOWN_TIME); 1589 qla2xxx_wake_dpc(vha); 1590 } 1591 } 1592 fallthrough; 1593 case MBA_IDC_COMPLETE: 1594 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1595 complete(&ha->lb_portup_comp); 1596 fallthrough; 1597 case MBA_IDC_TIME_EXT: 1598 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1599 IS_QLA8044(ha)) 1600 qla81xx_idc_event(vha, mb[0], mb[1]); 1601 break; 1602 1603 case MBA_IDC_AEN: 1604 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1605 vha->hw_err_cnt++; 1606 qla27xx_handle_8200_aen(vha, mb); 1607 } else if (IS_QLA83XX(ha)) { 1608 mb[4] = rd_reg_word(®24->mailbox4); 1609 mb[5] = rd_reg_word(®24->mailbox5); 1610 mb[6] = rd_reg_word(®24->mailbox6); 1611 mb[7] = rd_reg_word(®24->mailbox7); 1612 qla83xx_handle_8200_aen(vha, mb); 1613 } else { 1614 ql_dbg(ql_dbg_async, vha, 0x5052, 1615 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", 1616 mb[0], mb[1], mb[2], mb[3]); 1617 } 1618 break; 1619 1620 case MBA_DPORT_DIAGNOSTICS: 1621 ql_dbg(ql_dbg_async, vha, 0x5052, 1622 "D-Port Diagnostics: %04x %04x %04x %04x\n", 1623 mb[0], mb[1], mb[2], mb[3]); 1624 memcpy(vha->dport_data, mb, sizeof(vha->dport_data)); 1625 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1626 static char *results[] = { 1627 "start", "done(pass)", "done(error)", "undefined" }; 1628 static char *types[] = { 1629 "none", "dynamic", "static", "other" }; 1630 uint result = mb[1] >> 0 & 0x3; 1631 uint type = mb[1] >> 6 & 0x3; 1632 uint sw = mb[1] >> 15 & 0x1; 1633 ql_dbg(ql_dbg_async, vha, 0x5052, 1634 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", 1635 results[result], types[type], sw); 1636 if (result == 2) { 1637 static char *reasons[] = { 1638 "reserved", "unexpected reject", 1639 "unexpected phase", "retry exceeded", 1640 "timed out", "not supported", 1641 "user stopped" }; 1642 uint reason = mb[2] >> 0 & 0xf; 1643 uint phase = mb[2] >> 12 & 0xf; 1644 ql_dbg(ql_dbg_async, vha, 0x5052, 1645 "D-Port Diagnostics: reason=%s phase=%u \n", 1646 reason < 7 ? reasons[reason] : "other", 1647 phase >> 1); 1648 } 1649 } 1650 break; 1651 1652 case MBA_TEMPERATURE_ALERT: 1653 ql_dbg(ql_dbg_async, vha, 0x505e, 1654 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1655 break; 1656 1657 case MBA_TRANS_INSERT: 1658 ql_dbg(ql_dbg_async, vha, 0x5091, 1659 "Transceiver Insertion: %04x\n", mb[1]); 1660 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); 1661 break; 1662 1663 case MBA_TRANS_REMOVE: 1664 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); 1665 break; 1666 1667 default: 1668 ql_dbg(ql_dbg_async, vha, 0x5057, 1669 "Unknown AEN:%04x %04x %04x %04x\n", 1670 mb[0], mb[1], mb[2], mb[3]); 1671 } 1672 1673 qlt_async_event(mb[0], vha, mb); 1674 1675 if (!vha->vp_idx && ha->num_vhosts) 1676 qla2x00_alert_all_vps(rsp, mb); 1677 } 1678 1679 /** 1680 * qla2x00_process_completed_request() - Process a Fast Post response. 1681 * @vha: SCSI driver HA context 1682 * @req: request queue 1683 * @index: SRB index 1684 */ 1685 void 1686 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1687 struct req_que *req, uint32_t index) 1688 { 1689 srb_t *sp; 1690 struct qla_hw_data *ha = vha->hw; 1691 1692 /* Validate handle. */ 1693 if (index >= req->num_outstanding_cmds) { 1694 ql_log(ql_log_warn, vha, 0x3014, 1695 "Invalid SCSI command index (%x).\n", index); 1696 1697 if (IS_P3P_TYPE(ha)) 1698 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1699 else 1700 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1701 return; 1702 } 1703 1704 sp = req->outstanding_cmds[index]; 1705 if (sp) { 1706 /* Free outstanding command slot. */ 1707 req->outstanding_cmds[index] = NULL; 1708 1709 /* Save ISP completion status */ 1710 sp->done(sp, DID_OK << 16); 1711 } else { 1712 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1713 1714 if (IS_P3P_TYPE(ha)) 1715 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1716 else 1717 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1718 } 1719 } 1720 1721 srb_t * 1722 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1723 struct req_que *req, void *iocb) 1724 { 1725 struct qla_hw_data *ha = vha->hw; 1726 sts_entry_t *pkt = iocb; 1727 srb_t *sp; 1728 uint16_t index; 1729 1730 index = LSW(pkt->handle); 1731 if (index >= req->num_outstanding_cmds) { 1732 ql_log(ql_log_warn, vha, 0x5031, 1733 "%s: Invalid command index (%x) type %8ph.\n", 1734 func, index, iocb); 1735 if (IS_P3P_TYPE(ha)) 1736 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1737 else 1738 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1739 return NULL; 1740 } 1741 sp = req->outstanding_cmds[index]; 1742 if (!sp) { 1743 ql_log(ql_log_warn, vha, 0x5032, 1744 "%s: Invalid completion handle (%x) -- timed-out.\n", 1745 func, index); 1746 return NULL; 1747 } 1748 if (sp->handle != index) { 1749 ql_log(ql_log_warn, vha, 0x5033, 1750 "%s: SRB handle (%x) mismatch %x.\n", func, 1751 sp->handle, index); 1752 return NULL; 1753 } 1754 1755 req->outstanding_cmds[index] = NULL; 1756 return sp; 1757 } 1758 1759 static void 1760 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1761 struct mbx_entry *mbx) 1762 { 1763 const char func[] = "MBX-IOCB"; 1764 const char *type; 1765 fc_port_t *fcport; 1766 srb_t *sp; 1767 struct srb_iocb *lio; 1768 uint16_t *data; 1769 uint16_t status; 1770 1771 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1772 if (!sp) 1773 return; 1774 1775 lio = &sp->u.iocb_cmd; 1776 type = sp->name; 1777 fcport = sp->fcport; 1778 data = lio->u.logio.data; 1779 1780 data[0] = MBS_COMMAND_ERROR; 1781 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1782 QLA_LOGIO_LOGIN_RETRIED : 0; 1783 if (mbx->entry_status) { 1784 ql_dbg(ql_dbg_async, vha, 0x5043, 1785 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1786 "entry-status=%x status=%x state-flag=%x " 1787 "status-flags=%x.\n", type, sp->handle, 1788 fcport->d_id.b.domain, fcport->d_id.b.area, 1789 fcport->d_id.b.al_pa, mbx->entry_status, 1790 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1791 le16_to_cpu(mbx->status_flags)); 1792 1793 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1794 mbx, sizeof(*mbx)); 1795 1796 goto logio_done; 1797 } 1798 1799 status = le16_to_cpu(mbx->status); 1800 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1801 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1802 status = 0; 1803 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1804 ql_dbg(ql_dbg_async, vha, 0x5045, 1805 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1806 type, sp->handle, fcport->d_id.b.domain, 1807 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1808 le16_to_cpu(mbx->mb1)); 1809 1810 data[0] = MBS_COMMAND_COMPLETE; 1811 if (sp->type == SRB_LOGIN_CMD) { 1812 fcport->port_type = FCT_TARGET; 1813 if (le16_to_cpu(mbx->mb1) & BIT_0) 1814 fcport->port_type = FCT_INITIATOR; 1815 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1816 fcport->flags |= FCF_FCP2_DEVICE; 1817 } 1818 goto logio_done; 1819 } 1820 1821 data[0] = le16_to_cpu(mbx->mb0); 1822 switch (data[0]) { 1823 case MBS_PORT_ID_USED: 1824 data[1] = le16_to_cpu(mbx->mb1); 1825 break; 1826 case MBS_LOOP_ID_USED: 1827 break; 1828 default: 1829 data[0] = MBS_COMMAND_ERROR; 1830 break; 1831 } 1832 1833 ql_log(ql_log_warn, vha, 0x5046, 1834 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1835 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1836 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1837 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1838 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1839 le16_to_cpu(mbx->mb7)); 1840 1841 logio_done: 1842 sp->done(sp, 0); 1843 } 1844 1845 static void 1846 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1847 struct mbx_24xx_entry *pkt) 1848 { 1849 const char func[] = "MBX-IOCB2"; 1850 struct qla_hw_data *ha = vha->hw; 1851 srb_t *sp; 1852 struct srb_iocb *si; 1853 u16 sz, i; 1854 int res; 1855 1856 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1857 if (!sp) 1858 return; 1859 1860 if (sp->type == SRB_SCSI_CMD || 1861 sp->type == SRB_NVME_CMD || 1862 sp->type == SRB_TM_CMD) { 1863 ql_log(ql_log_warn, vha, 0x509d, 1864 "Inconsistent event entry type %d\n", sp->type); 1865 if (IS_P3P_TYPE(ha)) 1866 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1867 else 1868 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1869 return; 1870 } 1871 1872 si = &sp->u.iocb_cmd; 1873 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); 1874 1875 for (i = 0; i < sz; i++) 1876 si->u.mbx.in_mb[i] = pkt->mb[i]; 1877 1878 res = (si->u.mbx.in_mb[0] & MBS_MASK); 1879 1880 sp->done(sp, res); 1881 } 1882 1883 static void 1884 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1885 struct nack_to_isp *pkt) 1886 { 1887 const char func[] = "nack"; 1888 srb_t *sp; 1889 int res = 0; 1890 1891 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1892 if (!sp) 1893 return; 1894 1895 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) 1896 res = QLA_FUNCTION_FAILED; 1897 1898 sp->done(sp, res); 1899 } 1900 1901 static void 1902 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1903 sts_entry_t *pkt, int iocb_type) 1904 { 1905 const char func[] = "CT_IOCB"; 1906 const char *type; 1907 srb_t *sp; 1908 struct bsg_job *bsg_job; 1909 struct fc_bsg_reply *bsg_reply; 1910 uint16_t comp_status; 1911 int res = 0; 1912 1913 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1914 if (!sp) 1915 return; 1916 1917 switch (sp->type) { 1918 case SRB_CT_CMD: 1919 bsg_job = sp->u.bsg_job; 1920 bsg_reply = bsg_job->reply; 1921 1922 type = "ct pass-through"; 1923 1924 comp_status = le16_to_cpu(pkt->comp_status); 1925 1926 /* 1927 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1928 * fc payload to the caller 1929 */ 1930 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1931 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1932 1933 if (comp_status != CS_COMPLETE) { 1934 if (comp_status == CS_DATA_UNDERRUN) { 1935 res = DID_OK << 16; 1936 bsg_reply->reply_payload_rcv_len = 1937 le16_to_cpu(pkt->rsp_info_len); 1938 1939 ql_log(ql_log_warn, vha, 0x5048, 1940 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", 1941 type, comp_status, 1942 bsg_reply->reply_payload_rcv_len); 1943 } else { 1944 ql_log(ql_log_warn, vha, 0x5049, 1945 "CT pass-through-%s error comp_status=0x%x.\n", 1946 type, comp_status); 1947 res = DID_ERROR << 16; 1948 bsg_reply->reply_payload_rcv_len = 0; 1949 } 1950 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1951 pkt, sizeof(*pkt)); 1952 } else { 1953 res = DID_OK << 16; 1954 bsg_reply->reply_payload_rcv_len = 1955 bsg_job->reply_payload.payload_len; 1956 bsg_job->reply_len = 0; 1957 } 1958 break; 1959 case SRB_CT_PTHRU_CMD: 1960 /* 1961 * borrowing sts_entry_24xx.comp_status. 1962 * same location as ct_entry_24xx.comp_status 1963 */ 1964 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 1965 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1966 sp->name); 1967 break; 1968 } 1969 1970 sp->done(sp, res); 1971 } 1972 1973 static void 1974 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1975 struct sts_entry_24xx *pkt, int iocb_type) 1976 { 1977 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; 1978 const char func[] = "ELS_CT_IOCB"; 1979 const char *type; 1980 srb_t *sp; 1981 struct bsg_job *bsg_job; 1982 struct fc_bsg_reply *bsg_reply; 1983 uint16_t comp_status; 1984 uint32_t fw_status[3]; 1985 int res; 1986 struct srb_iocb *els; 1987 1988 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1989 if (!sp) 1990 return; 1991 1992 type = NULL; 1993 switch (sp->type) { 1994 case SRB_ELS_CMD_RPT: 1995 case SRB_ELS_CMD_HST: 1996 type = "els"; 1997 break; 1998 case SRB_CT_CMD: 1999 type = "ct pass-through"; 2000 break; 2001 case SRB_ELS_DCMD: 2002 type = "Driver ELS logo"; 2003 if (iocb_type != ELS_IOCB_TYPE) { 2004 ql_dbg(ql_dbg_user, vha, 0x5047, 2005 "Completing %s: (%p) type=%d.\n", 2006 type, sp, sp->type); 2007 sp->done(sp, 0); 2008 return; 2009 } 2010 break; 2011 case SRB_CT_PTHRU_CMD: 2012 /* borrowing sts_entry_24xx.comp_status. 2013 same location as ct_entry_24xx.comp_status 2014 */ 2015 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, 2016 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 2017 sp->name); 2018 sp->done(sp, res); 2019 return; 2020 default: 2021 ql_dbg(ql_dbg_user, vha, 0x503e, 2022 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 2023 return; 2024 } 2025 2026 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 2027 fw_status[1] = le32_to_cpu(ese->error_subcode_1); 2028 fw_status[2] = le32_to_cpu(ese->error_subcode_2); 2029 2030 if (iocb_type == ELS_IOCB_TYPE) { 2031 els = &sp->u.iocb_cmd; 2032 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); 2033 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); 2034 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); 2035 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); 2036 if (comp_status == CS_COMPLETE) { 2037 res = DID_OK << 16; 2038 } else { 2039 if (comp_status == CS_DATA_UNDERRUN) { 2040 res = DID_OK << 16; 2041 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( 2042 ese->total_byte_count)); 2043 } else { 2044 els->u.els_plogi.len = 0; 2045 res = DID_ERROR << 16; 2046 } 2047 } 2048 ql_dbg(ql_dbg_disc, vha, 0x503f, 2049 "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", 2050 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2051 le32_to_cpu(ese->total_byte_count)); 2052 goto els_ct_done; 2053 } 2054 2055 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 2056 * fc payload to the caller 2057 */ 2058 bsg_job = sp->u.bsg_job; 2059 bsg_reply = bsg_job->reply; 2060 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 2061 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 2062 2063 if (comp_status != CS_COMPLETE) { 2064 if (comp_status == CS_DATA_UNDERRUN) { 2065 res = DID_OK << 16; 2066 bsg_reply->reply_payload_rcv_len = 2067 le32_to_cpu(ese->total_byte_count); 2068 2069 ql_dbg(ql_dbg_user, vha, 0x503f, 2070 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2071 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 2072 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2073 le32_to_cpu(ese->total_byte_count)); 2074 } else { 2075 ql_dbg(ql_dbg_user, vha, 0x5040, 2076 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2077 "error subcode 1=0x%x error subcode 2=0x%x.\n", 2078 type, sp->handle, comp_status, 2079 le32_to_cpu(ese->error_subcode_1), 2080 le32_to_cpu(ese->error_subcode_2)); 2081 res = DID_ERROR << 16; 2082 bsg_reply->reply_payload_rcv_len = 0; 2083 } 2084 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), 2085 fw_status, sizeof(fw_status)); 2086 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 2087 pkt, sizeof(*pkt)); 2088 } 2089 else { 2090 res = DID_OK << 16; 2091 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 2092 bsg_job->reply_len = 0; 2093 } 2094 els_ct_done: 2095 2096 sp->done(sp, res); 2097 } 2098 2099 static void 2100 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 2101 struct logio_entry_24xx *logio) 2102 { 2103 const char func[] = "LOGIO-IOCB"; 2104 const char *type; 2105 fc_port_t *fcport; 2106 srb_t *sp; 2107 struct srb_iocb *lio; 2108 uint16_t *data; 2109 uint32_t iop[2]; 2110 2111 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 2112 if (!sp) 2113 return; 2114 2115 lio = &sp->u.iocb_cmd; 2116 type = sp->name; 2117 fcport = sp->fcport; 2118 data = lio->u.logio.data; 2119 2120 data[0] = MBS_COMMAND_ERROR; 2121 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 2122 QLA_LOGIO_LOGIN_RETRIED : 0; 2123 if (logio->entry_status) { 2124 ql_log(ql_log_warn, fcport->vha, 0x5034, 2125 "Async-%s error entry - %8phC hdl=%x" 2126 "portid=%02x%02x%02x entry-status=%x.\n", 2127 type, fcport->port_name, sp->handle, fcport->d_id.b.domain, 2128 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2129 logio->entry_status); 2130 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 2131 logio, sizeof(*logio)); 2132 2133 goto logio_done; 2134 } 2135 2136 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 2137 ql_dbg(ql_dbg_async, sp->vha, 0x5036, 2138 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", 2139 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2140 le32_to_cpu(logio->io_parameter[0])); 2141 2142 vha->hw->exch_starvation = 0; 2143 data[0] = MBS_COMMAND_COMPLETE; 2144 2145 if (sp->type == SRB_PRLI_CMD) { 2146 lio->u.logio.iop[0] = 2147 le32_to_cpu(logio->io_parameter[0]); 2148 lio->u.logio.iop[1] = 2149 le32_to_cpu(logio->io_parameter[1]); 2150 goto logio_done; 2151 } 2152 2153 if (sp->type != SRB_LOGIN_CMD) 2154 goto logio_done; 2155 2156 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2157 if (iop[0] & BIT_4) { 2158 fcport->port_type = FCT_TARGET; 2159 if (iop[0] & BIT_8) 2160 fcport->flags |= FCF_FCP2_DEVICE; 2161 } else if (iop[0] & BIT_5) 2162 fcport->port_type = FCT_INITIATOR; 2163 2164 if (iop[0] & BIT_7) 2165 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2166 2167 if (logio->io_parameter[7] || logio->io_parameter[8]) 2168 fcport->supported_classes |= FC_COS_CLASS2; 2169 if (logio->io_parameter[9] || logio->io_parameter[10]) 2170 fcport->supported_classes |= FC_COS_CLASS3; 2171 2172 goto logio_done; 2173 } 2174 2175 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2176 iop[1] = le32_to_cpu(logio->io_parameter[1]); 2177 lio->u.logio.iop[0] = iop[0]; 2178 lio->u.logio.iop[1] = iop[1]; 2179 switch (iop[0]) { 2180 case LSC_SCODE_PORTID_USED: 2181 data[0] = MBS_PORT_ID_USED; 2182 data[1] = LSW(iop[1]); 2183 break; 2184 case LSC_SCODE_NPORT_USED: 2185 data[0] = MBS_LOOP_ID_USED; 2186 break; 2187 case LSC_SCODE_CMD_FAILED: 2188 if (iop[1] == 0x0606) { 2189 /* 2190 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, 2191 * Target side acked. 2192 */ 2193 data[0] = MBS_COMMAND_COMPLETE; 2194 goto logio_done; 2195 } 2196 data[0] = MBS_COMMAND_ERROR; 2197 break; 2198 case LSC_SCODE_NOXCB: 2199 vha->hw->exch_starvation++; 2200 if (vha->hw->exch_starvation > 5) { 2201 ql_log(ql_log_warn, vha, 0xd046, 2202 "Exchange starvation. Resetting RISC\n"); 2203 2204 vha->hw->exch_starvation = 0; 2205 2206 if (IS_P3P_TYPE(vha->hw)) 2207 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2208 else 2209 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2210 qla2xxx_wake_dpc(vha); 2211 } 2212 fallthrough; 2213 default: 2214 data[0] = MBS_COMMAND_ERROR; 2215 break; 2216 } 2217 2218 ql_log(ql_log_warn, sp->vha, 0x5037, 2219 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", 2220 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2221 le16_to_cpu(logio->comp_status), 2222 le32_to_cpu(logio->io_parameter[0]), 2223 le32_to_cpu(logio->io_parameter[1])); 2224 2225 logio_done: 2226 sp->done(sp, 0); 2227 } 2228 2229 static void 2230 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 2231 { 2232 const char func[] = "TMF-IOCB"; 2233 const char *type; 2234 fc_port_t *fcport; 2235 srb_t *sp; 2236 struct srb_iocb *iocb; 2237 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2238 u16 comp_status; 2239 2240 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 2241 if (!sp) 2242 return; 2243 2244 comp_status = le16_to_cpu(sts->comp_status); 2245 iocb = &sp->u.iocb_cmd; 2246 type = sp->name; 2247 fcport = sp->fcport; 2248 iocb->u.tmf.data = QLA_SUCCESS; 2249 2250 if (sts->entry_status) { 2251 ql_log(ql_log_warn, fcport->vha, 0x5038, 2252 "Async-%s error - hdl=%x entry-status(%x).\n", 2253 type, sp->handle, sts->entry_status); 2254 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2255 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 2256 ql_log(ql_log_warn, fcport->vha, 0x5039, 2257 "Async-%s error - hdl=%x completion status(%x).\n", 2258 type, sp->handle, comp_status); 2259 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2260 } else if ((le16_to_cpu(sts->scsi_status) & 2261 SS_RESPONSE_INFO_LEN_VALID)) { 2262 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2263 ql_log(ql_log_warn, fcport->vha, 0x503b, 2264 "Async-%s error - hdl=%x not enough response(%d).\n", 2265 type, sp->handle, sts->rsp_data_len); 2266 } else if (sts->data[3]) { 2267 ql_log(ql_log_warn, fcport->vha, 0x503c, 2268 "Async-%s error - hdl=%x response(%x).\n", 2269 type, sp->handle, sts->data[3]); 2270 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2271 } 2272 } 2273 2274 switch (comp_status) { 2275 case CS_PORT_LOGGED_OUT: 2276 case CS_PORT_CONFIG_CHG: 2277 case CS_PORT_BUSY: 2278 case CS_INCOMPLETE: 2279 case CS_PORT_UNAVAILABLE: 2280 case CS_TIMEOUT: 2281 case CS_RESET: 2282 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2283 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2284 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n", 2285 fcport->d_id.b.domain, fcport->d_id.b.area, 2286 fcport->d_id.b.al_pa, 2287 port_state_str[FCS_ONLINE], 2288 comp_status); 2289 2290 qlt_schedule_sess_for_deletion(fcport); 2291 } 2292 break; 2293 2294 default: 2295 break; 2296 } 2297 2298 if (iocb->u.tmf.data != QLA_SUCCESS) 2299 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, 2300 sts, sizeof(*sts)); 2301 2302 sp->done(sp, 0); 2303 } 2304 2305 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2306 void *tsk, srb_t *sp) 2307 { 2308 fc_port_t *fcport; 2309 struct srb_iocb *iocb; 2310 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2311 uint16_t state_flags; 2312 struct nvmefc_fcp_req *fd; 2313 uint16_t ret = QLA_SUCCESS; 2314 __le16 comp_status = sts->comp_status; 2315 int logit = 0; 2316 2317 iocb = &sp->u.iocb_cmd; 2318 fcport = sp->fcport; 2319 iocb->u.nvme.comp_status = comp_status; 2320 state_flags = le16_to_cpu(sts->state_flags); 2321 fd = iocb->u.nvme.desc; 2322 2323 if (unlikely(iocb->u.nvme.aen_op)) 2324 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); 2325 2326 if (unlikely(comp_status != CS_COMPLETE)) 2327 logit = 1; 2328 2329 fd->transferred_length = fd->payload_length - 2330 le32_to_cpu(sts->residual_len); 2331 2332 /* 2333 * State flags: Bit 6 and 0. 2334 * If 0 is set, we don't care about 6. 2335 * both cases resp was dma'd to host buffer 2336 * if both are 0, that is good path case. 2337 * if six is set and 0 is clear, we need to 2338 * copy resp data from status iocb to resp buffer. 2339 */ 2340 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { 2341 iocb->u.nvme.rsp_pyld_len = 0; 2342 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == 2343 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { 2344 /* Response already DMA'd to fd->rspaddr. */ 2345 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2346 } else if ((state_flags & SF_FCP_RSP_DMA)) { 2347 /* 2348 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this 2349 * as an error. 2350 */ 2351 iocb->u.nvme.rsp_pyld_len = 0; 2352 fd->transferred_length = 0; 2353 ql_dbg(ql_dbg_io, fcport->vha, 0x307a, 2354 "Unexpected values in NVMe_RSP IU.\n"); 2355 logit = 1; 2356 } else if (state_flags & SF_NVME_ERSP) { 2357 uint32_t *inbuf, *outbuf; 2358 uint16_t iter; 2359 2360 inbuf = (uint32_t *)&sts->nvme_ersp_data; 2361 outbuf = (uint32_t *)fd->rspaddr; 2362 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2363 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > 2364 sizeof(struct nvme_fc_ersp_iu))) { 2365 if (ql_mask_match(ql_dbg_io)) { 2366 WARN_ONCE(1, "Unexpected response payload length %u.\n", 2367 iocb->u.nvme.rsp_pyld_len); 2368 ql_log(ql_log_warn, fcport->vha, 0x5100, 2369 "Unexpected response payload length %u.\n", 2370 iocb->u.nvme.rsp_pyld_len); 2371 } 2372 iocb->u.nvme.rsp_pyld_len = 2373 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); 2374 } 2375 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; 2376 for (; iter; iter--) 2377 *outbuf++ = swab32(*inbuf++); 2378 } 2379 2380 if (state_flags & SF_NVME_ERSP) { 2381 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; 2382 u32 tgt_xfer_len; 2383 2384 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); 2385 if (fd->transferred_length != tgt_xfer_len) { 2386 ql_log(ql_log_warn, fcport->vha, 0x3079, 2387 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", 2388 tgt_xfer_len, fd->transferred_length); 2389 logit = 1; 2390 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { 2391 /* 2392 * Do not log if this is just an underflow and there 2393 * is no data loss. 2394 */ 2395 logit = 0; 2396 } 2397 } 2398 2399 if (unlikely(logit)) 2400 ql_log(ql_log_warn, fcport->vha, 0x5060, 2401 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", 2402 sp->name, sp->handle, comp_status, 2403 fd->transferred_length, le32_to_cpu(sts->residual_len), 2404 sts->ox_id); 2405 2406 /* 2407 * If transport error then Failure (HBA rejects request) 2408 * otherwise transport will handle. 2409 */ 2410 switch (le16_to_cpu(comp_status)) { 2411 case CS_COMPLETE: 2412 break; 2413 2414 case CS_RESET: 2415 case CS_PORT_UNAVAILABLE: 2416 case CS_PORT_LOGGED_OUT: 2417 fcport->nvme_flag |= NVME_FLAG_RESETTING; 2418 fallthrough; 2419 case CS_ABORTED: 2420 case CS_PORT_BUSY: 2421 fd->transferred_length = 0; 2422 iocb->u.nvme.rsp_pyld_len = 0; 2423 ret = QLA_ABORTED; 2424 break; 2425 case CS_DATA_UNDERRUN: 2426 break; 2427 default: 2428 ret = QLA_FUNCTION_FAILED; 2429 break; 2430 } 2431 sp->done(sp, ret); 2432 } 2433 2434 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 2435 struct vp_ctrl_entry_24xx *vce) 2436 { 2437 const char func[] = "CTRLVP-IOCB"; 2438 srb_t *sp; 2439 int rval = QLA_SUCCESS; 2440 2441 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 2442 if (!sp) 2443 return; 2444 2445 if (vce->entry_status != 0) { 2446 ql_dbg(ql_dbg_vport, vha, 0x10c4, 2447 "%s: Failed to complete IOCB -- error status (%x)\n", 2448 sp->name, vce->entry_status); 2449 rval = QLA_FUNCTION_FAILED; 2450 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 2451 ql_dbg(ql_dbg_vport, vha, 0x10c5, 2452 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 2453 sp->name, le16_to_cpu(vce->comp_status), 2454 le16_to_cpu(vce->vp_idx_failed)); 2455 rval = QLA_FUNCTION_FAILED; 2456 } else { 2457 ql_dbg(ql_dbg_vport, vha, 0x10c6, 2458 "Done %s.\n", __func__); 2459 } 2460 2461 sp->rc = rval; 2462 sp->done(sp, rval); 2463 } 2464 2465 /* Process a single response queue entry. */ 2466 static void qla2x00_process_response_entry(struct scsi_qla_host *vha, 2467 struct rsp_que *rsp, 2468 sts_entry_t *pkt) 2469 { 2470 sts21_entry_t *sts21_entry; 2471 sts22_entry_t *sts22_entry; 2472 uint16_t handle_cnt; 2473 uint16_t cnt; 2474 2475 switch (pkt->entry_type) { 2476 case STATUS_TYPE: 2477 qla2x00_status_entry(vha, rsp, pkt); 2478 break; 2479 case STATUS_TYPE_21: 2480 sts21_entry = (sts21_entry_t *)pkt; 2481 handle_cnt = sts21_entry->handle_count; 2482 for (cnt = 0; cnt < handle_cnt; cnt++) 2483 qla2x00_process_completed_request(vha, rsp->req, 2484 sts21_entry->handle[cnt]); 2485 break; 2486 case STATUS_TYPE_22: 2487 sts22_entry = (sts22_entry_t *)pkt; 2488 handle_cnt = sts22_entry->handle_count; 2489 for (cnt = 0; cnt < handle_cnt; cnt++) 2490 qla2x00_process_completed_request(vha, rsp->req, 2491 sts22_entry->handle[cnt]); 2492 break; 2493 case STATUS_CONT_TYPE: 2494 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2495 break; 2496 case MBX_IOCB_TYPE: 2497 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); 2498 break; 2499 case CT_IOCB_TYPE: 2500 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2501 break; 2502 default: 2503 /* Type Not Supported. */ 2504 ql_log(ql_log_warn, vha, 0x504a, 2505 "Received unknown response pkt type %x entry status=%x.\n", 2506 pkt->entry_type, pkt->entry_status); 2507 break; 2508 } 2509 } 2510 2511 /** 2512 * qla2x00_process_response_queue() - Process response queue entries. 2513 * @rsp: response queue 2514 */ 2515 void 2516 qla2x00_process_response_queue(struct rsp_que *rsp) 2517 { 2518 struct scsi_qla_host *vha; 2519 struct qla_hw_data *ha = rsp->hw; 2520 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2521 sts_entry_t *pkt; 2522 2523 vha = pci_get_drvdata(ha->pdev); 2524 2525 if (!vha->flags.online) 2526 return; 2527 2528 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2529 pkt = (sts_entry_t *)rsp->ring_ptr; 2530 2531 rsp->ring_index++; 2532 if (rsp->ring_index == rsp->length) { 2533 rsp->ring_index = 0; 2534 rsp->ring_ptr = rsp->ring; 2535 } else { 2536 rsp->ring_ptr++; 2537 } 2538 2539 if (pkt->entry_status != 0) { 2540 qla2x00_error_entry(vha, rsp, pkt); 2541 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2542 wmb(); 2543 continue; 2544 } 2545 2546 qla2x00_process_response_entry(vha, rsp, pkt); 2547 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2548 wmb(); 2549 } 2550 2551 /* Adjust ring index */ 2552 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 2553 } 2554 2555 static inline void 2556 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2557 uint32_t sense_len, struct rsp_que *rsp, int res) 2558 { 2559 struct scsi_qla_host *vha = sp->vha; 2560 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2561 uint32_t track_sense_len; 2562 2563 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2564 sense_len = SCSI_SENSE_BUFFERSIZE; 2565 2566 SET_CMD_SENSE_LEN(sp, sense_len); 2567 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2568 track_sense_len = sense_len; 2569 2570 if (sense_len > par_sense_len) 2571 sense_len = par_sense_len; 2572 2573 memcpy(cp->sense_buffer, sense_data, sense_len); 2574 2575 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2576 track_sense_len -= sense_len; 2577 SET_CMD_SENSE_LEN(sp, track_sense_len); 2578 2579 if (track_sense_len != 0) { 2580 rsp->status_srb = sp; 2581 cp->result = res; 2582 } 2583 2584 if (sense_len) { 2585 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 2586 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 2587 sp->vha->host_no, cp->device->id, cp->device->lun, 2588 cp); 2589 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 2590 cp->sense_buffer, sense_len); 2591 } 2592 } 2593 2594 struct scsi_dif_tuple { 2595 __be16 guard; /* Checksum */ 2596 __be16 app_tag; /* APPL identifier */ 2597 __be32 ref_tag; /* Target LBA or indirect LBA */ 2598 }; 2599 2600 /* 2601 * Checks the guard or meta-data for the type of error 2602 * detected by the HBA. In case of errors, we set the 2603 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 2604 * to indicate to the kernel that the HBA detected error. 2605 */ 2606 static inline int 2607 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 2608 { 2609 struct scsi_qla_host *vha = sp->vha; 2610 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2611 uint8_t *ap = &sts24->data[12]; 2612 uint8_t *ep = &sts24->data[20]; 2613 uint32_t e_ref_tag, a_ref_tag; 2614 uint16_t e_app_tag, a_app_tag; 2615 uint16_t e_guard, a_guard; 2616 2617 /* 2618 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 2619 * would make guard field appear at offset 2 2620 */ 2621 a_guard = get_unaligned_le16(ap + 2); 2622 a_app_tag = get_unaligned_le16(ap + 0); 2623 a_ref_tag = get_unaligned_le32(ap + 4); 2624 e_guard = get_unaligned_le16(ep + 2); 2625 e_app_tag = get_unaligned_le16(ep + 0); 2626 e_ref_tag = get_unaligned_le32(ep + 4); 2627 2628 ql_dbg(ql_dbg_io, vha, 0x3023, 2629 "iocb(s) %p Returned STATUS.\n", sts24); 2630 2631 ql_dbg(ql_dbg_io, vha, 0x3024, 2632 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 2633 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 2634 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 2635 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 2636 a_app_tag, e_app_tag, a_guard, e_guard); 2637 2638 /* 2639 * Ignore sector if: 2640 * For type 3: ref & app tag is all 'f's 2641 * For type 0,1,2: app tag is all 'f's 2642 */ 2643 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && 2644 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || 2645 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { 2646 uint32_t blocks_done, resid; 2647 sector_t lba_s = scsi_get_lba(cmd); 2648 2649 /* 2TB boundary case covered automatically with this */ 2650 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 2651 2652 resid = scsi_bufflen(cmd) - (blocks_done * 2653 cmd->device->sector_size); 2654 2655 scsi_set_resid(cmd, resid); 2656 cmd->result = DID_OK << 16; 2657 2658 /* Update protection tag */ 2659 if (scsi_prot_sg_count(cmd)) { 2660 uint32_t i, j = 0, k = 0, num_ent; 2661 struct scatterlist *sg; 2662 struct t10_pi_tuple *spt; 2663 2664 /* Patch the corresponding protection tags */ 2665 scsi_for_each_prot_sg(cmd, sg, 2666 scsi_prot_sg_count(cmd), i) { 2667 num_ent = sg_dma_len(sg) / 8; 2668 if (k + num_ent < blocks_done) { 2669 k += num_ent; 2670 continue; 2671 } 2672 j = blocks_done - k - 1; 2673 k = blocks_done; 2674 break; 2675 } 2676 2677 if (k != blocks_done) { 2678 ql_log(ql_log_warn, vha, 0x302f, 2679 "unexpected tag values tag:lba=%x:%llx)\n", 2680 e_ref_tag, (unsigned long long)lba_s); 2681 return 1; 2682 } 2683 2684 spt = page_address(sg_page(sg)) + sg->offset; 2685 spt += j; 2686 2687 spt->app_tag = T10_PI_APP_ESCAPE; 2688 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 2689 spt->ref_tag = T10_PI_REF_ESCAPE; 2690 } 2691 2692 return 0; 2693 } 2694 2695 /* check guard */ 2696 if (e_guard != a_guard) { 2697 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2698 0x10, 0x1); 2699 set_driver_byte(cmd, DRIVER_SENSE); 2700 set_host_byte(cmd, DID_ABORT); 2701 cmd->result |= SAM_STAT_CHECK_CONDITION; 2702 return 1; 2703 } 2704 2705 /* check ref tag */ 2706 if (e_ref_tag != a_ref_tag) { 2707 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2708 0x10, 0x3); 2709 set_driver_byte(cmd, DRIVER_SENSE); 2710 set_host_byte(cmd, DID_ABORT); 2711 cmd->result |= SAM_STAT_CHECK_CONDITION; 2712 return 1; 2713 } 2714 2715 /* check appl tag */ 2716 if (e_app_tag != a_app_tag) { 2717 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2718 0x10, 0x2); 2719 set_driver_byte(cmd, DRIVER_SENSE); 2720 set_host_byte(cmd, DID_ABORT); 2721 cmd->result |= SAM_STAT_CHECK_CONDITION; 2722 return 1; 2723 } 2724 2725 return 1; 2726 } 2727 2728 static void 2729 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 2730 struct req_que *req, uint32_t index) 2731 { 2732 struct qla_hw_data *ha = vha->hw; 2733 srb_t *sp; 2734 uint16_t comp_status; 2735 uint16_t scsi_status; 2736 uint16_t thread_id; 2737 uint32_t rval = EXT_STATUS_OK; 2738 struct bsg_job *bsg_job = NULL; 2739 struct fc_bsg_request *bsg_request; 2740 struct fc_bsg_reply *bsg_reply; 2741 sts_entry_t *sts = pkt; 2742 struct sts_entry_24xx *sts24 = pkt; 2743 2744 /* Validate handle. */ 2745 if (index >= req->num_outstanding_cmds) { 2746 ql_log(ql_log_warn, vha, 0x70af, 2747 "Invalid SCSI completion handle 0x%x.\n", index); 2748 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2749 return; 2750 } 2751 2752 sp = req->outstanding_cmds[index]; 2753 if (!sp) { 2754 ql_log(ql_log_warn, vha, 0x70b0, 2755 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 2756 req->id, index); 2757 2758 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2759 return; 2760 } 2761 2762 /* Free outstanding command slot. */ 2763 req->outstanding_cmds[index] = NULL; 2764 bsg_job = sp->u.bsg_job; 2765 bsg_request = bsg_job->request; 2766 bsg_reply = bsg_job->reply; 2767 2768 if (IS_FWI2_CAPABLE(ha)) { 2769 comp_status = le16_to_cpu(sts24->comp_status); 2770 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2771 } else { 2772 comp_status = le16_to_cpu(sts->comp_status); 2773 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2774 } 2775 2776 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 2777 switch (comp_status) { 2778 case CS_COMPLETE: 2779 if (scsi_status == 0) { 2780 bsg_reply->reply_payload_rcv_len = 2781 bsg_job->reply_payload.payload_len; 2782 vha->qla_stats.input_bytes += 2783 bsg_reply->reply_payload_rcv_len; 2784 vha->qla_stats.input_requests++; 2785 rval = EXT_STATUS_OK; 2786 } 2787 goto done; 2788 2789 case CS_DATA_OVERRUN: 2790 ql_dbg(ql_dbg_user, vha, 0x70b1, 2791 "Command completed with data overrun thread_id=%d\n", 2792 thread_id); 2793 rval = EXT_STATUS_DATA_OVERRUN; 2794 break; 2795 2796 case CS_DATA_UNDERRUN: 2797 ql_dbg(ql_dbg_user, vha, 0x70b2, 2798 "Command completed with data underrun thread_id=%d\n", 2799 thread_id); 2800 rval = EXT_STATUS_DATA_UNDERRUN; 2801 break; 2802 case CS_BIDIR_RD_OVERRUN: 2803 ql_dbg(ql_dbg_user, vha, 0x70b3, 2804 "Command completed with read data overrun thread_id=%d\n", 2805 thread_id); 2806 rval = EXT_STATUS_DATA_OVERRUN; 2807 break; 2808 2809 case CS_BIDIR_RD_WR_OVERRUN: 2810 ql_dbg(ql_dbg_user, vha, 0x70b4, 2811 "Command completed with read and write data overrun " 2812 "thread_id=%d\n", thread_id); 2813 rval = EXT_STATUS_DATA_OVERRUN; 2814 break; 2815 2816 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 2817 ql_dbg(ql_dbg_user, vha, 0x70b5, 2818 "Command completed with read data over and write data " 2819 "underrun thread_id=%d\n", thread_id); 2820 rval = EXT_STATUS_DATA_OVERRUN; 2821 break; 2822 2823 case CS_BIDIR_RD_UNDERRUN: 2824 ql_dbg(ql_dbg_user, vha, 0x70b6, 2825 "Command completed with read data underrun " 2826 "thread_id=%d\n", thread_id); 2827 rval = EXT_STATUS_DATA_UNDERRUN; 2828 break; 2829 2830 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 2831 ql_dbg(ql_dbg_user, vha, 0x70b7, 2832 "Command completed with read data under and write data " 2833 "overrun thread_id=%d\n", thread_id); 2834 rval = EXT_STATUS_DATA_UNDERRUN; 2835 break; 2836 2837 case CS_BIDIR_RD_WR_UNDERRUN: 2838 ql_dbg(ql_dbg_user, vha, 0x70b8, 2839 "Command completed with read and write data underrun " 2840 "thread_id=%d\n", thread_id); 2841 rval = EXT_STATUS_DATA_UNDERRUN; 2842 break; 2843 2844 case CS_BIDIR_DMA: 2845 ql_dbg(ql_dbg_user, vha, 0x70b9, 2846 "Command completed with data DMA error thread_id=%d\n", 2847 thread_id); 2848 rval = EXT_STATUS_DMA_ERR; 2849 break; 2850 2851 case CS_TIMEOUT: 2852 ql_dbg(ql_dbg_user, vha, 0x70ba, 2853 "Command completed with timeout thread_id=%d\n", 2854 thread_id); 2855 rval = EXT_STATUS_TIMEOUT; 2856 break; 2857 default: 2858 ql_dbg(ql_dbg_user, vha, 0x70bb, 2859 "Command completed with completion status=0x%x " 2860 "thread_id=%d\n", comp_status, thread_id); 2861 rval = EXT_STATUS_ERR; 2862 break; 2863 } 2864 bsg_reply->reply_payload_rcv_len = 0; 2865 2866 done: 2867 /* Return the vendor specific reply to API */ 2868 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2869 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2870 /* Always return DID_OK, bsg will send the vendor specific response 2871 * in this case only */ 2872 sp->done(sp, DID_OK << 16); 2873 2874 } 2875 2876 /** 2877 * qla2x00_status_entry() - Process a Status IOCB entry. 2878 * @vha: SCSI driver HA context 2879 * @rsp: response queue 2880 * @pkt: Entry pointer 2881 */ 2882 static void 2883 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2884 { 2885 srb_t *sp; 2886 fc_port_t *fcport; 2887 struct scsi_cmnd *cp; 2888 sts_entry_t *sts = pkt; 2889 struct sts_entry_24xx *sts24 = pkt; 2890 uint16_t comp_status; 2891 uint16_t scsi_status; 2892 uint16_t ox_id; 2893 uint8_t lscsi_status; 2894 int32_t resid; 2895 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2896 fw_resid_len; 2897 uint8_t *rsp_info, *sense_data; 2898 struct qla_hw_data *ha = vha->hw; 2899 uint32_t handle; 2900 uint16_t que; 2901 struct req_que *req; 2902 int logit = 1; 2903 int res = 0; 2904 uint16_t state_flags = 0; 2905 uint16_t sts_qual = 0; 2906 2907 if (IS_FWI2_CAPABLE(ha)) { 2908 comp_status = le16_to_cpu(sts24->comp_status); 2909 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2910 state_flags = le16_to_cpu(sts24->state_flags); 2911 } else { 2912 comp_status = le16_to_cpu(sts->comp_status); 2913 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2914 } 2915 handle = (uint32_t) LSW(sts->handle); 2916 que = MSW(sts->handle); 2917 req = ha->req_q_map[que]; 2918 2919 /* Check for invalid queue pointer */ 2920 if (req == NULL || 2921 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2922 ql_dbg(ql_dbg_io, vha, 0x3059, 2923 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2924 "que=%u.\n", sts->handle, req, que); 2925 return; 2926 } 2927 2928 /* Validate handle. */ 2929 if (handle < req->num_outstanding_cmds) { 2930 sp = req->outstanding_cmds[handle]; 2931 if (!sp) { 2932 ql_dbg(ql_dbg_io, vha, 0x3075, 2933 "%s(%ld): Already returned command for status handle (0x%x).\n", 2934 __func__, vha->host_no, sts->handle); 2935 return; 2936 } 2937 } else { 2938 ql_dbg(ql_dbg_io, vha, 0x3017, 2939 "Invalid status handle, out of range (0x%x).\n", 2940 sts->handle); 2941 2942 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2943 if (IS_P3P_TYPE(ha)) 2944 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2945 else 2946 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2947 qla2xxx_wake_dpc(vha); 2948 } 2949 return; 2950 } 2951 qla_put_iocbs(sp->qpair, &sp->iores); 2952 2953 if (sp->cmd_type != TYPE_SRB) { 2954 req->outstanding_cmds[handle] = NULL; 2955 ql_dbg(ql_dbg_io, vha, 0x3015, 2956 "Unknown sp->cmd_type %x %p).\n", 2957 sp->cmd_type, sp); 2958 return; 2959 } 2960 2961 /* NVME completion. */ 2962 if (sp->type == SRB_NVME_CMD) { 2963 req->outstanding_cmds[handle] = NULL; 2964 qla24xx_nvme_iocb_entry(vha, req, pkt, sp); 2965 return; 2966 } 2967 2968 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2969 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2970 return; 2971 } 2972 2973 /* Task Management completion. */ 2974 if (sp->type == SRB_TM_CMD) { 2975 qla24xx_tm_iocb_entry(vha, req, pkt); 2976 return; 2977 } 2978 2979 /* Fast path completion. */ 2980 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2981 qla2x00_process_completed_request(vha, req, handle); 2982 2983 return; 2984 } 2985 2986 req->outstanding_cmds[handle] = NULL; 2987 cp = GET_CMD_SP(sp); 2988 if (cp == NULL) { 2989 ql_dbg(ql_dbg_io, vha, 0x3018, 2990 "Command already returned (0x%x/%p).\n", 2991 sts->handle, sp); 2992 2993 return; 2994 } 2995 2996 lscsi_status = scsi_status & STATUS_MASK; 2997 2998 fcport = sp->fcport; 2999 3000 ox_id = 0; 3001 sense_len = par_sense_len = rsp_info_len = resid_len = 3002 fw_resid_len = 0; 3003 if (IS_FWI2_CAPABLE(ha)) { 3004 if (scsi_status & SS_SENSE_LEN_VALID) 3005 sense_len = le32_to_cpu(sts24->sense_len); 3006 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3007 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 3008 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 3009 resid_len = le32_to_cpu(sts24->rsp_residual_count); 3010 if (comp_status == CS_DATA_UNDERRUN) 3011 fw_resid_len = le32_to_cpu(sts24->residual_len); 3012 rsp_info = sts24->data; 3013 sense_data = sts24->data; 3014 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 3015 ox_id = le16_to_cpu(sts24->ox_id); 3016 par_sense_len = sizeof(sts24->data); 3017 sts_qual = le16_to_cpu(sts24->status_qualifier); 3018 } else { 3019 if (scsi_status & SS_SENSE_LEN_VALID) 3020 sense_len = le16_to_cpu(sts->req_sense_length); 3021 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3022 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 3023 resid_len = le32_to_cpu(sts->residual_length); 3024 rsp_info = sts->rsp_info; 3025 sense_data = sts->req_sense_data; 3026 par_sense_len = sizeof(sts->req_sense_data); 3027 } 3028 3029 /* Check for any FCP transport errors. */ 3030 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 3031 /* Sense data lies beyond any FCP RESPONSE data. */ 3032 if (IS_FWI2_CAPABLE(ha)) { 3033 sense_data += rsp_info_len; 3034 par_sense_len -= rsp_info_len; 3035 } 3036 if (rsp_info_len > 3 && rsp_info[3]) { 3037 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 3038 "FCP I/O protocol failure (0x%x/0x%x).\n", 3039 rsp_info_len, rsp_info[3]); 3040 3041 res = DID_BUS_BUSY << 16; 3042 goto out; 3043 } 3044 } 3045 3046 /* Check for overrun. */ 3047 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 3048 scsi_status & SS_RESIDUAL_OVER) 3049 comp_status = CS_DATA_OVERRUN; 3050 3051 /* 3052 * Check retry_delay_timer value if we receive a busy or 3053 * queue full. 3054 */ 3055 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL || 3056 lscsi_status == SAM_STAT_BUSY)) 3057 qla2x00_set_retry_delay_timestamp(fcport, sts_qual); 3058 3059 /* 3060 * Based on Host and scsi status generate status code for Linux 3061 */ 3062 switch (comp_status) { 3063 case CS_COMPLETE: 3064 case CS_QUEUE_FULL: 3065 if (scsi_status == 0) { 3066 res = DID_OK << 16; 3067 break; 3068 } 3069 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 3070 resid = resid_len; 3071 scsi_set_resid(cp, resid); 3072 3073 if (!lscsi_status && 3074 ((unsigned)(scsi_bufflen(cp) - resid) < 3075 cp->underflow)) { 3076 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 3077 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3078 resid, scsi_bufflen(cp)); 3079 3080 res = DID_ERROR << 16; 3081 break; 3082 } 3083 } 3084 res = DID_OK << 16 | lscsi_status; 3085 3086 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3087 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 3088 "QUEUE FULL detected.\n"); 3089 break; 3090 } 3091 logit = 0; 3092 if (lscsi_status != SS_CHECK_CONDITION) 3093 break; 3094 3095 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3096 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3097 break; 3098 3099 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 3100 rsp, res); 3101 break; 3102 3103 case CS_DATA_UNDERRUN: 3104 /* Use F/W calculated residual length. */ 3105 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 3106 scsi_set_resid(cp, resid); 3107 if (scsi_status & SS_RESIDUAL_UNDER) { 3108 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 3109 ql_log(ql_log_warn, fcport->vha, 0x301d, 3110 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3111 resid, scsi_bufflen(cp)); 3112 3113 vha->interface_err_cnt++; 3114 3115 res = DID_ERROR << 16 | lscsi_status; 3116 goto check_scsi_status; 3117 } 3118 3119 if (!lscsi_status && 3120 ((unsigned)(scsi_bufflen(cp) - resid) < 3121 cp->underflow)) { 3122 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 3123 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3124 resid, scsi_bufflen(cp)); 3125 3126 res = DID_ERROR << 16; 3127 break; 3128 } 3129 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 3130 lscsi_status != SAM_STAT_BUSY) { 3131 /* 3132 * scsi status of task set and busy are considered to be 3133 * task not completed. 3134 */ 3135 3136 ql_log(ql_log_warn, fcport->vha, 0x301f, 3137 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3138 resid, scsi_bufflen(cp)); 3139 3140 vha->interface_err_cnt++; 3141 3142 res = DID_ERROR << 16 | lscsi_status; 3143 goto check_scsi_status; 3144 } else { 3145 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 3146 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 3147 scsi_status, lscsi_status); 3148 } 3149 3150 res = DID_OK << 16 | lscsi_status; 3151 logit = 0; 3152 3153 check_scsi_status: 3154 /* 3155 * Check to see if SCSI Status is non zero. If so report SCSI 3156 * Status. 3157 */ 3158 if (lscsi_status != 0) { 3159 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3160 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 3161 "QUEUE FULL detected.\n"); 3162 logit = 1; 3163 break; 3164 } 3165 if (lscsi_status != SS_CHECK_CONDITION) 3166 break; 3167 3168 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3169 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3170 break; 3171 3172 qla2x00_handle_sense(sp, sense_data, par_sense_len, 3173 sense_len, rsp, res); 3174 } 3175 break; 3176 3177 case CS_PORT_LOGGED_OUT: 3178 case CS_PORT_CONFIG_CHG: 3179 case CS_PORT_BUSY: 3180 case CS_INCOMPLETE: 3181 case CS_PORT_UNAVAILABLE: 3182 case CS_TIMEOUT: 3183 case CS_RESET: 3184 3185 /* 3186 * We are going to have the fc class block the rport 3187 * while we try to recover so instruct the mid layer 3188 * to requeue until the class decides how to handle this. 3189 */ 3190 res = DID_TRANSPORT_DISRUPTED << 16; 3191 3192 if (comp_status == CS_TIMEOUT) { 3193 if (IS_FWI2_CAPABLE(ha)) 3194 break; 3195 else if ((le16_to_cpu(sts->status_flags) & 3196 SF_LOGOUT_SENT) == 0) 3197 break; 3198 } 3199 3200 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3201 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 3202 "Port to be marked lost on fcport=%02x%02x%02x, current " 3203 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 3204 fcport->d_id.b.area, fcport->d_id.b.al_pa, 3205 port_state_str[FCS_ONLINE], 3206 comp_status); 3207 3208 qlt_schedule_sess_for_deletion(fcport); 3209 } 3210 3211 break; 3212 3213 case CS_ABORTED: 3214 res = DID_RESET << 16; 3215 break; 3216 3217 case CS_DIF_ERROR: 3218 logit = qla2x00_handle_dif_error(sp, sts24); 3219 res = cp->result; 3220 break; 3221 3222 case CS_TRANSPORT: 3223 res = DID_ERROR << 16; 3224 vha->hw_err_cnt++; 3225 3226 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 3227 break; 3228 3229 if (state_flags & BIT_4) 3230 scmd_printk(KERN_WARNING, cp, 3231 "Unsupported device '%s' found.\n", 3232 cp->device->vendor); 3233 break; 3234 3235 case CS_DMA: 3236 ql_log(ql_log_info, fcport->vha, 0x3022, 3237 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3238 comp_status, scsi_status, res, vha->host_no, 3239 cp->device->id, cp->device->lun, fcport->d_id.b24, 3240 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3241 resid_len, fw_resid_len, sp, cp); 3242 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, 3243 pkt, sizeof(*sts24)); 3244 res = DID_ERROR << 16; 3245 vha->hw_err_cnt++; 3246 break; 3247 default: 3248 res = DID_ERROR << 16; 3249 break; 3250 } 3251 3252 out: 3253 if (logit) 3254 ql_log(ql_log_warn, fcport->vha, 0x3022, 3255 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3256 comp_status, scsi_status, res, vha->host_no, 3257 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 3258 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 3259 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3260 resid_len, fw_resid_len, sp, cp); 3261 3262 if (rsp->status_srb == NULL) 3263 sp->done(sp, res); 3264 } 3265 3266 /** 3267 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 3268 * @rsp: response queue 3269 * @pkt: Entry pointer 3270 * 3271 * Extended sense data. 3272 */ 3273 static void 3274 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 3275 { 3276 uint8_t sense_sz = 0; 3277 struct qla_hw_data *ha = rsp->hw; 3278 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 3279 srb_t *sp = rsp->status_srb; 3280 struct scsi_cmnd *cp; 3281 uint32_t sense_len; 3282 uint8_t *sense_ptr; 3283 3284 if (!sp || !GET_CMD_SENSE_LEN(sp)) 3285 return; 3286 3287 sense_len = GET_CMD_SENSE_LEN(sp); 3288 sense_ptr = GET_CMD_SENSE_PTR(sp); 3289 3290 cp = GET_CMD_SP(sp); 3291 if (cp == NULL) { 3292 ql_log(ql_log_warn, vha, 0x3025, 3293 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 3294 3295 rsp->status_srb = NULL; 3296 return; 3297 } 3298 3299 if (sense_len > sizeof(pkt->data)) 3300 sense_sz = sizeof(pkt->data); 3301 else 3302 sense_sz = sense_len; 3303 3304 /* Move sense data. */ 3305 if (IS_FWI2_CAPABLE(ha)) 3306 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 3307 memcpy(sense_ptr, pkt->data, sense_sz); 3308 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 3309 sense_ptr, sense_sz); 3310 3311 sense_len -= sense_sz; 3312 sense_ptr += sense_sz; 3313 3314 SET_CMD_SENSE_PTR(sp, sense_ptr); 3315 SET_CMD_SENSE_LEN(sp, sense_len); 3316 3317 /* Place command on done queue. */ 3318 if (sense_len == 0) { 3319 rsp->status_srb = NULL; 3320 sp->done(sp, cp->result); 3321 } 3322 } 3323 3324 /** 3325 * qla2x00_error_entry() - Process an error entry. 3326 * @vha: SCSI driver HA context 3327 * @rsp: response queue 3328 * @pkt: Entry pointer 3329 * return : 1=allow further error analysis. 0=no additional error analysis. 3330 */ 3331 static int 3332 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 3333 { 3334 srb_t *sp; 3335 struct qla_hw_data *ha = vha->hw; 3336 const char func[] = "ERROR-IOCB"; 3337 uint16_t que = MSW(pkt->handle); 3338 struct req_que *req = NULL; 3339 int res = DID_ERROR << 16; 3340 3341 ql_dbg(ql_dbg_async, vha, 0x502a, 3342 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", 3343 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); 3344 3345 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 3346 goto fatal; 3347 3348 req = ha->req_q_map[que]; 3349 3350 if (pkt->entry_status & RF_BUSY) 3351 res = DID_BUS_BUSY << 16; 3352 3353 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) 3354 return 0; 3355 3356 switch (pkt->entry_type) { 3357 case NOTIFY_ACK_TYPE: 3358 case STATUS_TYPE: 3359 case STATUS_CONT_TYPE: 3360 case LOGINOUT_PORT_IOCB_TYPE: 3361 case CT_IOCB_TYPE: 3362 case ELS_IOCB_TYPE: 3363 case ABORT_IOCB_TYPE: 3364 case MBX_IOCB_TYPE: 3365 default: 3366 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3367 if (sp) { 3368 qla_put_iocbs(sp->qpair, &sp->iores); 3369 sp->done(sp, res); 3370 return 0; 3371 } 3372 break; 3373 3374 case ABTS_RESP_24XX: 3375 case CTIO_TYPE7: 3376 case CTIO_CRC2: 3377 return 1; 3378 } 3379 fatal: 3380 ql_log(ql_log_warn, vha, 0x5030, 3381 "Error entry - invalid handle/queue (%04x).\n", que); 3382 return 0; 3383 } 3384 3385 /** 3386 * qla24xx_mbx_completion() - Process mailbox command completions. 3387 * @vha: SCSI driver HA context 3388 * @mb0: Mailbox0 register 3389 */ 3390 static void 3391 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 3392 { 3393 uint16_t cnt; 3394 uint32_t mboxes; 3395 __le16 __iomem *wptr; 3396 struct qla_hw_data *ha = vha->hw; 3397 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3398 3399 /* Read all mbox registers? */ 3400 WARN_ON_ONCE(ha->mbx_count > 32); 3401 mboxes = (1ULL << ha->mbx_count) - 1; 3402 if (!ha->mcp) 3403 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 3404 else 3405 mboxes = ha->mcp->in_mb; 3406 3407 /* Load return mailbox registers. */ 3408 ha->flags.mbox_int = 1; 3409 ha->mailbox_out[0] = mb0; 3410 mboxes >>= 1; 3411 wptr = ®->mailbox1; 3412 3413 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 3414 if (mboxes & BIT_0) 3415 ha->mailbox_out[cnt] = rd_reg_word(wptr); 3416 3417 mboxes >>= 1; 3418 wptr++; 3419 } 3420 } 3421 3422 static void 3423 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 3424 struct abort_entry_24xx *pkt) 3425 { 3426 const char func[] = "ABT_IOCB"; 3427 srb_t *sp; 3428 srb_t *orig_sp = NULL; 3429 struct srb_iocb *abt; 3430 3431 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3432 if (!sp) 3433 return; 3434 3435 abt = &sp->u.iocb_cmd; 3436 abt->u.abt.comp_status = pkt->comp_status; 3437 orig_sp = sp->cmd_sp; 3438 /* Need to pass original sp */ 3439 if (orig_sp) 3440 qla_nvme_abort_process_comp_status(pkt, orig_sp); 3441 3442 sp->done(sp, 0); 3443 } 3444 3445 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, 3446 struct pt_ls4_request *pkt, struct req_que *req) 3447 { 3448 srb_t *sp; 3449 const char func[] = "LS4_IOCB"; 3450 uint16_t comp_status; 3451 3452 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3453 if (!sp) 3454 return; 3455 3456 comp_status = le16_to_cpu(pkt->status); 3457 sp->done(sp, comp_status); 3458 } 3459 3460 /** 3461 * qla24xx_process_response_queue() - Process response queue entries. 3462 * @vha: SCSI driver HA context 3463 * @rsp: response queue 3464 */ 3465 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 3466 struct rsp_que *rsp) 3467 { 3468 struct sts_entry_24xx *pkt; 3469 struct qla_hw_data *ha = vha->hw; 3470 struct purex_entry_24xx *purex_entry; 3471 struct purex_item *pure_item; 3472 3473 if (!ha->flags.fw_started) 3474 return; 3475 3476 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { 3477 rsp->qpair->rcv_intr = 1; 3478 qla_cpu_update(rsp->qpair, smp_processor_id()); 3479 } 3480 3481 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 3482 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 3483 3484 rsp->ring_index++; 3485 if (rsp->ring_index == rsp->length) { 3486 rsp->ring_index = 0; 3487 rsp->ring_ptr = rsp->ring; 3488 } else { 3489 rsp->ring_ptr++; 3490 } 3491 3492 if (pkt->entry_status != 0) { 3493 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) 3494 goto process_err; 3495 3496 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3497 wmb(); 3498 continue; 3499 } 3500 process_err: 3501 3502 switch (pkt->entry_type) { 3503 case STATUS_TYPE: 3504 qla2x00_status_entry(vha, rsp, pkt); 3505 break; 3506 case STATUS_CONT_TYPE: 3507 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 3508 break; 3509 case VP_RPT_ID_IOCB_TYPE: 3510 qla24xx_report_id_acquisition(vha, 3511 (struct vp_rpt_id_entry_24xx *)pkt); 3512 break; 3513 case LOGINOUT_PORT_IOCB_TYPE: 3514 qla24xx_logio_entry(vha, rsp->req, 3515 (struct logio_entry_24xx *)pkt); 3516 break; 3517 case CT_IOCB_TYPE: 3518 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 3519 break; 3520 case ELS_IOCB_TYPE: 3521 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 3522 break; 3523 case ABTS_RECV_24XX: 3524 if (qla_ini_mode_enabled(vha)) { 3525 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3526 if (!pure_item) 3527 break; 3528 qla24xx_queue_purex_item(vha, pure_item, 3529 qla24xx_process_abts); 3530 break; 3531 } 3532 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3533 IS_QLA28XX(ha)) { 3534 /* ensure that the ATIO queue is empty */ 3535 qlt_handle_abts_recv(vha, rsp, 3536 (response_t *)pkt); 3537 break; 3538 } else { 3539 qlt_24xx_process_atio_queue(vha, 1); 3540 } 3541 fallthrough; 3542 case ABTS_RESP_24XX: 3543 case CTIO_TYPE7: 3544 case CTIO_CRC2: 3545 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); 3546 break; 3547 case PT_LS4_REQUEST: 3548 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, 3549 rsp->req); 3550 break; 3551 case NOTIFY_ACK_TYPE: 3552 if (pkt->handle == QLA_TGT_SKIP_HANDLE) 3553 qlt_response_pkt_all_vps(vha, rsp, 3554 (response_t *)pkt); 3555 else 3556 qla24xxx_nack_iocb_entry(vha, rsp->req, 3557 (struct nack_to_isp *)pkt); 3558 break; 3559 case MARKER_TYPE: 3560 /* Do nothing in this case, this check is to prevent it 3561 * from falling into default case 3562 */ 3563 break; 3564 case ABORT_IOCB_TYPE: 3565 qla24xx_abort_iocb_entry(vha, rsp->req, 3566 (struct abort_entry_24xx *)pkt); 3567 break; 3568 case MBX_IOCB_TYPE: 3569 qla24xx_mbx_iocb_entry(vha, rsp->req, 3570 (struct mbx_24xx_entry *)pkt); 3571 break; 3572 case VP_CTRL_IOCB_TYPE: 3573 qla_ctrlvp_completed(vha, rsp->req, 3574 (struct vp_ctrl_entry_24xx *)pkt); 3575 break; 3576 case PUREX_IOCB_TYPE: 3577 purex_entry = (void *)pkt; 3578 switch (purex_entry->els_frame_payload[3]) { 3579 case ELS_RDP: 3580 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3581 if (!pure_item) 3582 break; 3583 qla24xx_queue_purex_item(vha, pure_item, 3584 qla24xx_process_purex_rdp); 3585 break; 3586 case ELS_FPIN: 3587 if (!vha->hw->flags.scm_enabled) { 3588 ql_log(ql_log_warn, vha, 0x5094, 3589 "SCM not active for this port\n"); 3590 break; 3591 } 3592 pure_item = qla27xx_copy_fpin_pkt(vha, 3593 (void **)&pkt, &rsp); 3594 if (!pure_item) 3595 break; 3596 qla24xx_queue_purex_item(vha, pure_item, 3597 qla27xx_process_purex_fpin); 3598 break; 3599 3600 default: 3601 ql_log(ql_log_warn, vha, 0x509c, 3602 "Discarding ELS Request opcode 0x%x\n", 3603 purex_entry->els_frame_payload[3]); 3604 } 3605 break; 3606 default: 3607 /* Type Not Supported. */ 3608 ql_dbg(ql_dbg_async, vha, 0x5042, 3609 "Received unknown response pkt type 0x%x entry status=%x.\n", 3610 pkt->entry_type, pkt->entry_status); 3611 break; 3612 } 3613 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3614 wmb(); 3615 } 3616 3617 /* Adjust ring index */ 3618 if (IS_P3P_TYPE(ha)) { 3619 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 3620 3621 wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index); 3622 } else { 3623 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); 3624 } 3625 } 3626 3627 static void 3628 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 3629 { 3630 int rval; 3631 uint32_t cnt; 3632 struct qla_hw_data *ha = vha->hw; 3633 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3634 3635 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3636 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3637 return; 3638 3639 rval = QLA_SUCCESS; 3640 wrt_reg_dword(®->iobase_addr, 0x7C00); 3641 rd_reg_dword(®->iobase_addr); 3642 wrt_reg_dword(®->iobase_window, 0x0001); 3643 for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3644 rval == QLA_SUCCESS; cnt--) { 3645 if (cnt) { 3646 wrt_reg_dword(®->iobase_window, 0x0001); 3647 udelay(10); 3648 } else 3649 rval = QLA_FUNCTION_TIMEOUT; 3650 } 3651 if (rval == QLA_SUCCESS) 3652 goto next_test; 3653 3654 rval = QLA_SUCCESS; 3655 wrt_reg_dword(®->iobase_window, 0x0003); 3656 for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3657 rval == QLA_SUCCESS; cnt--) { 3658 if (cnt) { 3659 wrt_reg_dword(®->iobase_window, 0x0003); 3660 udelay(10); 3661 } else 3662 rval = QLA_FUNCTION_TIMEOUT; 3663 } 3664 if (rval != QLA_SUCCESS) 3665 goto done; 3666 3667 next_test: 3668 if (rd_reg_dword(®->iobase_c8) & BIT_3) 3669 ql_log(ql_log_info, vha, 0x504c, 3670 "Additional code -- 0x55AA.\n"); 3671 3672 done: 3673 wrt_reg_dword(®->iobase_window, 0x0000); 3674 rd_reg_dword(®->iobase_window); 3675 } 3676 3677 /** 3678 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 3679 * @irq: interrupt number 3680 * @dev_id: SCSI driver HA context 3681 * 3682 * Called by system whenever the host adapter generates an interrupt. 3683 * 3684 * Returns handled flag. 3685 */ 3686 irqreturn_t 3687 qla24xx_intr_handler(int irq, void *dev_id) 3688 { 3689 scsi_qla_host_t *vha; 3690 struct qla_hw_data *ha; 3691 struct device_reg_24xx __iomem *reg; 3692 int status; 3693 unsigned long iter; 3694 uint32_t stat; 3695 uint32_t hccr; 3696 uint16_t mb[8]; 3697 struct rsp_que *rsp; 3698 unsigned long flags; 3699 bool process_atio = false; 3700 3701 rsp = (struct rsp_que *) dev_id; 3702 if (!rsp) { 3703 ql_log(ql_log_info, NULL, 0x5059, 3704 "%s: NULL response queue pointer.\n", __func__); 3705 return IRQ_NONE; 3706 } 3707 3708 ha = rsp->hw; 3709 reg = &ha->iobase->isp24; 3710 status = 0; 3711 3712 if (unlikely(pci_channel_offline(ha->pdev))) 3713 return IRQ_HANDLED; 3714 3715 spin_lock_irqsave(&ha->hardware_lock, flags); 3716 vha = pci_get_drvdata(ha->pdev); 3717 for (iter = 50; iter--; ) { 3718 stat = rd_reg_dword(®->host_status); 3719 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3720 break; 3721 if (stat & HSRX_RISC_PAUSED) { 3722 if (unlikely(pci_channel_offline(ha->pdev))) 3723 break; 3724 3725 hccr = rd_reg_dword(®->hccr); 3726 3727 ql_log(ql_log_warn, vha, 0x504b, 3728 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3729 hccr); 3730 3731 qla2xxx_check_risc_status(vha); 3732 3733 ha->isp_ops->fw_dump(vha); 3734 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3735 break; 3736 } else if ((stat & HSRX_RISC_INT) == 0) 3737 break; 3738 3739 switch (stat & 0xff) { 3740 case INTR_ROM_MB_SUCCESS: 3741 case INTR_ROM_MB_FAILED: 3742 case INTR_MB_SUCCESS: 3743 case INTR_MB_FAILED: 3744 qla24xx_mbx_completion(vha, MSW(stat)); 3745 status |= MBX_INTERRUPT; 3746 3747 break; 3748 case INTR_ASYNC_EVENT: 3749 mb[0] = MSW(stat); 3750 mb[1] = rd_reg_word(®->mailbox1); 3751 mb[2] = rd_reg_word(®->mailbox2); 3752 mb[3] = rd_reg_word(®->mailbox3); 3753 qla2x00_async_event(vha, rsp, mb); 3754 break; 3755 case INTR_RSP_QUE_UPDATE: 3756 case INTR_RSP_QUE_UPDATE_83XX: 3757 qla24xx_process_response_queue(vha, rsp); 3758 break; 3759 case INTR_ATIO_QUE_UPDATE_27XX: 3760 case INTR_ATIO_QUE_UPDATE: 3761 process_atio = true; 3762 break; 3763 case INTR_ATIO_RSP_QUE_UPDATE: 3764 process_atio = true; 3765 qla24xx_process_response_queue(vha, rsp); 3766 break; 3767 default: 3768 ql_dbg(ql_dbg_async, vha, 0x504f, 3769 "Unrecognized interrupt type (%d).\n", stat * 0xff); 3770 break; 3771 } 3772 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3773 rd_reg_dword_relaxed(®->hccr); 3774 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 3775 ndelay(3500); 3776 } 3777 qla2x00_handle_mbx_completion(ha, status); 3778 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3779 3780 if (process_atio) { 3781 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3782 qlt_24xx_process_atio_queue(vha, 0); 3783 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3784 } 3785 3786 return IRQ_HANDLED; 3787 } 3788 3789 static irqreturn_t 3790 qla24xx_msix_rsp_q(int irq, void *dev_id) 3791 { 3792 struct qla_hw_data *ha; 3793 struct rsp_que *rsp; 3794 struct device_reg_24xx __iomem *reg; 3795 struct scsi_qla_host *vha; 3796 unsigned long flags; 3797 3798 rsp = (struct rsp_que *) dev_id; 3799 if (!rsp) { 3800 ql_log(ql_log_info, NULL, 0x505a, 3801 "%s: NULL response queue pointer.\n", __func__); 3802 return IRQ_NONE; 3803 } 3804 ha = rsp->hw; 3805 reg = &ha->iobase->isp24; 3806 3807 spin_lock_irqsave(&ha->hardware_lock, flags); 3808 3809 vha = pci_get_drvdata(ha->pdev); 3810 qla24xx_process_response_queue(vha, rsp); 3811 if (!ha->flags.disable_msix_handshake) { 3812 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3813 rd_reg_dword_relaxed(®->hccr); 3814 } 3815 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3816 3817 return IRQ_HANDLED; 3818 } 3819 3820 static irqreturn_t 3821 qla24xx_msix_default(int irq, void *dev_id) 3822 { 3823 scsi_qla_host_t *vha; 3824 struct qla_hw_data *ha; 3825 struct rsp_que *rsp; 3826 struct device_reg_24xx __iomem *reg; 3827 int status; 3828 uint32_t stat; 3829 uint32_t hccr; 3830 uint16_t mb[8]; 3831 unsigned long flags; 3832 bool process_atio = false; 3833 3834 rsp = (struct rsp_que *) dev_id; 3835 if (!rsp) { 3836 ql_log(ql_log_info, NULL, 0x505c, 3837 "%s: NULL response queue pointer.\n", __func__); 3838 return IRQ_NONE; 3839 } 3840 ha = rsp->hw; 3841 reg = &ha->iobase->isp24; 3842 status = 0; 3843 3844 spin_lock_irqsave(&ha->hardware_lock, flags); 3845 vha = pci_get_drvdata(ha->pdev); 3846 do { 3847 stat = rd_reg_dword(®->host_status); 3848 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3849 break; 3850 if (stat & HSRX_RISC_PAUSED) { 3851 if (unlikely(pci_channel_offline(ha->pdev))) 3852 break; 3853 3854 hccr = rd_reg_dword(®->hccr); 3855 3856 ql_log(ql_log_info, vha, 0x5050, 3857 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3858 hccr); 3859 3860 qla2xxx_check_risc_status(vha); 3861 vha->hw_err_cnt++; 3862 3863 ha->isp_ops->fw_dump(vha); 3864 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3865 break; 3866 } else if ((stat & HSRX_RISC_INT) == 0) 3867 break; 3868 3869 switch (stat & 0xff) { 3870 case INTR_ROM_MB_SUCCESS: 3871 case INTR_ROM_MB_FAILED: 3872 case INTR_MB_SUCCESS: 3873 case INTR_MB_FAILED: 3874 qla24xx_mbx_completion(vha, MSW(stat)); 3875 status |= MBX_INTERRUPT; 3876 3877 break; 3878 case INTR_ASYNC_EVENT: 3879 mb[0] = MSW(stat); 3880 mb[1] = rd_reg_word(®->mailbox1); 3881 mb[2] = rd_reg_word(®->mailbox2); 3882 mb[3] = rd_reg_word(®->mailbox3); 3883 qla2x00_async_event(vha, rsp, mb); 3884 break; 3885 case INTR_RSP_QUE_UPDATE: 3886 case INTR_RSP_QUE_UPDATE_83XX: 3887 qla24xx_process_response_queue(vha, rsp); 3888 break; 3889 case INTR_ATIO_QUE_UPDATE_27XX: 3890 case INTR_ATIO_QUE_UPDATE: 3891 process_atio = true; 3892 break; 3893 case INTR_ATIO_RSP_QUE_UPDATE: 3894 process_atio = true; 3895 qla24xx_process_response_queue(vha, rsp); 3896 break; 3897 default: 3898 ql_dbg(ql_dbg_async, vha, 0x5051, 3899 "Unrecognized interrupt type (%d).\n", stat & 0xff); 3900 break; 3901 } 3902 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3903 } while (0); 3904 qla2x00_handle_mbx_completion(ha, status); 3905 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3906 3907 if (process_atio) { 3908 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3909 qlt_24xx_process_atio_queue(vha, 0); 3910 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3911 } 3912 3913 return IRQ_HANDLED; 3914 } 3915 3916 irqreturn_t 3917 qla2xxx_msix_rsp_q(int irq, void *dev_id) 3918 { 3919 struct qla_hw_data *ha; 3920 struct qla_qpair *qpair; 3921 3922 qpair = dev_id; 3923 if (!qpair) { 3924 ql_log(ql_log_info, NULL, 0x505b, 3925 "%s: NULL response queue pointer.\n", __func__); 3926 return IRQ_NONE; 3927 } 3928 ha = qpair->hw; 3929 3930 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); 3931 3932 return IRQ_HANDLED; 3933 } 3934 3935 irqreturn_t 3936 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) 3937 { 3938 struct qla_hw_data *ha; 3939 struct qla_qpair *qpair; 3940 struct device_reg_24xx __iomem *reg; 3941 unsigned long flags; 3942 3943 qpair = dev_id; 3944 if (!qpair) { 3945 ql_log(ql_log_info, NULL, 0x505b, 3946 "%s: NULL response queue pointer.\n", __func__); 3947 return IRQ_NONE; 3948 } 3949 ha = qpair->hw; 3950 3951 reg = &ha->iobase->isp24; 3952 spin_lock_irqsave(&ha->hardware_lock, flags); 3953 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3954 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3955 3956 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); 3957 3958 return IRQ_HANDLED; 3959 } 3960 3961 /* Interrupt handling helpers. */ 3962 3963 struct qla_init_msix_entry { 3964 const char *name; 3965 irq_handler_t handler; 3966 }; 3967 3968 static const struct qla_init_msix_entry msix_entries[] = { 3969 { "default", qla24xx_msix_default }, 3970 { "rsp_q", qla24xx_msix_rsp_q }, 3971 { "atio_q", qla83xx_msix_atio_q }, 3972 { "qpair_multiq", qla2xxx_msix_rsp_q }, 3973 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, 3974 }; 3975 3976 static const struct qla_init_msix_entry qla82xx_msix_entries[] = { 3977 { "qla2xxx (default)", qla82xx_msix_default }, 3978 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3979 }; 3980 3981 static int 3982 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3983 { 3984 int i, ret; 3985 struct qla_msix_entry *qentry; 3986 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3987 int min_vecs = QLA_BASE_VECTORS; 3988 struct irq_affinity desc = { 3989 .pre_vectors = QLA_BASE_VECTORS, 3990 }; 3991 3992 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 3993 IS_ATIO_MSIX_CAPABLE(ha)) { 3994 desc.pre_vectors++; 3995 min_vecs++; 3996 } 3997 3998 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { 3999 /* user wants to control IRQ setting for target mode */ 4000 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, 4001 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), 4002 PCI_IRQ_MSIX); 4003 } else 4004 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, 4005 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), 4006 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 4007 &desc); 4008 4009 if (ret < 0) { 4010 ql_log(ql_log_fatal, vha, 0x00c7, 4011 "MSI-X: Failed to enable support, " 4012 "giving up -- %d/%d.\n", 4013 ha->msix_count, ret); 4014 goto msix_out; 4015 } else if (ret < ha->msix_count) { 4016 ql_log(ql_log_info, vha, 0x00c6, 4017 "MSI-X: Using %d vectors\n", ret); 4018 ha->msix_count = ret; 4019 /* Recalculate queue values */ 4020 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { 4021 ha->max_req_queues = ha->msix_count - 1; 4022 4023 /* ATIOQ needs 1 vector. That's 1 less QPair */ 4024 if (QLA_TGT_MODE_ENABLED()) 4025 ha->max_req_queues--; 4026 4027 ha->max_rsp_queues = ha->max_req_queues; 4028 4029 ha->max_qpairs = ha->max_req_queues - 1; 4030 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, 4031 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 4032 } 4033 } 4034 vha->irq_offset = desc.pre_vectors; 4035 ha->msix_entries = kcalloc(ha->msix_count, 4036 sizeof(struct qla_msix_entry), 4037 GFP_KERNEL); 4038 if (!ha->msix_entries) { 4039 ql_log(ql_log_fatal, vha, 0x00c8, 4040 "Failed to allocate memory for ha->msix_entries.\n"); 4041 ret = -ENOMEM; 4042 goto free_irqs; 4043 } 4044 ha->flags.msix_enabled = 1; 4045 4046 for (i = 0; i < ha->msix_count; i++) { 4047 qentry = &ha->msix_entries[i]; 4048 qentry->vector = pci_irq_vector(ha->pdev, i); 4049 qentry->entry = i; 4050 qentry->have_irq = 0; 4051 qentry->in_use = 0; 4052 qentry->handle = NULL; 4053 } 4054 4055 /* Enable MSI-X vectors for the base queue */ 4056 for (i = 0; i < QLA_BASE_VECTORS; i++) { 4057 qentry = &ha->msix_entries[i]; 4058 qentry->handle = rsp; 4059 rsp->msix = qentry; 4060 scnprintf(qentry->name, sizeof(qentry->name), 4061 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); 4062 if (IS_P3P_TYPE(ha)) 4063 ret = request_irq(qentry->vector, 4064 qla82xx_msix_entries[i].handler, 4065 0, qla82xx_msix_entries[i].name, rsp); 4066 else 4067 ret = request_irq(qentry->vector, 4068 msix_entries[i].handler, 4069 0, qentry->name, rsp); 4070 if (ret) 4071 goto msix_register_fail; 4072 qentry->have_irq = 1; 4073 qentry->in_use = 1; 4074 } 4075 4076 /* 4077 * If target mode is enable, also request the vector for the ATIO 4078 * queue. 4079 */ 4080 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 4081 IS_ATIO_MSIX_CAPABLE(ha)) { 4082 qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; 4083 rsp->msix = qentry; 4084 qentry->handle = rsp; 4085 scnprintf(qentry->name, sizeof(qentry->name), 4086 "qla2xxx%lu_%s", vha->host_no, 4087 msix_entries[QLA_ATIO_VECTOR].name); 4088 qentry->in_use = 1; 4089 ret = request_irq(qentry->vector, 4090 msix_entries[QLA_ATIO_VECTOR].handler, 4091 0, qentry->name, rsp); 4092 qentry->have_irq = 1; 4093 } 4094 4095 msix_register_fail: 4096 if (ret) { 4097 ql_log(ql_log_fatal, vha, 0x00cb, 4098 "MSI-X: unable to register handler -- %x/%d.\n", 4099 qentry->vector, ret); 4100 qla2x00_free_irqs(vha); 4101 ha->mqenable = 0; 4102 goto msix_out; 4103 } 4104 4105 /* Enable MSI-X vector for response queue update for queue 0 */ 4106 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4107 if (ha->msixbase && ha->mqiobase && 4108 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 4109 ql2xmqsupport)) 4110 ha->mqenable = 1; 4111 } else 4112 if (ha->mqiobase && 4113 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 4114 ql2xmqsupport)) 4115 ha->mqenable = 1; 4116 ql_dbg(ql_dbg_multiq, vha, 0xc005, 4117 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 4118 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 4119 ql_dbg(ql_dbg_init, vha, 0x0055, 4120 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 4121 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 4122 4123 msix_out: 4124 return ret; 4125 4126 free_irqs: 4127 pci_free_irq_vectors(ha->pdev); 4128 goto msix_out; 4129 } 4130 4131 int 4132 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 4133 { 4134 int ret = QLA_FUNCTION_FAILED; 4135 device_reg_t *reg = ha->iobase; 4136 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4137 4138 /* If possible, enable MSI-X. */ 4139 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 4140 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && 4141 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))) 4142 goto skip_msi; 4143 4144 if (ql2xenablemsix == 2) 4145 goto skip_msix; 4146 4147 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 4148 (ha->pdev->subsystem_device == 0x7040 || 4149 ha->pdev->subsystem_device == 0x7041 || 4150 ha->pdev->subsystem_device == 0x1705)) { 4151 ql_log(ql_log_warn, vha, 0x0034, 4152 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 4153 ha->pdev->subsystem_vendor, 4154 ha->pdev->subsystem_device); 4155 goto skip_msi; 4156 } 4157 4158 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 4159 ql_log(ql_log_warn, vha, 0x0035, 4160 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 4161 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 4162 goto skip_msix; 4163 } 4164 4165 ret = qla24xx_enable_msix(ha, rsp); 4166 if (!ret) { 4167 ql_dbg(ql_dbg_init, vha, 0x0036, 4168 "MSI-X: Enabled (0x%X, 0x%X).\n", 4169 ha->chip_revision, ha->fw_attributes); 4170 goto clear_risc_ints; 4171 } 4172 4173 skip_msix: 4174 4175 ql_log(ql_log_info, vha, 0x0037, 4176 "Falling back-to MSI mode -- ret=%d.\n", ret); 4177 4178 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 4179 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 4180 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4181 goto skip_msi; 4182 4183 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); 4184 if (ret > 0) { 4185 ql_dbg(ql_dbg_init, vha, 0x0038, 4186 "MSI: Enabled.\n"); 4187 ha->flags.msi_enabled = 1; 4188 } else 4189 ql_log(ql_log_warn, vha, 0x0039, 4190 "Falling back-to INTa mode -- ret=%d.\n", ret); 4191 skip_msi: 4192 4193 /* Skip INTx on ISP82xx. */ 4194 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 4195 return QLA_FUNCTION_FAILED; 4196 4197 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 4198 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 4199 QLA2XXX_DRIVER_NAME, rsp); 4200 if (ret) { 4201 ql_log(ql_log_warn, vha, 0x003a, 4202 "Failed to reserve interrupt %d already in use.\n", 4203 ha->pdev->irq); 4204 goto fail; 4205 } else if (!ha->flags.msi_enabled) { 4206 ql_dbg(ql_dbg_init, vha, 0x0125, 4207 "INTa mode: Enabled.\n"); 4208 ha->flags.mr_intr_valid = 1; 4209 } 4210 4211 clear_risc_ints: 4212 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 4213 goto fail; 4214 4215 spin_lock_irq(&ha->hardware_lock); 4216 wrt_reg_word(®->isp.semaphore, 0); 4217 spin_unlock_irq(&ha->hardware_lock); 4218 4219 fail: 4220 return ret; 4221 } 4222 4223 void 4224 qla2x00_free_irqs(scsi_qla_host_t *vha) 4225 { 4226 struct qla_hw_data *ha = vha->hw; 4227 struct rsp_que *rsp; 4228 struct qla_msix_entry *qentry; 4229 int i; 4230 4231 /* 4232 * We need to check that ha->rsp_q_map is valid in case we are called 4233 * from a probe failure context. 4234 */ 4235 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 4236 goto free_irqs; 4237 rsp = ha->rsp_q_map[0]; 4238 4239 if (ha->flags.msix_enabled) { 4240 for (i = 0; i < ha->msix_count; i++) { 4241 qentry = &ha->msix_entries[i]; 4242 if (qentry->have_irq) { 4243 irq_set_affinity_notifier(qentry->vector, NULL); 4244 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); 4245 } 4246 } 4247 kfree(ha->msix_entries); 4248 ha->msix_entries = NULL; 4249 ha->flags.msix_enabled = 0; 4250 ql_dbg(ql_dbg_init, vha, 0x0042, 4251 "Disabled MSI-X.\n"); 4252 } else { 4253 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 4254 } 4255 4256 free_irqs: 4257 pci_free_irq_vectors(ha->pdev); 4258 } 4259 4260 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 4261 struct qla_msix_entry *msix, int vector_type) 4262 { 4263 const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 4264 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4265 int ret; 4266 4267 scnprintf(msix->name, sizeof(msix->name), 4268 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); 4269 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); 4270 if (ret) { 4271 ql_log(ql_log_fatal, vha, 0x00e6, 4272 "MSI-X: Unable to register handler -- %x/%d.\n", 4273 msix->vector, ret); 4274 return ret; 4275 } 4276 msix->have_irq = 1; 4277 msix->handle = qpair; 4278 return ret; 4279 } 4280