1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * QLogic Fibre Channel HBA Driver 4 * Copyright (c) 2003-2014 QLogic Corporation 5 */ 6 #include "qla_def.h" 7 #include "qla_target.h" 8 9 #include <linux/delay.h> 10 #include <linux/slab.h> 11 #include <linux/cpu.h> 12 #include <linux/t10-pi.h> 13 #include <scsi/scsi_tcq.h> 14 #include <scsi/scsi_bsg_fc.h> 15 #include <scsi/scsi_eh.h> 16 #include <scsi/fc/fc_fs.h> 17 #include <linux/nvme-fc-driver.h> 18 19 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 20 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 21 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 22 static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 23 sts_entry_t *); 24 static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, 25 struct purex_item *item); 26 static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha, 27 uint16_t size); 28 static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, 29 void *pkt); 30 static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, 31 void **pkt, struct rsp_que **rsp); 32 33 static void 34 qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item) 35 { 36 void *pkt = &item->iocb; 37 uint16_t pkt_size = item->size; 38 39 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d, 40 "%s: Enter\n", __func__); 41 42 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e, 43 "-------- ELS REQ -------\n"); 44 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f, 45 pkt, pkt_size); 46 47 fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt); 48 } 49 50 const char *const port_state_str[] = { 51 "Unknown", 52 "UNCONFIGURED", 53 "DEAD", 54 "LOST", 55 "ONLINE" 56 }; 57 58 static void 59 qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) 60 { 61 struct abts_entry_24xx *abts = 62 (struct abts_entry_24xx *)&pkt->iocb; 63 struct qla_hw_data *ha = vha->hw; 64 struct els_entry_24xx *rsp_els; 65 struct abts_entry_24xx *abts_rsp; 66 dma_addr_t dma; 67 uint32_t fctl; 68 int rval; 69 70 ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); 71 72 ql_log(ql_log_warn, vha, 0x0287, 73 "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", 74 abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, 75 abts->seq_id, abts->seq_cnt); 76 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 77 "-------- ABTS RCV -------\n"); 78 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, 79 (uint8_t *)abts, sizeof(*abts)); 80 81 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, 82 GFP_KERNEL); 83 if (!rsp_els) { 84 ql_log(ql_log_warn, vha, 0x0287, 85 "Failed allocate dma buffer ABTS/ELS RSP.\n"); 86 return; 87 } 88 89 /* terminate exchange */ 90 rsp_els->entry_type = ELS_IOCB_TYPE; 91 rsp_els->entry_count = 1; 92 rsp_els->nport_handle = cpu_to_le16(~0); 93 rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; 94 rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); 95 ql_dbg(ql_dbg_init, vha, 0x0283, 96 "Sending ELS Response to terminate exchange %#x...\n", 97 abts->rx_xch_addr_to_abort); 98 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 99 "-------- ELS RSP -------\n"); 100 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, 101 (uint8_t *)rsp_els, sizeof(*rsp_els)); 102 rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); 103 if (rval) { 104 ql_log(ql_log_warn, vha, 0x0288, 105 "%s: iocb failed to execute -> %x\n", __func__, rval); 106 } else if (rsp_els->comp_status) { 107 ql_log(ql_log_warn, vha, 0x0289, 108 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 109 __func__, rsp_els->comp_status, 110 rsp_els->error_subcode_1, rsp_els->error_subcode_2); 111 } else { 112 ql_dbg(ql_dbg_init, vha, 0x028a, 113 "%s: abort exchange done.\n", __func__); 114 } 115 116 /* send ABTS response */ 117 abts_rsp = (void *)rsp_els; 118 memset(abts_rsp, 0, sizeof(*abts_rsp)); 119 abts_rsp->entry_type = ABTS_RSP_TYPE; 120 abts_rsp->entry_count = 1; 121 abts_rsp->nport_handle = abts->nport_handle; 122 abts_rsp->vp_idx = abts->vp_idx; 123 abts_rsp->sof_type = abts->sof_type & 0xf0; 124 abts_rsp->rx_xch_addr = abts->rx_xch_addr; 125 abts_rsp->d_id[0] = abts->s_id[0]; 126 abts_rsp->d_id[1] = abts->s_id[1]; 127 abts_rsp->d_id[2] = abts->s_id[2]; 128 abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; 129 abts_rsp->s_id[0] = abts->d_id[0]; 130 abts_rsp->s_id[1] = abts->d_id[1]; 131 abts_rsp->s_id[2] = abts->d_id[2]; 132 abts_rsp->cs_ctl = abts->cs_ctl; 133 /* include flipping bit23 in fctl */ 134 fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | 135 FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; 136 abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; 137 abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; 138 abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; 139 abts_rsp->type = FC_TYPE_BLD; 140 abts_rsp->rx_id = abts->rx_id; 141 abts_rsp->ox_id = abts->ox_id; 142 abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; 143 abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; 144 abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); 145 abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; 146 ql_dbg(ql_dbg_init, vha, 0x028b, 147 "Sending BA ACC response to ABTS %#x...\n", 148 abts->rx_xch_addr_to_abort); 149 ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 150 "-------- ELS RSP -------\n"); 151 ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, 152 (uint8_t *)abts_rsp, sizeof(*abts_rsp)); 153 rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); 154 if (rval) { 155 ql_log(ql_log_warn, vha, 0x028c, 156 "%s: iocb failed to execute -> %x\n", __func__, rval); 157 } else if (abts_rsp->comp_status) { 158 ql_log(ql_log_warn, vha, 0x028d, 159 "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", 160 __func__, abts_rsp->comp_status, 161 abts_rsp->payload.error.subcode1, 162 abts_rsp->payload.error.subcode2); 163 } else { 164 ql_dbg(ql_dbg_init, vha, 0x028ea, 165 "%s: done.\n", __func__); 166 } 167 168 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); 169 } 170 171 /** 172 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 173 * @irq: interrupt number 174 * @dev_id: SCSI driver HA context 175 * 176 * Called by system whenever the host adapter generates an interrupt. 177 * 178 * Returns handled flag. 179 */ 180 irqreturn_t 181 qla2100_intr_handler(int irq, void *dev_id) 182 { 183 scsi_qla_host_t *vha; 184 struct qla_hw_data *ha; 185 struct device_reg_2xxx __iomem *reg; 186 int status; 187 unsigned long iter; 188 uint16_t hccr; 189 uint16_t mb[8]; 190 struct rsp_que *rsp; 191 unsigned long flags; 192 193 rsp = (struct rsp_que *) dev_id; 194 if (!rsp) { 195 ql_log(ql_log_info, NULL, 0x505d, 196 "%s: NULL response queue pointer.\n", __func__); 197 return (IRQ_NONE); 198 } 199 200 ha = rsp->hw; 201 reg = &ha->iobase->isp; 202 status = 0; 203 204 spin_lock_irqsave(&ha->hardware_lock, flags); 205 vha = pci_get_drvdata(ha->pdev); 206 for (iter = 50; iter--; ) { 207 hccr = rd_reg_word(®->hccr); 208 if (qla2x00_check_reg16_for_disconnect(vha, hccr)) 209 break; 210 if (hccr & HCCR_RISC_PAUSE) { 211 if (pci_channel_offline(ha->pdev)) 212 break; 213 214 /* 215 * Issue a "HARD" reset in order for the RISC interrupt 216 * bit to be cleared. Schedule a big hammer to get 217 * out of the RISC PAUSED state. 218 */ 219 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 220 rd_reg_word(®->hccr); 221 222 ha->isp_ops->fw_dump(vha); 223 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 224 break; 225 } else if ((rd_reg_word(®->istatus) & ISR_RISC_INT) == 0) 226 break; 227 228 if (rd_reg_word(®->semaphore) & BIT_0) { 229 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 230 rd_reg_word(®->hccr); 231 232 /* Get mailbox data. */ 233 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 234 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 235 qla2x00_mbx_completion(vha, mb[0]); 236 status |= MBX_INTERRUPT; 237 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 238 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 239 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 240 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 241 qla2x00_async_event(vha, rsp, mb); 242 } else { 243 /*EMPTY*/ 244 ql_dbg(ql_dbg_async, vha, 0x5025, 245 "Unrecognized interrupt type (%d).\n", 246 mb[0]); 247 } 248 /* Release mailbox registers. */ 249 wrt_reg_word(®->semaphore, 0); 250 rd_reg_word(®->semaphore); 251 } else { 252 qla2x00_process_response_queue(rsp); 253 254 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 255 rd_reg_word(®->hccr); 256 } 257 } 258 qla2x00_handle_mbx_completion(ha, status); 259 spin_unlock_irqrestore(&ha->hardware_lock, flags); 260 261 return (IRQ_HANDLED); 262 } 263 264 bool 265 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 266 { 267 /* Check for PCI disconnection */ 268 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { 269 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 270 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 271 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 272 /* 273 * Schedule this (only once) on the default system 274 * workqueue so that all the adapter workqueues and the 275 * DPC thread can be shutdown cleanly. 276 */ 277 schedule_work(&vha->hw->board_disable); 278 } 279 return true; 280 } else 281 return false; 282 } 283 284 bool 285 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) 286 { 287 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); 288 } 289 290 /** 291 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 292 * @irq: interrupt number 293 * @dev_id: SCSI driver HA context 294 * 295 * Called by system whenever the host adapter generates an interrupt. 296 * 297 * Returns handled flag. 298 */ 299 irqreturn_t 300 qla2300_intr_handler(int irq, void *dev_id) 301 { 302 scsi_qla_host_t *vha; 303 struct device_reg_2xxx __iomem *reg; 304 int status; 305 unsigned long iter; 306 uint32_t stat; 307 uint16_t hccr; 308 uint16_t mb[8]; 309 struct rsp_que *rsp; 310 struct qla_hw_data *ha; 311 unsigned long flags; 312 313 rsp = (struct rsp_que *) dev_id; 314 if (!rsp) { 315 ql_log(ql_log_info, NULL, 0x5058, 316 "%s: NULL response queue pointer.\n", __func__); 317 return (IRQ_NONE); 318 } 319 320 ha = rsp->hw; 321 reg = &ha->iobase->isp; 322 status = 0; 323 324 spin_lock_irqsave(&ha->hardware_lock, flags); 325 vha = pci_get_drvdata(ha->pdev); 326 for (iter = 50; iter--; ) { 327 stat = rd_reg_dword(®->u.isp2300.host_status); 328 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 329 break; 330 if (stat & HSR_RISC_PAUSED) { 331 if (unlikely(pci_channel_offline(ha->pdev))) 332 break; 333 334 hccr = rd_reg_word(®->hccr); 335 336 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 337 ql_log(ql_log_warn, vha, 0x5026, 338 "Parity error -- HCCR=%x, Dumping " 339 "firmware.\n", hccr); 340 else 341 ql_log(ql_log_warn, vha, 0x5027, 342 "RISC paused -- HCCR=%x, Dumping " 343 "firmware.\n", hccr); 344 345 /* 346 * Issue a "HARD" reset in order for the RISC 347 * interrupt bit to be cleared. Schedule a big 348 * hammer to get out of the RISC PAUSED state. 349 */ 350 wrt_reg_word(®->hccr, HCCR_RESET_RISC); 351 rd_reg_word(®->hccr); 352 353 ha->isp_ops->fw_dump(vha); 354 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 355 break; 356 } else if ((stat & HSR_RISC_INT) == 0) 357 break; 358 359 switch (stat & 0xff) { 360 case 0x1: 361 case 0x2: 362 case 0x10: 363 case 0x11: 364 qla2x00_mbx_completion(vha, MSW(stat)); 365 status |= MBX_INTERRUPT; 366 367 /* Release mailbox registers. */ 368 wrt_reg_word(®->semaphore, 0); 369 break; 370 case 0x12: 371 mb[0] = MSW(stat); 372 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 373 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 374 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 375 qla2x00_async_event(vha, rsp, mb); 376 break; 377 case 0x13: 378 qla2x00_process_response_queue(rsp); 379 break; 380 case 0x15: 381 mb[0] = MBA_CMPLT_1_16BIT; 382 mb[1] = MSW(stat); 383 qla2x00_async_event(vha, rsp, mb); 384 break; 385 case 0x16: 386 mb[0] = MBA_SCSI_COMPLETION; 387 mb[1] = MSW(stat); 388 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 389 qla2x00_async_event(vha, rsp, mb); 390 break; 391 default: 392 ql_dbg(ql_dbg_async, vha, 0x5028, 393 "Unrecognized interrupt type (%d).\n", stat & 0xff); 394 break; 395 } 396 wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); 397 rd_reg_word_relaxed(®->hccr); 398 } 399 qla2x00_handle_mbx_completion(ha, status); 400 spin_unlock_irqrestore(&ha->hardware_lock, flags); 401 402 return (IRQ_HANDLED); 403 } 404 405 /** 406 * qla2x00_mbx_completion() - Process mailbox command completions. 407 * @vha: SCSI driver HA context 408 * @mb0: Mailbox0 register 409 */ 410 static void 411 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 412 { 413 uint16_t cnt; 414 uint32_t mboxes; 415 __le16 __iomem *wptr; 416 struct qla_hw_data *ha = vha->hw; 417 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 418 419 /* Read all mbox registers? */ 420 WARN_ON_ONCE(ha->mbx_count > 32); 421 mboxes = (1ULL << ha->mbx_count) - 1; 422 if (!ha->mcp) 423 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 424 else 425 mboxes = ha->mcp->in_mb; 426 427 /* Load return mailbox registers. */ 428 ha->flags.mbox_int = 1; 429 ha->mailbox_out[0] = mb0; 430 mboxes >>= 1; 431 wptr = MAILBOX_REG(ha, reg, 1); 432 433 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 434 if (IS_QLA2200(ha) && cnt == 8) 435 wptr = MAILBOX_REG(ha, reg, 8); 436 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 437 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 438 else if (mboxes & BIT_0) 439 ha->mailbox_out[cnt] = rd_reg_word(wptr); 440 441 wptr++; 442 mboxes >>= 1; 443 } 444 } 445 446 static void 447 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 448 { 449 static char *event[] = 450 { "Complete", "Request Notification", "Time Extension" }; 451 int rval; 452 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 453 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 454 __le16 __iomem *wptr; 455 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 456 457 /* Seed data -- mailbox1 -> mailbox7. */ 458 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 459 wptr = ®24->mailbox1; 460 else if (IS_QLA8044(vha->hw)) 461 wptr = ®82->mailbox_out[1]; 462 else 463 return; 464 465 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 466 mb[cnt] = rd_reg_word(wptr); 467 468 ql_dbg(ql_dbg_async, vha, 0x5021, 469 "Inter-Driver Communication %s -- " 470 "%04x %04x %04x %04x %04x %04x %04x.\n", 471 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 472 mb[4], mb[5], mb[6]); 473 switch (aen) { 474 /* Handle IDC Error completion case. */ 475 case MBA_IDC_COMPLETE: 476 if (mb[1] >> 15) { 477 vha->hw->flags.idc_compl_status = 1; 478 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 479 complete(&vha->hw->dcbx_comp); 480 } 481 break; 482 483 case MBA_IDC_NOTIFY: 484 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 485 timeout = (descr >> 8) & 0xf; 486 ql_dbg(ql_dbg_async, vha, 0x5022, 487 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 488 vha->host_no, event[aen & 0xff], timeout); 489 490 if (!timeout) 491 return; 492 rval = qla2x00_post_idc_ack_work(vha, mb); 493 if (rval != QLA_SUCCESS) 494 ql_log(ql_log_warn, vha, 0x5023, 495 "IDC failed to post ACK.\n"); 496 break; 497 case MBA_IDC_TIME_EXT: 498 vha->hw->idc_extend_tmo = descr; 499 ql_dbg(ql_dbg_async, vha, 0x5087, 500 "%lu Inter-Driver Communication %s -- " 501 "Extend timeout by=%d.\n", 502 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 503 break; 504 } 505 } 506 507 #define LS_UNKNOWN 2 508 const char * 509 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 510 { 511 static const char *const link_speeds[] = { 512 "1", "2", "?", "4", "8", "16", "32", "10" 513 }; 514 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) 515 516 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 517 return link_speeds[0]; 518 else if (speed == 0x13) 519 return link_speeds[QLA_LAST_SPEED]; 520 else if (speed < QLA_LAST_SPEED) 521 return link_speeds[speed]; 522 else 523 return link_speeds[LS_UNKNOWN]; 524 } 525 526 static void 527 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 528 { 529 struct qla_hw_data *ha = vha->hw; 530 531 /* 532 * 8200 AEN Interpretation: 533 * mb[0] = AEN code 534 * mb[1] = AEN Reason code 535 * mb[2] = LSW of Peg-Halt Status-1 Register 536 * mb[6] = MSW of Peg-Halt Status-1 Register 537 * mb[3] = LSW of Peg-Halt Status-2 register 538 * mb[7] = MSW of Peg-Halt Status-2 register 539 * mb[4] = IDC Device-State Register value 540 * mb[5] = IDC Driver-Presence Register value 541 */ 542 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 543 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 544 mb[0], mb[1], mb[2], mb[6]); 545 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 546 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 547 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 548 549 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 550 IDC_HEARTBEAT_FAILURE)) { 551 ha->flags.nic_core_hung = 1; 552 ql_log(ql_log_warn, vha, 0x5060, 553 "83XX: F/W Error Reported: Check if reset required.\n"); 554 555 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 556 uint32_t protocol_engine_id, fw_err_code, err_level; 557 558 /* 559 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 560 * - PEG-Halt Status-1 Register: 561 * (LSW = mb[2], MSW = mb[6]) 562 * Bits 0-7 = protocol-engine ID 563 * Bits 8-28 = f/w error code 564 * Bits 29-31 = Error-level 565 * Error-level 0x1 = Non-Fatal error 566 * Error-level 0x2 = Recoverable Fatal error 567 * Error-level 0x4 = UnRecoverable Fatal error 568 * - PEG-Halt Status-2 Register: 569 * (LSW = mb[3], MSW = mb[7]) 570 */ 571 protocol_engine_id = (mb[2] & 0xff); 572 fw_err_code = (((mb[2] & 0xff00) >> 8) | 573 ((mb[6] & 0x1fff) << 8)); 574 err_level = ((mb[6] & 0xe000) >> 13); 575 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 576 "Register: protocol_engine_id=0x%x " 577 "fw_err_code=0x%x err_level=0x%x.\n", 578 protocol_engine_id, fw_err_code, err_level); 579 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 580 "Register: 0x%x%x.\n", mb[7], mb[3]); 581 if (err_level == ERR_LEVEL_NON_FATAL) { 582 ql_log(ql_log_warn, vha, 0x5063, 583 "Not a fatal error, f/w has recovered itself.\n"); 584 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 585 ql_log(ql_log_fatal, vha, 0x5064, 586 "Recoverable Fatal error: Chip reset " 587 "required.\n"); 588 qla83xx_schedule_work(vha, 589 QLA83XX_NIC_CORE_RESET); 590 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 591 ql_log(ql_log_fatal, vha, 0x5065, 592 "Unrecoverable Fatal error: Set FAILED " 593 "state, reboot required.\n"); 594 qla83xx_schedule_work(vha, 595 QLA83XX_NIC_CORE_UNRECOVERABLE); 596 } 597 } 598 599 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 600 uint16_t peg_fw_state, nw_interface_link_up; 601 uint16_t nw_interface_signal_detect, sfp_status; 602 uint16_t htbt_counter, htbt_monitor_enable; 603 uint16_t sfp_additional_info, sfp_multirate; 604 uint16_t sfp_tx_fault, link_speed, dcbx_status; 605 606 /* 607 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 608 * - PEG-to-FC Status Register: 609 * (LSW = mb[2], MSW = mb[6]) 610 * Bits 0-7 = Peg-Firmware state 611 * Bit 8 = N/W Interface Link-up 612 * Bit 9 = N/W Interface signal detected 613 * Bits 10-11 = SFP Status 614 * SFP Status 0x0 = SFP+ transceiver not expected 615 * SFP Status 0x1 = SFP+ transceiver not present 616 * SFP Status 0x2 = SFP+ transceiver invalid 617 * SFP Status 0x3 = SFP+ transceiver present and 618 * valid 619 * Bits 12-14 = Heartbeat Counter 620 * Bit 15 = Heartbeat Monitor Enable 621 * Bits 16-17 = SFP Additional Info 622 * SFP info 0x0 = Unregocnized transceiver for 623 * Ethernet 624 * SFP info 0x1 = SFP+ brand validation failed 625 * SFP info 0x2 = SFP+ speed validation failed 626 * SFP info 0x3 = SFP+ access error 627 * Bit 18 = SFP Multirate 628 * Bit 19 = SFP Tx Fault 629 * Bits 20-22 = Link Speed 630 * Bits 23-27 = Reserved 631 * Bits 28-30 = DCBX Status 632 * DCBX Status 0x0 = DCBX Disabled 633 * DCBX Status 0x1 = DCBX Enabled 634 * DCBX Status 0x2 = DCBX Exchange error 635 * Bit 31 = Reserved 636 */ 637 peg_fw_state = (mb[2] & 0x00ff); 638 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 639 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 640 sfp_status = ((mb[2] & 0x0c00) >> 10); 641 htbt_counter = ((mb[2] & 0x7000) >> 12); 642 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 643 sfp_additional_info = (mb[6] & 0x0003); 644 sfp_multirate = ((mb[6] & 0x0004) >> 2); 645 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 646 link_speed = ((mb[6] & 0x0070) >> 4); 647 dcbx_status = ((mb[6] & 0x7000) >> 12); 648 649 ql_log(ql_log_warn, vha, 0x5066, 650 "Peg-to-Fc Status Register:\n" 651 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 652 "nw_interface_signal_detect=0x%x" 653 "\nsfp_statis=0x%x.\n ", peg_fw_state, 654 nw_interface_link_up, nw_interface_signal_detect, 655 sfp_status); 656 ql_log(ql_log_warn, vha, 0x5067, 657 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 658 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 659 htbt_counter, htbt_monitor_enable, 660 sfp_additional_info, sfp_multirate); 661 ql_log(ql_log_warn, vha, 0x5068, 662 "sfp_tx_fault=0x%x, link_state=0x%x, " 663 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 664 dcbx_status); 665 666 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 667 } 668 669 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 670 ql_log(ql_log_warn, vha, 0x5069, 671 "Heartbeat Failure encountered, chip reset " 672 "required.\n"); 673 674 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 675 } 676 } 677 678 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 679 ql_log(ql_log_info, vha, 0x506a, 680 "IDC Device-State changed = 0x%x.\n", mb[4]); 681 if (ha->flags.nic_core_reset_owner) 682 return; 683 qla83xx_schedule_work(vha, MBA_IDC_AEN); 684 } 685 } 686 687 int 688 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 689 { 690 struct qla_hw_data *ha = vha->hw; 691 scsi_qla_host_t *vp; 692 uint32_t vp_did; 693 unsigned long flags; 694 int ret = 0; 695 696 if (!ha->num_vhosts) 697 return ret; 698 699 spin_lock_irqsave(&ha->vport_slock, flags); 700 list_for_each_entry(vp, &ha->vp_list, list) { 701 vp_did = vp->d_id.b24; 702 if (vp_did == rscn_entry) { 703 ret = 1; 704 break; 705 } 706 } 707 spin_unlock_irqrestore(&ha->vport_slock, flags); 708 709 return ret; 710 } 711 712 fc_port_t * 713 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 714 { 715 fc_port_t *f, *tf; 716 717 f = tf = NULL; 718 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) 719 if (f->loop_id == loop_id) 720 return f; 721 return NULL; 722 } 723 724 fc_port_t * 725 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) 726 { 727 fc_port_t *f, *tf; 728 729 f = tf = NULL; 730 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 731 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { 732 if (incl_deleted) 733 return f; 734 else if (f->deleted == 0) 735 return f; 736 } 737 } 738 return NULL; 739 } 740 741 fc_port_t * 742 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, 743 u8 incl_deleted) 744 { 745 fc_port_t *f, *tf; 746 747 f = tf = NULL; 748 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 749 if (f->d_id.b24 == id->b24) { 750 if (incl_deleted) 751 return f; 752 else if (f->deleted == 0) 753 return f; 754 } 755 } 756 return NULL; 757 } 758 759 /* Shall be called only on supported adapters. */ 760 static void 761 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 762 { 763 struct qla_hw_data *ha = vha->hw; 764 bool reset_isp_needed = 0; 765 766 ql_log(ql_log_warn, vha, 0x02f0, 767 "MPI Heartbeat stop. MPI reset is%s needed. " 768 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", 769 mb[1] & BIT_8 ? "" : " not", 770 mb[0], mb[1], mb[2], mb[3]); 771 772 if ((mb[1] & BIT_8) == 0) 773 return; 774 775 ql_log(ql_log_warn, vha, 0x02f1, 776 "MPI Heartbeat stop. FW dump needed\n"); 777 778 if (ql2xfulldump_on_mpifail) { 779 ha->isp_ops->fw_dump(vha); 780 reset_isp_needed = 1; 781 } 782 783 ha->isp_ops->mpi_fw_dump(vha, 1); 784 785 if (reset_isp_needed) { 786 vha->hw->flags.fw_init_done = 0; 787 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 788 qla2xxx_wake_dpc(vha); 789 } 790 } 791 792 static struct purex_item * 793 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) 794 { 795 struct purex_item *item = NULL; 796 uint8_t item_hdr_size = sizeof(*item); 797 798 if (size > QLA_DEFAULT_PAYLOAD_SIZE) { 799 item = kzalloc(item_hdr_size + 800 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); 801 } else { 802 if (atomic_inc_return(&vha->default_item.in_use) == 1) { 803 item = &vha->default_item; 804 goto initialize_purex_header; 805 } else { 806 item = kzalloc(item_hdr_size, GFP_ATOMIC); 807 } 808 } 809 if (!item) { 810 ql_log(ql_log_warn, vha, 0x5092, 811 ">> Failed allocate purex list item.\n"); 812 813 return NULL; 814 } 815 816 initialize_purex_header: 817 item->vha = vha; 818 item->size = size; 819 return item; 820 } 821 822 static void 823 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, 824 void (*process_item)(struct scsi_qla_host *vha, 825 struct purex_item *pkt)) 826 { 827 struct purex_list *list = &vha->purex_list; 828 ulong flags; 829 830 pkt->process_item = process_item; 831 832 spin_lock_irqsave(&list->lock, flags); 833 list_add_tail(&pkt->list, &list->head); 834 spin_unlock_irqrestore(&list->lock, flags); 835 836 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); 837 } 838 839 /** 840 * qla24xx_copy_std_pkt() - Copy over purex ELS which is 841 * contained in a single IOCB. 842 * purex packet. 843 * @vha: SCSI driver HA context 844 * @pkt: ELS packet 845 */ 846 static struct purex_item 847 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) 848 { 849 struct purex_item *item; 850 851 item = qla24xx_alloc_purex_item(vha, 852 QLA_DEFAULT_PAYLOAD_SIZE); 853 if (!item) 854 return item; 855 856 memcpy(&item->iocb, pkt, sizeof(item->iocb)); 857 return item; 858 } 859 860 /** 861 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can 862 * span over multiple IOCBs. 863 * @vha: SCSI driver HA context 864 * @pkt: ELS packet 865 * @rsp: Response queue 866 */ 867 static struct purex_item * 868 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, 869 struct rsp_que **rsp) 870 { 871 struct purex_entry_24xx *purex = *pkt; 872 struct rsp_que *rsp_q = *rsp; 873 sts_cont_entry_t *new_pkt; 874 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; 875 uint16_t buffer_copy_offset = 0; 876 uint16_t entry_count, entry_count_remaining; 877 struct purex_item *item; 878 void *fpin_pkt = NULL; 879 880 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) 881 - PURX_ELS_HEADER_SIZE; 882 pending_bytes = total_bytes; 883 entry_count = entry_count_remaining = purex->entry_count; 884 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 885 sizeof(purex->els_frame_payload) : pending_bytes; 886 ql_log(ql_log_info, vha, 0x509a, 887 "FPIN ELS, frame_size 0x%x, entry count %d\n", 888 total_bytes, entry_count); 889 890 item = qla24xx_alloc_purex_item(vha, total_bytes); 891 if (!item) 892 return item; 893 894 fpin_pkt = &item->iocb; 895 896 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes); 897 buffer_copy_offset += no_bytes; 898 pending_bytes -= no_bytes; 899 --entry_count_remaining; 900 901 ((response_t *)purex)->signature = RESPONSE_PROCESSED; 902 wmb(); 903 904 do { 905 while ((total_bytes > 0) && (entry_count_remaining > 0)) { 906 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { 907 ql_dbg(ql_dbg_async, vha, 0x5084, 908 "Ran out of IOCBs, partial data 0x%x\n", 909 buffer_copy_offset); 910 cpu_relax(); 911 continue; 912 } 913 914 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; 915 *pkt = new_pkt; 916 917 if (new_pkt->entry_type != STATUS_CONT_TYPE) { 918 ql_log(ql_log_warn, vha, 0x507a, 919 "Unexpected IOCB type, partial data 0x%x\n", 920 buffer_copy_offset); 921 break; 922 } 923 924 rsp_q->ring_index++; 925 if (rsp_q->ring_index == rsp_q->length) { 926 rsp_q->ring_index = 0; 927 rsp_q->ring_ptr = rsp_q->ring; 928 } else { 929 rsp_q->ring_ptr++; 930 } 931 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? 932 sizeof(new_pkt->data) : pending_bytes; 933 if ((buffer_copy_offset + no_bytes) <= total_bytes) { 934 memcpy(((uint8_t *)fpin_pkt + 935 buffer_copy_offset), new_pkt->data, 936 no_bytes); 937 buffer_copy_offset += no_bytes; 938 pending_bytes -= no_bytes; 939 --entry_count_remaining; 940 } else { 941 ql_log(ql_log_warn, vha, 0x5044, 942 "Attempt to copy more that we got, optimizing..%x\n", 943 buffer_copy_offset); 944 memcpy(((uint8_t *)fpin_pkt + 945 buffer_copy_offset), new_pkt->data, 946 total_bytes - buffer_copy_offset); 947 } 948 949 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; 950 wmb(); 951 } 952 953 if (pending_bytes != 0 || entry_count_remaining != 0) { 954 ql_log(ql_log_fatal, vha, 0x508b, 955 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", 956 total_bytes, entry_count_remaining); 957 qla24xx_free_purex_item(item); 958 return NULL; 959 } 960 } while (entry_count_remaining > 0); 961 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); 962 return item; 963 } 964 965 /** 966 * qla2x00_async_event() - Process aynchronous events. 967 * @vha: SCSI driver HA context 968 * @rsp: response queue 969 * @mb: Mailbox registers (0 - 3) 970 */ 971 void 972 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 973 { 974 uint16_t handle_cnt; 975 uint16_t cnt, mbx; 976 uint32_t handles[5]; 977 struct qla_hw_data *ha = vha->hw; 978 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 979 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 980 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 981 uint32_t rscn_entry, host_pid; 982 unsigned long flags; 983 fc_port_t *fcport = NULL; 984 985 if (!vha->hw->flags.fw_started) 986 return; 987 988 /* Setup to process RIO completion. */ 989 handle_cnt = 0; 990 if (IS_CNA_CAPABLE(ha)) 991 goto skip_rio; 992 switch (mb[0]) { 993 case MBA_SCSI_COMPLETION: 994 handles[0] = make_handle(mb[2], mb[1]); 995 handle_cnt = 1; 996 break; 997 case MBA_CMPLT_1_16BIT: 998 handles[0] = mb[1]; 999 handle_cnt = 1; 1000 mb[0] = MBA_SCSI_COMPLETION; 1001 break; 1002 case MBA_CMPLT_2_16BIT: 1003 handles[0] = mb[1]; 1004 handles[1] = mb[2]; 1005 handle_cnt = 2; 1006 mb[0] = MBA_SCSI_COMPLETION; 1007 break; 1008 case MBA_CMPLT_3_16BIT: 1009 handles[0] = mb[1]; 1010 handles[1] = mb[2]; 1011 handles[2] = mb[3]; 1012 handle_cnt = 3; 1013 mb[0] = MBA_SCSI_COMPLETION; 1014 break; 1015 case MBA_CMPLT_4_16BIT: 1016 handles[0] = mb[1]; 1017 handles[1] = mb[2]; 1018 handles[2] = mb[3]; 1019 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1020 handle_cnt = 4; 1021 mb[0] = MBA_SCSI_COMPLETION; 1022 break; 1023 case MBA_CMPLT_5_16BIT: 1024 handles[0] = mb[1]; 1025 handles[1] = mb[2]; 1026 handles[2] = mb[3]; 1027 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1028 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 1029 handle_cnt = 5; 1030 mb[0] = MBA_SCSI_COMPLETION; 1031 break; 1032 case MBA_CMPLT_2_32BIT: 1033 handles[0] = make_handle(mb[2], mb[1]); 1034 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), 1035 RD_MAILBOX_REG(ha, reg, 6)); 1036 handle_cnt = 2; 1037 mb[0] = MBA_SCSI_COMPLETION; 1038 break; 1039 default: 1040 break; 1041 } 1042 skip_rio: 1043 switch (mb[0]) { 1044 case MBA_SCSI_COMPLETION: /* Fast Post */ 1045 if (!vha->flags.online) 1046 break; 1047 1048 for (cnt = 0; cnt < handle_cnt; cnt++) 1049 qla2x00_process_completed_request(vha, rsp->req, 1050 handles[cnt]); 1051 break; 1052 1053 case MBA_RESET: /* Reset */ 1054 ql_dbg(ql_dbg_async, vha, 0x5002, 1055 "Asynchronous RESET.\n"); 1056 1057 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1058 break; 1059 1060 case MBA_SYSTEM_ERR: /* System Error */ 1061 mbx = 0; 1062 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 1063 IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1064 u16 m[4]; 1065 1066 m[0] = rd_reg_word(®24->mailbox4); 1067 m[1] = rd_reg_word(®24->mailbox5); 1068 m[2] = rd_reg_word(®24->mailbox6); 1069 mbx = m[3] = rd_reg_word(®24->mailbox7); 1070 1071 ql_log(ql_log_warn, vha, 0x5003, 1072 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", 1073 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); 1074 } else 1075 ql_log(ql_log_warn, vha, 0x5003, 1076 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", 1077 mb[1], mb[2], mb[3]); 1078 1079 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 1080 rd_reg_word(®24->mailbox7) & BIT_8) 1081 ha->isp_ops->mpi_fw_dump(vha, 1); 1082 ha->isp_ops->fw_dump(vha); 1083 ha->flags.fw_init_done = 0; 1084 QLA_FW_STOPPED(ha); 1085 1086 if (IS_FWI2_CAPABLE(ha)) { 1087 if (mb[1] == 0 && mb[2] == 0) { 1088 ql_log(ql_log_fatal, vha, 0x5004, 1089 "Unrecoverable Hardware Error: adapter " 1090 "marked OFFLINE!\n"); 1091 vha->flags.online = 0; 1092 vha->device_flags |= DFLG_DEV_FAILED; 1093 } else { 1094 /* Check to see if MPI timeout occurred */ 1095 if ((mbx & MBX_3) && (ha->port_no == 0)) 1096 set_bit(MPI_RESET_NEEDED, 1097 &vha->dpc_flags); 1098 1099 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1100 } 1101 } else if (mb[1] == 0) { 1102 ql_log(ql_log_fatal, vha, 0x5005, 1103 "Unrecoverable Hardware Error: adapter marked " 1104 "OFFLINE!\n"); 1105 vha->flags.online = 0; 1106 vha->device_flags |= DFLG_DEV_FAILED; 1107 } else 1108 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1109 break; 1110 1111 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 1112 ql_log(ql_log_warn, vha, 0x5006, 1113 "ISP Request Transfer Error (%x).\n", mb[1]); 1114 1115 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1116 break; 1117 1118 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 1119 ql_log(ql_log_warn, vha, 0x5007, 1120 "ISP Response Transfer Error (%x).\n", mb[1]); 1121 1122 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1123 break; 1124 1125 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 1126 ql_dbg(ql_dbg_async, vha, 0x5008, 1127 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); 1128 break; 1129 1130 case MBA_LOOP_INIT_ERR: 1131 ql_log(ql_log_warn, vha, 0x5090, 1132 "LOOP INIT ERROR (%x).\n", mb[1]); 1133 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1134 break; 1135 1136 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 1137 ha->flags.lip_ae = 1; 1138 1139 ql_dbg(ql_dbg_async, vha, 0x5009, 1140 "LIP occurred (%x).\n", mb[1]); 1141 1142 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1143 atomic_set(&vha->loop_state, LOOP_DOWN); 1144 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1145 qla2x00_mark_all_devices_lost(vha); 1146 } 1147 1148 if (vha->vp_idx) { 1149 atomic_set(&vha->vp_state, VP_FAILED); 1150 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1151 } 1152 1153 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1154 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1155 1156 vha->flags.management_server_logged_in = 0; 1157 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 1158 break; 1159 1160 case MBA_LOOP_UP: /* Loop Up Event */ 1161 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1162 ha->link_data_rate = PORT_SPEED_1GB; 1163 else 1164 ha->link_data_rate = mb[1]; 1165 1166 ql_log(ql_log_info, vha, 0x500a, 1167 "LOOP UP detected (%s Gbps).\n", 1168 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 1169 1170 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1171 if (mb[2] & BIT_0) 1172 ql_log(ql_log_info, vha, 0x11a0, 1173 "FEC=enabled (link up).\n"); 1174 } 1175 1176 vha->flags.management_server_logged_in = 0; 1177 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 1178 1179 break; 1180 1181 case MBA_LOOP_DOWN: /* Loop Down Event */ 1182 SAVE_TOPO(ha); 1183 ha->flags.lip_ae = 0; 1184 ha->current_topology = 0; 1185 1186 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 1187 ? rd_reg_word(®24->mailbox4) : 0; 1188 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(®82->mailbox_out[4]) 1189 : mbx; 1190 ql_log(ql_log_info, vha, 0x500b, 1191 "LOOP DOWN detected (%x %x %x %x).\n", 1192 mb[1], mb[2], mb[3], mbx); 1193 1194 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1195 atomic_set(&vha->loop_state, LOOP_DOWN); 1196 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1197 /* 1198 * In case of loop down, restore WWPN from 1199 * NVRAM in case of FA-WWPN capable ISP 1200 * Restore for Physical Port only 1201 */ 1202 if (!vha->vp_idx) { 1203 if (ha->flags.fawwpn_enabled && 1204 (ha->current_topology == ISP_CFG_F)) { 1205 void *wwpn = ha->init_cb->port_name; 1206 1207 memcpy(vha->port_name, wwpn, WWN_SIZE); 1208 fc_host_port_name(vha->host) = 1209 wwn_to_u64(vha->port_name); 1210 ql_dbg(ql_dbg_init + ql_dbg_verbose, 1211 vha, 0x00d8, "LOOP DOWN detected," 1212 "restore WWPN %016llx\n", 1213 wwn_to_u64(vha->port_name)); 1214 } 1215 1216 clear_bit(VP_CONFIG_OK, &vha->vp_flags); 1217 } 1218 1219 vha->device_flags |= DFLG_NO_CABLE; 1220 qla2x00_mark_all_devices_lost(vha); 1221 } 1222 1223 if (vha->vp_idx) { 1224 atomic_set(&vha->vp_state, VP_FAILED); 1225 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1226 } 1227 1228 vha->flags.management_server_logged_in = 0; 1229 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1230 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 1231 break; 1232 1233 case MBA_LIP_RESET: /* LIP reset occurred */ 1234 ql_dbg(ql_dbg_async, vha, 0x500c, 1235 "LIP reset occurred (%x).\n", mb[1]); 1236 1237 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1238 atomic_set(&vha->loop_state, LOOP_DOWN); 1239 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1240 qla2x00_mark_all_devices_lost(vha); 1241 } 1242 1243 if (vha->vp_idx) { 1244 atomic_set(&vha->vp_state, VP_FAILED); 1245 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1246 } 1247 1248 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1249 1250 ha->operating_mode = LOOP; 1251 vha->flags.management_server_logged_in = 0; 1252 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 1253 break; 1254 1255 /* case MBA_DCBX_COMPLETE: */ 1256 case MBA_POINT_TO_POINT: /* Point-to-Point */ 1257 ha->flags.lip_ae = 0; 1258 1259 if (IS_QLA2100(ha)) 1260 break; 1261 1262 if (IS_CNA_CAPABLE(ha)) { 1263 ql_dbg(ql_dbg_async, vha, 0x500d, 1264 "DCBX Completed -- %04x %04x %04x.\n", 1265 mb[1], mb[2], mb[3]); 1266 if (ha->notify_dcbx_comp && !vha->vp_idx) 1267 complete(&ha->dcbx_comp); 1268 1269 } else 1270 ql_dbg(ql_dbg_async, vha, 0x500e, 1271 "Asynchronous P2P MODE received.\n"); 1272 1273 /* 1274 * Until there's a transition from loop down to loop up, treat 1275 * this as loop down only. 1276 */ 1277 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1278 atomic_set(&vha->loop_state, LOOP_DOWN); 1279 if (!atomic_read(&vha->loop_down_timer)) 1280 atomic_set(&vha->loop_down_timer, 1281 LOOP_DOWN_TIME); 1282 if (!N2N_TOPO(ha)) 1283 qla2x00_mark_all_devices_lost(vha); 1284 } 1285 1286 if (vha->vp_idx) { 1287 atomic_set(&vha->vp_state, VP_FAILED); 1288 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1289 } 1290 1291 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 1292 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1293 1294 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1295 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1296 1297 vha->flags.management_server_logged_in = 0; 1298 break; 1299 1300 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 1301 if (IS_QLA2100(ha)) 1302 break; 1303 1304 ql_dbg(ql_dbg_async, vha, 0x500f, 1305 "Configuration change detected: value=%x.\n", mb[1]); 1306 1307 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1308 atomic_set(&vha->loop_state, LOOP_DOWN); 1309 if (!atomic_read(&vha->loop_down_timer)) 1310 atomic_set(&vha->loop_down_timer, 1311 LOOP_DOWN_TIME); 1312 qla2x00_mark_all_devices_lost(vha); 1313 } 1314 1315 if (vha->vp_idx) { 1316 atomic_set(&vha->vp_state, VP_FAILED); 1317 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1318 } 1319 1320 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1321 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1322 break; 1323 1324 case MBA_PORT_UPDATE: /* Port database update */ 1325 /* 1326 * Handle only global and vn-port update events 1327 * 1328 * Relevant inputs: 1329 * mb[1] = N_Port handle of changed port 1330 * OR 0xffff for global event 1331 * mb[2] = New login state 1332 * 7 = Port logged out 1333 * mb[3] = LSB is vp_idx, 0xff = all vps 1334 * 1335 * Skip processing if: 1336 * Event is global, vp_idx is NOT all vps, 1337 * vp_idx does not match 1338 * Event is not global, vp_idx does not match 1339 */ 1340 if (IS_QLA2XXX_MIDTYPE(ha) && 1341 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 1342 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 1343 break; 1344 1345 if (mb[2] == 0x7) { 1346 ql_dbg(ql_dbg_async, vha, 0x5010, 1347 "Port %s %04x %04x %04x.\n", 1348 mb[1] == 0xffff ? "unavailable" : "logout", 1349 mb[1], mb[2], mb[3]); 1350 1351 if (mb[1] == 0xffff) 1352 goto global_port_update; 1353 1354 if (mb[1] == NPH_SNS_LID(ha)) { 1355 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1356 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1357 break; 1358 } 1359 1360 /* use handle_cnt for loop id/nport handle */ 1361 if (IS_FWI2_CAPABLE(ha)) 1362 handle_cnt = NPH_SNS; 1363 else 1364 handle_cnt = SIMPLE_NAME_SERVER; 1365 if (mb[1] == handle_cnt) { 1366 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1367 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1368 break; 1369 } 1370 1371 /* Port logout */ 1372 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1373 if (!fcport) 1374 break; 1375 if (atomic_read(&fcport->state) != FCS_ONLINE) 1376 break; 1377 ql_dbg(ql_dbg_async, vha, 0x508a, 1378 "Marking port lost loopid=%04x portid=%06x.\n", 1379 fcport->loop_id, fcport->d_id.b24); 1380 if (qla_ini_mode_enabled(vha)) { 1381 fcport->logout_on_delete = 0; 1382 qlt_schedule_sess_for_deletion(fcport); 1383 } 1384 break; 1385 1386 global_port_update: 1387 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1388 atomic_set(&vha->loop_state, LOOP_DOWN); 1389 atomic_set(&vha->loop_down_timer, 1390 LOOP_DOWN_TIME); 1391 vha->device_flags |= DFLG_NO_CABLE; 1392 qla2x00_mark_all_devices_lost(vha); 1393 } 1394 1395 if (vha->vp_idx) { 1396 atomic_set(&vha->vp_state, VP_FAILED); 1397 fc_vport_set_state(vha->fc_vport, 1398 FC_VPORT_FAILED); 1399 qla2x00_mark_all_devices_lost(vha); 1400 } 1401 1402 vha->flags.management_server_logged_in = 0; 1403 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1404 break; 1405 } 1406 1407 /* 1408 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1409 * event etc. earlier indicating loop is down) then process 1410 * it. Otherwise ignore it and Wait for RSCN to come in. 1411 */ 1412 atomic_set(&vha->loop_down_timer, 0); 1413 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1414 !ha->flags.n2n_ae && 1415 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1416 ql_dbg(ql_dbg_async, vha, 0x5011, 1417 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1418 mb[1], mb[2], mb[3]); 1419 break; 1420 } 1421 1422 ql_dbg(ql_dbg_async, vha, 0x5012, 1423 "Port database changed %04x %04x %04x.\n", 1424 mb[1], mb[2], mb[3]); 1425 1426 /* 1427 * Mark all devices as missing so we will login again. 1428 */ 1429 atomic_set(&vha->loop_state, LOOP_UP); 1430 vha->scan.scan_retry = 0; 1431 1432 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1433 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1434 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1435 break; 1436 1437 case MBA_RSCN_UPDATE: /* State Change Registration */ 1438 /* Check if the Vport has issued a SCR */ 1439 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1440 break; 1441 /* Only handle SCNs for our Vport index. */ 1442 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1443 break; 1444 1445 ql_dbg(ql_dbg_async, vha, 0x5013, 1446 "RSCN database changed -- %04x %04x %04x.\n", 1447 mb[1], mb[2], mb[3]); 1448 1449 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1450 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1451 | vha->d_id.b.al_pa; 1452 if (rscn_entry == host_pid) { 1453 ql_dbg(ql_dbg_async, vha, 0x5014, 1454 "Ignoring RSCN update to local host " 1455 "port ID (%06x).\n", host_pid); 1456 break; 1457 } 1458 1459 /* Ignore reserved bits from RSCN-payload. */ 1460 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 1461 1462 /* Skip RSCNs for virtual ports on the same physical port */ 1463 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 1464 break; 1465 1466 atomic_set(&vha->loop_down_timer, 0); 1467 vha->flags.management_server_logged_in = 0; 1468 { 1469 struct event_arg ea; 1470 1471 memset(&ea, 0, sizeof(ea)); 1472 ea.id.b24 = rscn_entry; 1473 ea.id.b.rsvd_1 = rscn_entry >> 24; 1474 qla2x00_handle_rscn(vha, &ea); 1475 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1476 } 1477 break; 1478 case MBA_CONGN_NOTI_RECV: 1479 if (!ha->flags.scm_enabled || 1480 mb[1] != QLA_CON_PRIMITIVE_RECEIVED) 1481 break; 1482 1483 if (mb[2] == QLA_CONGESTION_ARB_WARNING) { 1484 ql_dbg(ql_dbg_async, vha, 0x509b, 1485 "Congestion Warning %04x %04x.\n", mb[1], mb[2]); 1486 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { 1487 ql_log(ql_log_warn, vha, 0x509b, 1488 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); 1489 } 1490 break; 1491 /* case MBA_RIO_RESPONSE: */ 1492 case MBA_ZIO_RESPONSE: 1493 ql_dbg(ql_dbg_async, vha, 0x5015, 1494 "[R|Z]IO update completion.\n"); 1495 1496 if (IS_FWI2_CAPABLE(ha)) 1497 qla24xx_process_response_queue(vha, rsp); 1498 else 1499 qla2x00_process_response_queue(rsp); 1500 break; 1501 1502 case MBA_DISCARD_RND_FRAME: 1503 ql_dbg(ql_dbg_async, vha, 0x5016, 1504 "Discard RND Frame -- %04x %04x %04x.\n", 1505 mb[1], mb[2], mb[3]); 1506 break; 1507 1508 case MBA_TRACE_NOTIFICATION: 1509 ql_dbg(ql_dbg_async, vha, 0x5017, 1510 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1511 break; 1512 1513 case MBA_ISP84XX_ALERT: 1514 ql_dbg(ql_dbg_async, vha, 0x5018, 1515 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1516 mb[1], mb[2], mb[3]); 1517 1518 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1519 switch (mb[1]) { 1520 case A84_PANIC_RECOVERY: 1521 ql_log(ql_log_info, vha, 0x5019, 1522 "Alert 84XX: panic recovery %04x %04x.\n", 1523 mb[2], mb[3]); 1524 break; 1525 case A84_OP_LOGIN_COMPLETE: 1526 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1527 ql_log(ql_log_info, vha, 0x501a, 1528 "Alert 84XX: firmware version %x.\n", 1529 ha->cs84xx->op_fw_version); 1530 break; 1531 case A84_DIAG_LOGIN_COMPLETE: 1532 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1533 ql_log(ql_log_info, vha, 0x501b, 1534 "Alert 84XX: diagnostic firmware version %x.\n", 1535 ha->cs84xx->diag_fw_version); 1536 break; 1537 case A84_GOLD_LOGIN_COMPLETE: 1538 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1539 ha->cs84xx->fw_update = 1; 1540 ql_log(ql_log_info, vha, 0x501c, 1541 "Alert 84XX: gold firmware version %x.\n", 1542 ha->cs84xx->gold_fw_version); 1543 break; 1544 default: 1545 ql_log(ql_log_warn, vha, 0x501d, 1546 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1547 mb[1], mb[2], mb[3]); 1548 } 1549 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1550 break; 1551 case MBA_DCBX_START: 1552 ql_dbg(ql_dbg_async, vha, 0x501e, 1553 "DCBX Started -- %04x %04x %04x.\n", 1554 mb[1], mb[2], mb[3]); 1555 break; 1556 case MBA_DCBX_PARAM_UPDATE: 1557 ql_dbg(ql_dbg_async, vha, 0x501f, 1558 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1559 mb[1], mb[2], mb[3]); 1560 break; 1561 case MBA_FCF_CONF_ERR: 1562 ql_dbg(ql_dbg_async, vha, 0x5020, 1563 "FCF Configuration Error -- %04x %04x %04x.\n", 1564 mb[1], mb[2], mb[3]); 1565 break; 1566 case MBA_IDC_NOTIFY: 1567 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1568 mb[4] = rd_reg_word(®24->mailbox4); 1569 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1570 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1571 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1572 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1573 /* 1574 * Extend loop down timer since port is active. 1575 */ 1576 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1577 atomic_set(&vha->loop_down_timer, 1578 LOOP_DOWN_TIME); 1579 qla2xxx_wake_dpc(vha); 1580 } 1581 } 1582 fallthrough; 1583 case MBA_IDC_COMPLETE: 1584 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1585 complete(&ha->lb_portup_comp); 1586 fallthrough; 1587 case MBA_IDC_TIME_EXT: 1588 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1589 IS_QLA8044(ha)) 1590 qla81xx_idc_event(vha, mb[0], mb[1]); 1591 break; 1592 1593 case MBA_IDC_AEN: 1594 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1595 qla27xx_handle_8200_aen(vha, mb); 1596 } else if (IS_QLA83XX(ha)) { 1597 mb[4] = rd_reg_word(®24->mailbox4); 1598 mb[5] = rd_reg_word(®24->mailbox5); 1599 mb[6] = rd_reg_word(®24->mailbox6); 1600 mb[7] = rd_reg_word(®24->mailbox7); 1601 qla83xx_handle_8200_aen(vha, mb); 1602 } else { 1603 ql_dbg(ql_dbg_async, vha, 0x5052, 1604 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", 1605 mb[0], mb[1], mb[2], mb[3]); 1606 } 1607 break; 1608 1609 case MBA_DPORT_DIAGNOSTICS: 1610 ql_dbg(ql_dbg_async, vha, 0x5052, 1611 "D-Port Diagnostics: %04x %04x %04x %04x\n", 1612 mb[0], mb[1], mb[2], mb[3]); 1613 memcpy(vha->dport_data, mb, sizeof(vha->dport_data)); 1614 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1615 static char *results[] = { 1616 "start", "done(pass)", "done(error)", "undefined" }; 1617 static char *types[] = { 1618 "none", "dynamic", "static", "other" }; 1619 uint result = mb[1] >> 0 & 0x3; 1620 uint type = mb[1] >> 6 & 0x3; 1621 uint sw = mb[1] >> 15 & 0x1; 1622 ql_dbg(ql_dbg_async, vha, 0x5052, 1623 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", 1624 results[result], types[type], sw); 1625 if (result == 2) { 1626 static char *reasons[] = { 1627 "reserved", "unexpected reject", 1628 "unexpected phase", "retry exceeded", 1629 "timed out", "not supported", 1630 "user stopped" }; 1631 uint reason = mb[2] >> 0 & 0xf; 1632 uint phase = mb[2] >> 12 & 0xf; 1633 ql_dbg(ql_dbg_async, vha, 0x5052, 1634 "D-Port Diagnostics: reason=%s phase=%u \n", 1635 reason < 7 ? reasons[reason] : "other", 1636 phase >> 1); 1637 } 1638 } 1639 break; 1640 1641 case MBA_TEMPERATURE_ALERT: 1642 ql_dbg(ql_dbg_async, vha, 0x505e, 1643 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1644 if (mb[1] == 0x12) 1645 schedule_work(&ha->board_disable); 1646 break; 1647 1648 case MBA_TRANS_INSERT: 1649 ql_dbg(ql_dbg_async, vha, 0x5091, 1650 "Transceiver Insertion: %04x\n", mb[1]); 1651 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); 1652 break; 1653 1654 case MBA_TRANS_REMOVE: 1655 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); 1656 break; 1657 1658 default: 1659 ql_dbg(ql_dbg_async, vha, 0x5057, 1660 "Unknown AEN:%04x %04x %04x %04x\n", 1661 mb[0], mb[1], mb[2], mb[3]); 1662 } 1663 1664 qlt_async_event(mb[0], vha, mb); 1665 1666 if (!vha->vp_idx && ha->num_vhosts) 1667 qla2x00_alert_all_vps(rsp, mb); 1668 } 1669 1670 /** 1671 * qla2x00_process_completed_request() - Process a Fast Post response. 1672 * @vha: SCSI driver HA context 1673 * @req: request queue 1674 * @index: SRB index 1675 */ 1676 void 1677 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1678 struct req_que *req, uint32_t index) 1679 { 1680 srb_t *sp; 1681 struct qla_hw_data *ha = vha->hw; 1682 1683 /* Validate handle. */ 1684 if (index >= req->num_outstanding_cmds) { 1685 ql_log(ql_log_warn, vha, 0x3014, 1686 "Invalid SCSI command index (%x).\n", index); 1687 1688 if (IS_P3P_TYPE(ha)) 1689 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1690 else 1691 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1692 return; 1693 } 1694 1695 sp = req->outstanding_cmds[index]; 1696 if (sp) { 1697 /* Free outstanding command slot. */ 1698 req->outstanding_cmds[index] = NULL; 1699 1700 /* Save ISP completion status */ 1701 sp->done(sp, DID_OK << 16); 1702 } else { 1703 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1704 1705 if (IS_P3P_TYPE(ha)) 1706 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1707 else 1708 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1709 } 1710 } 1711 1712 srb_t * 1713 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1714 struct req_que *req, void *iocb) 1715 { 1716 struct qla_hw_data *ha = vha->hw; 1717 sts_entry_t *pkt = iocb; 1718 srb_t *sp; 1719 uint16_t index; 1720 1721 index = LSW(pkt->handle); 1722 if (index >= req->num_outstanding_cmds) { 1723 ql_log(ql_log_warn, vha, 0x5031, 1724 "%s: Invalid command index (%x) type %8ph.\n", 1725 func, index, iocb); 1726 if (IS_P3P_TYPE(ha)) 1727 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1728 else 1729 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1730 return NULL; 1731 } 1732 sp = req->outstanding_cmds[index]; 1733 if (!sp) { 1734 ql_log(ql_log_warn, vha, 0x5032, 1735 "%s: Invalid completion handle (%x) -- timed-out.\n", 1736 func, index); 1737 return NULL; 1738 } 1739 if (sp->handle != index) { 1740 ql_log(ql_log_warn, vha, 0x5033, 1741 "%s: SRB handle (%x) mismatch %x.\n", func, 1742 sp->handle, index); 1743 return NULL; 1744 } 1745 1746 req->outstanding_cmds[index] = NULL; 1747 return sp; 1748 } 1749 1750 static void 1751 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1752 struct mbx_entry *mbx) 1753 { 1754 const char func[] = "MBX-IOCB"; 1755 const char *type; 1756 fc_port_t *fcport; 1757 srb_t *sp; 1758 struct srb_iocb *lio; 1759 uint16_t *data; 1760 uint16_t status; 1761 1762 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1763 if (!sp) 1764 return; 1765 1766 lio = &sp->u.iocb_cmd; 1767 type = sp->name; 1768 fcport = sp->fcport; 1769 data = lio->u.logio.data; 1770 1771 data[0] = MBS_COMMAND_ERROR; 1772 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1773 QLA_LOGIO_LOGIN_RETRIED : 0; 1774 if (mbx->entry_status) { 1775 ql_dbg(ql_dbg_async, vha, 0x5043, 1776 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1777 "entry-status=%x status=%x state-flag=%x " 1778 "status-flags=%x.\n", type, sp->handle, 1779 fcport->d_id.b.domain, fcport->d_id.b.area, 1780 fcport->d_id.b.al_pa, mbx->entry_status, 1781 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1782 le16_to_cpu(mbx->status_flags)); 1783 1784 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1785 mbx, sizeof(*mbx)); 1786 1787 goto logio_done; 1788 } 1789 1790 status = le16_to_cpu(mbx->status); 1791 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1792 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1793 status = 0; 1794 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1795 ql_dbg(ql_dbg_async, vha, 0x5045, 1796 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1797 type, sp->handle, fcport->d_id.b.domain, 1798 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1799 le16_to_cpu(mbx->mb1)); 1800 1801 data[0] = MBS_COMMAND_COMPLETE; 1802 if (sp->type == SRB_LOGIN_CMD) { 1803 fcport->port_type = FCT_TARGET; 1804 if (le16_to_cpu(mbx->mb1) & BIT_0) 1805 fcport->port_type = FCT_INITIATOR; 1806 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1807 fcport->flags |= FCF_FCP2_DEVICE; 1808 } 1809 goto logio_done; 1810 } 1811 1812 data[0] = le16_to_cpu(mbx->mb0); 1813 switch (data[0]) { 1814 case MBS_PORT_ID_USED: 1815 data[1] = le16_to_cpu(mbx->mb1); 1816 break; 1817 case MBS_LOOP_ID_USED: 1818 break; 1819 default: 1820 data[0] = MBS_COMMAND_ERROR; 1821 break; 1822 } 1823 1824 ql_log(ql_log_warn, vha, 0x5046, 1825 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1826 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1827 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1828 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1829 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1830 le16_to_cpu(mbx->mb7)); 1831 1832 logio_done: 1833 sp->done(sp, 0); 1834 } 1835 1836 static void 1837 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1838 struct mbx_24xx_entry *pkt) 1839 { 1840 const char func[] = "MBX-IOCB2"; 1841 srb_t *sp; 1842 struct srb_iocb *si; 1843 u16 sz, i; 1844 int res; 1845 1846 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1847 if (!sp) 1848 return; 1849 1850 si = &sp->u.iocb_cmd; 1851 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); 1852 1853 for (i = 0; i < sz; i++) 1854 si->u.mbx.in_mb[i] = pkt->mb[i]; 1855 1856 res = (si->u.mbx.in_mb[0] & MBS_MASK); 1857 1858 sp->done(sp, res); 1859 } 1860 1861 static void 1862 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1863 struct nack_to_isp *pkt) 1864 { 1865 const char func[] = "nack"; 1866 srb_t *sp; 1867 int res = 0; 1868 1869 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1870 if (!sp) 1871 return; 1872 1873 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) 1874 res = QLA_FUNCTION_FAILED; 1875 1876 sp->done(sp, res); 1877 } 1878 1879 static void 1880 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1881 sts_entry_t *pkt, int iocb_type) 1882 { 1883 const char func[] = "CT_IOCB"; 1884 const char *type; 1885 srb_t *sp; 1886 struct bsg_job *bsg_job; 1887 struct fc_bsg_reply *bsg_reply; 1888 uint16_t comp_status; 1889 int res = 0; 1890 1891 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1892 if (!sp) 1893 return; 1894 1895 switch (sp->type) { 1896 case SRB_CT_CMD: 1897 bsg_job = sp->u.bsg_job; 1898 bsg_reply = bsg_job->reply; 1899 1900 type = "ct pass-through"; 1901 1902 comp_status = le16_to_cpu(pkt->comp_status); 1903 1904 /* 1905 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1906 * fc payload to the caller 1907 */ 1908 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1909 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1910 1911 if (comp_status != CS_COMPLETE) { 1912 if (comp_status == CS_DATA_UNDERRUN) { 1913 res = DID_OK << 16; 1914 bsg_reply->reply_payload_rcv_len = 1915 le16_to_cpu(pkt->rsp_info_len); 1916 1917 ql_log(ql_log_warn, vha, 0x5048, 1918 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", 1919 type, comp_status, 1920 bsg_reply->reply_payload_rcv_len); 1921 } else { 1922 ql_log(ql_log_warn, vha, 0x5049, 1923 "CT pass-through-%s error comp_status=0x%x.\n", 1924 type, comp_status); 1925 res = DID_ERROR << 16; 1926 bsg_reply->reply_payload_rcv_len = 0; 1927 } 1928 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1929 pkt, sizeof(*pkt)); 1930 } else { 1931 res = DID_OK << 16; 1932 bsg_reply->reply_payload_rcv_len = 1933 bsg_job->reply_payload.payload_len; 1934 bsg_job->reply_len = 0; 1935 } 1936 break; 1937 case SRB_CT_PTHRU_CMD: 1938 /* 1939 * borrowing sts_entry_24xx.comp_status. 1940 * same location as ct_entry_24xx.comp_status 1941 */ 1942 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 1943 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1944 sp->name); 1945 break; 1946 } 1947 1948 sp->done(sp, res); 1949 } 1950 1951 static void 1952 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1953 struct sts_entry_24xx *pkt, int iocb_type) 1954 { 1955 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; 1956 const char func[] = "ELS_CT_IOCB"; 1957 const char *type; 1958 srb_t *sp; 1959 struct bsg_job *bsg_job; 1960 struct fc_bsg_reply *bsg_reply; 1961 uint16_t comp_status; 1962 uint32_t fw_status[3]; 1963 int res; 1964 struct srb_iocb *els; 1965 1966 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1967 if (!sp) 1968 return; 1969 1970 type = NULL; 1971 switch (sp->type) { 1972 case SRB_ELS_CMD_RPT: 1973 case SRB_ELS_CMD_HST: 1974 type = "els"; 1975 break; 1976 case SRB_CT_CMD: 1977 type = "ct pass-through"; 1978 break; 1979 case SRB_ELS_DCMD: 1980 type = "Driver ELS logo"; 1981 if (iocb_type != ELS_IOCB_TYPE) { 1982 ql_dbg(ql_dbg_user, vha, 0x5047, 1983 "Completing %s: (%p) type=%d.\n", 1984 type, sp, sp->type); 1985 sp->done(sp, 0); 1986 return; 1987 } 1988 break; 1989 case SRB_CT_PTHRU_CMD: 1990 /* borrowing sts_entry_24xx.comp_status. 1991 same location as ct_entry_24xx.comp_status 1992 */ 1993 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, 1994 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1995 sp->name); 1996 sp->done(sp, res); 1997 return; 1998 default: 1999 ql_dbg(ql_dbg_user, vha, 0x503e, 2000 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 2001 return; 2002 } 2003 2004 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 2005 fw_status[1] = le32_to_cpu(ese->error_subcode_1); 2006 fw_status[2] = le32_to_cpu(ese->error_subcode_2); 2007 2008 if (iocb_type == ELS_IOCB_TYPE) { 2009 els = &sp->u.iocb_cmd; 2010 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); 2011 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); 2012 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); 2013 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); 2014 if (comp_status == CS_COMPLETE) { 2015 res = DID_OK << 16; 2016 } else { 2017 if (comp_status == CS_DATA_UNDERRUN) { 2018 res = DID_OK << 16; 2019 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( 2020 ese->total_byte_count)); 2021 } else { 2022 els->u.els_plogi.len = 0; 2023 res = DID_ERROR << 16; 2024 } 2025 } 2026 ql_dbg(ql_dbg_disc, vha, 0x503f, 2027 "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", 2028 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2029 le32_to_cpu(ese->total_byte_count)); 2030 goto els_ct_done; 2031 } 2032 2033 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 2034 * fc payload to the caller 2035 */ 2036 bsg_job = sp->u.bsg_job; 2037 bsg_reply = bsg_job->reply; 2038 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 2039 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 2040 2041 if (comp_status != CS_COMPLETE) { 2042 if (comp_status == CS_DATA_UNDERRUN) { 2043 res = DID_OK << 16; 2044 bsg_reply->reply_payload_rcv_len = 2045 le32_to_cpu(ese->total_byte_count); 2046 2047 ql_dbg(ql_dbg_user, vha, 0x503f, 2048 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2049 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 2050 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2051 le32_to_cpu(ese->total_byte_count)); 2052 } else { 2053 ql_dbg(ql_dbg_user, vha, 0x5040, 2054 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2055 "error subcode 1=0x%x error subcode 2=0x%x.\n", 2056 type, sp->handle, comp_status, 2057 le32_to_cpu(ese->error_subcode_1), 2058 le32_to_cpu(ese->error_subcode_2)); 2059 res = DID_ERROR << 16; 2060 bsg_reply->reply_payload_rcv_len = 0; 2061 } 2062 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), 2063 fw_status, sizeof(fw_status)); 2064 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 2065 pkt, sizeof(*pkt)); 2066 } 2067 else { 2068 res = DID_OK << 16; 2069 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 2070 bsg_job->reply_len = 0; 2071 } 2072 els_ct_done: 2073 2074 sp->done(sp, res); 2075 } 2076 2077 static void 2078 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 2079 struct logio_entry_24xx *logio) 2080 { 2081 const char func[] = "LOGIO-IOCB"; 2082 const char *type; 2083 fc_port_t *fcport; 2084 srb_t *sp; 2085 struct srb_iocb *lio; 2086 uint16_t *data; 2087 uint32_t iop[2]; 2088 2089 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 2090 if (!sp) 2091 return; 2092 2093 lio = &sp->u.iocb_cmd; 2094 type = sp->name; 2095 fcport = sp->fcport; 2096 data = lio->u.logio.data; 2097 2098 data[0] = MBS_COMMAND_ERROR; 2099 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 2100 QLA_LOGIO_LOGIN_RETRIED : 0; 2101 if (logio->entry_status) { 2102 ql_log(ql_log_warn, fcport->vha, 0x5034, 2103 "Async-%s error entry - %8phC hdl=%x" 2104 "portid=%02x%02x%02x entry-status=%x.\n", 2105 type, fcport->port_name, sp->handle, fcport->d_id.b.domain, 2106 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2107 logio->entry_status); 2108 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 2109 logio, sizeof(*logio)); 2110 2111 goto logio_done; 2112 } 2113 2114 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 2115 ql_dbg(ql_dbg_async, sp->vha, 0x5036, 2116 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", 2117 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2118 le32_to_cpu(logio->io_parameter[0])); 2119 2120 vha->hw->exch_starvation = 0; 2121 data[0] = MBS_COMMAND_COMPLETE; 2122 2123 if (sp->type == SRB_PRLI_CMD) { 2124 lio->u.logio.iop[0] = 2125 le32_to_cpu(logio->io_parameter[0]); 2126 lio->u.logio.iop[1] = 2127 le32_to_cpu(logio->io_parameter[1]); 2128 goto logio_done; 2129 } 2130 2131 if (sp->type != SRB_LOGIN_CMD) 2132 goto logio_done; 2133 2134 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2135 if (iop[0] & BIT_4) { 2136 fcport->port_type = FCT_TARGET; 2137 if (iop[0] & BIT_8) 2138 fcport->flags |= FCF_FCP2_DEVICE; 2139 } else if (iop[0] & BIT_5) 2140 fcport->port_type = FCT_INITIATOR; 2141 2142 if (iop[0] & BIT_7) 2143 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2144 2145 if (logio->io_parameter[7] || logio->io_parameter[8]) 2146 fcport->supported_classes |= FC_COS_CLASS2; 2147 if (logio->io_parameter[9] || logio->io_parameter[10]) 2148 fcport->supported_classes |= FC_COS_CLASS3; 2149 2150 goto logio_done; 2151 } 2152 2153 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2154 iop[1] = le32_to_cpu(logio->io_parameter[1]); 2155 lio->u.logio.iop[0] = iop[0]; 2156 lio->u.logio.iop[1] = iop[1]; 2157 switch (iop[0]) { 2158 case LSC_SCODE_PORTID_USED: 2159 data[0] = MBS_PORT_ID_USED; 2160 data[1] = LSW(iop[1]); 2161 break; 2162 case LSC_SCODE_NPORT_USED: 2163 data[0] = MBS_LOOP_ID_USED; 2164 break; 2165 case LSC_SCODE_CMD_FAILED: 2166 if (iop[1] == 0x0606) { 2167 /* 2168 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, 2169 * Target side acked. 2170 */ 2171 data[0] = MBS_COMMAND_COMPLETE; 2172 goto logio_done; 2173 } 2174 data[0] = MBS_COMMAND_ERROR; 2175 break; 2176 case LSC_SCODE_NOXCB: 2177 vha->hw->exch_starvation++; 2178 if (vha->hw->exch_starvation > 5) { 2179 ql_log(ql_log_warn, vha, 0xd046, 2180 "Exchange starvation. Resetting RISC\n"); 2181 2182 vha->hw->exch_starvation = 0; 2183 2184 if (IS_P3P_TYPE(vha->hw)) 2185 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2186 else 2187 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2188 qla2xxx_wake_dpc(vha); 2189 } 2190 fallthrough; 2191 default: 2192 data[0] = MBS_COMMAND_ERROR; 2193 break; 2194 } 2195 2196 ql_dbg(ql_dbg_async, sp->vha, 0x5037, 2197 "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", 2198 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2199 le16_to_cpu(logio->comp_status), 2200 le32_to_cpu(logio->io_parameter[0]), 2201 le32_to_cpu(logio->io_parameter[1])); 2202 2203 logio_done: 2204 sp->done(sp, 0); 2205 } 2206 2207 static void 2208 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 2209 { 2210 const char func[] = "TMF-IOCB"; 2211 const char *type; 2212 fc_port_t *fcport; 2213 srb_t *sp; 2214 struct srb_iocb *iocb; 2215 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2216 2217 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 2218 if (!sp) 2219 return; 2220 2221 iocb = &sp->u.iocb_cmd; 2222 type = sp->name; 2223 fcport = sp->fcport; 2224 iocb->u.tmf.data = QLA_SUCCESS; 2225 2226 if (sts->entry_status) { 2227 ql_log(ql_log_warn, fcport->vha, 0x5038, 2228 "Async-%s error - hdl=%x entry-status(%x).\n", 2229 type, sp->handle, sts->entry_status); 2230 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2231 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 2232 ql_log(ql_log_warn, fcport->vha, 0x5039, 2233 "Async-%s error - hdl=%x completion status(%x).\n", 2234 type, sp->handle, sts->comp_status); 2235 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2236 } else if ((le16_to_cpu(sts->scsi_status) & 2237 SS_RESPONSE_INFO_LEN_VALID)) { 2238 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2239 ql_log(ql_log_warn, fcport->vha, 0x503b, 2240 "Async-%s error - hdl=%x not enough response(%d).\n", 2241 type, sp->handle, sts->rsp_data_len); 2242 } else if (sts->data[3]) { 2243 ql_log(ql_log_warn, fcport->vha, 0x503c, 2244 "Async-%s error - hdl=%x response(%x).\n", 2245 type, sp->handle, sts->data[3]); 2246 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2247 } 2248 } 2249 2250 if (iocb->u.tmf.data != QLA_SUCCESS) 2251 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, 2252 sts, sizeof(*sts)); 2253 2254 sp->done(sp, 0); 2255 } 2256 2257 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2258 void *tsk, srb_t *sp) 2259 { 2260 fc_port_t *fcport; 2261 struct srb_iocb *iocb; 2262 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2263 uint16_t state_flags; 2264 struct nvmefc_fcp_req *fd; 2265 uint16_t ret = QLA_SUCCESS; 2266 __le16 comp_status = sts->comp_status; 2267 int logit = 0; 2268 2269 iocb = &sp->u.iocb_cmd; 2270 fcport = sp->fcport; 2271 iocb->u.nvme.comp_status = comp_status; 2272 state_flags = le16_to_cpu(sts->state_flags); 2273 fd = iocb->u.nvme.desc; 2274 2275 if (unlikely(iocb->u.nvme.aen_op)) 2276 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); 2277 2278 if (unlikely(comp_status != CS_COMPLETE)) 2279 logit = 1; 2280 2281 fd->transferred_length = fd->payload_length - 2282 le32_to_cpu(sts->residual_len); 2283 2284 /* 2285 * State flags: Bit 6 and 0. 2286 * If 0 is set, we don't care about 6. 2287 * both cases resp was dma'd to host buffer 2288 * if both are 0, that is good path case. 2289 * if six is set and 0 is clear, we need to 2290 * copy resp data from status iocb to resp buffer. 2291 */ 2292 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { 2293 iocb->u.nvme.rsp_pyld_len = 0; 2294 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == 2295 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { 2296 /* Response already DMA'd to fd->rspaddr. */ 2297 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2298 } else if ((state_flags & SF_FCP_RSP_DMA)) { 2299 /* 2300 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this 2301 * as an error. 2302 */ 2303 iocb->u.nvme.rsp_pyld_len = 0; 2304 fd->transferred_length = 0; 2305 ql_dbg(ql_dbg_io, fcport->vha, 0x307a, 2306 "Unexpected values in NVMe_RSP IU.\n"); 2307 logit = 1; 2308 } else if (state_flags & SF_NVME_ERSP) { 2309 uint32_t *inbuf, *outbuf; 2310 uint16_t iter; 2311 2312 inbuf = (uint32_t *)&sts->nvme_ersp_data; 2313 outbuf = (uint32_t *)fd->rspaddr; 2314 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2315 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > 2316 sizeof(struct nvme_fc_ersp_iu))) { 2317 if (ql_mask_match(ql_dbg_io)) { 2318 WARN_ONCE(1, "Unexpected response payload length %u.\n", 2319 iocb->u.nvme.rsp_pyld_len); 2320 ql_log(ql_log_warn, fcport->vha, 0x5100, 2321 "Unexpected response payload length %u.\n", 2322 iocb->u.nvme.rsp_pyld_len); 2323 } 2324 iocb->u.nvme.rsp_pyld_len = 2325 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); 2326 } 2327 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; 2328 for (; iter; iter--) 2329 *outbuf++ = swab32(*inbuf++); 2330 } 2331 2332 if (state_flags & SF_NVME_ERSP) { 2333 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; 2334 u32 tgt_xfer_len; 2335 2336 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); 2337 if (fd->transferred_length != tgt_xfer_len) { 2338 ql_dbg(ql_dbg_io, fcport->vha, 0x3079, 2339 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", 2340 tgt_xfer_len, fd->transferred_length); 2341 logit = 1; 2342 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { 2343 /* 2344 * Do not log if this is just an underflow and there 2345 * is no data loss. 2346 */ 2347 logit = 0; 2348 } 2349 } 2350 2351 if (unlikely(logit)) 2352 ql_log(ql_log_warn, fcport->vha, 0x5060, 2353 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", 2354 sp->name, sp->handle, comp_status, 2355 fd->transferred_length, le32_to_cpu(sts->residual_len), 2356 sts->ox_id); 2357 2358 /* 2359 * If transport error then Failure (HBA rejects request) 2360 * otherwise transport will handle. 2361 */ 2362 switch (le16_to_cpu(comp_status)) { 2363 case CS_COMPLETE: 2364 break; 2365 2366 case CS_RESET: 2367 case CS_PORT_UNAVAILABLE: 2368 case CS_PORT_LOGGED_OUT: 2369 fcport->nvme_flag |= NVME_FLAG_RESETTING; 2370 fallthrough; 2371 case CS_ABORTED: 2372 case CS_PORT_BUSY: 2373 fd->transferred_length = 0; 2374 iocb->u.nvme.rsp_pyld_len = 0; 2375 ret = QLA_ABORTED; 2376 break; 2377 case CS_DATA_UNDERRUN: 2378 break; 2379 default: 2380 ret = QLA_FUNCTION_FAILED; 2381 break; 2382 } 2383 sp->done(sp, ret); 2384 } 2385 2386 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 2387 struct vp_ctrl_entry_24xx *vce) 2388 { 2389 const char func[] = "CTRLVP-IOCB"; 2390 srb_t *sp; 2391 int rval = QLA_SUCCESS; 2392 2393 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 2394 if (!sp) 2395 return; 2396 2397 if (vce->entry_status != 0) { 2398 ql_dbg(ql_dbg_vport, vha, 0x10c4, 2399 "%s: Failed to complete IOCB -- error status (%x)\n", 2400 sp->name, vce->entry_status); 2401 rval = QLA_FUNCTION_FAILED; 2402 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 2403 ql_dbg(ql_dbg_vport, vha, 0x10c5, 2404 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 2405 sp->name, le16_to_cpu(vce->comp_status), 2406 le16_to_cpu(vce->vp_idx_failed)); 2407 rval = QLA_FUNCTION_FAILED; 2408 } else { 2409 ql_dbg(ql_dbg_vport, vha, 0x10c6, 2410 "Done %s.\n", __func__); 2411 } 2412 2413 sp->rc = rval; 2414 sp->done(sp, rval); 2415 } 2416 2417 /* Process a single response queue entry. */ 2418 static void qla2x00_process_response_entry(struct scsi_qla_host *vha, 2419 struct rsp_que *rsp, 2420 sts_entry_t *pkt) 2421 { 2422 sts21_entry_t *sts21_entry; 2423 sts22_entry_t *sts22_entry; 2424 uint16_t handle_cnt; 2425 uint16_t cnt; 2426 2427 switch (pkt->entry_type) { 2428 case STATUS_TYPE: 2429 qla2x00_status_entry(vha, rsp, pkt); 2430 break; 2431 case STATUS_TYPE_21: 2432 sts21_entry = (sts21_entry_t *)pkt; 2433 handle_cnt = sts21_entry->handle_count; 2434 for (cnt = 0; cnt < handle_cnt; cnt++) 2435 qla2x00_process_completed_request(vha, rsp->req, 2436 sts21_entry->handle[cnt]); 2437 break; 2438 case STATUS_TYPE_22: 2439 sts22_entry = (sts22_entry_t *)pkt; 2440 handle_cnt = sts22_entry->handle_count; 2441 for (cnt = 0; cnt < handle_cnt; cnt++) 2442 qla2x00_process_completed_request(vha, rsp->req, 2443 sts22_entry->handle[cnt]); 2444 break; 2445 case STATUS_CONT_TYPE: 2446 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2447 break; 2448 case MBX_IOCB_TYPE: 2449 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); 2450 break; 2451 case CT_IOCB_TYPE: 2452 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2453 break; 2454 default: 2455 /* Type Not Supported. */ 2456 ql_log(ql_log_warn, vha, 0x504a, 2457 "Received unknown response pkt type %x entry status=%x.\n", 2458 pkt->entry_type, pkt->entry_status); 2459 break; 2460 } 2461 } 2462 2463 /** 2464 * qla2x00_process_response_queue() - Process response queue entries. 2465 * @rsp: response queue 2466 */ 2467 void 2468 qla2x00_process_response_queue(struct rsp_que *rsp) 2469 { 2470 struct scsi_qla_host *vha; 2471 struct qla_hw_data *ha = rsp->hw; 2472 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2473 sts_entry_t *pkt; 2474 2475 vha = pci_get_drvdata(ha->pdev); 2476 2477 if (!vha->flags.online) 2478 return; 2479 2480 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2481 pkt = (sts_entry_t *)rsp->ring_ptr; 2482 2483 rsp->ring_index++; 2484 if (rsp->ring_index == rsp->length) { 2485 rsp->ring_index = 0; 2486 rsp->ring_ptr = rsp->ring; 2487 } else { 2488 rsp->ring_ptr++; 2489 } 2490 2491 if (pkt->entry_status != 0) { 2492 qla2x00_error_entry(vha, rsp, pkt); 2493 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2494 wmb(); 2495 continue; 2496 } 2497 2498 qla2x00_process_response_entry(vha, rsp, pkt); 2499 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2500 wmb(); 2501 } 2502 2503 /* Adjust ring index */ 2504 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 2505 } 2506 2507 static inline void 2508 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2509 uint32_t sense_len, struct rsp_que *rsp, int res) 2510 { 2511 struct scsi_qla_host *vha = sp->vha; 2512 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2513 uint32_t track_sense_len; 2514 2515 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2516 sense_len = SCSI_SENSE_BUFFERSIZE; 2517 2518 SET_CMD_SENSE_LEN(sp, sense_len); 2519 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2520 track_sense_len = sense_len; 2521 2522 if (sense_len > par_sense_len) 2523 sense_len = par_sense_len; 2524 2525 memcpy(cp->sense_buffer, sense_data, sense_len); 2526 2527 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2528 track_sense_len -= sense_len; 2529 SET_CMD_SENSE_LEN(sp, track_sense_len); 2530 2531 if (track_sense_len != 0) { 2532 rsp->status_srb = sp; 2533 cp->result = res; 2534 } 2535 2536 if (sense_len) { 2537 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 2538 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 2539 sp->vha->host_no, cp->device->id, cp->device->lun, 2540 cp); 2541 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 2542 cp->sense_buffer, sense_len); 2543 } 2544 } 2545 2546 struct scsi_dif_tuple { 2547 __be16 guard; /* Checksum */ 2548 __be16 app_tag; /* APPL identifier */ 2549 __be32 ref_tag; /* Target LBA or indirect LBA */ 2550 }; 2551 2552 /* 2553 * Checks the guard or meta-data for the type of error 2554 * detected by the HBA. In case of errors, we set the 2555 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 2556 * to indicate to the kernel that the HBA detected error. 2557 */ 2558 static inline int 2559 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 2560 { 2561 struct scsi_qla_host *vha = sp->vha; 2562 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2563 uint8_t *ap = &sts24->data[12]; 2564 uint8_t *ep = &sts24->data[20]; 2565 uint32_t e_ref_tag, a_ref_tag; 2566 uint16_t e_app_tag, a_app_tag; 2567 uint16_t e_guard, a_guard; 2568 2569 /* 2570 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 2571 * would make guard field appear at offset 2 2572 */ 2573 a_guard = get_unaligned_le16(ap + 2); 2574 a_app_tag = get_unaligned_le16(ap + 0); 2575 a_ref_tag = get_unaligned_le32(ap + 4); 2576 e_guard = get_unaligned_le16(ep + 2); 2577 e_app_tag = get_unaligned_le16(ep + 0); 2578 e_ref_tag = get_unaligned_le32(ep + 4); 2579 2580 ql_dbg(ql_dbg_io, vha, 0x3023, 2581 "iocb(s) %p Returned STATUS.\n", sts24); 2582 2583 ql_dbg(ql_dbg_io, vha, 0x3024, 2584 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 2585 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 2586 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 2587 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 2588 a_app_tag, e_app_tag, a_guard, e_guard); 2589 2590 /* 2591 * Ignore sector if: 2592 * For type 3: ref & app tag is all 'f's 2593 * For type 0,1,2: app tag is all 'f's 2594 */ 2595 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && 2596 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || 2597 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { 2598 uint32_t blocks_done, resid; 2599 sector_t lba_s = scsi_get_lba(cmd); 2600 2601 /* 2TB boundary case covered automatically with this */ 2602 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 2603 2604 resid = scsi_bufflen(cmd) - (blocks_done * 2605 cmd->device->sector_size); 2606 2607 scsi_set_resid(cmd, resid); 2608 cmd->result = DID_OK << 16; 2609 2610 /* Update protection tag */ 2611 if (scsi_prot_sg_count(cmd)) { 2612 uint32_t i, j = 0, k = 0, num_ent; 2613 struct scatterlist *sg; 2614 struct t10_pi_tuple *spt; 2615 2616 /* Patch the corresponding protection tags */ 2617 scsi_for_each_prot_sg(cmd, sg, 2618 scsi_prot_sg_count(cmd), i) { 2619 num_ent = sg_dma_len(sg) / 8; 2620 if (k + num_ent < blocks_done) { 2621 k += num_ent; 2622 continue; 2623 } 2624 j = blocks_done - k - 1; 2625 k = blocks_done; 2626 break; 2627 } 2628 2629 if (k != blocks_done) { 2630 ql_log(ql_log_warn, vha, 0x302f, 2631 "unexpected tag values tag:lba=%x:%llx)\n", 2632 e_ref_tag, (unsigned long long)lba_s); 2633 return 1; 2634 } 2635 2636 spt = page_address(sg_page(sg)) + sg->offset; 2637 spt += j; 2638 2639 spt->app_tag = T10_PI_APP_ESCAPE; 2640 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 2641 spt->ref_tag = T10_PI_REF_ESCAPE; 2642 } 2643 2644 return 0; 2645 } 2646 2647 /* check guard */ 2648 if (e_guard != a_guard) { 2649 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2650 0x10, 0x1); 2651 set_driver_byte(cmd, DRIVER_SENSE); 2652 set_host_byte(cmd, DID_ABORT); 2653 cmd->result |= SAM_STAT_CHECK_CONDITION; 2654 return 1; 2655 } 2656 2657 /* check ref tag */ 2658 if (e_ref_tag != a_ref_tag) { 2659 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2660 0x10, 0x3); 2661 set_driver_byte(cmd, DRIVER_SENSE); 2662 set_host_byte(cmd, DID_ABORT); 2663 cmd->result |= SAM_STAT_CHECK_CONDITION; 2664 return 1; 2665 } 2666 2667 /* check appl tag */ 2668 if (e_app_tag != a_app_tag) { 2669 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2670 0x10, 0x2); 2671 set_driver_byte(cmd, DRIVER_SENSE); 2672 set_host_byte(cmd, DID_ABORT); 2673 cmd->result |= SAM_STAT_CHECK_CONDITION; 2674 return 1; 2675 } 2676 2677 return 1; 2678 } 2679 2680 static void 2681 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 2682 struct req_que *req, uint32_t index) 2683 { 2684 struct qla_hw_data *ha = vha->hw; 2685 srb_t *sp; 2686 uint16_t comp_status; 2687 uint16_t scsi_status; 2688 uint16_t thread_id; 2689 uint32_t rval = EXT_STATUS_OK; 2690 struct bsg_job *bsg_job = NULL; 2691 struct fc_bsg_request *bsg_request; 2692 struct fc_bsg_reply *bsg_reply; 2693 sts_entry_t *sts = pkt; 2694 struct sts_entry_24xx *sts24 = pkt; 2695 2696 /* Validate handle. */ 2697 if (index >= req->num_outstanding_cmds) { 2698 ql_log(ql_log_warn, vha, 0x70af, 2699 "Invalid SCSI completion handle 0x%x.\n", index); 2700 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2701 return; 2702 } 2703 2704 sp = req->outstanding_cmds[index]; 2705 if (!sp) { 2706 ql_log(ql_log_warn, vha, 0x70b0, 2707 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 2708 req->id, index); 2709 2710 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2711 return; 2712 } 2713 2714 /* Free outstanding command slot. */ 2715 req->outstanding_cmds[index] = NULL; 2716 bsg_job = sp->u.bsg_job; 2717 bsg_request = bsg_job->request; 2718 bsg_reply = bsg_job->reply; 2719 2720 if (IS_FWI2_CAPABLE(ha)) { 2721 comp_status = le16_to_cpu(sts24->comp_status); 2722 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2723 } else { 2724 comp_status = le16_to_cpu(sts->comp_status); 2725 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2726 } 2727 2728 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 2729 switch (comp_status) { 2730 case CS_COMPLETE: 2731 if (scsi_status == 0) { 2732 bsg_reply->reply_payload_rcv_len = 2733 bsg_job->reply_payload.payload_len; 2734 vha->qla_stats.input_bytes += 2735 bsg_reply->reply_payload_rcv_len; 2736 vha->qla_stats.input_requests++; 2737 rval = EXT_STATUS_OK; 2738 } 2739 goto done; 2740 2741 case CS_DATA_OVERRUN: 2742 ql_dbg(ql_dbg_user, vha, 0x70b1, 2743 "Command completed with data overrun thread_id=%d\n", 2744 thread_id); 2745 rval = EXT_STATUS_DATA_OVERRUN; 2746 break; 2747 2748 case CS_DATA_UNDERRUN: 2749 ql_dbg(ql_dbg_user, vha, 0x70b2, 2750 "Command completed with data underrun thread_id=%d\n", 2751 thread_id); 2752 rval = EXT_STATUS_DATA_UNDERRUN; 2753 break; 2754 case CS_BIDIR_RD_OVERRUN: 2755 ql_dbg(ql_dbg_user, vha, 0x70b3, 2756 "Command completed with read data overrun thread_id=%d\n", 2757 thread_id); 2758 rval = EXT_STATUS_DATA_OVERRUN; 2759 break; 2760 2761 case CS_BIDIR_RD_WR_OVERRUN: 2762 ql_dbg(ql_dbg_user, vha, 0x70b4, 2763 "Command completed with read and write data overrun " 2764 "thread_id=%d\n", thread_id); 2765 rval = EXT_STATUS_DATA_OVERRUN; 2766 break; 2767 2768 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 2769 ql_dbg(ql_dbg_user, vha, 0x70b5, 2770 "Command completed with read data over and write data " 2771 "underrun thread_id=%d\n", thread_id); 2772 rval = EXT_STATUS_DATA_OVERRUN; 2773 break; 2774 2775 case CS_BIDIR_RD_UNDERRUN: 2776 ql_dbg(ql_dbg_user, vha, 0x70b6, 2777 "Command completed with read data underrun " 2778 "thread_id=%d\n", thread_id); 2779 rval = EXT_STATUS_DATA_UNDERRUN; 2780 break; 2781 2782 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 2783 ql_dbg(ql_dbg_user, vha, 0x70b7, 2784 "Command completed with read data under and write data " 2785 "overrun thread_id=%d\n", thread_id); 2786 rval = EXT_STATUS_DATA_UNDERRUN; 2787 break; 2788 2789 case CS_BIDIR_RD_WR_UNDERRUN: 2790 ql_dbg(ql_dbg_user, vha, 0x70b8, 2791 "Command completed with read and write data underrun " 2792 "thread_id=%d\n", thread_id); 2793 rval = EXT_STATUS_DATA_UNDERRUN; 2794 break; 2795 2796 case CS_BIDIR_DMA: 2797 ql_dbg(ql_dbg_user, vha, 0x70b9, 2798 "Command completed with data DMA error thread_id=%d\n", 2799 thread_id); 2800 rval = EXT_STATUS_DMA_ERR; 2801 break; 2802 2803 case CS_TIMEOUT: 2804 ql_dbg(ql_dbg_user, vha, 0x70ba, 2805 "Command completed with timeout thread_id=%d\n", 2806 thread_id); 2807 rval = EXT_STATUS_TIMEOUT; 2808 break; 2809 default: 2810 ql_dbg(ql_dbg_user, vha, 0x70bb, 2811 "Command completed with completion status=0x%x " 2812 "thread_id=%d\n", comp_status, thread_id); 2813 rval = EXT_STATUS_ERR; 2814 break; 2815 } 2816 bsg_reply->reply_payload_rcv_len = 0; 2817 2818 done: 2819 /* Return the vendor specific reply to API */ 2820 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2821 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2822 /* Always return DID_OK, bsg will send the vendor specific response 2823 * in this case only */ 2824 sp->done(sp, DID_OK << 16); 2825 2826 } 2827 2828 /** 2829 * qla2x00_status_entry() - Process a Status IOCB entry. 2830 * @vha: SCSI driver HA context 2831 * @rsp: response queue 2832 * @pkt: Entry pointer 2833 */ 2834 static void 2835 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2836 { 2837 srb_t *sp; 2838 fc_port_t *fcport; 2839 struct scsi_cmnd *cp; 2840 sts_entry_t *sts = pkt; 2841 struct sts_entry_24xx *sts24 = pkt; 2842 uint16_t comp_status; 2843 uint16_t scsi_status; 2844 uint16_t ox_id; 2845 uint8_t lscsi_status; 2846 int32_t resid; 2847 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2848 fw_resid_len; 2849 uint8_t *rsp_info, *sense_data; 2850 struct qla_hw_data *ha = vha->hw; 2851 uint32_t handle; 2852 uint16_t que; 2853 struct req_que *req; 2854 int logit = 1; 2855 int res = 0; 2856 uint16_t state_flags = 0; 2857 uint16_t sts_qual = 0; 2858 2859 if (IS_FWI2_CAPABLE(ha)) { 2860 comp_status = le16_to_cpu(sts24->comp_status); 2861 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2862 state_flags = le16_to_cpu(sts24->state_flags); 2863 } else { 2864 comp_status = le16_to_cpu(sts->comp_status); 2865 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2866 } 2867 handle = (uint32_t) LSW(sts->handle); 2868 que = MSW(sts->handle); 2869 req = ha->req_q_map[que]; 2870 2871 /* Check for invalid queue pointer */ 2872 if (req == NULL || 2873 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2874 ql_dbg(ql_dbg_io, vha, 0x3059, 2875 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2876 "que=%u.\n", sts->handle, req, que); 2877 return; 2878 } 2879 2880 /* Validate handle. */ 2881 if (handle < req->num_outstanding_cmds) { 2882 sp = req->outstanding_cmds[handle]; 2883 if (!sp) { 2884 ql_dbg(ql_dbg_io, vha, 0x3075, 2885 "%s(%ld): Already returned command for status handle (0x%x).\n", 2886 __func__, vha->host_no, sts->handle); 2887 return; 2888 } 2889 } else { 2890 ql_dbg(ql_dbg_io, vha, 0x3017, 2891 "Invalid status handle, out of range (0x%x).\n", 2892 sts->handle); 2893 2894 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2895 if (IS_P3P_TYPE(ha)) 2896 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2897 else 2898 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2899 qla2xxx_wake_dpc(vha); 2900 } 2901 return; 2902 } 2903 qla_put_iocbs(sp->qpair, &sp->iores); 2904 2905 if (sp->cmd_type != TYPE_SRB) { 2906 req->outstanding_cmds[handle] = NULL; 2907 ql_dbg(ql_dbg_io, vha, 0x3015, 2908 "Unknown sp->cmd_type %x %p).\n", 2909 sp->cmd_type, sp); 2910 return; 2911 } 2912 2913 /* NVME completion. */ 2914 if (sp->type == SRB_NVME_CMD) { 2915 req->outstanding_cmds[handle] = NULL; 2916 qla24xx_nvme_iocb_entry(vha, req, pkt, sp); 2917 return; 2918 } 2919 2920 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2921 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2922 return; 2923 } 2924 2925 /* Task Management completion. */ 2926 if (sp->type == SRB_TM_CMD) { 2927 qla24xx_tm_iocb_entry(vha, req, pkt); 2928 return; 2929 } 2930 2931 /* Fast path completion. */ 2932 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2933 qla2x00_process_completed_request(vha, req, handle); 2934 2935 return; 2936 } 2937 2938 req->outstanding_cmds[handle] = NULL; 2939 cp = GET_CMD_SP(sp); 2940 if (cp == NULL) { 2941 ql_dbg(ql_dbg_io, vha, 0x3018, 2942 "Command already returned (0x%x/%p).\n", 2943 sts->handle, sp); 2944 2945 return; 2946 } 2947 2948 lscsi_status = scsi_status & STATUS_MASK; 2949 2950 fcport = sp->fcport; 2951 2952 ox_id = 0; 2953 sense_len = par_sense_len = rsp_info_len = resid_len = 2954 fw_resid_len = 0; 2955 if (IS_FWI2_CAPABLE(ha)) { 2956 if (scsi_status & SS_SENSE_LEN_VALID) 2957 sense_len = le32_to_cpu(sts24->sense_len); 2958 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2959 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2960 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2961 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2962 if (comp_status == CS_DATA_UNDERRUN) 2963 fw_resid_len = le32_to_cpu(sts24->residual_len); 2964 rsp_info = sts24->data; 2965 sense_data = sts24->data; 2966 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2967 ox_id = le16_to_cpu(sts24->ox_id); 2968 par_sense_len = sizeof(sts24->data); 2969 sts_qual = le16_to_cpu(sts24->status_qualifier); 2970 } else { 2971 if (scsi_status & SS_SENSE_LEN_VALID) 2972 sense_len = le16_to_cpu(sts->req_sense_length); 2973 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2974 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2975 resid_len = le32_to_cpu(sts->residual_length); 2976 rsp_info = sts->rsp_info; 2977 sense_data = sts->req_sense_data; 2978 par_sense_len = sizeof(sts->req_sense_data); 2979 } 2980 2981 /* Check for any FCP transport errors. */ 2982 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2983 /* Sense data lies beyond any FCP RESPONSE data. */ 2984 if (IS_FWI2_CAPABLE(ha)) { 2985 sense_data += rsp_info_len; 2986 par_sense_len -= rsp_info_len; 2987 } 2988 if (rsp_info_len > 3 && rsp_info[3]) { 2989 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2990 "FCP I/O protocol failure (0x%x/0x%x).\n", 2991 rsp_info_len, rsp_info[3]); 2992 2993 res = DID_BUS_BUSY << 16; 2994 goto out; 2995 } 2996 } 2997 2998 /* Check for overrun. */ 2999 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 3000 scsi_status & SS_RESIDUAL_OVER) 3001 comp_status = CS_DATA_OVERRUN; 3002 3003 /* 3004 * Check retry_delay_timer value if we receive a busy or 3005 * queue full. 3006 */ 3007 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL || 3008 lscsi_status == SAM_STAT_BUSY)) 3009 qla2x00_set_retry_delay_timestamp(fcport, sts_qual); 3010 3011 /* 3012 * Based on Host and scsi status generate status code for Linux 3013 */ 3014 switch (comp_status) { 3015 case CS_COMPLETE: 3016 case CS_QUEUE_FULL: 3017 if (scsi_status == 0) { 3018 res = DID_OK << 16; 3019 break; 3020 } 3021 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 3022 resid = resid_len; 3023 scsi_set_resid(cp, resid); 3024 3025 if (!lscsi_status && 3026 ((unsigned)(scsi_bufflen(cp) - resid) < 3027 cp->underflow)) { 3028 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 3029 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3030 resid, scsi_bufflen(cp)); 3031 3032 res = DID_ERROR << 16; 3033 break; 3034 } 3035 } 3036 res = DID_OK << 16 | lscsi_status; 3037 3038 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3039 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 3040 "QUEUE FULL detected.\n"); 3041 break; 3042 } 3043 logit = 0; 3044 if (lscsi_status != SS_CHECK_CONDITION) 3045 break; 3046 3047 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3048 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3049 break; 3050 3051 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 3052 rsp, res); 3053 break; 3054 3055 case CS_DATA_UNDERRUN: 3056 /* Use F/W calculated residual length. */ 3057 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 3058 scsi_set_resid(cp, resid); 3059 if (scsi_status & SS_RESIDUAL_UNDER) { 3060 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 3061 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 3062 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3063 resid, scsi_bufflen(cp)); 3064 3065 res = DID_ERROR << 16 | lscsi_status; 3066 goto check_scsi_status; 3067 } 3068 3069 if (!lscsi_status && 3070 ((unsigned)(scsi_bufflen(cp) - resid) < 3071 cp->underflow)) { 3072 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 3073 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3074 resid, scsi_bufflen(cp)); 3075 3076 res = DID_ERROR << 16; 3077 break; 3078 } 3079 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 3080 lscsi_status != SAM_STAT_BUSY) { 3081 /* 3082 * scsi status of task set and busy are considered to be 3083 * task not completed. 3084 */ 3085 3086 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 3087 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3088 resid, scsi_bufflen(cp)); 3089 3090 res = DID_ERROR << 16 | lscsi_status; 3091 goto check_scsi_status; 3092 } else { 3093 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 3094 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 3095 scsi_status, lscsi_status); 3096 } 3097 3098 res = DID_OK << 16 | lscsi_status; 3099 logit = 0; 3100 3101 check_scsi_status: 3102 /* 3103 * Check to see if SCSI Status is non zero. If so report SCSI 3104 * Status. 3105 */ 3106 if (lscsi_status != 0) { 3107 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3108 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 3109 "QUEUE FULL detected.\n"); 3110 logit = 1; 3111 break; 3112 } 3113 if (lscsi_status != SS_CHECK_CONDITION) 3114 break; 3115 3116 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3117 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3118 break; 3119 3120 qla2x00_handle_sense(sp, sense_data, par_sense_len, 3121 sense_len, rsp, res); 3122 } 3123 break; 3124 3125 case CS_PORT_LOGGED_OUT: 3126 case CS_PORT_CONFIG_CHG: 3127 case CS_PORT_BUSY: 3128 case CS_INCOMPLETE: 3129 case CS_PORT_UNAVAILABLE: 3130 case CS_TIMEOUT: 3131 case CS_RESET: 3132 3133 /* 3134 * We are going to have the fc class block the rport 3135 * while we try to recover so instruct the mid layer 3136 * to requeue until the class decides how to handle this. 3137 */ 3138 res = DID_TRANSPORT_DISRUPTED << 16; 3139 3140 if (comp_status == CS_TIMEOUT) { 3141 if (IS_FWI2_CAPABLE(ha)) 3142 break; 3143 else if ((le16_to_cpu(sts->status_flags) & 3144 SF_LOGOUT_SENT) == 0) 3145 break; 3146 } 3147 3148 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3149 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 3150 "Port to be marked lost on fcport=%02x%02x%02x, current " 3151 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 3152 fcport->d_id.b.area, fcport->d_id.b.al_pa, 3153 port_state_str[FCS_ONLINE], 3154 comp_status); 3155 3156 qlt_schedule_sess_for_deletion(fcport); 3157 } 3158 3159 break; 3160 3161 case CS_ABORTED: 3162 res = DID_RESET << 16; 3163 break; 3164 3165 case CS_DIF_ERROR: 3166 logit = qla2x00_handle_dif_error(sp, sts24); 3167 res = cp->result; 3168 break; 3169 3170 case CS_TRANSPORT: 3171 res = DID_ERROR << 16; 3172 3173 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 3174 break; 3175 3176 if (state_flags & BIT_4) 3177 scmd_printk(KERN_WARNING, cp, 3178 "Unsupported device '%s' found.\n", 3179 cp->device->vendor); 3180 break; 3181 3182 case CS_DMA: 3183 ql_log(ql_log_info, fcport->vha, 0x3022, 3184 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3185 comp_status, scsi_status, res, vha->host_no, 3186 cp->device->id, cp->device->lun, fcport->d_id.b24, 3187 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3188 resid_len, fw_resid_len, sp, cp); 3189 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, 3190 pkt, sizeof(*sts24)); 3191 res = DID_ERROR << 16; 3192 break; 3193 default: 3194 res = DID_ERROR << 16; 3195 break; 3196 } 3197 3198 out: 3199 if (logit) 3200 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 3201 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " 3202 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " 3203 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3204 comp_status, scsi_status, res, vha->host_no, 3205 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 3206 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 3207 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3208 resid_len, fw_resid_len, sp, cp); 3209 3210 if (rsp->status_srb == NULL) 3211 sp->done(sp, res); 3212 } 3213 3214 /** 3215 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 3216 * @rsp: response queue 3217 * @pkt: Entry pointer 3218 * 3219 * Extended sense data. 3220 */ 3221 static void 3222 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 3223 { 3224 uint8_t sense_sz = 0; 3225 struct qla_hw_data *ha = rsp->hw; 3226 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 3227 srb_t *sp = rsp->status_srb; 3228 struct scsi_cmnd *cp; 3229 uint32_t sense_len; 3230 uint8_t *sense_ptr; 3231 3232 if (!sp || !GET_CMD_SENSE_LEN(sp)) 3233 return; 3234 3235 sense_len = GET_CMD_SENSE_LEN(sp); 3236 sense_ptr = GET_CMD_SENSE_PTR(sp); 3237 3238 cp = GET_CMD_SP(sp); 3239 if (cp == NULL) { 3240 ql_log(ql_log_warn, vha, 0x3025, 3241 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 3242 3243 rsp->status_srb = NULL; 3244 return; 3245 } 3246 3247 if (sense_len > sizeof(pkt->data)) 3248 sense_sz = sizeof(pkt->data); 3249 else 3250 sense_sz = sense_len; 3251 3252 /* Move sense data. */ 3253 if (IS_FWI2_CAPABLE(ha)) 3254 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 3255 memcpy(sense_ptr, pkt->data, sense_sz); 3256 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 3257 sense_ptr, sense_sz); 3258 3259 sense_len -= sense_sz; 3260 sense_ptr += sense_sz; 3261 3262 SET_CMD_SENSE_PTR(sp, sense_ptr); 3263 SET_CMD_SENSE_LEN(sp, sense_len); 3264 3265 /* Place command on done queue. */ 3266 if (sense_len == 0) { 3267 rsp->status_srb = NULL; 3268 sp->done(sp, cp->result); 3269 } 3270 } 3271 3272 /** 3273 * qla2x00_error_entry() - Process an error entry. 3274 * @vha: SCSI driver HA context 3275 * @rsp: response queue 3276 * @pkt: Entry pointer 3277 * return : 1=allow further error analysis. 0=no additional error analysis. 3278 */ 3279 static int 3280 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 3281 { 3282 srb_t *sp; 3283 struct qla_hw_data *ha = vha->hw; 3284 const char func[] = "ERROR-IOCB"; 3285 uint16_t que = MSW(pkt->handle); 3286 struct req_que *req = NULL; 3287 int res = DID_ERROR << 16; 3288 3289 ql_dbg(ql_dbg_async, vha, 0x502a, 3290 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", 3291 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); 3292 3293 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 3294 goto fatal; 3295 3296 req = ha->req_q_map[que]; 3297 3298 if (pkt->entry_status & RF_BUSY) 3299 res = DID_BUS_BUSY << 16; 3300 3301 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) 3302 return 0; 3303 3304 switch (pkt->entry_type) { 3305 case NOTIFY_ACK_TYPE: 3306 case STATUS_TYPE: 3307 case STATUS_CONT_TYPE: 3308 case LOGINOUT_PORT_IOCB_TYPE: 3309 case CT_IOCB_TYPE: 3310 case ELS_IOCB_TYPE: 3311 case ABORT_IOCB_TYPE: 3312 case MBX_IOCB_TYPE: 3313 default: 3314 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3315 if (sp) { 3316 qla_put_iocbs(sp->qpair, &sp->iores); 3317 sp->done(sp, res); 3318 return 0; 3319 } 3320 break; 3321 3322 case ABTS_RESP_24XX: 3323 case CTIO_TYPE7: 3324 case CTIO_CRC2: 3325 return 1; 3326 } 3327 fatal: 3328 ql_log(ql_log_warn, vha, 0x5030, 3329 "Error entry - invalid handle/queue (%04x).\n", que); 3330 return 0; 3331 } 3332 3333 /** 3334 * qla24xx_mbx_completion() - Process mailbox command completions. 3335 * @vha: SCSI driver HA context 3336 * @mb0: Mailbox0 register 3337 */ 3338 static void 3339 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 3340 { 3341 uint16_t cnt; 3342 uint32_t mboxes; 3343 __le16 __iomem *wptr; 3344 struct qla_hw_data *ha = vha->hw; 3345 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3346 3347 /* Read all mbox registers? */ 3348 WARN_ON_ONCE(ha->mbx_count > 32); 3349 mboxes = (1ULL << ha->mbx_count) - 1; 3350 if (!ha->mcp) 3351 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 3352 else 3353 mboxes = ha->mcp->in_mb; 3354 3355 /* Load return mailbox registers. */ 3356 ha->flags.mbox_int = 1; 3357 ha->mailbox_out[0] = mb0; 3358 mboxes >>= 1; 3359 wptr = ®->mailbox1; 3360 3361 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 3362 if (mboxes & BIT_0) 3363 ha->mailbox_out[cnt] = rd_reg_word(wptr); 3364 3365 mboxes >>= 1; 3366 wptr++; 3367 } 3368 } 3369 3370 static void 3371 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 3372 struct abort_entry_24xx *pkt) 3373 { 3374 const char func[] = "ABT_IOCB"; 3375 srb_t *sp; 3376 struct srb_iocb *abt; 3377 3378 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3379 if (!sp) 3380 return; 3381 3382 abt = &sp->u.iocb_cmd; 3383 abt->u.abt.comp_status = pkt->nport_handle; 3384 sp->done(sp, 0); 3385 } 3386 3387 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, 3388 struct pt_ls4_request *pkt, struct req_que *req) 3389 { 3390 srb_t *sp; 3391 const char func[] = "LS4_IOCB"; 3392 uint16_t comp_status; 3393 3394 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3395 if (!sp) 3396 return; 3397 3398 comp_status = le16_to_cpu(pkt->status); 3399 sp->done(sp, comp_status); 3400 } 3401 3402 static void qla24xx_process_mbx_iocb_response(struct scsi_qla_host *vha, 3403 struct rsp_que *rsp, struct sts_entry_24xx *pkt) 3404 { 3405 struct qla_hw_data *ha = vha->hw; 3406 srb_t *sp; 3407 static const char func[] = "MBX-IOCB2"; 3408 3409 sp = qla2x00_get_sp_from_handle(vha, func, rsp->req, pkt); 3410 if (!sp) 3411 return; 3412 3413 if (sp->type == SRB_SCSI_CMD || 3414 sp->type == SRB_NVME_CMD || 3415 sp->type == SRB_TM_CMD) { 3416 ql_log(ql_log_warn, vha, 0x509d, 3417 "Inconsistent event entry type %d\n", sp->type); 3418 if (IS_P3P_TYPE(ha)) 3419 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3420 else 3421 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3422 return; 3423 } 3424 3425 qla24xx_mbx_iocb_entry(vha, rsp->req, (struct mbx_24xx_entry *)pkt); 3426 } 3427 3428 /** 3429 * qla24xx_process_response_queue() - Process response queue entries. 3430 * @vha: SCSI driver HA context 3431 * @rsp: response queue 3432 */ 3433 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 3434 struct rsp_que *rsp) 3435 { 3436 struct sts_entry_24xx *pkt; 3437 struct qla_hw_data *ha = vha->hw; 3438 struct purex_entry_24xx *purex_entry; 3439 struct purex_item *pure_item; 3440 3441 if (!ha->flags.fw_started) 3442 return; 3443 3444 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { 3445 rsp->qpair->rcv_intr = 1; 3446 qla_cpu_update(rsp->qpair, smp_processor_id()); 3447 } 3448 3449 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 3450 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 3451 3452 rsp->ring_index++; 3453 if (rsp->ring_index == rsp->length) { 3454 rsp->ring_index = 0; 3455 rsp->ring_ptr = rsp->ring; 3456 } else { 3457 rsp->ring_ptr++; 3458 } 3459 3460 if (pkt->entry_status != 0) { 3461 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) 3462 goto process_err; 3463 3464 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3465 wmb(); 3466 continue; 3467 } 3468 process_err: 3469 3470 switch (pkt->entry_type) { 3471 case STATUS_TYPE: 3472 qla2x00_status_entry(vha, rsp, pkt); 3473 break; 3474 case STATUS_CONT_TYPE: 3475 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 3476 break; 3477 case VP_RPT_ID_IOCB_TYPE: 3478 qla24xx_report_id_acquisition(vha, 3479 (struct vp_rpt_id_entry_24xx *)pkt); 3480 break; 3481 case LOGINOUT_PORT_IOCB_TYPE: 3482 qla24xx_logio_entry(vha, rsp->req, 3483 (struct logio_entry_24xx *)pkt); 3484 break; 3485 case CT_IOCB_TYPE: 3486 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 3487 break; 3488 case ELS_IOCB_TYPE: 3489 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 3490 break; 3491 case ABTS_RECV_24XX: 3492 if (qla_ini_mode_enabled(vha)) { 3493 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3494 if (!pure_item) 3495 break; 3496 qla24xx_queue_purex_item(vha, pure_item, 3497 qla24xx_process_abts); 3498 break; 3499 } 3500 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3501 IS_QLA28XX(ha)) { 3502 /* ensure that the ATIO queue is empty */ 3503 qlt_handle_abts_recv(vha, rsp, 3504 (response_t *)pkt); 3505 break; 3506 } else { 3507 qlt_24xx_process_atio_queue(vha, 1); 3508 } 3509 fallthrough; 3510 case ABTS_RESP_24XX: 3511 case CTIO_TYPE7: 3512 case CTIO_CRC2: 3513 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); 3514 break; 3515 case PT_LS4_REQUEST: 3516 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, 3517 rsp->req); 3518 break; 3519 case NOTIFY_ACK_TYPE: 3520 if (pkt->handle == QLA_TGT_SKIP_HANDLE) 3521 qlt_response_pkt_all_vps(vha, rsp, 3522 (response_t *)pkt); 3523 else 3524 qla24xxx_nack_iocb_entry(vha, rsp->req, 3525 (struct nack_to_isp *)pkt); 3526 break; 3527 case MARKER_TYPE: 3528 /* Do nothing in this case, this check is to prevent it 3529 * from falling into default case 3530 */ 3531 break; 3532 case ABORT_IOCB_TYPE: 3533 qla24xx_abort_iocb_entry(vha, rsp->req, 3534 (struct abort_entry_24xx *)pkt); 3535 break; 3536 case MBX_IOCB_TYPE: 3537 qla24xx_process_mbx_iocb_response(vha, rsp, pkt); 3538 break; 3539 case VP_CTRL_IOCB_TYPE: 3540 qla_ctrlvp_completed(vha, rsp->req, 3541 (struct vp_ctrl_entry_24xx *)pkt); 3542 break; 3543 case PUREX_IOCB_TYPE: 3544 purex_entry = (void *)pkt; 3545 switch (purex_entry->els_frame_payload[3]) { 3546 case ELS_RDP: 3547 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3548 if (!pure_item) 3549 break; 3550 qla24xx_queue_purex_item(vha, pure_item, 3551 qla24xx_process_purex_rdp); 3552 break; 3553 case ELS_FPIN: 3554 if (!vha->hw->flags.scm_enabled) { 3555 ql_log(ql_log_warn, vha, 0x5094, 3556 "SCM not active for this port\n"); 3557 break; 3558 } 3559 pure_item = qla27xx_copy_fpin_pkt(vha, 3560 (void **)&pkt, &rsp); 3561 if (!pure_item) 3562 break; 3563 qla24xx_queue_purex_item(vha, pure_item, 3564 qla27xx_process_purex_fpin); 3565 break; 3566 3567 default: 3568 ql_log(ql_log_warn, vha, 0x509c, 3569 "Discarding ELS Request opcode 0x%x\n", 3570 purex_entry->els_frame_payload[3]); 3571 } 3572 break; 3573 default: 3574 /* Type Not Supported. */ 3575 ql_dbg(ql_dbg_async, vha, 0x5042, 3576 "Received unknown response pkt type 0x%x entry status=%x.\n", 3577 pkt->entry_type, pkt->entry_status); 3578 break; 3579 } 3580 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3581 wmb(); 3582 } 3583 3584 /* Adjust ring index */ 3585 if (IS_P3P_TYPE(ha)) { 3586 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 3587 3588 wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index); 3589 } else { 3590 wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); 3591 } 3592 } 3593 3594 static void 3595 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 3596 { 3597 int rval; 3598 uint32_t cnt; 3599 struct qla_hw_data *ha = vha->hw; 3600 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3601 3602 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 3603 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 3604 return; 3605 3606 rval = QLA_SUCCESS; 3607 wrt_reg_dword(®->iobase_addr, 0x7C00); 3608 rd_reg_dword(®->iobase_addr); 3609 wrt_reg_dword(®->iobase_window, 0x0001); 3610 for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3611 rval == QLA_SUCCESS; cnt--) { 3612 if (cnt) { 3613 wrt_reg_dword(®->iobase_window, 0x0001); 3614 udelay(10); 3615 } else 3616 rval = QLA_FUNCTION_TIMEOUT; 3617 } 3618 if (rval == QLA_SUCCESS) 3619 goto next_test; 3620 3621 rval = QLA_SUCCESS; 3622 wrt_reg_dword(®->iobase_window, 0x0003); 3623 for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && 3624 rval == QLA_SUCCESS; cnt--) { 3625 if (cnt) { 3626 wrt_reg_dword(®->iobase_window, 0x0003); 3627 udelay(10); 3628 } else 3629 rval = QLA_FUNCTION_TIMEOUT; 3630 } 3631 if (rval != QLA_SUCCESS) 3632 goto done; 3633 3634 next_test: 3635 if (rd_reg_dword(®->iobase_c8) & BIT_3) 3636 ql_log(ql_log_info, vha, 0x504c, 3637 "Additional code -- 0x55AA.\n"); 3638 3639 done: 3640 wrt_reg_dword(®->iobase_window, 0x0000); 3641 rd_reg_dword(®->iobase_window); 3642 } 3643 3644 /** 3645 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 3646 * @irq: interrupt number 3647 * @dev_id: SCSI driver HA context 3648 * 3649 * Called by system whenever the host adapter generates an interrupt. 3650 * 3651 * Returns handled flag. 3652 */ 3653 irqreturn_t 3654 qla24xx_intr_handler(int irq, void *dev_id) 3655 { 3656 scsi_qla_host_t *vha; 3657 struct qla_hw_data *ha; 3658 struct device_reg_24xx __iomem *reg; 3659 int status; 3660 unsigned long iter; 3661 uint32_t stat; 3662 uint32_t hccr; 3663 uint16_t mb[8]; 3664 struct rsp_que *rsp; 3665 unsigned long flags; 3666 bool process_atio = false; 3667 3668 rsp = (struct rsp_que *) dev_id; 3669 if (!rsp) { 3670 ql_log(ql_log_info, NULL, 0x5059, 3671 "%s: NULL response queue pointer.\n", __func__); 3672 return IRQ_NONE; 3673 } 3674 3675 ha = rsp->hw; 3676 reg = &ha->iobase->isp24; 3677 status = 0; 3678 3679 if (unlikely(pci_channel_offline(ha->pdev))) 3680 return IRQ_HANDLED; 3681 3682 spin_lock_irqsave(&ha->hardware_lock, flags); 3683 vha = pci_get_drvdata(ha->pdev); 3684 for (iter = 50; iter--; ) { 3685 stat = rd_reg_dword(®->host_status); 3686 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3687 break; 3688 if (stat & HSRX_RISC_PAUSED) { 3689 if (unlikely(pci_channel_offline(ha->pdev))) 3690 break; 3691 3692 hccr = rd_reg_dword(®->hccr); 3693 3694 ql_log(ql_log_warn, vha, 0x504b, 3695 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3696 hccr); 3697 3698 qla2xxx_check_risc_status(vha); 3699 3700 ha->isp_ops->fw_dump(vha); 3701 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3702 break; 3703 } else if ((stat & HSRX_RISC_INT) == 0) 3704 break; 3705 3706 switch (stat & 0xff) { 3707 case INTR_ROM_MB_SUCCESS: 3708 case INTR_ROM_MB_FAILED: 3709 case INTR_MB_SUCCESS: 3710 case INTR_MB_FAILED: 3711 qla24xx_mbx_completion(vha, MSW(stat)); 3712 status |= MBX_INTERRUPT; 3713 3714 break; 3715 case INTR_ASYNC_EVENT: 3716 mb[0] = MSW(stat); 3717 mb[1] = rd_reg_word(®->mailbox1); 3718 mb[2] = rd_reg_word(®->mailbox2); 3719 mb[3] = rd_reg_word(®->mailbox3); 3720 qla2x00_async_event(vha, rsp, mb); 3721 break; 3722 case INTR_RSP_QUE_UPDATE: 3723 case INTR_RSP_QUE_UPDATE_83XX: 3724 qla24xx_process_response_queue(vha, rsp); 3725 break; 3726 case INTR_ATIO_QUE_UPDATE_27XX: 3727 case INTR_ATIO_QUE_UPDATE: 3728 process_atio = true; 3729 break; 3730 case INTR_ATIO_RSP_QUE_UPDATE: 3731 process_atio = true; 3732 qla24xx_process_response_queue(vha, rsp); 3733 break; 3734 default: 3735 ql_dbg(ql_dbg_async, vha, 0x504f, 3736 "Unrecognized interrupt type (%d).\n", stat * 0xff); 3737 break; 3738 } 3739 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3740 rd_reg_dword_relaxed(®->hccr); 3741 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 3742 ndelay(3500); 3743 } 3744 qla2x00_handle_mbx_completion(ha, status); 3745 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3746 3747 if (process_atio) { 3748 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3749 qlt_24xx_process_atio_queue(vha, 0); 3750 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3751 } 3752 3753 return IRQ_HANDLED; 3754 } 3755 3756 static irqreturn_t 3757 qla24xx_msix_rsp_q(int irq, void *dev_id) 3758 { 3759 struct qla_hw_data *ha; 3760 struct rsp_que *rsp; 3761 struct device_reg_24xx __iomem *reg; 3762 struct scsi_qla_host *vha; 3763 unsigned long flags; 3764 3765 rsp = (struct rsp_que *) dev_id; 3766 if (!rsp) { 3767 ql_log(ql_log_info, NULL, 0x505a, 3768 "%s: NULL response queue pointer.\n", __func__); 3769 return IRQ_NONE; 3770 } 3771 ha = rsp->hw; 3772 reg = &ha->iobase->isp24; 3773 3774 spin_lock_irqsave(&ha->hardware_lock, flags); 3775 3776 vha = pci_get_drvdata(ha->pdev); 3777 qla24xx_process_response_queue(vha, rsp); 3778 if (!ha->flags.disable_msix_handshake) { 3779 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3780 rd_reg_dword_relaxed(®->hccr); 3781 } 3782 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3783 3784 return IRQ_HANDLED; 3785 } 3786 3787 static irqreturn_t 3788 qla24xx_msix_default(int irq, void *dev_id) 3789 { 3790 scsi_qla_host_t *vha; 3791 struct qla_hw_data *ha; 3792 struct rsp_que *rsp; 3793 struct device_reg_24xx __iomem *reg; 3794 int status; 3795 uint32_t stat; 3796 uint32_t hccr; 3797 uint16_t mb[8]; 3798 unsigned long flags; 3799 bool process_atio = false; 3800 3801 rsp = (struct rsp_que *) dev_id; 3802 if (!rsp) { 3803 ql_log(ql_log_info, NULL, 0x505c, 3804 "%s: NULL response queue pointer.\n", __func__); 3805 return IRQ_NONE; 3806 } 3807 ha = rsp->hw; 3808 reg = &ha->iobase->isp24; 3809 status = 0; 3810 3811 spin_lock_irqsave(&ha->hardware_lock, flags); 3812 vha = pci_get_drvdata(ha->pdev); 3813 do { 3814 stat = rd_reg_dword(®->host_status); 3815 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3816 break; 3817 if (stat & HSRX_RISC_PAUSED) { 3818 if (unlikely(pci_channel_offline(ha->pdev))) 3819 break; 3820 3821 hccr = rd_reg_dword(®->hccr); 3822 3823 ql_log(ql_log_info, vha, 0x5050, 3824 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3825 hccr); 3826 3827 qla2xxx_check_risc_status(vha); 3828 3829 ha->isp_ops->fw_dump(vha); 3830 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3831 break; 3832 } else if ((stat & HSRX_RISC_INT) == 0) 3833 break; 3834 3835 switch (stat & 0xff) { 3836 case INTR_ROM_MB_SUCCESS: 3837 case INTR_ROM_MB_FAILED: 3838 case INTR_MB_SUCCESS: 3839 case INTR_MB_FAILED: 3840 qla24xx_mbx_completion(vha, MSW(stat)); 3841 status |= MBX_INTERRUPT; 3842 3843 break; 3844 case INTR_ASYNC_EVENT: 3845 mb[0] = MSW(stat); 3846 mb[1] = rd_reg_word(®->mailbox1); 3847 mb[2] = rd_reg_word(®->mailbox2); 3848 mb[3] = rd_reg_word(®->mailbox3); 3849 qla2x00_async_event(vha, rsp, mb); 3850 break; 3851 case INTR_RSP_QUE_UPDATE: 3852 case INTR_RSP_QUE_UPDATE_83XX: 3853 qla24xx_process_response_queue(vha, rsp); 3854 break; 3855 case INTR_ATIO_QUE_UPDATE_27XX: 3856 case INTR_ATIO_QUE_UPDATE: 3857 process_atio = true; 3858 break; 3859 case INTR_ATIO_RSP_QUE_UPDATE: 3860 process_atio = true; 3861 qla24xx_process_response_queue(vha, rsp); 3862 break; 3863 default: 3864 ql_dbg(ql_dbg_async, vha, 0x5051, 3865 "Unrecognized interrupt type (%d).\n", stat & 0xff); 3866 break; 3867 } 3868 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3869 } while (0); 3870 qla2x00_handle_mbx_completion(ha, status); 3871 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3872 3873 if (process_atio) { 3874 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 3875 qlt_24xx_process_atio_queue(vha, 0); 3876 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 3877 } 3878 3879 return IRQ_HANDLED; 3880 } 3881 3882 irqreturn_t 3883 qla2xxx_msix_rsp_q(int irq, void *dev_id) 3884 { 3885 struct qla_hw_data *ha; 3886 struct qla_qpair *qpair; 3887 3888 qpair = dev_id; 3889 if (!qpair) { 3890 ql_log(ql_log_info, NULL, 0x505b, 3891 "%s: NULL response queue pointer.\n", __func__); 3892 return IRQ_NONE; 3893 } 3894 ha = qpair->hw; 3895 3896 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); 3897 3898 return IRQ_HANDLED; 3899 } 3900 3901 irqreturn_t 3902 qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) 3903 { 3904 struct qla_hw_data *ha; 3905 struct qla_qpair *qpair; 3906 struct device_reg_24xx __iomem *reg; 3907 unsigned long flags; 3908 3909 qpair = dev_id; 3910 if (!qpair) { 3911 ql_log(ql_log_info, NULL, 0x505b, 3912 "%s: NULL response queue pointer.\n", __func__); 3913 return IRQ_NONE; 3914 } 3915 ha = qpair->hw; 3916 3917 reg = &ha->iobase->isp24; 3918 spin_lock_irqsave(&ha->hardware_lock, flags); 3919 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 3920 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3921 3922 queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); 3923 3924 return IRQ_HANDLED; 3925 } 3926 3927 /* Interrupt handling helpers. */ 3928 3929 struct qla_init_msix_entry { 3930 const char *name; 3931 irq_handler_t handler; 3932 }; 3933 3934 static const struct qla_init_msix_entry msix_entries[] = { 3935 { "default", qla24xx_msix_default }, 3936 { "rsp_q", qla24xx_msix_rsp_q }, 3937 { "atio_q", qla83xx_msix_atio_q }, 3938 { "qpair_multiq", qla2xxx_msix_rsp_q }, 3939 { "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs }, 3940 }; 3941 3942 static const struct qla_init_msix_entry qla82xx_msix_entries[] = { 3943 { "qla2xxx (default)", qla82xx_msix_default }, 3944 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3945 }; 3946 3947 static int 3948 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3949 { 3950 int i, ret; 3951 struct qla_msix_entry *qentry; 3952 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3953 int min_vecs = QLA_BASE_VECTORS; 3954 struct irq_affinity desc = { 3955 .pre_vectors = QLA_BASE_VECTORS, 3956 }; 3957 3958 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 3959 IS_ATIO_MSIX_CAPABLE(ha)) { 3960 desc.pre_vectors++; 3961 min_vecs++; 3962 } 3963 3964 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { 3965 /* user wants to control IRQ setting for target mode */ 3966 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, 3967 ha->msix_count, PCI_IRQ_MSIX); 3968 } else 3969 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, 3970 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 3971 &desc); 3972 3973 if (ret < 0) { 3974 ql_log(ql_log_fatal, vha, 0x00c7, 3975 "MSI-X: Failed to enable support, " 3976 "giving up -- %d/%d.\n", 3977 ha->msix_count, ret); 3978 goto msix_out; 3979 } else if (ret < ha->msix_count) { 3980 ql_log(ql_log_info, vha, 0x00c6, 3981 "MSI-X: Using %d vectors\n", ret); 3982 ha->msix_count = ret; 3983 /* Recalculate queue values */ 3984 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { 3985 ha->max_req_queues = ha->msix_count - 1; 3986 3987 /* ATIOQ needs 1 vector. That's 1 less QPair */ 3988 if (QLA_TGT_MODE_ENABLED()) 3989 ha->max_req_queues--; 3990 3991 ha->max_rsp_queues = ha->max_req_queues; 3992 3993 ha->max_qpairs = ha->max_req_queues - 1; 3994 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, 3995 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 3996 } 3997 } 3998 vha->irq_offset = desc.pre_vectors; 3999 ha->msix_entries = kcalloc(ha->msix_count, 4000 sizeof(struct qla_msix_entry), 4001 GFP_KERNEL); 4002 if (!ha->msix_entries) { 4003 ql_log(ql_log_fatal, vha, 0x00c8, 4004 "Failed to allocate memory for ha->msix_entries.\n"); 4005 ret = -ENOMEM; 4006 goto free_irqs; 4007 } 4008 ha->flags.msix_enabled = 1; 4009 4010 for (i = 0; i < ha->msix_count; i++) { 4011 qentry = &ha->msix_entries[i]; 4012 qentry->vector = pci_irq_vector(ha->pdev, i); 4013 qentry->entry = i; 4014 qentry->have_irq = 0; 4015 qentry->in_use = 0; 4016 qentry->handle = NULL; 4017 } 4018 4019 /* Enable MSI-X vectors for the base queue */ 4020 for (i = 0; i < QLA_BASE_VECTORS; i++) { 4021 qentry = &ha->msix_entries[i]; 4022 qentry->handle = rsp; 4023 rsp->msix = qentry; 4024 scnprintf(qentry->name, sizeof(qentry->name), 4025 "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name); 4026 if (IS_P3P_TYPE(ha)) 4027 ret = request_irq(qentry->vector, 4028 qla82xx_msix_entries[i].handler, 4029 0, qla82xx_msix_entries[i].name, rsp); 4030 else 4031 ret = request_irq(qentry->vector, 4032 msix_entries[i].handler, 4033 0, qentry->name, rsp); 4034 if (ret) 4035 goto msix_register_fail; 4036 qentry->have_irq = 1; 4037 qentry->in_use = 1; 4038 } 4039 4040 /* 4041 * If target mode is enable, also request the vector for the ATIO 4042 * queue. 4043 */ 4044 if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) && 4045 IS_ATIO_MSIX_CAPABLE(ha)) { 4046 qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; 4047 rsp->msix = qentry; 4048 qentry->handle = rsp; 4049 scnprintf(qentry->name, sizeof(qentry->name), 4050 "qla2xxx%lu_%s", vha->host_no, 4051 msix_entries[QLA_ATIO_VECTOR].name); 4052 qentry->in_use = 1; 4053 ret = request_irq(qentry->vector, 4054 msix_entries[QLA_ATIO_VECTOR].handler, 4055 0, qentry->name, rsp); 4056 qentry->have_irq = 1; 4057 } 4058 4059 msix_register_fail: 4060 if (ret) { 4061 ql_log(ql_log_fatal, vha, 0x00cb, 4062 "MSI-X: unable to register handler -- %x/%d.\n", 4063 qentry->vector, ret); 4064 qla2x00_free_irqs(vha); 4065 ha->mqenable = 0; 4066 goto msix_out; 4067 } 4068 4069 /* Enable MSI-X vector for response queue update for queue 0 */ 4070 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 4071 if (ha->msixbase && ha->mqiobase && 4072 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 4073 ql2xmqsupport)) 4074 ha->mqenable = 1; 4075 } else 4076 if (ha->mqiobase && 4077 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 4078 ql2xmqsupport)) 4079 ha->mqenable = 1; 4080 ql_dbg(ql_dbg_multiq, vha, 0xc005, 4081 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 4082 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 4083 ql_dbg(ql_dbg_init, vha, 0x0055, 4084 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 4085 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 4086 4087 msix_out: 4088 return ret; 4089 4090 free_irqs: 4091 pci_free_irq_vectors(ha->pdev); 4092 goto msix_out; 4093 } 4094 4095 int 4096 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 4097 { 4098 int ret = QLA_FUNCTION_FAILED; 4099 device_reg_t *reg = ha->iobase; 4100 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4101 4102 /* If possible, enable MSI-X. */ 4103 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) && 4104 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && 4105 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))) 4106 goto skip_msi; 4107 4108 if (ql2xenablemsix == 2) 4109 goto skip_msix; 4110 4111 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 4112 (ha->pdev->subsystem_device == 0x7040 || 4113 ha->pdev->subsystem_device == 0x7041 || 4114 ha->pdev->subsystem_device == 0x1705)) { 4115 ql_log(ql_log_warn, vha, 0x0034, 4116 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 4117 ha->pdev->subsystem_vendor, 4118 ha->pdev->subsystem_device); 4119 goto skip_msi; 4120 } 4121 4122 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 4123 ql_log(ql_log_warn, vha, 0x0035, 4124 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 4125 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 4126 goto skip_msix; 4127 } 4128 4129 ret = qla24xx_enable_msix(ha, rsp); 4130 if (!ret) { 4131 ql_dbg(ql_dbg_init, vha, 0x0036, 4132 "MSI-X: Enabled (0x%X, 0x%X).\n", 4133 ha->chip_revision, ha->fw_attributes); 4134 goto clear_risc_ints; 4135 } 4136 4137 skip_msix: 4138 4139 ql_log(ql_log_info, vha, 0x0037, 4140 "Falling back-to MSI mode -- ret=%d.\n", ret); 4141 4142 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 4143 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 4144 !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) 4145 goto skip_msi; 4146 4147 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); 4148 if (ret > 0) { 4149 ql_dbg(ql_dbg_init, vha, 0x0038, 4150 "MSI: Enabled.\n"); 4151 ha->flags.msi_enabled = 1; 4152 } else 4153 ql_log(ql_log_warn, vha, 0x0039, 4154 "Falling back-to INTa mode -- ret=%d.\n", ret); 4155 skip_msi: 4156 4157 /* Skip INTx on ISP82xx. */ 4158 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 4159 return QLA_FUNCTION_FAILED; 4160 4161 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 4162 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 4163 QLA2XXX_DRIVER_NAME, rsp); 4164 if (ret) { 4165 ql_log(ql_log_warn, vha, 0x003a, 4166 "Failed to reserve interrupt %d already in use.\n", 4167 ha->pdev->irq); 4168 goto fail; 4169 } else if (!ha->flags.msi_enabled) { 4170 ql_dbg(ql_dbg_init, vha, 0x0125, 4171 "INTa mode: Enabled.\n"); 4172 ha->flags.mr_intr_valid = 1; 4173 } 4174 4175 clear_risc_ints: 4176 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 4177 goto fail; 4178 4179 spin_lock_irq(&ha->hardware_lock); 4180 wrt_reg_word(®->isp.semaphore, 0); 4181 spin_unlock_irq(&ha->hardware_lock); 4182 4183 fail: 4184 return ret; 4185 } 4186 4187 void 4188 qla2x00_free_irqs(scsi_qla_host_t *vha) 4189 { 4190 struct qla_hw_data *ha = vha->hw; 4191 struct rsp_que *rsp; 4192 struct qla_msix_entry *qentry; 4193 int i; 4194 4195 /* 4196 * We need to check that ha->rsp_q_map is valid in case we are called 4197 * from a probe failure context. 4198 */ 4199 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 4200 goto free_irqs; 4201 rsp = ha->rsp_q_map[0]; 4202 4203 if (ha->flags.msix_enabled) { 4204 for (i = 0; i < ha->msix_count; i++) { 4205 qentry = &ha->msix_entries[i]; 4206 if (qentry->have_irq) { 4207 irq_set_affinity_notifier(qentry->vector, NULL); 4208 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); 4209 } 4210 } 4211 kfree(ha->msix_entries); 4212 ha->msix_entries = NULL; 4213 ha->flags.msix_enabled = 0; 4214 ql_dbg(ql_dbg_init, vha, 0x0042, 4215 "Disabled MSI-X.\n"); 4216 } else { 4217 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 4218 } 4219 4220 free_irqs: 4221 pci_free_irq_vectors(ha->pdev); 4222 } 4223 4224 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 4225 struct qla_msix_entry *msix, int vector_type) 4226 { 4227 const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 4228 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4229 int ret; 4230 4231 scnprintf(msix->name, sizeof(msix->name), 4232 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); 4233 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); 4234 if (ret) { 4235 ql_log(ql_log_fatal, vha, 0x00e6, 4236 "MSI-X: Unable to register handler -- %x/%d.\n", 4237 msix->vector, ret); 4238 return ret; 4239 } 4240 msix->have_irq = 1; 4241 msix->handle = qpair; 4242 return ret; 4243 } 4244