// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_target.h"
#include "qla_gbl.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
	struct purex_item *item);
static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha,
	uint16_t size);
static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha,
	void *pkt);
static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp);

static void
qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
{
	void *pkt = &item->iocb;
	uint16_t pkt_size = item->size;

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d,
	    "%s: Enter\n", __func__);

	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e,
	    "-------- ELS REQ -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f,
	    pkt, pkt_size);

	fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt, 0);
}

const char *const port_state_str[] = {
	[FCS_UNKNOWN]		= "Unknown",
	[FCS_UNCONFIGURED]	= "UNCONFIGURED",
	[FCS_DEVICE_DEAD]	= "DEAD",
	[FCS_DEVICE_LOST]	= "LOST",
	[FCS_ONLINE]		= "ONLINE"
};

static void
qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt)
{
	struct abts_entry_24xx *abts =
	    (struct abts_entry_24xx *)&pkt->iocb;
	struct qla_hw_data *ha = vha->hw;
	struct els_entry_24xx *rsp_els;
	struct abts_entry_24xx *abts_rsp;
	dma_addr_t dma;
	uint32_t fctl;
	int rval;

	ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__);

	ql_log(ql_log_warn, vha, 0x0287,
	    "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n",
	    abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id,
	    abts->seq_id, abts->seq_cnt);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    "-------- ABTS RCV -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287,
	    (uint8_t *)abts, sizeof(*abts));

	rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
	    GFP_KERNEL);
	if (!rsp_els) {
		ql_log(ql_log_warn, vha, 0x0287,
		    "Failed allocate dma buffer ABTS/ELS RSP.\n");
		return;
	}

	/* terminate exchange */
	rsp_els->entry_type = ELS_IOCB_TYPE;
	rsp_els->entry_count = 1;
	rsp_els->nport_handle = cpu_to_le16(~0);
	rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort;
	rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG);
	ql_dbg(ql_dbg_init, vha, 0x0283,
	    "Sending ELS Response to terminate exchange %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283,
	    (uint8_t *)rsp_els, sizeof(*rsp_els));
	rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0288,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (rsp_els->comp_status) {
		ql_log(ql_log_warn, vha, 0x0289,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, rsp_els->comp_status,
		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028a,
		    "%s: abort exchange done.\n", __func__);
	}

	/* send ABTS response */
	abts_rsp = (void *)rsp_els;
	memset(abts_rsp, 0, sizeof(*abts_rsp));
	abts_rsp->entry_type = ABTS_RSP_TYPE;
	abts_rsp->entry_count = 1;
	abts_rsp->nport_handle = abts->nport_handle;
	abts_rsp->vp_idx = abts->vp_idx;
	abts_rsp->sof_type = abts->sof_type & 0xf0;
	abts_rsp->rx_xch_addr = abts->rx_xch_addr;
	abts_rsp->d_id[0] = abts->s_id[0];
	abts_rsp->d_id[1] = abts->s_id[1];
	abts_rsp->d_id[2] = abts->s_id[2];
	abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC;
	abts_rsp->s_id[0] = abts->d_id[0];
	abts_rsp->s_id[1] = abts->d_id[1];
	abts_rsp->s_id[2] = abts->d_id[2];
	abts_rsp->cs_ctl = abts->cs_ctl;
	/* include flipping bit23 in fctl */
	fctl = ~(abts->f_ctl[2] | 0x7F) << 16 |
	    FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT;
	abts_rsp->f_ctl[0] = fctl >> 0 & 0xff;
	abts_rsp->f_ctl[1] = fctl >> 8 & 0xff;
	abts_rsp->f_ctl[2] = fctl >> 16 & 0xff;
	abts_rsp->type = FC_TYPE_BLD;
	abts_rsp->rx_id = abts->rx_id;
	abts_rsp->ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id;
	abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id;
	abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0);
	abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort;
	ql_dbg(ql_dbg_init, vha, 0x028b,
	    "Sending BA ACC response to ABTS %#x...\n",
	    abts->rx_xch_addr_to_abort);
	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    "-------- ELS RSP -------\n");
	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b,
	    (uint8_t *)abts_rsp, sizeof(*abts_rsp));
	rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x028c,
		    "%s: iocb failed to execute -> %x\n", __func__, rval);
	} else if (abts_rsp->comp_status) {
		ql_log(ql_log_warn, vha, 0x028d,
		    "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n",
		    __func__, abts_rsp->comp_status,
		    abts_rsp->payload.error.subcode1,
		    abts_rsp->payload.error.subcode2);
	} else {
		ql_dbg(ql_dbg_init, vha, 0x028ea,
		    "%s: done.\n", __func__);
	}

	dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
}

/**
 * __qla_consume_iocb - this routine is used to tell fw driver has processed
 *   or consumed the head IOCB along with the continuation IOCB's from the
 *   provided respond queue.
 * @vha: host adapter pointer
 * @pkt: pointer to current packet.  On return, this pointer shall move
 *       to the next packet.
 * @rsp: respond queue pointer.
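 *       Each consumed entry is marked RESPONSE_PROCESSED and the queue's
 *       ring_index/ring_ptr are advanced past it.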
 *
 * it is assumed pkt is the head iocb, not the continuation iocb
 */
void __qla_consume_iocb(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp)
{
	struct rsp_que *rsp_q = *rsp;
	response_t *new_pkt;
	uint16_t entry_count_remaining;
	struct purex_entry_24xx *purex = *pkt;

	entry_count_remaining = purex->entry_count;
	while (entry_count_remaining > 0) {
		new_pkt = rsp_q->ring_ptr;
		*pkt = new_pkt;

		rsp_q->ring_index++;
		if (rsp_q->ring_index == rsp_q->length) {
			rsp_q->ring_index = 0;
			rsp_q->ring_ptr = rsp_q->ring;
		} else {
			rsp_q->ring_ptr++;
		}

		new_pkt->signature = RESPONSE_PROCESSED;
		/* flush signature */
		wmb();
		--entry_count_remaining;
	}
}

/**
 * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB
 *    and save to provided buffer
 * @vha: host adapter pointer
 * @pkt: pointer to Purex IOCB
 * @rsp: respond queue
 * @buf: extracted ELS payload copy here
 * @buf_len: buffer length
 */
int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha,
	void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len)
{
	struct purex_entry_24xx *purex = *pkt;
	struct rsp_que *rsp_q = *rsp;
	sts_cont_entry_t *new_pkt;
	uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0;
	uint16_t buffer_copy_offset = 0;
	uint16_t entry_count_remaining;
	u16 tpad;

	entry_count_remaining = purex->entry_count;
	total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF)
	    - PURX_ELS_HEADER_SIZE;

	/*
	 * end of payload may not end on a 4-byte boundary.  Need to
	 * round up / pad for room to swap, before saving data
	 */
	tpad = roundup(total_bytes, 4);

	if (buf_len < tpad) {
		ql_dbg(ql_dbg_async, vha, 0x5084,
		    "%s buffer is too small %d < %d\n",
		    __func__, buf_len, tpad);
		__qla_consume_iocb(vha, pkt, rsp);
		return -EIO;
	}

	pending_bytes = total_bytes = tpad;
	no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ?
	    sizeof(purex->els_frame_payload) : pending_bytes;

	memcpy(buf, &purex->els_frame_payload[0], no_bytes);
	buffer_copy_offset += no_bytes;
	pending_bytes -= no_bytes;
	--entry_count_remaining;

	((response_t *)purex)->signature = RESPONSE_PROCESSED;
	/* flush signature */
	wmb();

	do {
		while ((total_bytes > 0) && (entry_count_remaining > 0)) {
			new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr;
			*pkt = new_pkt;

			if (new_pkt->entry_type != STATUS_CONT_TYPE) {
				ql_log(ql_log_warn, vha, 0x507a,
				    "Unexpected IOCB type, partial data 0x%x\n",
				    buffer_copy_offset);
				break;
			}

			rsp_q->ring_index++;
			if (rsp_q->ring_index == rsp_q->length) {
				rsp_q->ring_index = 0;
				rsp_q->ring_ptr = rsp_q->ring;
			} else {
				rsp_q->ring_ptr++;
			}
			no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
			    sizeof(new_pkt->data) : pending_bytes;
			if ((buffer_copy_offset + no_bytes) <= total_bytes) {
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    no_bytes);
				buffer_copy_offset += no_bytes;
				pending_bytes -= no_bytes;
				--entry_count_remaining;
			} else {
				ql_log(ql_log_warn, vha, 0x5044,
				    "Attempt to copy more that we got, optimizing..%x\n",
				    buffer_copy_offset);
				memcpy((buf + buffer_copy_offset), new_pkt->data,
				    total_bytes - buffer_copy_offset);
			}

			((response_t *)new_pkt)->signature = RESPONSE_PROCESSED;
			/* flush signature */
			wmb();
		}

		if (pending_bytes != 0 || entry_count_remaining != 0) {
			ql_log(ql_log_fatal, vha, 0x508b,
			    "Dropping partial Data, underrun bytes = 0x%x, entry cnts 0x%x\n",
			    total_bytes, entry_count_remaining);
			return -EIO;
		}
	} while (entry_count_remaining > 0);

	be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2);

	return 0;
}

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = rd_reg_word(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (rd_reg_word(&reg->semaphore) & BIT_0) {
			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			wrt_reg_word(&reg->semaphore, 0);
			rd_reg_word(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
			rd_reg_word(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			qla_schedule_eeh_work(vha);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = rd_reg_dword(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_word(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			wrt_reg_word(&reg->hccr, HCCR_RESET_RISC);
			rd_reg_word(&reg->hccr);

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
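			/* mb0 came from MSW(stat); qla2x00_mbx_completion() read the rest. */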
			wrt_reg_word(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
		rd_reg_word_relaxed(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	__le16 __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	WARN_ON_ONCE(ha->mbx_count > 32);
	mboxes = (1ULL << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = rd_reg_word(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	__le16 __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = &reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = &reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = rd_reg_word(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout].
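		 * The requested ACK timeout comes from bits 11:8 of descr;
		 * when it is zero, no acknowledgement work is posted.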
*/ 624 timeout = (descr >> 8) & 0xf; 625 ql_dbg(ql_dbg_async, vha, 0x5022, 626 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 627 vha->host_no, event[aen & 0xff], timeout); 628 629 if (!timeout) 630 return; 631 rval = qla2x00_post_idc_ack_work(vha, mb); 632 if (rval != QLA_SUCCESS) 633 ql_log(ql_log_warn, vha, 0x5023, 634 "IDC failed to post ACK.\n"); 635 break; 636 case MBA_IDC_TIME_EXT: 637 vha->hw->idc_extend_tmo = descr; 638 ql_dbg(ql_dbg_async, vha, 0x5087, 639 "%lu Inter-Driver Communication %s -- " 640 "Extend timeout by=%d.\n", 641 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 642 break; 643 } 644 } 645 646 #define LS_UNKNOWN 2 647 const char * 648 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 649 { 650 static const char *const link_speeds[] = { 651 "1", "2", "?", "4", "8", "16", "32", "64", "10" 652 }; 653 #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) 654 655 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 656 return link_speeds[0]; 657 else if (speed == 0x13) 658 return link_speeds[QLA_LAST_SPEED]; 659 else if (speed < QLA_LAST_SPEED) 660 return link_speeds[speed]; 661 else 662 return link_speeds[LS_UNKNOWN]; 663 } 664 665 static void 666 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 667 { 668 struct qla_hw_data *ha = vha->hw; 669 670 /* 671 * 8200 AEN Interpretation: 672 * mb[0] = AEN code 673 * mb[1] = AEN Reason code 674 * mb[2] = LSW of Peg-Halt Status-1 Register 675 * mb[6] = MSW of Peg-Halt Status-1 Register 676 * mb[3] = LSW of Peg-Halt Status-2 register 677 * mb[7] = MSW of Peg-Halt Status-2 register 678 * mb[4] = IDC Device-State Register value 679 * mb[5] = IDC Driver-Presence Register value 680 */ 681 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 682 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 683 mb[0], mb[1], mb[2], mb[6]); 684 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 685 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 686 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 687 688 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 689 IDC_HEARTBEAT_FAILURE)) { 690 ha->flags.nic_core_hung = 1; 691 ql_log(ql_log_warn, vha, 0x5060, 692 "83XX: F/W Error Reported: Check if reset required.\n"); 693 694 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 695 uint32_t protocol_engine_id, fw_err_code, err_level; 696 697 /* 698 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 699 * - PEG-Halt Status-1 Register: 700 * (LSW = mb[2], MSW = mb[6]) 701 * Bits 0-7 = protocol-engine ID 702 * Bits 8-28 = f/w error code 703 * Bits 29-31 = Error-level 704 * Error-level 0x1 = Non-Fatal error 705 * Error-level 0x2 = Recoverable Fatal error 706 * Error-level 0x4 = UnRecoverable Fatal error 707 * - PEG-Halt Status-2 Register: 708 * (LSW = mb[3], MSW = mb[7]) 709 */ 710 protocol_engine_id = (mb[2] & 0xff); 711 fw_err_code = (((mb[2] & 0xff00) >> 8) | 712 ((mb[6] & 0x1fff) << 8)); 713 err_level = ((mb[6] & 0xe000) >> 13); 714 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 715 "Register: protocol_engine_id=0x%x " 716 "fw_err_code=0x%x err_level=0x%x.\n", 717 protocol_engine_id, fw_err_code, err_level); 718 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 719 "Register: 0x%x%x.\n", mb[7], mb[3]); 720 if (err_level == ERR_LEVEL_NON_FATAL) { 721 ql_log(ql_log_warn, vha, 0x5063, 722 "Not a fatal error, f/w has recovered itself.\n"); 723 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 724 ql_log(ql_log_fatal, vha, 0x5064, 725 
"Recoverable Fatal error: Chip reset " 726 "required.\n"); 727 qla83xx_schedule_work(vha, 728 QLA83XX_NIC_CORE_RESET); 729 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 730 ql_log(ql_log_fatal, vha, 0x5065, 731 "Unrecoverable Fatal error: Set FAILED " 732 "state, reboot required.\n"); 733 qla83xx_schedule_work(vha, 734 QLA83XX_NIC_CORE_UNRECOVERABLE); 735 } 736 } 737 738 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 739 uint16_t peg_fw_state, nw_interface_link_up; 740 uint16_t nw_interface_signal_detect, sfp_status; 741 uint16_t htbt_counter, htbt_monitor_enable; 742 uint16_t sfp_additional_info, sfp_multirate; 743 uint16_t sfp_tx_fault, link_speed, dcbx_status; 744 745 /* 746 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 747 * - PEG-to-FC Status Register: 748 * (LSW = mb[2], MSW = mb[6]) 749 * Bits 0-7 = Peg-Firmware state 750 * Bit 8 = N/W Interface Link-up 751 * Bit 9 = N/W Interface signal detected 752 * Bits 10-11 = SFP Status 753 * SFP Status 0x0 = SFP+ transceiver not expected 754 * SFP Status 0x1 = SFP+ transceiver not present 755 * SFP Status 0x2 = SFP+ transceiver invalid 756 * SFP Status 0x3 = SFP+ transceiver present and 757 * valid 758 * Bits 12-14 = Heartbeat Counter 759 * Bit 15 = Heartbeat Monitor Enable 760 * Bits 16-17 = SFP Additional Info 761 * SFP info 0x0 = Unregocnized transceiver for 762 * Ethernet 763 * SFP info 0x1 = SFP+ brand validation failed 764 * SFP info 0x2 = SFP+ speed validation failed 765 * SFP info 0x3 = SFP+ access error 766 * Bit 18 = SFP Multirate 767 * Bit 19 = SFP Tx Fault 768 * Bits 20-22 = Link Speed 769 * Bits 23-27 = Reserved 770 * Bits 28-30 = DCBX Status 771 * DCBX Status 0x0 = DCBX Disabled 772 * DCBX Status 0x1 = DCBX Enabled 773 * DCBX Status 0x2 = DCBX Exchange error 774 * Bit 31 = Reserved 775 */ 776 peg_fw_state = (mb[2] & 0x00ff); 777 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 778 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 779 sfp_status = ((mb[2] & 0x0c00) >> 10); 780 htbt_counter = ((mb[2] & 0x7000) >> 12); 781 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 782 sfp_additional_info = (mb[6] & 0x0003); 783 sfp_multirate = ((mb[6] & 0x0004) >> 2); 784 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 785 link_speed = ((mb[6] & 0x0070) >> 4); 786 dcbx_status = ((mb[6] & 0x7000) >> 12); 787 788 ql_log(ql_log_warn, vha, 0x5066, 789 "Peg-to-Fc Status Register:\n" 790 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 791 "nw_interface_signal_detect=0x%x" 792 "\nsfp_statis=0x%x.\n ", peg_fw_state, 793 nw_interface_link_up, nw_interface_signal_detect, 794 sfp_status); 795 ql_log(ql_log_warn, vha, 0x5067, 796 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 797 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 798 htbt_counter, htbt_monitor_enable, 799 sfp_additional_info, sfp_multirate); 800 ql_log(ql_log_warn, vha, 0x5068, 801 "sfp_tx_fault=0x%x, link_state=0x%x, " 802 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 803 dcbx_status); 804 805 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 806 } 807 808 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 809 ql_log(ql_log_warn, vha, 0x5069, 810 "Heartbeat Failure encountered, chip reset " 811 "required.\n"); 812 813 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 814 } 815 } 816 817 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 818 ql_log(ql_log_info, vha, 0x506a, 819 "IDC Device-State changed = 0x%x.\n", mb[4]); 820 if (ha->flags.nic_core_reset_owner) 821 return; 822 qla83xx_schedule_work(vha, MBA_IDC_AEN); 823 } 824 } 825 826 int 827 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t 
rscn_entry) 828 { 829 struct qla_hw_data *ha = vha->hw; 830 scsi_qla_host_t *vp; 831 uint32_t vp_did; 832 unsigned long flags; 833 int ret = 0; 834 835 if (!ha->num_vhosts) 836 return ret; 837 838 spin_lock_irqsave(&ha->vport_slock, flags); 839 list_for_each_entry(vp, &ha->vp_list, list) { 840 vp_did = vp->d_id.b24; 841 if (vp_did == rscn_entry) { 842 ret = 1; 843 break; 844 } 845 } 846 spin_unlock_irqrestore(&ha->vport_slock, flags); 847 848 return ret; 849 } 850 851 fc_port_t * 852 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 853 { 854 fc_port_t *f, *tf; 855 856 f = tf = NULL; 857 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) 858 if (f->loop_id == loop_id) 859 return f; 860 return NULL; 861 } 862 863 fc_port_t * 864 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) 865 { 866 fc_port_t *f, *tf; 867 868 f = tf = NULL; 869 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 870 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { 871 if (incl_deleted) 872 return f; 873 else if (f->deleted == 0) 874 return f; 875 } 876 } 877 return NULL; 878 } 879 880 fc_port_t * 881 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, 882 u8 incl_deleted) 883 { 884 fc_port_t *f, *tf; 885 886 f = tf = NULL; 887 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 888 if (f->d_id.b24 == id->b24) { 889 if (incl_deleted) 890 return f; 891 else if (f->deleted == 0) 892 return f; 893 } 894 } 895 return NULL; 896 } 897 898 /* Shall be called only on supported adapters. */ 899 static void 900 qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 901 { 902 struct qla_hw_data *ha = vha->hw; 903 bool reset_isp_needed = false; 904 905 ql_log(ql_log_warn, vha, 0x02f0, 906 "MPI Heartbeat stop. MPI reset is%s needed. " 907 "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", 908 mb[1] & BIT_8 ? "" : " not", 909 mb[0], mb[1], mb[2], mb[3]); 910 911 if ((mb[1] & BIT_8) == 0) 912 return; 913 914 ql_log(ql_log_warn, vha, 0x02f1, 915 "MPI Heartbeat stop. 
FW dump needed\n"); 916 917 if (ql2xfulldump_on_mpifail) { 918 ha->isp_ops->fw_dump(vha); 919 reset_isp_needed = true; 920 } 921 922 ha->isp_ops->mpi_fw_dump(vha, 1); 923 924 if (reset_isp_needed) { 925 vha->hw->flags.fw_init_done = 0; 926 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 927 qla2xxx_wake_dpc(vha); 928 } 929 } 930 931 static struct purex_item * 932 qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) 933 { 934 struct purex_item *item = NULL; 935 uint8_t item_hdr_size = sizeof(*item); 936 937 if (size > QLA_DEFAULT_PAYLOAD_SIZE) { 938 item = kzalloc(item_hdr_size + 939 (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); 940 } else { 941 if (atomic_inc_return(&vha->default_item.in_use) == 1) { 942 item = &vha->default_item; 943 goto initialize_purex_header; 944 } else { 945 item = kzalloc(item_hdr_size, GFP_ATOMIC); 946 } 947 } 948 if (!item) { 949 ql_log(ql_log_warn, vha, 0x5092, 950 ">> Failed allocate purex list item.\n"); 951 952 return NULL; 953 } 954 955 initialize_purex_header: 956 item->vha = vha; 957 item->size = size; 958 return item; 959 } 960 961 static void 962 qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, 963 void (*process_item)(struct scsi_qla_host *vha, 964 struct purex_item *pkt)) 965 { 966 struct purex_list *list = &vha->purex_list; 967 ulong flags; 968 969 pkt->process_item = process_item; 970 971 spin_lock_irqsave(&list->lock, flags); 972 list_add_tail(&pkt->list, &list->head); 973 spin_unlock_irqrestore(&list->lock, flags); 974 975 set_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags); 976 } 977 978 /** 979 * qla24xx_copy_std_pkt() - Copy over purex ELS which is 980 * contained in a single IOCB. 981 * purex packet. 982 * @vha: SCSI driver HA context 983 * @pkt: ELS packet 984 */ 985 static struct purex_item 986 *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt) 987 { 988 struct purex_item *item; 989 990 item = qla24xx_alloc_purex_item(vha, 991 QLA_DEFAULT_PAYLOAD_SIZE); 992 if (!item) 993 return item; 994 995 memcpy(&item->iocb, pkt, sizeof(item->iocb)); 996 return item; 997 } 998 999 /** 1000 * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can 1001 * span over multiple IOCBs. 1002 * @vha: SCSI driver HA context 1003 * @pkt: ELS packet 1004 * @rsp: Response queue 1005 */ 1006 static struct purex_item * 1007 qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, 1008 struct rsp_que **rsp) 1009 { 1010 struct purex_entry_24xx *purex = *pkt; 1011 struct rsp_que *rsp_q = *rsp; 1012 sts_cont_entry_t *new_pkt; 1013 uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; 1014 uint16_t buffer_copy_offset = 0; 1015 uint16_t entry_count, entry_count_remaining; 1016 struct purex_item *item; 1017 void *fpin_pkt = NULL; 1018 1019 total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) 1020 - PURX_ELS_HEADER_SIZE; 1021 pending_bytes = total_bytes; 1022 entry_count = entry_count_remaining = purex->entry_count; 1023 no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? 
1024 sizeof(purex->els_frame_payload) : pending_bytes; 1025 ql_log(ql_log_info, vha, 0x509a, 1026 "FPIN ELS, frame_size 0x%x, entry count %d\n", 1027 total_bytes, entry_count); 1028 1029 item = qla24xx_alloc_purex_item(vha, total_bytes); 1030 if (!item) 1031 return item; 1032 1033 fpin_pkt = &item->iocb; 1034 1035 memcpy(fpin_pkt, &purex->els_frame_payload[0], no_bytes); 1036 buffer_copy_offset += no_bytes; 1037 pending_bytes -= no_bytes; 1038 --entry_count_remaining; 1039 1040 ((response_t *)purex)->signature = RESPONSE_PROCESSED; 1041 wmb(); 1042 1043 do { 1044 while ((total_bytes > 0) && (entry_count_remaining > 0)) { 1045 if (rsp_q->ring_ptr->signature == RESPONSE_PROCESSED) { 1046 ql_dbg(ql_dbg_async, vha, 0x5084, 1047 "Ran out of IOCBs, partial data 0x%x\n", 1048 buffer_copy_offset); 1049 cpu_relax(); 1050 continue; 1051 } 1052 1053 new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; 1054 *pkt = new_pkt; 1055 1056 if (new_pkt->entry_type != STATUS_CONT_TYPE) { 1057 ql_log(ql_log_warn, vha, 0x507a, 1058 "Unexpected IOCB type, partial data 0x%x\n", 1059 buffer_copy_offset); 1060 break; 1061 } 1062 1063 rsp_q->ring_index++; 1064 if (rsp_q->ring_index == rsp_q->length) { 1065 rsp_q->ring_index = 0; 1066 rsp_q->ring_ptr = rsp_q->ring; 1067 } else { 1068 rsp_q->ring_ptr++; 1069 } 1070 no_bytes = (pending_bytes > sizeof(new_pkt->data)) ? 1071 sizeof(new_pkt->data) : pending_bytes; 1072 if ((buffer_copy_offset + no_bytes) <= total_bytes) { 1073 memcpy(((uint8_t *)fpin_pkt + 1074 buffer_copy_offset), new_pkt->data, 1075 no_bytes); 1076 buffer_copy_offset += no_bytes; 1077 pending_bytes -= no_bytes; 1078 --entry_count_remaining; 1079 } else { 1080 ql_log(ql_log_warn, vha, 0x5044, 1081 "Attempt to copy more that we got, optimizing..%x\n", 1082 buffer_copy_offset); 1083 memcpy(((uint8_t *)fpin_pkt + 1084 buffer_copy_offset), new_pkt->data, 1085 total_bytes - buffer_copy_offset); 1086 } 1087 1088 ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; 1089 wmb(); 1090 } 1091 1092 if (pending_bytes != 0 || entry_count_remaining != 0) { 1093 ql_log(ql_log_fatal, vha, 0x508b, 1094 "Dropping partial FPIN, underrun bytes = 0x%x, entry cnts 0x%x\n", 1095 total_bytes, entry_count_remaining); 1096 qla24xx_free_purex_item(item); 1097 return NULL; 1098 } 1099 } while (entry_count_remaining > 0); 1100 host_to_fcp_swap((uint8_t *)&item->iocb, total_bytes); 1101 return item; 1102 } 1103 1104 /** 1105 * qla2x00_async_event() - Process aynchronous events. 1106 * @vha: SCSI driver HA context 1107 * @rsp: response queue 1108 * @mb: Mailbox registers (0 - 3) 1109 */ 1110 void 1111 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 1112 { 1113 uint16_t handle_cnt; 1114 uint16_t cnt, mbx; 1115 uint32_t handles[5]; 1116 struct qla_hw_data *ha = vha->hw; 1117 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1118 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 1119 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 1120 uint32_t rscn_entry, host_pid; 1121 unsigned long flags; 1122 fc_port_t *fcport = NULL; 1123 1124 if (!vha->hw->flags.fw_started) 1125 return; 1126 1127 /* Setup to process RIO completion. 
*/ 1128 handle_cnt = 0; 1129 if (IS_CNA_CAPABLE(ha)) 1130 goto skip_rio; 1131 switch (mb[0]) { 1132 case MBA_SCSI_COMPLETION: 1133 handles[0] = make_handle(mb[2], mb[1]); 1134 handle_cnt = 1; 1135 break; 1136 case MBA_CMPLT_1_16BIT: 1137 handles[0] = mb[1]; 1138 handle_cnt = 1; 1139 mb[0] = MBA_SCSI_COMPLETION; 1140 break; 1141 case MBA_CMPLT_2_16BIT: 1142 handles[0] = mb[1]; 1143 handles[1] = mb[2]; 1144 handle_cnt = 2; 1145 mb[0] = MBA_SCSI_COMPLETION; 1146 break; 1147 case MBA_CMPLT_3_16BIT: 1148 handles[0] = mb[1]; 1149 handles[1] = mb[2]; 1150 handles[2] = mb[3]; 1151 handle_cnt = 3; 1152 mb[0] = MBA_SCSI_COMPLETION; 1153 break; 1154 case MBA_CMPLT_4_16BIT: 1155 handles[0] = mb[1]; 1156 handles[1] = mb[2]; 1157 handles[2] = mb[3]; 1158 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1159 handle_cnt = 4; 1160 mb[0] = MBA_SCSI_COMPLETION; 1161 break; 1162 case MBA_CMPLT_5_16BIT: 1163 handles[0] = mb[1]; 1164 handles[1] = mb[2]; 1165 handles[2] = mb[3]; 1166 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 1167 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 1168 handle_cnt = 5; 1169 mb[0] = MBA_SCSI_COMPLETION; 1170 break; 1171 case MBA_CMPLT_2_32BIT: 1172 handles[0] = make_handle(mb[2], mb[1]); 1173 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), 1174 RD_MAILBOX_REG(ha, reg, 6)); 1175 handle_cnt = 2; 1176 mb[0] = MBA_SCSI_COMPLETION; 1177 break; 1178 default: 1179 break; 1180 } 1181 skip_rio: 1182 switch (mb[0]) { 1183 case MBA_SCSI_COMPLETION: /* Fast Post */ 1184 if (!vha->flags.online) 1185 break; 1186 1187 for (cnt = 0; cnt < handle_cnt; cnt++) 1188 qla2x00_process_completed_request(vha, rsp->req, 1189 handles[cnt]); 1190 break; 1191 1192 case MBA_RESET: /* Reset */ 1193 ql_dbg(ql_dbg_async, vha, 0x5002, 1194 "Asynchronous RESET.\n"); 1195 1196 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1197 break; 1198 1199 case MBA_SYSTEM_ERR: /* System Error */ 1200 mbx = 0; 1201 1202 vha->hw_err_cnt++; 1203 1204 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || 1205 IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1206 u16 m[4]; 1207 1208 m[0] = rd_reg_word(®24->mailbox4); 1209 m[1] = rd_reg_word(®24->mailbox5); 1210 m[2] = rd_reg_word(®24->mailbox6); 1211 mbx = m[3] = rd_reg_word(®24->mailbox7); 1212 1213 ql_log(ql_log_warn, vha, 0x5003, 1214 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", 1215 mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); 1216 } else 1217 ql_log(ql_log_warn, vha, 0x5003, 1218 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n ", 1219 mb[1], mb[2], mb[3]); 1220 1221 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && 1222 rd_reg_word(®24->mailbox7) & BIT_8) 1223 ha->isp_ops->mpi_fw_dump(vha, 1); 1224 ha->isp_ops->fw_dump(vha); 1225 ha->flags.fw_init_done = 0; 1226 QLA_FW_STOPPED(ha); 1227 1228 if (IS_FWI2_CAPABLE(ha)) { 1229 if (mb[1] == 0 && mb[2] == 0) { 1230 ql_log(ql_log_fatal, vha, 0x5004, 1231 "Unrecoverable Hardware Error: adapter " 1232 "marked OFFLINE!\n"); 1233 vha->flags.online = 0; 1234 vha->device_flags |= DFLG_DEV_FAILED; 1235 } else { 1236 /* Check to see if MPI timeout occurred */ 1237 if ((mbx & MBX_3) && (ha->port_no == 0)) 1238 set_bit(MPI_RESET_NEEDED, 1239 &vha->dpc_flags); 1240 1241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1242 } 1243 } else if (mb[1] == 0) { 1244 ql_log(ql_log_fatal, vha, 0x5005, 1245 "Unrecoverable Hardware Error: adapter marked " 1246 "OFFLINE!\n"); 1247 vha->flags.online = 0; 1248 vha->device_flags |= DFLG_DEV_FAILED; 1249 } else 1250 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1251 break; 1252 1253 case 
MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 1254 ql_log(ql_log_warn, vha, 0x5006, 1255 "ISP Request Transfer Error (%x).\n", mb[1]); 1256 1257 vha->hw_err_cnt++; 1258 1259 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1260 break; 1261 1262 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 1263 ql_log(ql_log_warn, vha, 0x5007, 1264 "ISP Response Transfer Error (%x).\n", mb[1]); 1265 1266 vha->hw_err_cnt++; 1267 1268 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1269 break; 1270 1271 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 1272 ql_dbg(ql_dbg_async, vha, 0x5008, 1273 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); 1274 break; 1275 1276 case MBA_LOOP_INIT_ERR: 1277 ql_log(ql_log_warn, vha, 0x5090, 1278 "LOOP INIT ERROR (%x).\n", mb[1]); 1279 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1280 break; 1281 1282 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 1283 ha->flags.lip_ae = 1; 1284 1285 ql_dbg(ql_dbg_async, vha, 0x5009, 1286 "LIP occurred (%x).\n", mb[1]); 1287 1288 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1289 atomic_set(&vha->loop_state, LOOP_DOWN); 1290 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1291 qla2x00_mark_all_devices_lost(vha); 1292 } 1293 1294 if (vha->vp_idx) { 1295 atomic_set(&vha->vp_state, VP_FAILED); 1296 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1297 } 1298 1299 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1300 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1301 1302 vha->flags.management_server_logged_in = 0; 1303 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 1304 break; 1305 1306 case MBA_LOOP_UP: /* Loop Up Event */ 1307 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 1308 ha->link_data_rate = PORT_SPEED_1GB; 1309 else 1310 ha->link_data_rate = mb[1]; 1311 1312 ql_log(ql_log_info, vha, 0x500a, 1313 "LOOP UP detected (%s Gbps).\n", 1314 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 1315 1316 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1317 if (mb[2] & BIT_0) 1318 ql_log(ql_log_info, vha, 0x11a0, 1319 "FEC=enabled (link up).\n"); 1320 } 1321 1322 vha->flags.management_server_logged_in = 0; 1323 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 1324 1325 if (vha->link_down_time < vha->hw->port_down_retry_count) { 1326 vha->short_link_down_cnt++; 1327 vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; 1328 } 1329 1330 break; 1331 1332 case MBA_LOOP_DOWN: /* Loop Down Event */ 1333 SAVE_TOPO(ha); 1334 ha->flags.lip_ae = 0; 1335 ha->current_topology = 0; 1336 vha->link_down_time = 0; 1337 1338 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 1339 ? rd_reg_word(®24->mailbox4) : 0; 1340 mbx = (IS_P3P_TYPE(ha)) ? 
rd_reg_word(®82->mailbox_out[4]) 1341 : mbx; 1342 ql_log(ql_log_info, vha, 0x500b, 1343 "LOOP DOWN detected (%x %x %x %x).\n", 1344 mb[1], mb[2], mb[3], mbx); 1345 1346 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1347 atomic_set(&vha->loop_state, LOOP_DOWN); 1348 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1349 /* 1350 * In case of loop down, restore WWPN from 1351 * NVRAM in case of FA-WWPN capable ISP 1352 * Restore for Physical Port only 1353 */ 1354 if (!vha->vp_idx) { 1355 if (ha->flags.fawwpn_enabled && 1356 (ha->current_topology == ISP_CFG_F)) { 1357 memcpy(vha->port_name, ha->port_name, WWN_SIZE); 1358 fc_host_port_name(vha->host) = 1359 wwn_to_u64(vha->port_name); 1360 ql_dbg(ql_dbg_init + ql_dbg_verbose, 1361 vha, 0x00d8, "LOOP DOWN detected," 1362 "restore WWPN %016llx\n", 1363 wwn_to_u64(vha->port_name)); 1364 } 1365 1366 clear_bit(VP_CONFIG_OK, &vha->vp_flags); 1367 } 1368 1369 vha->device_flags |= DFLG_NO_CABLE; 1370 qla2x00_mark_all_devices_lost(vha); 1371 } 1372 1373 if (vha->vp_idx) { 1374 atomic_set(&vha->vp_state, VP_FAILED); 1375 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1376 } 1377 1378 vha->flags.management_server_logged_in = 0; 1379 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1380 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 1381 break; 1382 1383 case MBA_LIP_RESET: /* LIP reset occurred */ 1384 ql_dbg(ql_dbg_async, vha, 0x500c, 1385 "LIP reset occurred (%x).\n", mb[1]); 1386 1387 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1388 atomic_set(&vha->loop_state, LOOP_DOWN); 1389 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 1390 qla2x00_mark_all_devices_lost(vha); 1391 } 1392 1393 if (vha->vp_idx) { 1394 atomic_set(&vha->vp_state, VP_FAILED); 1395 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1396 } 1397 1398 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1399 1400 ha->operating_mode = LOOP; 1401 vha->flags.management_server_logged_in = 0; 1402 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 1403 break; 1404 1405 /* case MBA_DCBX_COMPLETE: */ 1406 case MBA_POINT_TO_POINT: /* Point-to-Point */ 1407 ha->flags.lip_ae = 0; 1408 1409 if (IS_QLA2100(ha)) 1410 break; 1411 1412 if (IS_CNA_CAPABLE(ha)) { 1413 ql_dbg(ql_dbg_async, vha, 0x500d, 1414 "DCBX Completed -- %04x %04x %04x.\n", 1415 mb[1], mb[2], mb[3]); 1416 if (ha->notify_dcbx_comp && !vha->vp_idx) 1417 complete(&ha->dcbx_comp); 1418 1419 } else 1420 ql_dbg(ql_dbg_async, vha, 0x500e, 1421 "Asynchronous P2P MODE received.\n"); 1422 1423 /* 1424 * Until there's a transition from loop down to loop up, treat 1425 * this as loop down only. 
1426 */ 1427 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1428 atomic_set(&vha->loop_state, LOOP_DOWN); 1429 if (!atomic_read(&vha->loop_down_timer)) 1430 atomic_set(&vha->loop_down_timer, 1431 LOOP_DOWN_TIME); 1432 if (!N2N_TOPO(ha)) 1433 qla2x00_mark_all_devices_lost(vha); 1434 } 1435 1436 if (vha->vp_idx) { 1437 atomic_set(&vha->vp_state, VP_FAILED); 1438 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1439 } 1440 1441 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 1442 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 1443 1444 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 1445 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 1446 1447 vha->flags.management_server_logged_in = 0; 1448 break; 1449 1450 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 1451 if (IS_QLA2100(ha)) 1452 break; 1453 1454 ql_dbg(ql_dbg_async, vha, 0x500f, 1455 "Configuration change detected: value=%x.\n", mb[1]); 1456 1457 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1458 atomic_set(&vha->loop_state, LOOP_DOWN); 1459 if (!atomic_read(&vha->loop_down_timer)) 1460 atomic_set(&vha->loop_down_timer, 1461 LOOP_DOWN_TIME); 1462 qla2x00_mark_all_devices_lost(vha); 1463 } 1464 1465 if (vha->vp_idx) { 1466 atomic_set(&vha->vp_state, VP_FAILED); 1467 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 1468 } 1469 1470 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1471 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1472 break; 1473 1474 case MBA_PORT_UPDATE: /* Port database update */ 1475 /* 1476 * Handle only global and vn-port update events 1477 * 1478 * Relevant inputs: 1479 * mb[1] = N_Port handle of changed port 1480 * OR 0xffff for global event 1481 * mb[2] = New login state 1482 * 7 = Port logged out 1483 * mb[3] = LSB is vp_idx, 0xff = all vps 1484 * 1485 * Skip processing if: 1486 * Event is global, vp_idx is NOT all vps, 1487 * vp_idx does not match 1488 * Event is not global, vp_idx does not match 1489 */ 1490 if (IS_QLA2XXX_MIDTYPE(ha) && 1491 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 1492 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 1493 break; 1494 1495 if (mb[2] == 0x7) { 1496 ql_dbg(ql_dbg_async, vha, 0x5010, 1497 "Port %s %04x %04x %04x.\n", 1498 mb[1] == 0xffff ? 
"unavailable" : "logout", 1499 mb[1], mb[2], mb[3]); 1500 1501 if (mb[1] == 0xffff) 1502 goto global_port_update; 1503 1504 if (mb[1] == NPH_SNS_LID(ha)) { 1505 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1506 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1507 break; 1508 } 1509 1510 /* use handle_cnt for loop id/nport handle */ 1511 if (IS_FWI2_CAPABLE(ha)) 1512 handle_cnt = NPH_SNS; 1513 else 1514 handle_cnt = SIMPLE_NAME_SERVER; 1515 if (mb[1] == handle_cnt) { 1516 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1517 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1518 break; 1519 } 1520 1521 /* Port logout */ 1522 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1523 if (!fcport) 1524 break; 1525 if (atomic_read(&fcport->state) != FCS_ONLINE) 1526 break; 1527 ql_dbg(ql_dbg_async, vha, 0x508a, 1528 "Marking port lost loopid=%04x portid=%06x.\n", 1529 fcport->loop_id, fcport->d_id.b24); 1530 if (qla_ini_mode_enabled(vha)) { 1531 fcport->logout_on_delete = 0; 1532 qlt_schedule_sess_for_deletion(fcport); 1533 } 1534 break; 1535 1536 global_port_update: 1537 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1538 atomic_set(&vha->loop_state, LOOP_DOWN); 1539 atomic_set(&vha->loop_down_timer, 1540 LOOP_DOWN_TIME); 1541 vha->device_flags |= DFLG_NO_CABLE; 1542 qla2x00_mark_all_devices_lost(vha); 1543 } 1544 1545 if (vha->vp_idx) { 1546 atomic_set(&vha->vp_state, VP_FAILED); 1547 fc_vport_set_state(vha->fc_vport, 1548 FC_VPORT_FAILED); 1549 qla2x00_mark_all_devices_lost(vha); 1550 } 1551 1552 vha->flags.management_server_logged_in = 0; 1553 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1554 break; 1555 } 1556 1557 /* 1558 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1559 * event etc. earlier indicating loop is down) then process 1560 * it. Otherwise ignore it and Wait for RSCN to come in. 1561 */ 1562 atomic_set(&vha->loop_down_timer, 0); 1563 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1564 !ha->flags.n2n_ae && 1565 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1566 ql_dbg(ql_dbg_async, vha, 0x5011, 1567 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1568 mb[1], mb[2], mb[3]); 1569 break; 1570 } 1571 1572 ql_dbg(ql_dbg_async, vha, 0x5012, 1573 "Port database changed %04x %04x %04x.\n", 1574 mb[1], mb[2], mb[3]); 1575 1576 /* 1577 * Mark all devices as missing so we will login again. 1578 */ 1579 atomic_set(&vha->loop_state, LOOP_UP); 1580 vha->scan.scan_retry = 0; 1581 1582 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1583 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1584 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1585 break; 1586 1587 case MBA_RSCN_UPDATE: /* State Change Registration */ 1588 /* Check if the Vport has issued a SCR */ 1589 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1590 break; 1591 /* Only handle SCNs for our Vport index. */ 1592 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1593 break; 1594 1595 ql_log(ql_log_warn, vha, 0x5013, 1596 "RSCN database changed -- %04x %04x %04x.\n", 1597 mb[1], mb[2], mb[3]); 1598 1599 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1600 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1601 | vha->d_id.b.al_pa; 1602 if (rscn_entry == host_pid) { 1603 ql_dbg(ql_dbg_async, vha, 0x5014, 1604 "Ignoring RSCN update to local host " 1605 "port ID (%06x).\n", host_pid); 1606 break; 1607 } 1608 1609 /* Ignore reserved bits from RSCN-payload. 
*/ 1610 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 1611 1612 /* Skip RSCNs for virtual ports on the same physical port */ 1613 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 1614 break; 1615 1616 atomic_set(&vha->loop_down_timer, 0); 1617 vha->flags.management_server_logged_in = 0; 1618 { 1619 struct event_arg ea; 1620 1621 memset(&ea, 0, sizeof(ea)); 1622 ea.id.b24 = rscn_entry; 1623 ea.id.b.rsvd_1 = rscn_entry >> 24; 1624 qla2x00_handle_rscn(vha, &ea); 1625 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1626 } 1627 break; 1628 case MBA_CONGN_NOTI_RECV: 1629 if (!ha->flags.scm_enabled || 1630 mb[1] != QLA_CON_PRIMITIVE_RECEIVED) 1631 break; 1632 1633 if (mb[2] == QLA_CONGESTION_ARB_WARNING) { 1634 ql_dbg(ql_dbg_async, vha, 0x509b, 1635 "Congestion Warning %04x %04x.\n", mb[1], mb[2]); 1636 } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { 1637 ql_log(ql_log_warn, vha, 0x509b, 1638 "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); 1639 } 1640 break; 1641 /* case MBA_RIO_RESPONSE: */ 1642 case MBA_ZIO_RESPONSE: 1643 ql_dbg(ql_dbg_async, vha, 0x5015, 1644 "[R|Z]IO update completion.\n"); 1645 1646 if (IS_FWI2_CAPABLE(ha)) 1647 qla24xx_process_response_queue(vha, rsp); 1648 else 1649 qla2x00_process_response_queue(rsp); 1650 break; 1651 1652 case MBA_DISCARD_RND_FRAME: 1653 ql_dbg(ql_dbg_async, vha, 0x5016, 1654 "Discard RND Frame -- %04x %04x %04x.\n", 1655 mb[1], mb[2], mb[3]); 1656 vha->interface_err_cnt++; 1657 break; 1658 1659 case MBA_TRACE_NOTIFICATION: 1660 ql_dbg(ql_dbg_async, vha, 0x5017, 1661 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1662 break; 1663 1664 case MBA_ISP84XX_ALERT: 1665 ql_dbg(ql_dbg_async, vha, 0x5018, 1666 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1667 mb[1], mb[2], mb[3]); 1668 1669 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1670 switch (mb[1]) { 1671 case A84_PANIC_RECOVERY: 1672 ql_log(ql_log_info, vha, 0x5019, 1673 "Alert 84XX: panic recovery %04x %04x.\n", 1674 mb[2], mb[3]); 1675 break; 1676 case A84_OP_LOGIN_COMPLETE: 1677 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1678 ql_log(ql_log_info, vha, 0x501a, 1679 "Alert 84XX: firmware version %x.\n", 1680 ha->cs84xx->op_fw_version); 1681 break; 1682 case A84_DIAG_LOGIN_COMPLETE: 1683 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1684 ql_log(ql_log_info, vha, 0x501b, 1685 "Alert 84XX: diagnostic firmware version %x.\n", 1686 ha->cs84xx->diag_fw_version); 1687 break; 1688 case A84_GOLD_LOGIN_COMPLETE: 1689 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1690 ha->cs84xx->fw_update = 1; 1691 ql_log(ql_log_info, vha, 0x501c, 1692 "Alert 84XX: gold firmware version %x.\n", 1693 ha->cs84xx->gold_fw_version); 1694 break; 1695 default: 1696 ql_log(ql_log_warn, vha, 0x501d, 1697 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1698 mb[1], mb[2], mb[3]); 1699 } 1700 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1701 break; 1702 case MBA_DCBX_START: 1703 ql_dbg(ql_dbg_async, vha, 0x501e, 1704 "DCBX Started -- %04x %04x %04x.\n", 1705 mb[1], mb[2], mb[3]); 1706 break; 1707 case MBA_DCBX_PARAM_UPDATE: 1708 ql_dbg(ql_dbg_async, vha, 0x501f, 1709 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1710 mb[1], mb[2], mb[3]); 1711 break; 1712 case MBA_FCF_CONF_ERR: 1713 ql_dbg(ql_dbg_async, vha, 0x5020, 1714 "FCF Configuration Error -- %04x %04x %04x.\n", 1715 mb[1], mb[2], mb[3]); 1716 break; 1717 case MBA_IDC_NOTIFY: 1718 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1719 mb[4] = rd_reg_word(®24->mailbox4); 1720 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1721 (mb[2] & 0x7fff) == 
MBC_SET_PORT_CONFIG) && 1722 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1723 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1724 /* 1725 * Extend loop down timer since port is active. 1726 */ 1727 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1728 atomic_set(&vha->loop_down_timer, 1729 LOOP_DOWN_TIME); 1730 qla2xxx_wake_dpc(vha); 1731 } 1732 } 1733 fallthrough; 1734 case MBA_IDC_COMPLETE: 1735 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1736 complete(&ha->lb_portup_comp); 1737 fallthrough; 1738 case MBA_IDC_TIME_EXT: 1739 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1740 IS_QLA8044(ha)) 1741 qla81xx_idc_event(vha, mb[0], mb[1]); 1742 break; 1743 1744 case MBA_IDC_AEN: 1745 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1746 vha->hw_err_cnt++; 1747 qla27xx_handle_8200_aen(vha, mb); 1748 } else if (IS_QLA83XX(ha)) { 1749 mb[4] = rd_reg_word(®24->mailbox4); 1750 mb[5] = rd_reg_word(®24->mailbox5); 1751 mb[6] = rd_reg_word(®24->mailbox6); 1752 mb[7] = rd_reg_word(®24->mailbox7); 1753 qla83xx_handle_8200_aen(vha, mb); 1754 } else { 1755 ql_dbg(ql_dbg_async, vha, 0x5052, 1756 "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", 1757 mb[0], mb[1], mb[2], mb[3]); 1758 } 1759 break; 1760 1761 case MBA_DPORT_DIAGNOSTICS: 1762 if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR || 1763 (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR) 1764 vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS; 1765 ql_dbg(ql_dbg_async, vha, 0x5052, 1766 "D-Port Diagnostics: %04x %04x %04x %04x\n", 1767 mb[0], mb[1], mb[2], mb[3]); 1768 memcpy(vha->dport_data, mb, sizeof(vha->dport_data)); 1769 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { 1770 static char *results[] = { 1771 "start", "done(pass)", "done(error)", "undefined" }; 1772 static char *types[] = { 1773 "none", "dynamic", "static", "other" }; 1774 uint result = mb[1] >> 0 & 0x3; 1775 uint type = mb[1] >> 6 & 0x3; 1776 uint sw = mb[1] >> 15 & 0x1; 1777 ql_dbg(ql_dbg_async, vha, 0x5052, 1778 "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", 1779 results[result], types[type], sw); 1780 if (result == 2) { 1781 static char *reasons[] = { 1782 "reserved", "unexpected reject", 1783 "unexpected phase", "retry exceeded", 1784 "timed out", "not supported", 1785 "user stopped" }; 1786 uint reason = mb[2] >> 0 & 0xf; 1787 uint phase = mb[2] >> 12 & 0xf; 1788 ql_dbg(ql_dbg_async, vha, 0x5052, 1789 "D-Port Diagnostics: reason=%s phase=%u \n", 1790 reason < 7 ? reasons[reason] : "other", 1791 phase >> 1); 1792 } 1793 } 1794 break; 1795 1796 case MBA_TEMPERATURE_ALERT: 1797 ql_dbg(ql_dbg_async, vha, 0x505e, 1798 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1799 break; 1800 1801 case MBA_TRANS_INSERT: 1802 ql_dbg(ql_dbg_async, vha, 0x5091, 1803 "Transceiver Insertion: %04x\n", mb[1]); 1804 set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags); 1805 break; 1806 1807 case MBA_TRANS_REMOVE: 1808 ql_dbg(ql_dbg_async, vha, 0x5091, "Transceiver Removal\n"); 1809 break; 1810 1811 default: 1812 ql_dbg(ql_dbg_async, vha, 0x5057, 1813 "Unknown AEN:%04x %04x %04x %04x\n", 1814 mb[0], mb[1], mb[2], mb[3]); 1815 } 1816 1817 qlt_async_event(mb[0], vha, mb); 1818 1819 if (!vha->vp_idx && ha->num_vhosts) 1820 qla2x00_alert_all_vps(rsp, mb); 1821 } 1822 1823 /** 1824 * qla2x00_process_completed_request() - Process a Fast Post response. 
1825 * @vha: SCSI driver HA context 1826 * @req: request queue 1827 * @index: SRB index 1828 */ 1829 void 1830 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1831 struct req_que *req, uint32_t index) 1832 { 1833 srb_t *sp; 1834 struct qla_hw_data *ha = vha->hw; 1835 1836 /* Validate handle. */ 1837 if (index >= req->num_outstanding_cmds) { 1838 ql_log(ql_log_warn, vha, 0x3014, 1839 "Invalid SCSI command index (%x).\n", index); 1840 1841 if (IS_P3P_TYPE(ha)) 1842 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1843 else 1844 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1845 return; 1846 } 1847 1848 sp = req->outstanding_cmds[index]; 1849 if (sp) { 1850 /* Free outstanding command slot. */ 1851 req->outstanding_cmds[index] = NULL; 1852 1853 /* Save ISP completion status */ 1854 sp->done(sp, DID_OK << 16); 1855 } else { 1856 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1857 1858 if (IS_P3P_TYPE(ha)) 1859 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1860 else 1861 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1862 } 1863 } 1864 1865 srb_t * 1866 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1867 struct req_que *req, void *iocb) 1868 { 1869 struct qla_hw_data *ha = vha->hw; 1870 sts_entry_t *pkt = iocb; 1871 srb_t *sp; 1872 uint16_t index; 1873 1874 if (pkt->handle == QLA_SKIP_HANDLE) 1875 return NULL; 1876 1877 index = LSW(pkt->handle); 1878 if (index >= req->num_outstanding_cmds) { 1879 ql_log(ql_log_warn, vha, 0x5031, 1880 "%s: Invalid command index (%x) type %8ph.\n", 1881 func, index, iocb); 1882 if (IS_P3P_TYPE(ha)) 1883 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1884 else 1885 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1886 return NULL; 1887 } 1888 sp = req->outstanding_cmds[index]; 1889 if (!sp) { 1890 ql_log(ql_log_warn, vha, 0x5032, 1891 "%s: Invalid completion handle (%x) -- timed-out.\n", 1892 func, index); 1893 return NULL; 1894 } 1895 if (sp->handle != index) { 1896 ql_log(ql_log_warn, vha, 0x5033, 1897 "%s: SRB handle (%x) mismatch %x.\n", func, 1898 sp->handle, index); 1899 return NULL; 1900 } 1901 1902 req->outstanding_cmds[index] = NULL; 1903 1904 qla_put_fw_resources(sp->qpair, &sp->iores); 1905 return sp; 1906 } 1907 1908 static void 1909 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1910 struct mbx_entry *mbx) 1911 { 1912 const char func[] = "MBX-IOCB"; 1913 const char *type; 1914 fc_port_t *fcport; 1915 srb_t *sp; 1916 struct srb_iocb *lio; 1917 uint16_t *data; 1918 uint16_t status; 1919 1920 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1921 if (!sp) 1922 return; 1923 1924 lio = &sp->u.iocb_cmd; 1925 type = sp->name; 1926 fcport = sp->fcport; 1927 data = lio->u.logio.data; 1928 1929 data[0] = MBS_COMMAND_ERROR; 1930 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
1931 QLA_LOGIO_LOGIN_RETRIED : 0; 1932 if (mbx->entry_status) { 1933 ql_dbg(ql_dbg_async, vha, 0x5043, 1934 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1935 "entry-status=%x status=%x state-flag=%x " 1936 "status-flags=%x.\n", type, sp->handle, 1937 fcport->d_id.b.domain, fcport->d_id.b.area, 1938 fcport->d_id.b.al_pa, mbx->entry_status, 1939 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1940 le16_to_cpu(mbx->status_flags)); 1941 1942 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1943 mbx, sizeof(*mbx)); 1944 1945 goto logio_done; 1946 } 1947 1948 status = le16_to_cpu(mbx->status); 1949 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1950 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1951 status = 0; 1952 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1953 ql_dbg(ql_dbg_async, vha, 0x5045, 1954 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1955 type, sp->handle, fcport->d_id.b.domain, 1956 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1957 le16_to_cpu(mbx->mb1)); 1958 1959 data[0] = MBS_COMMAND_COMPLETE; 1960 if (sp->type == SRB_LOGIN_CMD) { 1961 fcport->port_type = FCT_TARGET; 1962 if (le16_to_cpu(mbx->mb1) & BIT_0) 1963 fcport->port_type = FCT_INITIATOR; 1964 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1965 fcport->flags |= FCF_FCP2_DEVICE; 1966 } 1967 goto logio_done; 1968 } 1969 1970 data[0] = le16_to_cpu(mbx->mb0); 1971 switch (data[0]) { 1972 case MBS_PORT_ID_USED: 1973 data[1] = le16_to_cpu(mbx->mb1); 1974 break; 1975 case MBS_LOOP_ID_USED: 1976 break; 1977 default: 1978 data[0] = MBS_COMMAND_ERROR; 1979 break; 1980 } 1981 1982 ql_log(ql_log_warn, vha, 0x5046, 1983 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1984 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1985 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1986 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1987 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1988 le16_to_cpu(mbx->mb7)); 1989 1990 logio_done: 1991 sp->done(sp, 0); 1992 } 1993 1994 static void 1995 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1996 struct mbx_24xx_entry *pkt) 1997 { 1998 const char func[] = "MBX-IOCB2"; 1999 struct qla_hw_data *ha = vha->hw; 2000 srb_t *sp; 2001 struct srb_iocb *si; 2002 u16 sz, i; 2003 int res; 2004 2005 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2006 if (!sp) 2007 return; 2008 2009 if (sp->type == SRB_SCSI_CMD || 2010 sp->type == SRB_NVME_CMD || 2011 sp->type == SRB_TM_CMD) { 2012 ql_log(ql_log_warn, vha, 0x509d, 2013 "Inconsistent event entry type %d\n", sp->type); 2014 if (IS_P3P_TYPE(ha)) 2015 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2016 else 2017 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2018 return; 2019 } 2020 2021 si = &sp->u.iocb_cmd; 2022 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); 2023 2024 for (i = 0; i < sz; i++) 2025 si->u.mbx.in_mb[i] = pkt->mb[i]; 2026 2027 res = (si->u.mbx.in_mb[0] & MBS_MASK); 2028 2029 sp->done(sp, res); 2030 } 2031 2032 static void 2033 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2034 struct nack_to_isp *pkt) 2035 { 2036 const char func[] = "nack"; 2037 srb_t *sp; 2038 int res = 0; 2039 2040 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2041 if (!sp) 2042 return; 2043 2044 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) 2045 res = QLA_FUNCTION_FAILED; 2046 2047 sp->done(sp, res); 2048 } 2049 2050 static void 2051 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 
2052 sts_entry_t *pkt, int iocb_type) 2053 { 2054 const char func[] = "CT_IOCB"; 2055 const char *type; 2056 srb_t *sp; 2057 struct bsg_job *bsg_job; 2058 struct fc_bsg_reply *bsg_reply; 2059 uint16_t comp_status; 2060 int res = 0; 2061 2062 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2063 if (!sp) 2064 return; 2065 2066 switch (sp->type) { 2067 case SRB_CT_CMD: 2068 bsg_job = sp->u.bsg_job; 2069 bsg_reply = bsg_job->reply; 2070 2071 type = "ct pass-through"; 2072 2073 comp_status = le16_to_cpu(pkt->comp_status); 2074 2075 /* 2076 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 2077 * fc payload to the caller 2078 */ 2079 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 2080 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2081 2082 if (comp_status != CS_COMPLETE) { 2083 if (comp_status == CS_DATA_UNDERRUN) { 2084 res = DID_OK << 16; 2085 bsg_reply->reply_payload_rcv_len = 2086 le16_to_cpu(pkt->rsp_info_len); 2087 2088 ql_log(ql_log_warn, vha, 0x5048, 2089 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", 2090 type, comp_status, 2091 bsg_reply->reply_payload_rcv_len); 2092 } else { 2093 ql_log(ql_log_warn, vha, 0x5049, 2094 "CT pass-through-%s error comp_status=0x%x.\n", 2095 type, comp_status); 2096 res = DID_ERROR << 16; 2097 bsg_reply->reply_payload_rcv_len = 0; 2098 } 2099 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 2100 pkt, sizeof(*pkt)); 2101 } else { 2102 res = DID_OK << 16; 2103 bsg_reply->reply_payload_rcv_len = 2104 bsg_job->reply_payload.payload_len; 2105 bsg_job->reply_len = 0; 2106 } 2107 break; 2108 case SRB_CT_PTHRU_CMD: 2109 /* 2110 * borrowing sts_entry_24xx.comp_status. 2111 * same location as ct_entry_24xx.comp_status 2112 */ 2113 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 2114 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 2115 sp->name); 2116 break; 2117 } 2118 2119 sp->done(sp, res); 2120 } 2121 2122 static void 2123 qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req, 2124 struct sts_entry_24xx *pkt, int iocb_type) 2125 { 2126 struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; 2127 const char func[] = "ELS_CT_IOCB"; 2128 const char *type; 2129 srb_t *sp; 2130 struct bsg_job *bsg_job; 2131 struct fc_bsg_reply *bsg_reply; 2132 uint16_t comp_status; 2133 uint32_t fw_status[3]; 2134 int res, logit = 1; 2135 struct srb_iocb *els; 2136 uint n; 2137 scsi_qla_host_t *vha; 2138 struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx *)pkt; 2139 2140 sp = qla2x00_get_sp_from_handle(v, func, req, pkt); 2141 if (!sp) 2142 return; 2143 bsg_job = sp->u.bsg_job; 2144 vha = sp->vha; 2145 2146 type = NULL; 2147 2148 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 2149 fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1); 2150 fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2); 2151 2152 switch (sp->type) { 2153 case SRB_ELS_CMD_RPT: 2154 case SRB_ELS_CMD_HST: 2155 type = "rpt hst"; 2156 break; 2157 case SRB_ELS_CMD_HST_NOLOGIN: 2158 type = "els"; 2159 { 2160 struct els_entry_24xx *els = (void *)pkt; 2161 struct qla_bsg_auth_els_request *p = 2162 (struct qla_bsg_auth_els_request *)bsg_job->request; 2163 2164 ql_dbg(ql_dbg_user, vha, 0x700f, 2165 "%s %s. 
portid=%02x%02x%02x status %x xchg %x bsg ptr %p\n", 2166 __func__, sc_to_str(p->e.sub_cmd), 2167 e->d_id[2], e->d_id[1], e->d_id[0], 2168 comp_status, p->e.extra_rx_xchg_address, bsg_job); 2169 2170 if (!(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) { 2171 if (sp->remap.remapped) { 2172 n = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, 2173 bsg_job->reply_payload.sg_cnt, 2174 sp->remap.rsp.buf, 2175 sp->remap.rsp.len); 2176 ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x700e, 2177 "%s: SG copied %x of %x\n", 2178 __func__, n, sp->remap.rsp.len); 2179 } else { 2180 ql_dbg(ql_dbg_user, vha, 0x700f, 2181 "%s: NOT REMAPPED (error)...!!!\n", 2182 __func__); 2183 } 2184 } 2185 } 2186 break; 2187 case SRB_CT_CMD: 2188 type = "ct pass-through"; 2189 break; 2190 case SRB_ELS_DCMD: 2191 type = "Driver ELS logo"; 2192 if (iocb_type != ELS_IOCB_TYPE) { 2193 ql_dbg(ql_dbg_user, vha, 0x5047, 2194 "Completing %s: (%p) type=%d.\n", 2195 type, sp, sp->type); 2196 sp->done(sp, 0); 2197 return; 2198 } 2199 break; 2200 case SRB_CT_PTHRU_CMD: 2201 /* borrowing sts_entry_24xx.comp_status. 2202 same location as ct_entry_24xx.comp_status 2203 */ 2204 res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt, 2205 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 2206 sp->name); 2207 sp->done(sp, res); 2208 return; 2209 default: 2210 ql_dbg(ql_dbg_user, vha, 0x503e, 2211 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 2212 return; 2213 } 2214 2215 if (iocb_type == ELS_IOCB_TYPE) { 2216 els = &sp->u.iocb_cmd; 2217 els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); 2218 els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); 2219 els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); 2220 els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); 2221 if (comp_status == CS_COMPLETE) { 2222 res = DID_OK << 16; 2223 } else { 2224 if (comp_status == CS_DATA_UNDERRUN) { 2225 res = DID_OK << 16; 2226 els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( 2227 ese->total_byte_count)); 2228 2229 if (sp->remap.remapped && 2230 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_ACC) { 2231 ql_dbg(ql_dbg_user, vha, 0x503f, 2232 "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x", 2233 __func__, e->s_id[0], e->s_id[2], e->s_id[1], 2234 e->d_id[2], e->d_id[1], e->d_id[0]); 2235 logit = 0; 2236 } 2237 2238 } else if (comp_status == CS_PORT_LOGGED_OUT) { 2239 ql_dbg(ql_dbg_disc, vha, 0x911e, 2240 "%s %d schedule session deletion\n", 2241 __func__, __LINE__); 2242 2243 els->u.els_plogi.len = 0; 2244 res = DID_IMM_RETRY << 16; 2245 qlt_schedule_sess_for_deletion(sp->fcport); 2246 } else { 2247 els->u.els_plogi.len = 0; 2248 res = DID_ERROR << 16; 2249 } 2250 2251 if (sp->remap.remapped && 2252 ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) { 2253 if (logit) { 2254 ql_dbg(ql_dbg_user, vha, 0x503f, 2255 "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n", 2256 type, sp->handle, comp_status); 2257 2258 ql_dbg(ql_dbg_user, vha, 0x503f, 2259 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", 2260 fw_status[1], fw_status[2], 2261 le32_to_cpu(((struct els_sts_entry_24xx *) 2262 pkt)->total_byte_count), 2263 e->s_id[0], e->s_id[2], e->s_id[1], 2264 e->d_id[2], e->d_id[1], e->d_id[0]); 2265 } 2266 if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE && 2267 sp->type == SRB_ELS_CMD_HST_NOLOGIN) { 2268 ql_dbg(ql_dbg_edif, vha, 0x911e, 2269 "%s rcv reject. 
Sched delete\n", __func__); 2270 qlt_schedule_sess_for_deletion(sp->fcport); 2271 } 2272 } else if (logit) { 2273 ql_log(ql_log_info, vha, 0x503f, 2274 "%s IOCB Done hdl=%x comp_status=0x%x\n", 2275 type, sp->handle, comp_status); 2276 ql_log(ql_log_info, vha, 0x503f, 2277 "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", 2278 fw_status[1], fw_status[2], 2279 le32_to_cpu(((struct els_sts_entry_24xx *) 2280 pkt)->total_byte_count), 2281 e->s_id[0], e->s_id[2], e->s_id[1], 2282 e->d_id[2], e->d_id[1], e->d_id[0]); 2283 } 2284 } 2285 goto els_ct_done; 2286 } 2287 2288 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 2289 * fc payload to the caller 2290 */ 2291 bsg_job = sp->u.bsg_job; 2292 bsg_reply = bsg_job->reply; 2293 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 2294 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 2295 2296 if (comp_status != CS_COMPLETE) { 2297 if (comp_status == CS_DATA_UNDERRUN) { 2298 res = DID_OK << 16; 2299 bsg_reply->reply_payload_rcv_len = 2300 le32_to_cpu(ese->total_byte_count); 2301 2302 ql_dbg(ql_dbg_user, vha, 0x503f, 2303 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2304 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 2305 type, sp->handle, comp_status, fw_status[1], fw_status[2], 2306 le32_to_cpu(ese->total_byte_count)); 2307 } else { 2308 ql_dbg(ql_dbg_user, vha, 0x5040, 2309 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 2310 "error subcode 1=0x%x error subcode 2=0x%x.\n", 2311 type, sp->handle, comp_status, 2312 le32_to_cpu(ese->error_subcode_1), 2313 le32_to_cpu(ese->error_subcode_2)); 2314 res = DID_ERROR << 16; 2315 bsg_reply->reply_payload_rcv_len = 0; 2316 } 2317 memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), 2318 fw_status, sizeof(fw_status)); 2319 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 2320 pkt, sizeof(*pkt)); 2321 } 2322 else { 2323 res = DID_OK << 16; 2324 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 2325 bsg_job->reply_len = 0; 2326 } 2327 els_ct_done: 2328 2329 sp->done(sp, res); 2330 } 2331 2332 static void 2333 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 2334 struct logio_entry_24xx *logio) 2335 { 2336 const char func[] = "LOGIO-IOCB"; 2337 const char *type; 2338 fc_port_t *fcport; 2339 srb_t *sp; 2340 struct srb_iocb *lio; 2341 uint16_t *data; 2342 uint32_t iop[2]; 2343 int logit = 1; 2344 2345 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 2346 if (!sp) 2347 return; 2348 2349 lio = &sp->u.iocb_cmd; 2350 type = sp->name; 2351 fcport = sp->fcport; 2352 data = lio->u.logio.data; 2353 2354 data[0] = MBS_COMMAND_ERROR; 2355 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
2356 QLA_LOGIO_LOGIN_RETRIED : 0; 2357 if (logio->entry_status) { 2358 ql_log(ql_log_warn, fcport->vha, 0x5034, 2359 "Async-%s error entry - %8phC hdl=%x" 2360 "portid=%02x%02x%02x entry-status=%x.\n", 2361 type, fcport->port_name, sp->handle, fcport->d_id.b.domain, 2362 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2363 logio->entry_status); 2364 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 2365 logio, sizeof(*logio)); 2366 2367 goto logio_done; 2368 } 2369 2370 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 2371 ql_dbg(ql_dbg_async, sp->vha, 0x5036, 2372 "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", 2373 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2374 le32_to_cpu(logio->io_parameter[0])); 2375 2376 vha->hw->exch_starvation = 0; 2377 data[0] = MBS_COMMAND_COMPLETE; 2378 2379 if (sp->type == SRB_PRLI_CMD) { 2380 lio->u.logio.iop[0] = 2381 le32_to_cpu(logio->io_parameter[0]); 2382 lio->u.logio.iop[1] = 2383 le32_to_cpu(logio->io_parameter[1]); 2384 goto logio_done; 2385 } 2386 2387 if (sp->type != SRB_LOGIN_CMD) 2388 goto logio_done; 2389 2390 lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]); 2391 if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP) 2392 fcport->flags |= FCF_FCSP_DEVICE; 2393 2394 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2395 if (iop[0] & BIT_4) { 2396 fcport->port_type = FCT_TARGET; 2397 if (iop[0] & BIT_8) 2398 fcport->flags |= FCF_FCP2_DEVICE; 2399 } else if (iop[0] & BIT_5) 2400 fcport->port_type = FCT_INITIATOR; 2401 2402 if (iop[0] & BIT_7) 2403 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 2404 2405 if (logio->io_parameter[7] || logio->io_parameter[8]) 2406 fcport->supported_classes |= FC_COS_CLASS2; 2407 if (logio->io_parameter[9] || logio->io_parameter[10]) 2408 fcport->supported_classes |= FC_COS_CLASS3; 2409 2410 goto logio_done; 2411 } 2412 2413 iop[0] = le32_to_cpu(logio->io_parameter[0]); 2414 iop[1] = le32_to_cpu(logio->io_parameter[1]); 2415 lio->u.logio.iop[0] = iop[0]; 2416 lio->u.logio.iop[1] = iop[1]; 2417 switch (iop[0]) { 2418 case LSC_SCODE_PORTID_USED: 2419 data[0] = MBS_PORT_ID_USED; 2420 data[1] = LSW(iop[1]); 2421 logit = 0; 2422 break; 2423 case LSC_SCODE_NPORT_USED: 2424 data[0] = MBS_LOOP_ID_USED; 2425 logit = 0; 2426 break; 2427 case LSC_SCODE_CMD_FAILED: 2428 if (iop[1] == 0x0606) { 2429 /* 2430 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI, 2431 * Target side acked. 2432 */ 2433 data[0] = MBS_COMMAND_COMPLETE; 2434 goto logio_done; 2435 } 2436 data[0] = MBS_COMMAND_ERROR; 2437 break; 2438 case LSC_SCODE_NOXCB: 2439 vha->hw->exch_starvation++; 2440 if (vha->hw->exch_starvation > 5) { 2441 ql_log(ql_log_warn, vha, 0xd046, 2442 "Exchange starvation. 
Resetting RISC\n"); 2443 2444 vha->hw->exch_starvation = 0; 2445 2446 if (IS_P3P_TYPE(vha->hw)) 2447 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2448 else 2449 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2450 qla2xxx_wake_dpc(vha); 2451 } 2452 fallthrough; 2453 default: 2454 data[0] = MBS_COMMAND_ERROR; 2455 break; 2456 } 2457 2458 if (logit) 2459 ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: " 2460 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", 2461 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2462 le16_to_cpu(logio->comp_status), 2463 le32_to_cpu(logio->io_parameter[0]), 2464 le32_to_cpu(logio->io_parameter[1])); 2465 else 2466 ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: " 2467 "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", 2468 type, sp->handle, fcport->d_id.b24, fcport->port_name, 2469 le16_to_cpu(logio->comp_status), 2470 le32_to_cpu(logio->io_parameter[0]), 2471 le32_to_cpu(logio->io_parameter[1])); 2472 2473 logio_done: 2474 sp->done(sp, 0); 2475 } 2476 2477 static void 2478 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 2479 { 2480 const char func[] = "TMF-IOCB"; 2481 const char *type; 2482 fc_port_t *fcport; 2483 srb_t *sp; 2484 struct srb_iocb *iocb; 2485 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2486 u16 comp_status; 2487 2488 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 2489 if (!sp) 2490 return; 2491 2492 comp_status = le16_to_cpu(sts->comp_status); 2493 iocb = &sp->u.iocb_cmd; 2494 type = sp->name; 2495 fcport = sp->fcport; 2496 iocb->u.tmf.data = QLA_SUCCESS; 2497 2498 if (sts->entry_status) { 2499 ql_log(ql_log_warn, fcport->vha, 0x5038, 2500 "Async-%s error - hdl=%x entry-status(%x).\n", 2501 type, sp->handle, sts->entry_status); 2502 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2503 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 2504 ql_log(ql_log_warn, fcport->vha, 0x5039, 2505 "Async-%s error - hdl=%x completion status(%x).\n", 2506 type, sp->handle, comp_status); 2507 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2508 } else if ((le16_to_cpu(sts->scsi_status) & 2509 SS_RESPONSE_INFO_LEN_VALID)) { 2510 host_to_fcp_swap(sts->data, sizeof(sts->data)); 2511 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2512 ql_log(ql_log_warn, fcport->vha, 0x503b, 2513 "Async-%s error - hdl=%x not enough response(%d).\n", 2514 type, sp->handle, sts->rsp_data_len); 2515 } else if (sts->data[3]) { 2516 ql_log(ql_log_warn, fcport->vha, 0x503c, 2517 "Async-%s error - hdl=%x response(%x).\n", 2518 type, sp->handle, sts->data[3]); 2519 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 2520 } 2521 } 2522 2523 switch (comp_status) { 2524 case CS_PORT_LOGGED_OUT: 2525 case CS_PORT_CONFIG_CHG: 2526 case CS_PORT_BUSY: 2527 case CS_INCOMPLETE: 2528 case CS_PORT_UNAVAILABLE: 2529 case CS_TIMEOUT: 2530 case CS_RESET: 2531 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2532 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2533 "-Port to be marked lost on fcport=%02x%02x%02x, current port state= %s comp_status %x.\n", 2534 fcport->d_id.b.domain, fcport->d_id.b.area, 2535 fcport->d_id.b.al_pa, 2536 port_state_str[FCS_ONLINE], 2537 comp_status); 2538 2539 qlt_schedule_sess_for_deletion(fcport); 2540 } 2541 break; 2542 2543 default: 2544 break; 2545 } 2546 2547 if (iocb->u.tmf.data != QLA_SUCCESS) 2548 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, 2549 sts, sizeof(*sts)); 2550 2551 sp->done(sp, 0); 2552 } 2553 2554 static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct 
req_que *req, 2555 void *tsk, srb_t *sp) 2556 { 2557 fc_port_t *fcport; 2558 struct srb_iocb *iocb; 2559 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 2560 uint16_t state_flags; 2561 struct nvmefc_fcp_req *fd; 2562 uint16_t ret = QLA_SUCCESS; 2563 __le16 comp_status = sts->comp_status; 2564 int logit = 0; 2565 2566 iocb = &sp->u.iocb_cmd; 2567 fcport = sp->fcport; 2568 iocb->u.nvme.comp_status = comp_status; 2569 state_flags = le16_to_cpu(sts->state_flags); 2570 fd = iocb->u.nvme.desc; 2571 2572 if (unlikely(iocb->u.nvme.aen_op)) 2573 atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); 2574 else 2575 sp->qpair->cmd_completion_cnt++; 2576 2577 if (unlikely(comp_status != CS_COMPLETE)) 2578 logit = 1; 2579 2580 fd->transferred_length = fd->payload_length - 2581 le32_to_cpu(sts->residual_len); 2582 2583 /* 2584 * State flags: Bit 6 and 0. 2585 * If 0 is set, we don't care about 6. 2586 * both cases resp was dma'd to host buffer 2587 * if both are 0, that is good path case. 2588 * if six is set and 0 is clear, we need to 2589 * copy resp data from status iocb to resp buffer. 2590 */ 2591 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { 2592 iocb->u.nvme.rsp_pyld_len = 0; 2593 } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == 2594 (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { 2595 /* Response already DMA'd to fd->rspaddr. */ 2596 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2597 } else if ((state_flags & SF_FCP_RSP_DMA)) { 2598 /* 2599 * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this 2600 * as an error. 2601 */ 2602 iocb->u.nvme.rsp_pyld_len = 0; 2603 fd->transferred_length = 0; 2604 ql_dbg(ql_dbg_io, fcport->vha, 0x307a, 2605 "Unexpected values in NVMe_RSP IU.\n"); 2606 logit = 1; 2607 } else if (state_flags & SF_NVME_ERSP) { 2608 uint32_t *inbuf, *outbuf; 2609 uint16_t iter; 2610 2611 inbuf = (uint32_t *)&sts->nvme_ersp_data; 2612 outbuf = (uint32_t *)fd->rspaddr; 2613 iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; 2614 if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > 2615 sizeof(struct nvme_fc_ersp_iu))) { 2616 if (ql_mask_match(ql_dbg_io)) { 2617 WARN_ONCE(1, "Unexpected response payload length %u.\n", 2618 iocb->u.nvme.rsp_pyld_len); 2619 ql_log(ql_log_warn, fcport->vha, 0x5100, 2620 "Unexpected response payload length %u.\n", 2621 iocb->u.nvme.rsp_pyld_len); 2622 } 2623 iocb->u.nvme.rsp_pyld_len = 2624 cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); 2625 } 2626 iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; 2627 for (; iter; iter--) 2628 *outbuf++ = swab32(*inbuf++); 2629 } 2630 2631 if (state_flags & SF_NVME_ERSP) { 2632 struct nvme_fc_ersp_iu *rsp_iu = fd->rspaddr; 2633 u32 tgt_xfer_len; 2634 2635 tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); 2636 if (fd->transferred_length != tgt_xfer_len) { 2637 ql_log(ql_log_warn, fcport->vha, 0x3079, 2638 "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", 2639 tgt_xfer_len, fd->transferred_length); 2640 logit = 1; 2641 } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { 2642 /* 2643 * Do not log if this is just an underflow and there 2644 * is no data loss. 2645 */ 2646 logit = 0; 2647 } 2648 } 2649 2650 if (unlikely(logit)) 2651 ql_dbg(ql_dbg_io, fcport->vha, 0x5060, 2652 "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", 2653 sp->name, sp->handle, comp_status, 2654 fd->transferred_length, le32_to_cpu(sts->residual_len), 2655 sts->ox_id); 2656 2657 /* 2658 * If transport error then Failure (HBA rejects request) 2659 * otherwise transport will handle. 
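 *
 * In short (summarizing the switch below): CS_COMPLETE and
 * CS_DATA_UNDERRUN are left as QLA_SUCCESS, the port-loss statuses
 * (CS_RESET, CS_PORT_UNAVAILABLE, CS_PORT_LOGGED_OUT) schedule session
 * deletion and, together with CS_ABORTED and CS_PORT_BUSY, complete the
 * request as QLA_ABORTED; everything else returns QLA_FUNCTION_FAILED.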
2660 */ 2661 switch (le16_to_cpu(comp_status)) { 2662 case CS_COMPLETE: 2663 break; 2664 2665 case CS_RESET: 2666 case CS_PORT_UNAVAILABLE: 2667 case CS_PORT_LOGGED_OUT: 2668 fcport->nvme_flag |= NVME_FLAG_RESETTING; 2669 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2670 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2671 "Port to be marked lost on fcport=%06x, current " 2672 "port state= %s comp_status %x.\n", 2673 fcport->d_id.b24, port_state_str[FCS_ONLINE], 2674 comp_status); 2675 2676 qlt_schedule_sess_for_deletion(fcport); 2677 } 2678 fallthrough; 2679 case CS_ABORTED: 2680 case CS_PORT_BUSY: 2681 fd->transferred_length = 0; 2682 iocb->u.nvme.rsp_pyld_len = 0; 2683 ret = QLA_ABORTED; 2684 break; 2685 case CS_DATA_UNDERRUN: 2686 break; 2687 default: 2688 ret = QLA_FUNCTION_FAILED; 2689 break; 2690 } 2691 sp->done(sp, ret); 2692 } 2693 2694 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 2695 struct vp_ctrl_entry_24xx *vce) 2696 { 2697 const char func[] = "CTRLVP-IOCB"; 2698 srb_t *sp; 2699 int rval = QLA_SUCCESS; 2700 2701 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 2702 if (!sp) 2703 return; 2704 2705 if (vce->entry_status != 0) { 2706 ql_dbg(ql_dbg_vport, vha, 0x10c4, 2707 "%s: Failed to complete IOCB -- error status (%x)\n", 2708 sp->name, vce->entry_status); 2709 rval = QLA_FUNCTION_FAILED; 2710 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 2711 ql_dbg(ql_dbg_vport, vha, 0x10c5, 2712 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 2713 sp->name, le16_to_cpu(vce->comp_status), 2714 le16_to_cpu(vce->vp_idx_failed)); 2715 rval = QLA_FUNCTION_FAILED; 2716 } else { 2717 ql_dbg(ql_dbg_vport, vha, 0x10c6, 2718 "Done %s.\n", __func__); 2719 } 2720 2721 sp->rc = rval; 2722 sp->done(sp, rval); 2723 } 2724 2725 /* Process a single response queue entry. */ 2726 static void qla2x00_process_response_entry(struct scsi_qla_host *vha, 2727 struct rsp_que *rsp, 2728 sts_entry_t *pkt) 2729 { 2730 sts21_entry_t *sts21_entry; 2731 sts22_entry_t *sts22_entry; 2732 uint16_t handle_cnt; 2733 uint16_t cnt; 2734 2735 switch (pkt->entry_type) { 2736 case STATUS_TYPE: 2737 qla2x00_status_entry(vha, rsp, pkt); 2738 break; 2739 case STATUS_TYPE_21: 2740 sts21_entry = (sts21_entry_t *)pkt; 2741 handle_cnt = sts21_entry->handle_count; 2742 for (cnt = 0; cnt < handle_cnt; cnt++) 2743 qla2x00_process_completed_request(vha, rsp->req, 2744 sts21_entry->handle[cnt]); 2745 break; 2746 case STATUS_TYPE_22: 2747 sts22_entry = (sts22_entry_t *)pkt; 2748 handle_cnt = sts22_entry->handle_count; 2749 for (cnt = 0; cnt < handle_cnt; cnt++) 2750 qla2x00_process_completed_request(vha, rsp->req, 2751 sts22_entry->handle[cnt]); 2752 break; 2753 case STATUS_CONT_TYPE: 2754 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2755 break; 2756 case MBX_IOCB_TYPE: 2757 qla2x00_mbx_iocb_entry(vha, rsp->req, (struct mbx_entry *)pkt); 2758 break; 2759 case CT_IOCB_TYPE: 2760 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2761 break; 2762 default: 2763 /* Type Not Supported. */ 2764 ql_log(ql_log_warn, vha, 0x504a, 2765 "Received unknown response pkt type %x entry status=%x.\n", 2766 pkt->entry_type, pkt->entry_status); 2767 break; 2768 } 2769 } 2770 2771 /** 2772 * qla2x00_process_response_queue() - Process response queue entries. 
2773 * @rsp: response queue 2774 */ 2775 void 2776 qla2x00_process_response_queue(struct rsp_que *rsp) 2777 { 2778 struct scsi_qla_host *vha; 2779 struct qla_hw_data *ha = rsp->hw; 2780 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 2781 sts_entry_t *pkt; 2782 2783 vha = pci_get_drvdata(ha->pdev); 2784 2785 if (!vha->flags.online) 2786 return; 2787 2788 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2789 pkt = (sts_entry_t *)rsp->ring_ptr; 2790 2791 rsp->ring_index++; 2792 if (rsp->ring_index == rsp->length) { 2793 rsp->ring_index = 0; 2794 rsp->ring_ptr = rsp->ring; 2795 } else { 2796 rsp->ring_ptr++; 2797 } 2798 2799 if (pkt->entry_status != 0) { 2800 qla2x00_error_entry(vha, rsp, pkt); 2801 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2802 wmb(); 2803 continue; 2804 } 2805 2806 qla2x00_process_response_entry(vha, rsp, pkt); 2807 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2808 wmb(); 2809 } 2810 2811 /* Adjust ring index */ 2812 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 2813 } 2814 2815 static inline void 2816 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 2817 uint32_t sense_len, struct rsp_que *rsp, int res) 2818 { 2819 struct scsi_qla_host *vha = sp->vha; 2820 struct scsi_cmnd *cp = GET_CMD_SP(sp); 2821 uint32_t track_sense_len; 2822 2823 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 2824 sense_len = SCSI_SENSE_BUFFERSIZE; 2825 2826 SET_CMD_SENSE_LEN(sp, sense_len); 2827 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 2828 track_sense_len = sense_len; 2829 2830 if (sense_len > par_sense_len) 2831 sense_len = par_sense_len; 2832 2833 memcpy(cp->sense_buffer, sense_data, sense_len); 2834 2835 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 2836 track_sense_len -= sense_len; 2837 SET_CMD_SENSE_LEN(sp, track_sense_len); 2838 2839 if (track_sense_len != 0) { 2840 rsp->status_srb = sp; 2841 cp->result = res; 2842 } 2843 2844 if (sense_len) { 2845 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 2846 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 2847 sp->vha->host_no, cp->device->id, cp->device->lun, 2848 cp); 2849 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 2850 cp->sense_buffer, sense_len); 2851 } 2852 } 2853 2854 struct scsi_dif_tuple { 2855 __be16 guard; /* Checksum */ 2856 __be16 app_tag; /* APPL identifier */ 2857 __be32 ref_tag; /* Target LBA or indirect LBA */ 2858 }; 2859 2860 /* 2861 * Checks the guard or meta-data for the type of error 2862 * detected by the HBA. In case of errors, we set the 2863 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 2864 * to indicate to the kernel that the HBA detected error. 
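 * The guard, reference tag and application tag checks below report the
 * standard T10 additional sense codes (asc 0x10 with ascq 0x1, 0x3 and
 * 0x2 respectively) and set the host byte to DID_ABORT; sectors carrying
 * the application tag escape value are ignored and patched up instead of
 * being failed.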
2865 */ 2866 static inline int 2867 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 2868 { 2869 struct scsi_qla_host *vha = sp->vha; 2870 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 2871 uint8_t *ap = &sts24->data[12]; 2872 uint8_t *ep = &sts24->data[20]; 2873 uint32_t e_ref_tag, a_ref_tag; 2874 uint16_t e_app_tag, a_app_tag; 2875 uint16_t e_guard, a_guard; 2876 2877 /* 2878 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 2879 * would make guard field appear at offset 2 2880 */ 2881 a_guard = get_unaligned_le16(ap + 2); 2882 a_app_tag = get_unaligned_le16(ap + 0); 2883 a_ref_tag = get_unaligned_le32(ap + 4); 2884 e_guard = get_unaligned_le16(ep + 2); 2885 e_app_tag = get_unaligned_le16(ep + 0); 2886 e_ref_tag = get_unaligned_le32(ep + 4); 2887 2888 ql_dbg(ql_dbg_io, vha, 0x3023, 2889 "iocb(s) %p Returned STATUS.\n", sts24); 2890 2891 ql_dbg(ql_dbg_io, vha, 0x3024, 2892 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 2893 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 2894 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 2895 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 2896 a_app_tag, e_app_tag, a_guard, e_guard); 2897 2898 /* 2899 * Ignore sector if: 2900 * For type 3: ref & app tag is all 'f's 2901 * For type 0,1,2: app tag is all 'f's 2902 */ 2903 if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && 2904 (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || 2905 a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { 2906 uint32_t blocks_done, resid; 2907 sector_t lba_s = scsi_get_lba(cmd); 2908 2909 /* 2TB boundary case covered automatically with this */ 2910 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 2911 2912 resid = scsi_bufflen(cmd) - (blocks_done * 2913 cmd->device->sector_size); 2914 2915 scsi_set_resid(cmd, resid); 2916 cmd->result = DID_OK << 16; 2917 2918 /* Update protection tag */ 2919 if (scsi_prot_sg_count(cmd)) { 2920 uint32_t i, j = 0, k = 0, num_ent; 2921 struct scatterlist *sg; 2922 struct t10_pi_tuple *spt; 2923 2924 /* Patch the corresponding protection tags */ 2925 scsi_for_each_prot_sg(cmd, sg, 2926 scsi_prot_sg_count(cmd), i) { 2927 num_ent = sg_dma_len(sg) / 8; 2928 if (k + num_ent < blocks_done) { 2929 k += num_ent; 2930 continue; 2931 } 2932 j = blocks_done - k - 1; 2933 k = blocks_done; 2934 break; 2935 } 2936 2937 if (k != blocks_done) { 2938 ql_log(ql_log_warn, vha, 0x302f, 2939 "unexpected tag values tag:lba=%x:%llx)\n", 2940 e_ref_tag, (unsigned long long)lba_s); 2941 return 1; 2942 } 2943 2944 spt = page_address(sg_page(sg)) + sg->offset; 2945 spt += j; 2946 2947 spt->app_tag = T10_PI_APP_ESCAPE; 2948 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 2949 spt->ref_tag = T10_PI_REF_ESCAPE; 2950 } 2951 2952 return 0; 2953 } 2954 2955 /* check guard */ 2956 if (e_guard != a_guard) { 2957 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x1); 2958 set_host_byte(cmd, DID_ABORT); 2959 return 1; 2960 } 2961 2962 /* check ref tag */ 2963 if (e_ref_tag != a_ref_tag) { 2964 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x3); 2965 set_host_byte(cmd, DID_ABORT); 2966 return 1; 2967 } 2968 2969 /* check appl tag */ 2970 if (e_app_tag != a_app_tag) { 2971 scsi_build_sense(cmd, 1, ILLEGAL_REQUEST, 0x10, 0x2); 2972 set_host_byte(cmd, DID_ABORT); 2973 return 1; 2974 } 2975 2976 return 1; 2977 } 2978 2979 static void 2980 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 2981 struct req_que *req, uint32_t index) 2982 { 2983 struct qla_hw_data *ha = vha->hw; 2984 srb_t *sp; 2985 uint16_t comp_status; 2986 
uint16_t scsi_status; 2987 uint16_t thread_id; 2988 uint32_t rval = EXT_STATUS_OK; 2989 struct bsg_job *bsg_job = NULL; 2990 struct fc_bsg_request *bsg_request; 2991 struct fc_bsg_reply *bsg_reply; 2992 sts_entry_t *sts = pkt; 2993 struct sts_entry_24xx *sts24 = pkt; 2994 2995 /* Validate handle. */ 2996 if (index >= req->num_outstanding_cmds) { 2997 ql_log(ql_log_warn, vha, 0x70af, 2998 "Invalid SCSI completion handle 0x%x.\n", index); 2999 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3000 return; 3001 } 3002 3003 sp = req->outstanding_cmds[index]; 3004 if (!sp) { 3005 ql_log(ql_log_warn, vha, 0x70b0, 3006 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 3007 req->id, index); 3008 3009 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3010 return; 3011 } 3012 3013 /* Free outstanding command slot. */ 3014 req->outstanding_cmds[index] = NULL; 3015 bsg_job = sp->u.bsg_job; 3016 bsg_request = bsg_job->request; 3017 bsg_reply = bsg_job->reply; 3018 3019 if (IS_FWI2_CAPABLE(ha)) { 3020 comp_status = le16_to_cpu(sts24->comp_status); 3021 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 3022 } else { 3023 comp_status = le16_to_cpu(sts->comp_status); 3024 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 3025 } 3026 3027 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 3028 switch (comp_status) { 3029 case CS_COMPLETE: 3030 if (scsi_status == 0) { 3031 bsg_reply->reply_payload_rcv_len = 3032 bsg_job->reply_payload.payload_len; 3033 vha->qla_stats.input_bytes += 3034 bsg_reply->reply_payload_rcv_len; 3035 vha->qla_stats.input_requests++; 3036 rval = EXT_STATUS_OK; 3037 } 3038 goto done; 3039 3040 case CS_DATA_OVERRUN: 3041 ql_dbg(ql_dbg_user, vha, 0x70b1, 3042 "Command completed with data overrun thread_id=%d\n", 3043 thread_id); 3044 rval = EXT_STATUS_DATA_OVERRUN; 3045 break; 3046 3047 case CS_DATA_UNDERRUN: 3048 ql_dbg(ql_dbg_user, vha, 0x70b2, 3049 "Command completed with data underrun thread_id=%d\n", 3050 thread_id); 3051 rval = EXT_STATUS_DATA_UNDERRUN; 3052 break; 3053 case CS_BIDIR_RD_OVERRUN: 3054 ql_dbg(ql_dbg_user, vha, 0x70b3, 3055 "Command completed with read data overrun thread_id=%d\n", 3056 thread_id); 3057 rval = EXT_STATUS_DATA_OVERRUN; 3058 break; 3059 3060 case CS_BIDIR_RD_WR_OVERRUN: 3061 ql_dbg(ql_dbg_user, vha, 0x70b4, 3062 "Command completed with read and write data overrun " 3063 "thread_id=%d\n", thread_id); 3064 rval = EXT_STATUS_DATA_OVERRUN; 3065 break; 3066 3067 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 3068 ql_dbg(ql_dbg_user, vha, 0x70b5, 3069 "Command completed with read data over and write data " 3070 "underrun thread_id=%d\n", thread_id); 3071 rval = EXT_STATUS_DATA_OVERRUN; 3072 break; 3073 3074 case CS_BIDIR_RD_UNDERRUN: 3075 ql_dbg(ql_dbg_user, vha, 0x70b6, 3076 "Command completed with read data underrun " 3077 "thread_id=%d\n", thread_id); 3078 rval = EXT_STATUS_DATA_UNDERRUN; 3079 break; 3080 3081 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 3082 ql_dbg(ql_dbg_user, vha, 0x70b7, 3083 "Command completed with read data under and write data " 3084 "overrun thread_id=%d\n", thread_id); 3085 rval = EXT_STATUS_DATA_UNDERRUN; 3086 break; 3087 3088 case CS_BIDIR_RD_WR_UNDERRUN: 3089 ql_dbg(ql_dbg_user, vha, 0x70b8, 3090 "Command completed with read and write data underrun " 3091 "thread_id=%d\n", thread_id); 3092 rval = EXT_STATUS_DATA_UNDERRUN; 3093 break; 3094 3095 case CS_BIDIR_DMA: 3096 ql_dbg(ql_dbg_user, vha, 0x70b9, 3097 "Command completed with data DMA error thread_id=%d\n", 3098 thread_id); 3099 rval = EXT_STATUS_DMA_ERR; 3100 break; 3101 3102 
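/* Timeouts and any other completion status are reported back to the BSG layer as errors. */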
case CS_TIMEOUT: 3103 ql_dbg(ql_dbg_user, vha, 0x70ba, 3104 "Command completed with timeout thread_id=%d\n", 3105 thread_id); 3106 rval = EXT_STATUS_TIMEOUT; 3107 break; 3108 default: 3109 ql_dbg(ql_dbg_user, vha, 0x70bb, 3110 "Command completed with completion status=0x%x " 3111 "thread_id=%d\n", comp_status, thread_id); 3112 rval = EXT_STATUS_ERR; 3113 break; 3114 } 3115 bsg_reply->reply_payload_rcv_len = 0; 3116 3117 done: 3118 /* Return the vendor specific reply to API */ 3119 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 3120 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 3121 /* Always return DID_OK, bsg will send the vendor specific response 3122 * in this case only */ 3123 sp->done(sp, DID_OK << 16); 3124 3125 } 3126 3127 /** 3128 * qla2x00_status_entry() - Process a Status IOCB entry. 3129 * @vha: SCSI driver HA context 3130 * @rsp: response queue 3131 * @pkt: Entry pointer 3132 */ 3133 static void 3134 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 3135 { 3136 srb_t *sp; 3137 fc_port_t *fcport; 3138 struct scsi_cmnd *cp; 3139 sts_entry_t *sts = pkt; 3140 struct sts_entry_24xx *sts24 = pkt; 3141 uint16_t comp_status; 3142 uint16_t scsi_status; 3143 uint16_t ox_id; 3144 uint8_t lscsi_status; 3145 int32_t resid; 3146 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 3147 fw_resid_len; 3148 uint8_t *rsp_info, *sense_data; 3149 struct qla_hw_data *ha = vha->hw; 3150 uint32_t handle; 3151 uint16_t que; 3152 struct req_que *req; 3153 int logit = 1; 3154 int res = 0; 3155 uint16_t state_flags = 0; 3156 uint16_t sts_qual = 0; 3157 3158 if (IS_FWI2_CAPABLE(ha)) { 3159 comp_status = le16_to_cpu(sts24->comp_status); 3160 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 3161 state_flags = le16_to_cpu(sts24->state_flags); 3162 } else { 3163 comp_status = le16_to_cpu(sts->comp_status); 3164 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 3165 } 3166 handle = (uint32_t) LSW(sts->handle); 3167 que = MSW(sts->handle); 3168 req = ha->req_q_map[que]; 3169 3170 /* Check for invalid queue pointer */ 3171 if (req == NULL || 3172 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 3173 ql_dbg(ql_dbg_io, vha, 0x3059, 3174 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 3175 "que=%u.\n", sts->handle, req, que); 3176 return; 3177 } 3178 3179 /* Validate handle. */ 3180 if (handle < req->num_outstanding_cmds) { 3181 sp = req->outstanding_cmds[handle]; 3182 if (!sp) { 3183 ql_dbg(ql_dbg_io, vha, 0x3075, 3184 "%s(%ld): Already returned command for status handle (0x%x).\n", 3185 __func__, vha->host_no, sts->handle); 3186 return; 3187 } 3188 } else { 3189 ql_dbg(ql_dbg_io, vha, 0x3017, 3190 "Invalid status handle, out of range (0x%x).\n", 3191 sts->handle); 3192 3193 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 3194 if (IS_P3P_TYPE(ha)) 3195 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 3196 else 3197 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3198 qla2xxx_wake_dpc(vha); 3199 } 3200 return; 3201 } 3202 qla_put_fw_resources(sp->qpair, &sp->iores); 3203 3204 if (sp->cmd_type != TYPE_SRB) { 3205 req->outstanding_cmds[handle] = NULL; 3206 ql_dbg(ql_dbg_io, vha, 0x3015, 3207 "Unknown sp->cmd_type %x %p).\n", 3208 sp->cmd_type, sp); 3209 return; 3210 } 3211 3212 /* NVME completion. 
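 * SRB_NVME_CMD status entries are not decoded here; they are handed off
 * to qla24xx_nvme_iocb_entry(), which completes the request through the
 * FC-NVMe transport.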
*/ 3213 if (sp->type == SRB_NVME_CMD) { 3214 req->outstanding_cmds[handle] = NULL; 3215 qla24xx_nvme_iocb_entry(vha, req, pkt, sp); 3216 return; 3217 } 3218 3219 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 3220 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 3221 return; 3222 } 3223 3224 /* Task Management completion. */ 3225 if (sp->type == SRB_TM_CMD) { 3226 qla24xx_tm_iocb_entry(vha, req, pkt); 3227 return; 3228 } 3229 3230 /* Fast path completion. */ 3231 qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24); 3232 sp->qpair->cmd_completion_cnt++; 3233 3234 if (comp_status == CS_COMPLETE && scsi_status == 0) { 3235 qla2x00_process_completed_request(vha, req, handle); 3236 3237 return; 3238 } 3239 3240 req->outstanding_cmds[handle] = NULL; 3241 cp = GET_CMD_SP(sp); 3242 if (cp == NULL) { 3243 ql_dbg(ql_dbg_io, vha, 0x3018, 3244 "Command already returned (0x%x/%p).\n", 3245 sts->handle, sp); 3246 3247 return; 3248 } 3249 3250 lscsi_status = scsi_status & STATUS_MASK; 3251 3252 fcport = sp->fcport; 3253 3254 ox_id = 0; 3255 sense_len = par_sense_len = rsp_info_len = resid_len = 3256 fw_resid_len = 0; 3257 if (IS_FWI2_CAPABLE(ha)) { 3258 if (scsi_status & SS_SENSE_LEN_VALID) 3259 sense_len = le32_to_cpu(sts24->sense_len); 3260 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3261 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 3262 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 3263 resid_len = le32_to_cpu(sts24->rsp_residual_count); 3264 if (comp_status == CS_DATA_UNDERRUN) 3265 fw_resid_len = le32_to_cpu(sts24->residual_len); 3266 rsp_info = sts24->data; 3267 sense_data = sts24->data; 3268 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 3269 ox_id = le16_to_cpu(sts24->ox_id); 3270 par_sense_len = sizeof(sts24->data); 3271 sts_qual = le16_to_cpu(sts24->status_qualifier); 3272 } else { 3273 if (scsi_status & SS_SENSE_LEN_VALID) 3274 sense_len = le16_to_cpu(sts->req_sense_length); 3275 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 3276 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 3277 resid_len = le32_to_cpu(sts->residual_length); 3278 rsp_info = sts->rsp_info; 3279 sense_data = sts->req_sense_data; 3280 par_sense_len = sizeof(sts->req_sense_data); 3281 } 3282 3283 /* Check for any FCP transport errors. */ 3284 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 3285 /* Sense data lies beyond any FCP RESPONSE data. */ 3286 if (IS_FWI2_CAPABLE(ha)) { 3287 sense_data += rsp_info_len; 3288 par_sense_len -= rsp_info_len; 3289 } 3290 if (rsp_info_len > 3 && rsp_info[3]) { 3291 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 3292 "FCP I/O protocol failure (0x%x/0x%x).\n", 3293 rsp_info_len, rsp_info[3]); 3294 3295 res = DID_BUS_BUSY << 16; 3296 goto out; 3297 } 3298 } 3299 3300 /* Check for overrun. */ 3301 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 3302 scsi_status & SS_RESIDUAL_OVER) 3303 comp_status = CS_DATA_OVERRUN; 3304 3305 /* 3306 * Check retry_delay_timer value if we receive a busy or 3307 * queue full. 
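 * The status qualifier carries the firmware-reported FCP retry delay;
 * qla2x00_set_retry_delay_timestamp() is expected to record it on the
 * fcport so subsequent commands can be held off for that interval.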
3308 */ 3309 if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL || 3310 lscsi_status == SAM_STAT_BUSY)) 3311 qla2x00_set_retry_delay_timestamp(fcport, sts_qual); 3312 3313 /* 3314 * Based on Host and scsi status generate status code for Linux 3315 */ 3316 switch (comp_status) { 3317 case CS_COMPLETE: 3318 case CS_QUEUE_FULL: 3319 if (scsi_status == 0) { 3320 res = DID_OK << 16; 3321 break; 3322 } 3323 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 3324 resid = resid_len; 3325 scsi_set_resid(cp, resid); 3326 3327 if (!lscsi_status && 3328 ((unsigned)(scsi_bufflen(cp) - resid) < 3329 cp->underflow)) { 3330 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 3331 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3332 resid, scsi_bufflen(cp)); 3333 3334 res = DID_ERROR << 16; 3335 break; 3336 } 3337 } 3338 res = DID_OK << 16 | lscsi_status; 3339 3340 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3341 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 3342 "QUEUE FULL detected.\n"); 3343 break; 3344 } 3345 logit = 0; 3346 if (lscsi_status != SS_CHECK_CONDITION) 3347 break; 3348 3349 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3350 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3351 break; 3352 3353 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 3354 rsp, res); 3355 break; 3356 3357 case CS_DATA_UNDERRUN: 3358 /* Use F/W calculated residual length. */ 3359 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 3360 scsi_set_resid(cp, resid); 3361 if (scsi_status & SS_RESIDUAL_UNDER) { 3362 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 3363 ql_log(ql_log_warn, fcport->vha, 0x301d, 3364 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3365 resid, scsi_bufflen(cp)); 3366 3367 res = DID_ERROR << 16 | lscsi_status; 3368 goto check_scsi_status; 3369 } 3370 3371 if (!lscsi_status && 3372 ((unsigned)(scsi_bufflen(cp) - resid) < 3373 cp->underflow)) { 3374 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 3375 "Mid-layer underflow detected (0x%x of 0x%x bytes).\n", 3376 resid, scsi_bufflen(cp)); 3377 3378 res = DID_ERROR << 16; 3379 break; 3380 } 3381 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 3382 lscsi_status != SAM_STAT_BUSY) { 3383 /* 3384 * scsi status of task set and busy are considered to be 3385 * task not completed. 3386 */ 3387 3388 ql_log(ql_log_warn, fcport->vha, 0x301f, 3389 "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", 3390 resid, scsi_bufflen(cp)); 3391 3392 vha->interface_err_cnt++; 3393 3394 res = DID_ERROR << 16 | lscsi_status; 3395 goto check_scsi_status; 3396 } else { 3397 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 3398 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 3399 scsi_status, lscsi_status); 3400 } 3401 3402 res = DID_OK << 16 | lscsi_status; 3403 logit = 0; 3404 3405 check_scsi_status: 3406 /* 3407 * Check to see if SCSI Status is non zero. If so report SCSI 3408 * Status. 
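 * Note that check_scsi_status is also reached via goto from the
 * underrun handling above, so check-condition sense data is processed
 * in one place.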
3409 */ 3410 if (lscsi_status != 0) { 3411 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 3412 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 3413 "QUEUE FULL detected.\n"); 3414 logit = 1; 3415 break; 3416 } 3417 if (lscsi_status != SS_CHECK_CONDITION) 3418 break; 3419 3420 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 3421 if (!(scsi_status & SS_SENSE_LEN_VALID)) 3422 break; 3423 3424 qla2x00_handle_sense(sp, sense_data, par_sense_len, 3425 sense_len, rsp, res); 3426 } 3427 break; 3428 3429 case CS_PORT_LOGGED_OUT: 3430 case CS_PORT_CONFIG_CHG: 3431 case CS_PORT_BUSY: 3432 case CS_INCOMPLETE: 3433 case CS_PORT_UNAVAILABLE: 3434 case CS_TIMEOUT: 3435 case CS_RESET: 3436 case CS_EDIF_INV_REQ: 3437 3438 /* 3439 * We are going to have the fc class block the rport 3440 * while we try to recover so instruct the mid layer 3441 * to requeue until the class decides how to handle this. 3442 */ 3443 res = DID_TRANSPORT_DISRUPTED << 16; 3444 3445 if (comp_status == CS_TIMEOUT) { 3446 if (IS_FWI2_CAPABLE(ha)) 3447 break; 3448 else if ((le16_to_cpu(sts->status_flags) & 3449 SF_LOGOUT_SENT) == 0) 3450 break; 3451 } 3452 3453 if (atomic_read(&fcport->state) == FCS_ONLINE) { 3454 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 3455 "Port to be marked lost on fcport=%02x%02x%02x, current " 3456 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 3457 fcport->d_id.b.area, fcport->d_id.b.al_pa, 3458 port_state_str[FCS_ONLINE], 3459 comp_status); 3460 3461 qlt_schedule_sess_for_deletion(fcport); 3462 } 3463 3464 break; 3465 3466 case CS_ABORTED: 3467 res = DID_RESET << 16; 3468 break; 3469 3470 case CS_DIF_ERROR: 3471 logit = qla2x00_handle_dif_error(sp, sts24); 3472 res = cp->result; 3473 break; 3474 3475 case CS_TRANSPORT: 3476 res = DID_ERROR << 16; 3477 vha->hw_err_cnt++; 3478 3479 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 3480 break; 3481 3482 if (state_flags & BIT_4) 3483 scmd_printk(KERN_WARNING, cp, 3484 "Unsupported device '%s' found.\n", 3485 cp->device->vendor); 3486 break; 3487 3488 case CS_DMA: 3489 ql_log(ql_log_info, fcport->vha, 0x3022, 3490 "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3491 comp_status, scsi_status, res, vha->host_no, 3492 cp->device->id, cp->device->lun, fcport->d_id.b24, 3493 ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3494 resid_len, fw_resid_len, sp, cp); 3495 ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, 3496 pkt, sizeof(*sts24)); 3497 res = DID_ERROR << 16; 3498 vha->hw_err_cnt++; 3499 break; 3500 default: 3501 res = DID_ERROR << 16; 3502 break; 3503 } 3504 3505 out: 3506 if (logit) 3507 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 3508 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 3509 comp_status, scsi_status, res, vha->host_no, 3510 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 3511 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 3512 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 3513 resid_len, fw_resid_len, sp, cp); 3514 3515 if (rsp->status_srb == NULL) 3516 sp->done(sp, res); 3517 } 3518 3519 /** 3520 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 3521 * @rsp: response queue 3522 * @pkt: Entry pointer 3523 * 3524 * Extended sense data. 
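 * Copies additional sense bytes from status continuation entries into
 * the command's sense buffer until GET_CMD_SENSE_LEN() is exhausted, at
 * which point the command is completed with the previously saved result.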
3525 */ 3526 static void 3527 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 3528 { 3529 uint8_t sense_sz = 0; 3530 struct qla_hw_data *ha = rsp->hw; 3531 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 3532 srb_t *sp = rsp->status_srb; 3533 struct scsi_cmnd *cp; 3534 uint32_t sense_len; 3535 uint8_t *sense_ptr; 3536 3537 if (!sp || !GET_CMD_SENSE_LEN(sp)) 3538 return; 3539 3540 sense_len = GET_CMD_SENSE_LEN(sp); 3541 sense_ptr = GET_CMD_SENSE_PTR(sp); 3542 3543 cp = GET_CMD_SP(sp); 3544 if (cp == NULL) { 3545 ql_log(ql_log_warn, vha, 0x3025, 3546 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 3547 3548 rsp->status_srb = NULL; 3549 return; 3550 } 3551 3552 if (sense_len > sizeof(pkt->data)) 3553 sense_sz = sizeof(pkt->data); 3554 else 3555 sense_sz = sense_len; 3556 3557 /* Move sense data. */ 3558 if (IS_FWI2_CAPABLE(ha)) 3559 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 3560 memcpy(sense_ptr, pkt->data, sense_sz); 3561 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 3562 sense_ptr, sense_sz); 3563 3564 sense_len -= sense_sz; 3565 sense_ptr += sense_sz; 3566 3567 SET_CMD_SENSE_PTR(sp, sense_ptr); 3568 SET_CMD_SENSE_LEN(sp, sense_len); 3569 3570 /* Place command on done queue. */ 3571 if (sense_len == 0) { 3572 rsp->status_srb = NULL; 3573 sp->done(sp, cp->result); 3574 } 3575 } 3576 3577 /** 3578 * qla2x00_error_entry() - Process an error entry. 3579 * @vha: SCSI driver HA context 3580 * @rsp: response queue 3581 * @pkt: Entry pointer 3582 * return : 1=allow further error analysis. 0=no additional error analysis. 3583 */ 3584 static int 3585 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 3586 { 3587 srb_t *sp; 3588 struct qla_hw_data *ha = vha->hw; 3589 const char func[] = "ERROR-IOCB"; 3590 uint16_t que = MSW(pkt->handle); 3591 struct req_que *req = NULL; 3592 int res = DID_ERROR << 16; 3593 3594 ql_dbg(ql_dbg_async, vha, 0x502a, 3595 "iocb type %xh with error status %xh, handle %xh, rspq id %d\n", 3596 pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id); 3597 3598 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 3599 goto fatal; 3600 3601 req = ha->req_q_map[que]; 3602 3603 if (pkt->entry_status & RF_BUSY) 3604 res = DID_BUS_BUSY << 16; 3605 3606 if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE) 3607 return 0; 3608 3609 switch (pkt->entry_type) { 3610 case NOTIFY_ACK_TYPE: 3611 case STATUS_TYPE: 3612 case STATUS_CONT_TYPE: 3613 case LOGINOUT_PORT_IOCB_TYPE: 3614 case CT_IOCB_TYPE: 3615 case ELS_IOCB_TYPE: 3616 case ABORT_IOCB_TYPE: 3617 case MBX_IOCB_TYPE: 3618 default: 3619 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 3620 if (sp) { 3621 sp->done(sp, res); 3622 return 0; 3623 } 3624 break; 3625 3626 case SA_UPDATE_IOCB_TYPE: 3627 case ABTS_RESP_24XX: 3628 case CTIO_TYPE7: 3629 case CTIO_CRC2: 3630 return 1; 3631 } 3632 fatal: 3633 ql_log(ql_log_warn, vha, 0x5030, 3634 "Error entry - invalid handle/queue (%04x).\n", que); 3635 return 0; 3636 } 3637 3638 /** 3639 * qla24xx_mbx_completion() - Process mailbox command completions. 3640 * @vha: SCSI driver HA context 3641 * @mb0: Mailbox0 register 3642 */ 3643 static void 3644 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 3645 { 3646 uint16_t cnt; 3647 uint32_t mboxes; 3648 __le16 __iomem *wptr; 3649 struct qla_hw_data *ha = vha->hw; 3650 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 3651 3652 /* Read all mbox registers? 
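 * mboxes is a bitmask of mailbox registers to collect: bit N set means
 * mailbox N holds valid data. By default all ha->mbx_count registers are
 * read; when a mailbox command is in flight, ha->mcp->in_mb narrows the
 * mask (for example, an in_mb of MBX_1|MBX_0 would read only mailboxes 0
 * and 1; illustrative values only, not tied to a specific command).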
*/
3653 WARN_ON_ONCE(ha->mbx_count > 32);
3654 mboxes = (1ULL << ha->mbx_count) - 1;
3655 if (!ha->mcp)
3656 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
3657 else
3658 mboxes = ha->mcp->in_mb;
3659
3660 /* Load return mailbox registers. */
3661 ha->flags.mbox_int = 1;
3662 ha->mailbox_out[0] = mb0;
3663 mboxes >>= 1;
3664 wptr = &reg->mailbox1;
3665
3666 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3667 if (mboxes & BIT_0)
3668 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3669
3670 mboxes >>= 1;
3671 wptr++;
3672 }
3673 }
3674
3675 static void
3676 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
3677 struct abort_entry_24xx *pkt)
3678 {
3679 const char func[] = "ABT_IOCB";
3680 srb_t *sp;
3681 srb_t *orig_sp = NULL;
3682 struct srb_iocb *abt;
3683
3684 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3685 if (!sp)
3686 return;
3687
3688 abt = &sp->u.iocb_cmd;
3689 abt->u.abt.comp_status = pkt->comp_status;
3690 orig_sp = sp->cmd_sp;
3691 /* Need to pass the original sp along for abort completion handling. */
3692 if (orig_sp)
3693 qla_nvme_abort_process_comp_status(pkt, orig_sp);
3694
3695 sp->done(sp, 0);
3696 }
3697
3698 void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
3699 struct pt_ls4_request *pkt, struct req_que *req)
3700 {
3701 srb_t *sp;
3702 const char func[] = "LS4_IOCB";
3703 uint16_t comp_status;
3704
3705 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3706 if (!sp)
3707 return;
3708
3709 comp_status = le16_to_cpu(pkt->status);
3710 sp->done(sp, comp_status);
3711 }
3712
3713 /**
3714 * qla_chk_cont_iocb_avail - check that all continuation IOCBs are available
3715 * before IOCB processing can start.
3716 * @vha: host adapter pointer
3717 * @rsp: response queue
3718 * @pkt: head IOCB describing how many continuation IOCBs follow
3719 * Return: 0 if all IOCBs have arrived, -EIO if they have not.
3720 */
3721 static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
3722 struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
3723 {
3724 int start_pkt_ring_index;
3725 u32 iocb_cnt = 0;
3726 int rc = 0;
3727
3728 if (pkt->entry_count == 1)
3729 return rc;
3730
3731 /* ring_index was pre-incremented; set it back to the current pkt. */
3732 if (rsp->ring_index == 0)
3733 start_pkt_ring_index = rsp->length - 1;
3734 else
3735 start_pkt_ring_index = rsp->ring_index - 1;
3736
3737 if (rsp_q_in < start_pkt_ring_index)
3738 /* q in ptr is wrapped */
3739 iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
3740 else
3741 iocb_cnt = rsp_q_in - start_pkt_ring_index;
3742
3743 if (iocb_cnt < pkt->entry_count)
3744 rc = -EIO;
3745
3746 ql_dbg(ql_dbg_init, vha, 0x5091,
3747 "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
3748 __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
3749
3750 return rc;
3751 }
3752
3753 /**
3754 * qla24xx_process_response_queue() - Process response queue entries.
3755 * @vha: SCSI driver HA context 3756 * @rsp: response queue 3757 */ 3758 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 3759 struct rsp_que *rsp) 3760 { 3761 struct sts_entry_24xx *pkt; 3762 struct qla_hw_data *ha = vha->hw; 3763 struct purex_entry_24xx *purex_entry; 3764 struct purex_item *pure_item; 3765 u16 rsp_in = 0, cur_ring_index; 3766 int is_shadow_hba; 3767 3768 if (!ha->flags.fw_started) 3769 return; 3770 3771 if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { 3772 rsp->qpair->rcv_intr = 1; 3773 3774 if (!rsp->qpair->cpu_mapped) 3775 qla_cpu_update(rsp->qpair, raw_smp_processor_id()); 3776 } 3777 3778 #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \ 3779 do { \ 3780 _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \ 3781 rd_reg_dword_relaxed((_rsp)->rsp_q_in); \ 3782 } while (0) 3783 3784 is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha); 3785 3786 __update_rsp_in(is_shadow_hba, rsp, rsp_in); 3787 3788 while (rsp->ring_index != rsp_in && 3789 rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 3790 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 3791 cur_ring_index = rsp->ring_index; 3792 3793 rsp->ring_index++; 3794 if (rsp->ring_index == rsp->length) { 3795 rsp->ring_index = 0; 3796 rsp->ring_ptr = rsp->ring; 3797 } else { 3798 rsp->ring_ptr++; 3799 } 3800 3801 if (pkt->entry_status != 0) { 3802 if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt)) 3803 goto process_err; 3804 3805 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 3806 wmb(); 3807 continue; 3808 } 3809 process_err: 3810 3811 switch (pkt->entry_type) { 3812 case STATUS_TYPE: 3813 qla2x00_status_entry(vha, rsp, pkt); 3814 break; 3815 case STATUS_CONT_TYPE: 3816 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 3817 break; 3818 case VP_RPT_ID_IOCB_TYPE: 3819 qla24xx_report_id_acquisition(vha, 3820 (struct vp_rpt_id_entry_24xx *)pkt); 3821 break; 3822 case LOGINOUT_PORT_IOCB_TYPE: 3823 qla24xx_logio_entry(vha, rsp->req, 3824 (struct logio_entry_24xx *)pkt); 3825 break; 3826 case CT_IOCB_TYPE: 3827 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 3828 break; 3829 case ELS_IOCB_TYPE: 3830 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 3831 break; 3832 case ABTS_RECV_24XX: 3833 if (qla_ini_mode_enabled(vha)) { 3834 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3835 if (!pure_item) 3836 break; 3837 qla24xx_queue_purex_item(vha, pure_item, 3838 qla24xx_process_abts); 3839 break; 3840 } 3841 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || 3842 IS_QLA28XX(ha)) { 3843 /* ensure that the ATIO queue is empty */ 3844 qlt_handle_abts_recv(vha, rsp, 3845 (response_t *)pkt); 3846 break; 3847 } else { 3848 qlt_24xx_process_atio_queue(vha, 1); 3849 } 3850 fallthrough; 3851 case ABTS_RESP_24XX: 3852 case CTIO_TYPE7: 3853 case CTIO_CRC2: 3854 qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt); 3855 break; 3856 case PT_LS4_REQUEST: 3857 qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt, 3858 rsp->req); 3859 break; 3860 case NOTIFY_ACK_TYPE: 3861 if (pkt->handle == QLA_TGT_SKIP_HANDLE) 3862 qlt_response_pkt_all_vps(vha, rsp, 3863 (response_t *)pkt); 3864 else 3865 qla24xxx_nack_iocb_entry(vha, rsp->req, 3866 (struct nack_to_isp *)pkt); 3867 break; 3868 case MARKER_TYPE: 3869 /* Do nothing in this case, this check is to prevent it 3870 * from falling into default case 3871 */ 3872 break; 3873 case ABORT_IOCB_TYPE: 3874 qla24xx_abort_iocb_entry(vha, rsp->req, 3875 (struct abort_entry_24xx *)pkt); 3876 break; 3877 case MBX_IOCB_TYPE: 3878 qla24xx_mbx_iocb_entry(vha, rsp->req, 
3879 (struct mbx_24xx_entry *)pkt); 3880 break; 3881 case VP_CTRL_IOCB_TYPE: 3882 qla_ctrlvp_completed(vha, rsp->req, 3883 (struct vp_ctrl_entry_24xx *)pkt); 3884 break; 3885 case PUREX_IOCB_TYPE: 3886 purex_entry = (void *)pkt; 3887 switch (purex_entry->els_frame_payload[3]) { 3888 case ELS_RDP: 3889 pure_item = qla24xx_copy_std_pkt(vha, pkt); 3890 if (!pure_item) 3891 break; 3892 qla24xx_queue_purex_item(vha, pure_item, 3893 qla24xx_process_purex_rdp); 3894 break; 3895 case ELS_FPIN: 3896 if (!vha->hw->flags.scm_enabled) { 3897 ql_log(ql_log_warn, vha, 0x5094, 3898 "SCM not active for this port\n"); 3899 break; 3900 } 3901 pure_item = qla27xx_copy_fpin_pkt(vha, 3902 (void **)&pkt, &rsp); 3903 __update_rsp_in(is_shadow_hba, rsp, rsp_in); 3904 if (!pure_item) 3905 break; 3906 qla24xx_queue_purex_item(vha, pure_item, 3907 qla27xx_process_purex_fpin); 3908 break; 3909 3910 case ELS_AUTH_ELS: 3911 if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *)pkt, rsp_in)) { 3912 /* 3913 * ring_ptr and ring_index were 3914 * pre-incremented above. Reset them 3915 * back to current. Wait for next 3916 * interrupt with all IOCBs to arrive 3917 * and re-process. 3918 */ 3919 rsp->ring_ptr = (response_t *)pkt; 3920 rsp->ring_index = cur_ring_index; 3921 3922 ql_dbg(ql_dbg_init, vha, 0x5091, 3923 "Defer processing ELS opcode %#x...\n", 3924 purex_entry->els_frame_payload[3]); 3925 return; 3926 } 3927 qla24xx_auth_els(vha, (void **)&pkt, &rsp); 3928 break; 3929 default: 3930 ql_log(ql_log_warn, vha, 0x509c, 3931 "Discarding ELS Request opcode 0x%x\n", 3932 purex_entry->els_frame_payload[3]); 3933 } 3934 break; 3935 case SA_UPDATE_IOCB_TYPE: 3936 qla28xx_sa_update_iocb_entry(vha, rsp->req, 3937 (struct sa_update_28xx *)pkt); 3938 break; 3939 3940 default: 3941 /* Type Not Supported. 
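			 * Unknown entry types are only logged; the entry is
			 * still marked RESPONSE_PROCESSED below so a single
			 * unrecognized IOCB does not stall response-queue
			 * processing.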
			 */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type 0x%x entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;

		wrt_reg_dword(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index);
	}
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		return;

	rval = QLA_SUCCESS;
	wrt_reg_dword(&reg->iobase_addr, 0x7C00);
	rd_reg_dword(&reg->iobase_addr);
	wrt_reg_dword(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			wrt_reg_dword(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	wrt_reg_dword(&reg->iobase_window, 0x0003);
	for (cnt = 100; (rd_reg_dword(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			wrt_reg_dword(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (rd_reg_dword(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	wrt_reg_dword(&reg->iobase_window, 0x0000);
	rd_reg_dword(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
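 *
 * Polls the host status register for up to 50 events per invocation,
 * dispatching mailbox command completions, asynchronous events and
 * response/ATIO queue updates, and clearing the RISC interrupt after each
 * one. A paused RISC triggers a firmware dump and an ISP abort.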
4019 */ 4020 irqreturn_t 4021 qla24xx_intr_handler(int irq, void *dev_id) 4022 { 4023 scsi_qla_host_t *vha; 4024 struct qla_hw_data *ha; 4025 struct device_reg_24xx __iomem *reg; 4026 int status; 4027 unsigned long iter; 4028 uint32_t stat; 4029 uint32_t hccr; 4030 uint16_t mb[8]; 4031 struct rsp_que *rsp; 4032 unsigned long flags; 4033 bool process_atio = false; 4034 4035 rsp = (struct rsp_que *) dev_id; 4036 if (!rsp) { 4037 ql_log(ql_log_info, NULL, 0x5059, 4038 "%s: NULL response queue pointer.\n", __func__); 4039 return IRQ_NONE; 4040 } 4041 4042 ha = rsp->hw; 4043 reg = &ha->iobase->isp24; 4044 status = 0; 4045 4046 if (unlikely(pci_channel_offline(ha->pdev))) 4047 return IRQ_HANDLED; 4048 4049 spin_lock_irqsave(&ha->hardware_lock, flags); 4050 vha = pci_get_drvdata(ha->pdev); 4051 for (iter = 50; iter--; ) { 4052 stat = rd_reg_dword(®->host_status); 4053 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 4054 break; 4055 if (stat & HSRX_RISC_PAUSED) { 4056 if (unlikely(pci_channel_offline(ha->pdev))) 4057 break; 4058 4059 hccr = rd_reg_dword(®->hccr); 4060 4061 ql_log(ql_log_warn, vha, 0x504b, 4062 "RISC paused -- HCCR=%x, Dumping firmware.\n", 4063 hccr); 4064 4065 qla2xxx_check_risc_status(vha); 4066 4067 ha->isp_ops->fw_dump(vha); 4068 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 4069 break; 4070 } else if ((stat & HSRX_RISC_INT) == 0) 4071 break; 4072 4073 switch (stat & 0xff) { 4074 case INTR_ROM_MB_SUCCESS: 4075 case INTR_ROM_MB_FAILED: 4076 case INTR_MB_SUCCESS: 4077 case INTR_MB_FAILED: 4078 qla24xx_mbx_completion(vha, MSW(stat)); 4079 status |= MBX_INTERRUPT; 4080 4081 break; 4082 case INTR_ASYNC_EVENT: 4083 mb[0] = MSW(stat); 4084 mb[1] = rd_reg_word(®->mailbox1); 4085 mb[2] = rd_reg_word(®->mailbox2); 4086 mb[3] = rd_reg_word(®->mailbox3); 4087 qla2x00_async_event(vha, rsp, mb); 4088 break; 4089 case INTR_RSP_QUE_UPDATE: 4090 case INTR_RSP_QUE_UPDATE_83XX: 4091 qla24xx_process_response_queue(vha, rsp); 4092 break; 4093 case INTR_ATIO_QUE_UPDATE_27XX: 4094 case INTR_ATIO_QUE_UPDATE: 4095 process_atio = true; 4096 break; 4097 case INTR_ATIO_RSP_QUE_UPDATE: 4098 process_atio = true; 4099 qla24xx_process_response_queue(vha, rsp); 4100 break; 4101 default: 4102 ql_dbg(ql_dbg_async, vha, 0x504f, 4103 "Unrecognized interrupt type (%d).\n", stat * 0xff); 4104 break; 4105 } 4106 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 4107 rd_reg_dword_relaxed(®->hccr); 4108 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 4109 ndelay(3500); 4110 } 4111 qla2x00_handle_mbx_completion(ha, status); 4112 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4113 4114 if (process_atio) { 4115 spin_lock_irqsave(&ha->tgt.atio_lock, flags); 4116 qlt_24xx_process_atio_queue(vha, 0); 4117 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags); 4118 } 4119 4120 return IRQ_HANDLED; 4121 } 4122 4123 static irqreturn_t 4124 qla24xx_msix_rsp_q(int irq, void *dev_id) 4125 { 4126 struct qla_hw_data *ha; 4127 struct rsp_que *rsp; 4128 struct device_reg_24xx __iomem *reg; 4129 struct scsi_qla_host *vha; 4130 unsigned long flags; 4131 4132 rsp = (struct rsp_que *) dev_id; 4133 if (!rsp) { 4134 ql_log(ql_log_info, NULL, 0x505a, 4135 "%s: NULL response queue pointer.\n", __func__); 4136 return IRQ_NONE; 4137 } 4138 ha = rsp->hw; 4139 reg = &ha->iobase->isp24; 4140 4141 spin_lock_irqsave(&ha->hardware_lock, flags); 4142 4143 vha = pci_get_drvdata(ha->pdev); 4144 qla24xx_process_response_queue(vha, rsp); 4145 if (!ha->flags.disable_msix_handshake) { 4146 wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); 4147 
		rd_reg_dword_relaxed(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;
	bool process_atio = false;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = rd_reg_dword(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = rd_reg_dword(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);
			vha->hw_err_cnt++;

			ha->isp_ops->fw_dump(vha);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = rd_reg_word(&reg->mailbox1);
			mb[2] = rd_reg_word(&reg->mailbox2);
			mb[3] = rd_reg_word(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:
			process_atio = true;
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			process_atio = true;
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (process_atio) {
		spin_lock_irqsave(&ha->tgt.atio_lock, flags);
		qlt_24xx_process_atio_queue(vha, 0);
		spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
	}

	return IRQ_HANDLED;
}

irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

irqreturn_t
qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	reg = &ha->iobase->isp24;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
	{ "qpair_multiq_hs", qla2xxx_msix_rsp_q_hs },
};

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_info, vha, 0x00c6,
		    "MSI-X: Using %d vectors\n", ret);
		ha->msix_count = ret;
		/* Recalculate queue values */
		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted max number of queue pairs: %d.\n", ha->max_qpairs);
		}
	}
	vha->irq_offset = desc.pre_vectors;
	ha->msix_entries = kcalloc(ha->msix_count,
				   sizeof(struct qla_msix_entry),
				   GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto free_irqs;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->vector_base0 = i;
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
		    msix_entries[QLA_ATIO_VECTOR].handler,
		    0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_MQUE_CAPABLE(ha) &&
	    (ha->msixbase && ha->mqiobase && ha->max_qpairs))
		ha->mqenable = 1;
	else
		ha->mqenable = 0;

	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;

free_irqs:
	pci_free_irq_vectors(ha->pdev);
	goto msix_out;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
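	/*
	 * Interrupt setup falls back in order: MSI-X, then single-vector MSI,
	 * then legacy INTa. ISP82xx skips the INTa fallback and fails instead
	 * if neither MSI-X nor MSI could be enabled.
	 */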
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
		goto skip_msi;

	if (ql2xenablemsix == 2)
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- ret=%d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (ret > 0) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- ret=%d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d -- already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
		/* Set max_qpairs to 0, as MSI-X and MSI are not enabled */
		ha->max_qpairs = 0;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	wrt_reg_word(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
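	 * (If it is not, skip straight to releasing the IRQ vectors.)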
4567 */ 4568 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 4569 goto free_irqs; 4570 rsp = ha->rsp_q_map[0]; 4571 4572 if (ha->flags.msix_enabled) { 4573 for (i = 0; i < ha->msix_count; i++) { 4574 qentry = &ha->msix_entries[i]; 4575 if (qentry->have_irq) { 4576 irq_set_affinity_notifier(qentry->vector, NULL); 4577 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); 4578 } 4579 } 4580 kfree(ha->msix_entries); 4581 ha->msix_entries = NULL; 4582 ha->flags.msix_enabled = 0; 4583 ql_dbg(ql_dbg_init, vha, 0x0042, 4584 "Disabled MSI-X.\n"); 4585 } else { 4586 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 4587 } 4588 4589 free_irqs: 4590 pci_free_irq_vectors(ha->pdev); 4591 } 4592 4593 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 4594 struct qla_msix_entry *msix, int vector_type) 4595 { 4596 const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 4597 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 4598 int ret; 4599 4600 scnprintf(msix->name, sizeof(msix->name), 4601 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); 4602 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); 4603 if (ret) { 4604 ql_log(ql_log_fatal, vha, 0x00e6, 4605 "MSI-X: Unable to register handler -- %x/%d.\n", 4606 msix->vector, ret); 4607 return ret; 4608 } 4609 msix->have_irq = 1; 4610 msix->handle = qpair; 4611 qla_mapq_init_qp_cpu_map(ha, msix, qpair); 4612 return ret; 4613 } 4614