/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
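			/*
			 * The semaphore gates access to the outgoing
			 * mailboxes: the RISC holds it while the mailbox
			 * contents are valid, and the host clears it once
			 * the data has been consumed so firmware can post
			 * the next event.
			 */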
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

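		/*
		 * The low byte of host_status encodes the interrupt
		 * source: 0x1/0x2/0x10/0x11 are mailbox command
		 * completions, 0x12 is an asynchronous event, 0x13 means
		 * the response queue has entries, and 0x15/0x16 are
		 * fast-posted command completions.
		 */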
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED	7

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
441 "required.\n"); 442 qla83xx_schedule_work(vha, 443 QLA83XX_NIC_CORE_RESET); 444 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 445 ql_log(ql_log_fatal, vha, 0x5065, 446 "Unrecoverable Fatal error: Set FAILED " 447 "state, reboot required.\n"); 448 qla83xx_schedule_work(vha, 449 QLA83XX_NIC_CORE_UNRECOVERABLE); 450 } 451 } 452 453 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 454 uint16_t peg_fw_state, nw_interface_link_up; 455 uint16_t nw_interface_signal_detect, sfp_status; 456 uint16_t htbt_counter, htbt_monitor_enable; 457 uint16_t sfp_additional_info, sfp_multirate; 458 uint16_t sfp_tx_fault, link_speed, dcbx_status; 459 460 /* 461 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 462 * - PEG-to-FC Status Register: 463 * (LSW = mb[2], MSW = mb[6]) 464 * Bits 0-7 = Peg-Firmware state 465 * Bit 8 = N/W Interface Link-up 466 * Bit 9 = N/W Interface signal detected 467 * Bits 10-11 = SFP Status 468 * SFP Status 0x0 = SFP+ transceiver not expected 469 * SFP Status 0x1 = SFP+ transceiver not present 470 * SFP Status 0x2 = SFP+ transceiver invalid 471 * SFP Status 0x3 = SFP+ transceiver present and 472 * valid 473 * Bits 12-14 = Heartbeat Counter 474 * Bit 15 = Heartbeat Monitor Enable 475 * Bits 16-17 = SFP Additional Info 476 * SFP info 0x0 = Unregocnized transceiver for 477 * Ethernet 478 * SFP info 0x1 = SFP+ brand validation failed 479 * SFP info 0x2 = SFP+ speed validation failed 480 * SFP info 0x3 = SFP+ access error 481 * Bit 18 = SFP Multirate 482 * Bit 19 = SFP Tx Fault 483 * Bits 20-22 = Link Speed 484 * Bits 23-27 = Reserved 485 * Bits 28-30 = DCBX Status 486 * DCBX Status 0x0 = DCBX Disabled 487 * DCBX Status 0x1 = DCBX Enabled 488 * DCBX Status 0x2 = DCBX Exchange error 489 * Bit 31 = Reserved 490 */ 491 peg_fw_state = (mb[2] & 0x00ff); 492 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 493 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 494 sfp_status = ((mb[2] & 0x0c00) >> 10); 495 htbt_counter = ((mb[2] & 0x7000) >> 12); 496 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 497 sfp_additional_info = (mb[6] & 0x0003); 498 sfp_multirate = ((mb[6] & 0x0004) >> 2); 499 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 500 link_speed = ((mb[6] & 0x0070) >> 4); 501 dcbx_status = ((mb[6] & 0x7000) >> 12); 502 503 ql_log(ql_log_warn, vha, 0x5066, 504 "Peg-to-Fc Status Register:\n" 505 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 506 "nw_interface_signal_detect=0x%x" 507 "\nsfp_statis=0x%x.\n ", peg_fw_state, 508 nw_interface_link_up, nw_interface_signal_detect, 509 sfp_status); 510 ql_log(ql_log_warn, vha, 0x5067, 511 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 512 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 513 htbt_counter, htbt_monitor_enable, 514 sfp_additional_info, sfp_multirate); 515 ql_log(ql_log_warn, vha, 0x5068, 516 "sfp_tx_fault=0x%x, link_state=0x%x, " 517 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 518 dcbx_status); 519 520 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 521 } 522 523 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 524 ql_log(ql_log_warn, vha, 0x5069, 525 "Heartbeat Failure encountered, chip reset " 526 "required.\n"); 527 528 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 529 } 530 } 531 532 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 533 ql_log(ql_log_info, vha, 0x506a, 534 "IDC Device-State changed = 0x%x.\n", mb[4]); 535 if (ha->flags.nic_core_reset_owner) 536 return; 537 qla83xx_schedule_work(vha, MBA_IDC_AEN); 538 } 539 } 540 541 int 542 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 543 { 544 struct qla_hw_data *ha 
= vha->hw; 545 scsi_qla_host_t *vp; 546 uint32_t vp_did; 547 unsigned long flags; 548 int ret = 0; 549 550 if (!ha->num_vhosts) 551 return ret; 552 553 spin_lock_irqsave(&ha->vport_slock, flags); 554 list_for_each_entry(vp, &ha->vp_list, list) { 555 vp_did = vp->d_id.b24; 556 if (vp_did == rscn_entry) { 557 ret = 1; 558 break; 559 } 560 } 561 spin_unlock_irqrestore(&ha->vport_slock, flags); 562 563 return ret; 564 } 565 566 fc_port_t * 567 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 568 { 569 fc_port_t *f, *tf; 570 571 f = tf = NULL; 572 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) 573 if (f->loop_id == loop_id) 574 return f; 575 return NULL; 576 } 577 578 fc_port_t * 579 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) 580 { 581 fc_port_t *f, *tf; 582 583 f = tf = NULL; 584 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 585 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { 586 if (incl_deleted) 587 return f; 588 else if (f->deleted == 0) 589 return f; 590 } 591 } 592 return NULL; 593 } 594 595 fc_port_t * 596 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, 597 u8 incl_deleted) 598 { 599 fc_port_t *f, *tf; 600 601 f = tf = NULL; 602 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 603 if (f->d_id.b24 == id->b24) { 604 if (incl_deleted) 605 return f; 606 else if (f->deleted == 0) 607 return f; 608 } 609 } 610 return NULL; 611 } 612 613 /** 614 * qla2x00_async_event() - Process aynchronous events. 615 * @ha: SCSI driver HA context 616 * @mb: Mailbox registers (0 - 3) 617 */ 618 void 619 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 620 { 621 uint16_t handle_cnt; 622 uint16_t cnt, mbx; 623 uint32_t handles[5]; 624 struct qla_hw_data *ha = vha->hw; 625 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 626 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 627 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 628 uint32_t rscn_entry, host_pid; 629 unsigned long flags; 630 fc_port_t *fcport = NULL; 631 632 /* Setup to process RIO completion. 
	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
		    RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		ha->isp_ops->fw_dump(vha, 1);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;
		ha->flags.n2n_ae = 0;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		ha->flags.n2n_ae = 0;
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;

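		/*
		 * On 81xx/8031 parts mailbox 4 (mailbox_out[4] on P3P)
		 * carries additional loop-down status; include it in the
		 * log below.
		 */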
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled) {
					void *wwpn = ha->init_cb->port_name;
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;
		ha->flags.n2n_ae = 1;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 * Event is global, vp_idx is NOT all vps,
		 * vp_idx does not match
		 * Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		if (mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port %s %04x %04x %04x.\n",
			    mb[1] == 0xffff ?
"unavailable" : "logout", 978 mb[1], mb[2], mb[3]); 979 980 if (mb[1] == 0xffff) 981 goto global_port_update; 982 983 if (mb[1] == NPH_SNS_LID(ha)) { 984 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 985 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 986 break; 987 } 988 989 /* use handle_cnt for loop id/nport handle */ 990 if (IS_FWI2_CAPABLE(ha)) 991 handle_cnt = NPH_SNS; 992 else 993 handle_cnt = SIMPLE_NAME_SERVER; 994 if (mb[1] == handle_cnt) { 995 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 996 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 997 break; 998 } 999 1000 /* Port logout */ 1001 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1002 if (!fcport) 1003 break; 1004 if (atomic_read(&fcport->state) != FCS_ONLINE) 1005 break; 1006 ql_dbg(ql_dbg_async, vha, 0x508a, 1007 "Marking port lost loopid=%04x portid=%06x.\n", 1008 fcport->loop_id, fcport->d_id.b24); 1009 if (qla_ini_mode_enabled(vha)) { 1010 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1011 fcport->logout_on_delete = 0; 1012 qlt_schedule_sess_for_deletion_lock(fcport); 1013 } 1014 break; 1015 1016 global_port_update: 1017 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1018 atomic_set(&vha->loop_state, LOOP_DOWN); 1019 atomic_set(&vha->loop_down_timer, 1020 LOOP_DOWN_TIME); 1021 vha->device_flags |= DFLG_NO_CABLE; 1022 qla2x00_mark_all_devices_lost(vha, 1); 1023 } 1024 1025 if (vha->vp_idx) { 1026 atomic_set(&vha->vp_state, VP_FAILED); 1027 fc_vport_set_state(vha->fc_vport, 1028 FC_VPORT_FAILED); 1029 qla2x00_mark_all_devices_lost(vha, 1); 1030 } 1031 1032 vha->flags.management_server_logged_in = 0; 1033 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1034 break; 1035 } 1036 1037 /* 1038 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1039 * event etc. earlier indicating loop is down) then process 1040 * it. Otherwise ignore it and Wait for RSCN to come in. 1041 */ 1042 atomic_set(&vha->loop_down_timer, 0); 1043 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1044 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1045 ql_dbg(ql_dbg_async, vha, 0x5011, 1046 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1047 mb[1], mb[2], mb[3]); 1048 1049 qlt_async_event(mb[0], vha, mb); 1050 break; 1051 } 1052 1053 ql_dbg(ql_dbg_async, vha, 0x5012, 1054 "Port database changed %04x %04x %04x.\n", 1055 mb[1], mb[2], mb[3]); 1056 1057 /* 1058 * Mark all devices as missing so we will login again. 1059 */ 1060 atomic_set(&vha->loop_state, LOOP_UP); 1061 1062 qla2x00_mark_all_devices_lost(vha, 1); 1063 1064 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1065 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1066 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1067 1068 qlt_async_event(mb[0], vha, mb); 1069 break; 1070 1071 case MBA_RSCN_UPDATE: /* State Change Registration */ 1072 /* Check if the Vport has issued a SCR */ 1073 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1074 break; 1075 /* Only handle SCNs for our Vport index. */ 1076 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1077 break; 1078 1079 ql_dbg(ql_dbg_async, vha, 0x5013, 1080 "RSCN database changed -- %04x %04x %04x.\n", 1081 mb[1], mb[2], mb[3]); 1082 1083 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1084 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1085 | vha->d_id.b.al_pa; 1086 if (rscn_entry == host_pid) { 1087 ql_dbg(ql_dbg_async, vha, 0x5014, 1088 "Ignoring RSCN update to local host " 1089 "port ID (%06x).\n", host_pid); 1090 break; 1091 } 1092 1093 /* Ignore reserved bits from RSCN-payload. 
		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.event = FCME_RSCN;
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_fcport_event_handler(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
		/* fall through */
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp && !vha->vp_idx)
			complete(&ha->lb_portup_comp);
		/* Fallthru */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	case MBA_DPORT_DIAGNOSTICS:
		ql_dbg(ql_dbg_async, vha, 0x5052,
		    "D-Port Diagnostics: %04x result=%s\n",
		    mb[0],
		    mb[1] == 0 ? "start" :
		    mb[1] == 1 ? "done (pass)" :
		    mb[1] == 2 ? "done (error)" : "other");
		break;

	case MBA_TEMPERATURE_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x505e,
		    "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]);
		if (mb[1] == 0x12)
			schedule_work(&ha->board_disable);
		break;

	case MBA_TRANS_INSERT:
		ql_dbg(ql_dbg_async, vha, 0x5091,
		    "Transceiver Insertion: %04x\n", mb[1]);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				  struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
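	/*
	 * A NULL slot means the handle has already been reclaimed (for
	 * example after an abort or timeout), so a completion for it is
	 * a sign of firmware trouble and triggers an ISP abort below.
	 */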
*/ 1282 req->outstanding_cmds[index] = NULL; 1283 1284 /* Save ISP completion status */ 1285 sp->done(sp, DID_OK << 16); 1286 } else { 1287 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1288 1289 if (IS_P3P_TYPE(ha)) 1290 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1291 else 1292 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1293 } 1294 } 1295 1296 srb_t * 1297 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1298 struct req_que *req, void *iocb) 1299 { 1300 struct qla_hw_data *ha = vha->hw; 1301 sts_entry_t *pkt = iocb; 1302 srb_t *sp = NULL; 1303 uint16_t index; 1304 1305 index = LSW(pkt->handle); 1306 if (index >= req->num_outstanding_cmds) { 1307 ql_log(ql_log_warn, vha, 0x5031, 1308 "Invalid command index (%x) type %8ph.\n", 1309 index, iocb); 1310 if (IS_P3P_TYPE(ha)) 1311 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1312 else 1313 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1314 goto done; 1315 } 1316 sp = req->outstanding_cmds[index]; 1317 if (!sp) { 1318 ql_log(ql_log_warn, vha, 0x5032, 1319 "Invalid completion handle (%x) -- timed-out.\n", index); 1320 return sp; 1321 } 1322 if (sp->handle != index) { 1323 ql_log(ql_log_warn, vha, 0x5033, 1324 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 1325 return NULL; 1326 } 1327 1328 req->outstanding_cmds[index] = NULL; 1329 1330 done: 1331 return sp; 1332 } 1333 1334 static void 1335 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1336 struct mbx_entry *mbx) 1337 { 1338 const char func[] = "MBX-IOCB"; 1339 const char *type; 1340 fc_port_t *fcport; 1341 srb_t *sp; 1342 struct srb_iocb *lio; 1343 uint16_t *data; 1344 uint16_t status; 1345 1346 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1347 if (!sp) 1348 return; 1349 1350 lio = &sp->u.iocb_cmd; 1351 type = sp->name; 1352 fcport = sp->fcport; 1353 data = lio->u.logio.data; 1354 1355 data[0] = MBS_COMMAND_ERROR; 1356 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
	struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    (uint8_t *)pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}

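/*
 * ELS/CT IOCB completions come from two sources: bsg pass-through
 * requests issued on behalf of user space (reply data is handed back
 * through the fc_bsg_reply) and driver-internal commands such as the
 * ELS logo and CT name-server queries, which are completed directly.
 */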
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		ql_dbg(ql_dbg_user, vha, 0x5047,
		    "Completing %s: (%p) type=%d.\n", type, sp, sp->type);
		sp->done(sp, 0);
		return;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)scsi_req(bsg_job->req)->sense) +
			    sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
		    fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
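		/*
		 * NOXCB means the firmware ran out of exchange control
		 * blocks.  Tolerate a few occurrences, but after five in a
		 * row assume the RISC is wedged and schedule a reset.
		 */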
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		/* drop through */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
	    sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(sp, 0);
}

static void
qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "NVME-IOCB";
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t ret = 0;
	struct srb_iocb *nvme;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;
	nvme = &sp->u.iocb_cmd;

	if (unlikely(nvme->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);

	/*
	 * State flags: bit 6 (SF_NVME_ERSP) and bit 0 (SF_FCP_RSP_DMA).
	 * If bit 0 is set, the response was already DMA'd to the host
	 * buffer and bit 6 is irrelevant.  If both are clear, this is
	 * the good-path case.  If bit 6 is set and bit 0 is clear, the
	 * response data must be copied from the status IOCB to the
	 * response buffer.
	 */
	if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) {
		iocb->u.nvme.rsp_pyld_len = 0;
	} else if ((state_flags & SF_FCP_RSP_DMA)) {
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
	} else if (state_flags & SF_NVME_ERSP) {
		uint32_t *inbuf, *outbuf;
		uint16_t iter;

		inbuf = (uint32_t *)&sts->nvme_ersp_data;
		outbuf = (uint32_t *)fd->rspaddr;
		iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len);
		iter = iocb->u.nvme.rsp_pyld_len >> 2;
		for (; iter; iter--)
			*outbuf++ = swab32(*inbuf++);
	} else { /* unhandled case */
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "NVME-%s error. Unhandled state_flags of %x\n",
		    sp->name, state_flags);
	}

	fd->transferred_length = fd->payload_length -
	    le32_to_cpu(sts->residual_len);

	/*
	 * If transport error then Failure (HBA rejects request)
	 * otherwise transport will handle.
	 */
	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "NVME-%s error - hdl=%x entry-status(%x).\n",
		    sp->name, sp->handle, sts->entry_status);
		ret = QLA_FUNCTION_FAILED;
	} else {
		switch (le16_to_cpu(sts->comp_status)) {
		case CS_COMPLETE:
			ret = 0;
			break;

		case CS_ABORTED:
		case CS_RESET:
		case CS_PORT_UNAVAILABLE:
		case CS_PORT_LOGGED_OUT:
		case CS_PORT_BUSY:
			ql_log(ql_log_warn, fcport->vha, 0x5060,
			    "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
			    sp->name, sp->handle, sts->comp_status,
			    le32_to_cpu(sts->residual_len), sts->ox_id);
			fd->transferred_length = fd->payload_length;
			ret = QLA_ABORTED;
			break;

		default:
			ql_log(ql_log_warn, fcport->vha, 0x5060,
			    "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
			    sp->name, sp->handle, sts->comp_status,
			    le32_to_cpu(sts->residual_len), sts->ox_id);
			ret = QLA_FUNCTION_FAILED;
			break;
		}
	}
	sp->done(sp, ret);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported.
			 */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}

static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected an error.
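 *
 * The 8-byte DIF tuple (see struct scsi_dif_tuple above) carries the
 * guard, application tag, and reference tag; the actual and expected
 * tuples are unpacked from the status IOCB data below.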
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t *ap = &sts24->data[12];
	uint8_t *ep = &sts24->data[20];
	uint32_t e_ref_tag, a_ref_tag;
	uint16_t e_app_tag, a_app_tag;
	uint16_t e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type 3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == T10_PI_APP_ESCAPE) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == T10_PI_REF_ESCAPE))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = T10_PI_APP_ESCAPE;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = T10_PI_REF_ESCAPE;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd,
		    DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	return 1;
}

static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct bsg_job *bsg_job = NULL;
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Free outstanding command slot. */
	req->outstanding_cmds[index] = NULL;
	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval =
		    EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK; bsg will send the vendor specific response
	 * in this case only. */
	sp->done(sp, DID_OK << 16);
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint16_t ox_id;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t *rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;
	uint16_t retry_delay = 0;
	uint8_t no_logout = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Check for invalid queue pointer */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

	/* Validate handle.
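	 * The 32-bit handle packs the request-queue id in the upper word
	 * and the outstanding-command index in the lower word (split via
	 * the LSW()/MSW() macros above).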
	 */
	if (handle < req->num_outstanding_cmds) {
		sp = req->outstanding_cmds[handle];
		if (!sp) {
			ql_dbg(ql_dbg_io, vha, 0x3075,
			    "%s(%ld): Already returned command for status handle (0x%x).\n",
			    __func__, vha->host_no, sts->handle);
			return;
		}
	} else {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle, out of range (0x%x).\n",
		    sts->handle);

		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			if (IS_P3P_TYPE(ha))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return;
	}

	if (sp->cmd_type != TYPE_SRB) {
		req->outstanding_cmds[handle] = NULL;
		ql_dbg(ql_dbg_io, vha, 0x3015,
		    "Unknown sp->cmd_type %x %p).\n",
		    sp->cmd_type, sp);
		return;
	}

	/* NVME completion. */
	if (sp->type == SRB_NVME_CMD) {
		qla24xx_nvme_iocb_entry(vha, req, pkt);
		return;
	}

	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Task Management completion. */
	if (sp->type == SRB_TM_CMD) {
		qla24xx_tm_iocb_entry(vha, req, pkt);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
		/* Valid values of the retry delay timer are 0x1-0xffef */
		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff0)
			retry_delay = sts24->retry_delay;
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data.
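		 * The response-info bytes are therefore skipped before the
		 * sense bytes are copied out (FWI2-capable adapters only).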
		 */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Check the retry_delay_timer value if we receive a busy or
	 * queue full.
	 */
	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY)
		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);

	/*
	 * Based on the host and SCSI status, generate a status code for Linux.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			   lscsi_status != SAM_STAT_BUSY) {
			/*
			 * A SCSI status of TASK SET FULL or BUSY means the
			 * task was not completed.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
			    resid, scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if the SCSI status is non-zero; if so,
		 * report it.
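		 * (This label is reached via goto from the underrun paths
		 * above when a non-zero SCSI status needs to be reported.)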
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
		no_logout = 1;
		/* drop through */
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "Port to be marked lost on fcport=%02x%02x%02x, current "
			    "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    port_state_str[atomic_read(&fcport->state)],
			    comp_status);

			if (no_logout)
				fcport->logout_on_delete = 0;

			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
			qlt_schedule_sess_for_deletion_lock(fcport);
		}

		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	if (rsp->status_srb == NULL)
		sp->done(sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
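 * Handles sense bytes that did not fit in the original status IOCB;
 * qla2x00_handle_sense() leaves rsp->status_srb set while additional
 * sense data is still expected.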
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Returns: 1=allow further error analysis, 0=no additional error analysis.
 */
static int
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
		return 0;

	switch (pkt->entry_type) {
	case NOTIFY_ACK_TYPE:
	case STATUS_TYPE:
	case STATUS_CONT_TYPE:
	case LOGINOUT_PORT_IOCB_TYPE:
	case CT_IOCB_TYPE:
	case ELS_IOCB_TYPE:
	case ABORT_IOCB_TYPE:
	case MBX_IOCB_TYPE:
		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
		if (sp) {
			sp->done(sp, res);
			return 0;
		}
		break;

	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case CTIO_CRC2:
	default:
		return 1;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
	return 0;
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers?
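	 * Default to every implemented mailbox register; when mcp is
	 * available, mcp->in_mb narrows this to the registers the caller
	 * expects.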
	 */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
	sp->done(sp, 0);
}

void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
    struct pt_ls4_request *pkt, struct req_que *req)
{
	srb_t *sp;
	const char func[] = "LS4_IOCB";
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	comp_status = le16_to_cpu(pkt->status);
	sp->done(sp, comp_status);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->flags.fw_started)
		return;

	if (rsp->qpair->cpuid != smp_processor_id())
		qla_cpu_update(rsp->qpair, smp_processor_id());

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, rsp,
				    (response_t *)pkt);
				break;
			} else {
				/* drop through */
				qlt_24xx_process_atio_queue(vha, 1);
			}
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
			break;
		case PT_LS4_REQUEST:
			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
			    rsp->req);
			break;
		case NOTIFY_ACK_TYPE:
			if
			    (pkt->handle == QLA_TGT_SKIP_HANDLE)
				qlt_response_pkt_all_vps(vha, rsp,
				    (response_t *)pkt);
			else
				qla24xxx_nack_iocb_entry(vha, rsp->req,
				    (struct nack_to_isp *)pkt);
			break;
		case MARKER_TYPE:
			/*
			 * Do nothing in this case; this case exists only to
			 * keep the entry from falling into the default case.
			 */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla24xx_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_24xx_entry *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
	}
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
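 *
 * (Used for INTx/MSI operation; MSI-X operation uses the
 * qla24xx_msix_* handlers below.)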
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if
	    (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (unlikely(!ha->flags.disable_msix_handshake)) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
};

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha)) {
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    ha->msix_count, PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "with %d vectors, using %d vectors.\n",
		    ha->msix_count, ret);
		ha->msix_count = ret;
		/* Recalculate queue values */
		if (ha->mqiobase && ql2xmqsupport) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIOQ needs 1 vector.
			 * That's 1 less QPair. */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted max number of queue pairs: %d.\n",
			    ha->max_qpairs);
		}
	}
	ha->msix_entries = kcalloc(ha->msix_count,
	    sizeof(struct qla_msix_entry), GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
		    msix_entries[QLA_ATIO_VECTOR].handler,
		    0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X.
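	 * Interrupt-mode selection falls back in order: MSI-X, then MSI,
	 * then legacy INTx (INTx is skipped on ISP82xx).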
	 */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- %d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;
	struct qla_msix_entry *qentry;
	int i;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
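	 * (In that case the response-queue map may never have been set up,
	 * even though interrupt vectors were allocated.)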
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		goto free_irqs;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled) {
		for (i = 0; i < ha->msix_count; i++) {
			qentry = &ha->msix_entries[i];
			if (qentry->have_irq) {
				irq_set_affinity_notifier(qentry->vector, NULL);
				free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
			}
		}
		kfree(ha->msix_entries);
		ha->msix_entries = NULL;
		ha->flags.msix_enabled = 0;
		ql_dbg(ql_dbg_init, vha, 0x0042,
		    "Disabled MSI-X.\n");
	} else {
		free_irq(pci_irq_vector(ha->pdev, 0), rsp);
	}

free_irqs:
	pci_free_irq_vectors(ha->pdev);
}

int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
	struct qla_msix_entry *msix, int vector_type)
{
	const struct qla_init_msix_entry *intr = &msix_entries[vector_type];
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	scnprintf(msix->name, sizeof(msix->name),
	    "qla2xxx%lu_qpair%d", vha->host_no, qpair->id);
	ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->handle = qpair;
	return ret;
}