/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/t10-pi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
#include <scsi/fc/fc_fs.h>
#include <linux/nvme-fc-driver.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
			break;
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
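			/*
			 * The read-back below likely serves to flush the
			 * posted MMIO write, so the semaphore is really
			 * released before the next pass polls the ISP
			 * again (a common MMIO idiom; an inference, not
			 * documented in the original comments).
			 */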
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

bool
qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
{
	/* Check for PCI disconnection */
	if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) {
		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
			/*
			 * Schedule this (only once) on the default system
			 * workqueue so that all the adapter workqueues and the
			 * DPC thread can be shutdown cleanly.
			 */
			schedule_work(&vha->hw->board_disable);
		}
		return true;
	} else
		return false;
}

bool
qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
{
	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char	*event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw))
		wptr = (uint16_t __iomem *)&reg24->mailbox1;
	else if (IS_QLA8044(vha->hw))
		wptr = (uint16_t __iomem *)&reg82->mailbox_out[1];
	else
		return;

	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);
	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp && !vha->vp_idx)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
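		/*
		 * Per the extraction below, the requested ACK timeout sits
		 * in bits 8-11 of the descriptor word; e.g. descr = 0x0a00
		 * would decode to timeout = 0xa (illustrative value only,
		 * not taken from the firmware spec).
		 */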
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}

#define LS_UNKNOWN	2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "32", "10"
	};
#define	QLA_LAST_SPEED	7

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[QLA_LAST_SPEED];
	else if (speed < QLA_LAST_SPEED)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additional_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	    SFP Status 0x0 = SFP+ transceiver not expected
			 *	    SFP Status 0x1 = SFP+ transceiver not present
			 *	    SFP Status 0x2 = SFP+ transceiver invalid
			 *	    SFP Status 0x3 = SFP+ transceiver present and
			 *		valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	    SFP info 0x0 = Unrecognized transceiver for
			 *		Ethernet
			 *	    SFP info 0x1 = SFP+ brand validation failed
			 *	    SFP info 0x2 = SFP+ speed validation failed
			 *	    SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	    DCBX Status 0x0 = DCBX Disabled
			 *	    DCBX Status 0x1 = DCBX Enabled
			 *	    DCBX Status 0x2 = DCBX Exchange error
			 *	Bit 31     = Reserved
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additional_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n ", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additional_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}

int
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}

fc_port_t *
qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list)
		if (f->loop_id == loop_id)
			return f;
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

fc_port_t *
qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id,
	u8 incl_deleted)
{
	fc_port_t *f, *tf;

	f = tf = NULL;
	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
		if (f->d_id.b24 == id->b24) {
			if (incl_deleted)
				return f;
			else if (f->deleted == 0)
				return f;
		}
	}
	return NULL;
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;
	fc_port_t	*fcport = NULL;

	/* Setup to process RIO completion. */
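	/*
	 * RIO (Reduced Interrupt Operation): the firmware can report up
	 * to five command completions in a single mailbox event.  This
	 * switch only unpacks the 16/32-bit completion handles into
	 * handles[]; each branch rewrites mb[0] to MBA_SCSI_COMPLETION so
	 * the handles are consumed by the fast-post case further below.
	 */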
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
		    RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);
		ha->flags.fw_init_done = 0;
		QLA_FW_STOPPED(ha);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->port_no == 0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES (%x).\n", mb[1]);
		break;

	case MBA_LOOP_INIT_ERR:
		ql_log(ql_log_warn, vha, 0x5090,
		    "LOOP INIT ERROR (%x).\n", mb[1]);
		ha->isp_ops->fw_dump(vha, 1);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ha->flags.lip_ae = 1;
		ha->flags.n2n_ae = 0;

		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_log(ql_log_info, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);

		if (AUTO_DETECT_SFP_SUPPORT(vha)) {
			set_bit(DETECT_SFP_CHANGE, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		SAVE_TOPO(ha);
		ha->flags.n2n_ae = 0;
		ha->flags.lip_ae = 0;
		ha->current_topology = 0;

		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_log(ql_log_info, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			/*
			 * In case of loop down, restore WWPN from
			 * NVRAM in case of FA-WWPN capable ISP
			 * Restore for Physical Port only
			 */
			if (!vha->vp_idx) {
				if (ha->flags.fawwpn_enabled) {
					void *wwpn = ha->init_cb->port_name;
					memcpy(vha->port_name, wwpn, WWN_SIZE);
					fc_host_port_name(vha->host) =
					    wwn_to_u64(vha->port_name);
					ql_dbg(ql_dbg_init + ql_dbg_verbose,
					    vha, 0x00d8, "LOOP DOWN detected, "
					    "restore WWPN %016llx\n",
					    wwn_to_u64(vha->port_name));
				}

				clear_bit(VP_CONFIG_OK, &vha->vp_flags);
			}

			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		ha->flags.lip_ae = 0;
		ha->flags.n2n_ae = 1;

		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp && !vha->vp_idx)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
906 */ 907 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 908 atomic_set(&vha->loop_state, LOOP_DOWN); 909 if (!atomic_read(&vha->loop_down_timer)) 910 atomic_set(&vha->loop_down_timer, 911 LOOP_DOWN_TIME); 912 qla2x00_mark_all_devices_lost(vha, 1); 913 } 914 915 if (vha->vp_idx) { 916 atomic_set(&vha->vp_state, VP_FAILED); 917 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 918 } 919 920 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 921 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 922 923 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 924 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 925 926 vha->flags.management_server_logged_in = 0; 927 break; 928 929 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 930 if (IS_QLA2100(ha)) 931 break; 932 933 ql_dbg(ql_dbg_async, vha, 0x500f, 934 "Configuration change detected: value=%x.\n", mb[1]); 935 936 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 937 atomic_set(&vha->loop_state, LOOP_DOWN); 938 if (!atomic_read(&vha->loop_down_timer)) 939 atomic_set(&vha->loop_down_timer, 940 LOOP_DOWN_TIME); 941 qla2x00_mark_all_devices_lost(vha, 1); 942 } 943 944 if (vha->vp_idx) { 945 atomic_set(&vha->vp_state, VP_FAILED); 946 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 947 } 948 949 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 950 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 951 break; 952 953 case MBA_PORT_UPDATE: /* Port database update */ 954 /* 955 * Handle only global and vn-port update events 956 * 957 * Relevant inputs: 958 * mb[1] = N_Port handle of changed port 959 * OR 0xffff for global event 960 * mb[2] = New login state 961 * 7 = Port logged out 962 * mb[3] = LSB is vp_idx, 0xff = all vps 963 * 964 * Skip processing if: 965 * Event is global, vp_idx is NOT all vps, 966 * vp_idx does not match 967 * Event is not global, vp_idx does not match 968 */ 969 if (IS_QLA2XXX_MIDTYPE(ha) && 970 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 971 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 972 break; 973 974 if (mb[2] == 0x7) { 975 ql_dbg(ql_dbg_async, vha, 0x5010, 976 "Port %s %04x %04x %04x.\n", 977 mb[1] == 0xffff ? 
"unavailable" : "logout", 978 mb[1], mb[2], mb[3]); 979 980 if (mb[1] == 0xffff) 981 goto global_port_update; 982 983 if (mb[1] == NPH_SNS_LID(ha)) { 984 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 985 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 986 break; 987 } 988 989 /* use handle_cnt for loop id/nport handle */ 990 if (IS_FWI2_CAPABLE(ha)) 991 handle_cnt = NPH_SNS; 992 else 993 handle_cnt = SIMPLE_NAME_SERVER; 994 if (mb[1] == handle_cnt) { 995 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 996 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 997 break; 998 } 999 1000 /* Port logout */ 1001 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 1002 if (!fcport) 1003 break; 1004 if (atomic_read(&fcport->state) != FCS_ONLINE) 1005 break; 1006 ql_dbg(ql_dbg_async, vha, 0x508a, 1007 "Marking port lost loopid=%04x portid=%06x.\n", 1008 fcport->loop_id, fcport->d_id.b24); 1009 if (qla_ini_mode_enabled(vha)) { 1010 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 1011 fcport->logout_on_delete = 0; 1012 qlt_schedule_sess_for_deletion(fcport); 1013 } 1014 break; 1015 1016 global_port_update: 1017 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 1018 atomic_set(&vha->loop_state, LOOP_DOWN); 1019 atomic_set(&vha->loop_down_timer, 1020 LOOP_DOWN_TIME); 1021 vha->device_flags |= DFLG_NO_CABLE; 1022 qla2x00_mark_all_devices_lost(vha, 1); 1023 } 1024 1025 if (vha->vp_idx) { 1026 atomic_set(&vha->vp_state, VP_FAILED); 1027 fc_vport_set_state(vha->fc_vport, 1028 FC_VPORT_FAILED); 1029 qla2x00_mark_all_devices_lost(vha, 1); 1030 } 1031 1032 vha->flags.management_server_logged_in = 0; 1033 ha->link_data_rate = PORT_SPEED_UNKNOWN; 1034 break; 1035 } 1036 1037 /* 1038 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1039 * event etc. earlier indicating loop is down) then process 1040 * it. Otherwise ignore it and Wait for RSCN to come in. 1041 */ 1042 atomic_set(&vha->loop_down_timer, 0); 1043 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1044 !ha->flags.n2n_ae && 1045 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1046 ql_dbg(ql_dbg_async, vha, 0x5011, 1047 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1048 mb[1], mb[2], mb[3]); 1049 1050 qlt_async_event(mb[0], vha, mb); 1051 break; 1052 } 1053 1054 ql_dbg(ql_dbg_async, vha, 0x5012, 1055 "Port database changed %04x %04x %04x.\n", 1056 mb[1], mb[2], mb[3]); 1057 1058 /* 1059 * Mark all devices as missing so we will login again. 1060 */ 1061 atomic_set(&vha->loop_state, LOOP_UP); 1062 vha->scan.scan_retry = 0; 1063 1064 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1065 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1066 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1067 1068 qlt_async_event(mb[0], vha, mb); 1069 break; 1070 1071 case MBA_RSCN_UPDATE: /* State Change Registration */ 1072 /* Check if the Vport has issued a SCR */ 1073 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1074 break; 1075 /* Only handle SCNs for our Vport index. */ 1076 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1077 break; 1078 1079 ql_dbg(ql_dbg_async, vha, 0x5013, 1080 "RSCN database changed -- %04x %04x %04x.\n", 1081 mb[1], mb[2], mb[3]); 1082 1083 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1084 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1085 | vha->d_id.b.al_pa; 1086 if (rscn_entry == host_pid) { 1087 ql_dbg(ql_dbg_async, vha, 0x5014, 1088 "Ignoring RSCN update to local host " 1089 "port ID (%06x).\n", host_pid); 1090 break; 1091 } 1092 1093 /* Ignore reserved bits from RSCN-payload. 
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;
		{
			struct event_arg ea;

			memset(&ea, 0, sizeof(ea));
			ea.event = FCME_RSCN;
			ea.id.b24 = rscn_entry;
			ea.id.b.rsvd_1 = rscn_entry >> 24;
			qla2x00_fcport_event_handler(vha, &ea);
			qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		}
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is
				 * active.
				 */
1197 */ 1198 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1199 atomic_set(&vha->loop_down_timer, 1200 LOOP_DOWN_TIME); 1201 qla2xxx_wake_dpc(vha); 1202 } 1203 } 1204 /* fall through */ 1205 case MBA_IDC_COMPLETE: 1206 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1207 complete(&ha->lb_portup_comp); 1208 /* Fallthru */ 1209 case MBA_IDC_TIME_EXT: 1210 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1211 IS_QLA8044(ha)) 1212 qla81xx_idc_event(vha, mb[0], mb[1]); 1213 break; 1214 1215 case MBA_IDC_AEN: 1216 mb[4] = RD_REG_WORD(®24->mailbox4); 1217 mb[5] = RD_REG_WORD(®24->mailbox5); 1218 mb[6] = RD_REG_WORD(®24->mailbox6); 1219 mb[7] = RD_REG_WORD(®24->mailbox7); 1220 qla83xx_handle_8200_aen(vha, mb); 1221 break; 1222 1223 case MBA_DPORT_DIAGNOSTICS: 1224 ql_dbg(ql_dbg_async, vha, 0x5052, 1225 "D-Port Diagnostics: %04x result=%s\n", 1226 mb[0], 1227 mb[1] == 0 ? "start" : 1228 mb[1] == 1 ? "done (pass)" : 1229 mb[1] == 2 ? "done (error)" : "other"); 1230 break; 1231 1232 case MBA_TEMPERATURE_ALERT: 1233 ql_dbg(ql_dbg_async, vha, 0x505e, 1234 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1235 if (mb[1] == 0x12) 1236 schedule_work(&ha->board_disable); 1237 break; 1238 1239 case MBA_TRANS_INSERT: 1240 ql_dbg(ql_dbg_async, vha, 0x5091, 1241 "Transceiver Insertion: %04x\n", mb[1]); 1242 break; 1243 1244 default: 1245 ql_dbg(ql_dbg_async, vha, 0x5057, 1246 "Unknown AEN:%04x %04x %04x %04x\n", 1247 mb[0], mb[1], mb[2], mb[3]); 1248 } 1249 1250 qlt_async_event(mb[0], vha, mb); 1251 1252 if (!vha->vp_idx && ha->num_vhosts) 1253 qla2x00_alert_all_vps(rsp, mb); 1254 } 1255 1256 /** 1257 * qla2x00_process_completed_request() - Process a Fast Post response. 1258 * @ha: SCSI driver HA context 1259 * @index: SRB index 1260 */ 1261 void 1262 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1263 struct req_que *req, uint32_t index) 1264 { 1265 srb_t *sp; 1266 struct qla_hw_data *ha = vha->hw; 1267 1268 /* Validate handle. */ 1269 if (index >= req->num_outstanding_cmds) { 1270 ql_log(ql_log_warn, vha, 0x3014, 1271 "Invalid SCSI command index (%x).\n", index); 1272 1273 if (IS_P3P_TYPE(ha)) 1274 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1275 else 1276 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1277 return; 1278 } 1279 1280 sp = req->outstanding_cmds[index]; 1281 if (sp) { 1282 /* Free outstanding command slot. 
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}

srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x) type %8ph.\n",
		    index, iocb);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_24xx_entry *pkt)
{
	const char func[] = "MBX-IOCB2";
	srb_t *sp;
	struct srb_iocb *si;
	u16 sz, i;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	si = &sp->u.iocb_cmd;
	sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb));

	for (i = 0; i < sz; i++)
		si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]);

	res = (si->u.mbx.in_mb[0] & MBS_MASK);

	sp->done(sp, res);
}

static void
qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct nack_to_isp *pkt)
{
	const char func[] = "nack";
	srb_t *sp;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS))
		res = QLA_FUNCTION_FAILED;

	sp->done(sp, res);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	int res = 0;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	switch (sp->type) {
	case SRB_CT_CMD:
		bsg_job = sp->u.bsg_job;
		bsg_reply = bsg_job->reply;

		type = "ct pass-through";

		comp_status = le16_to_cpu(pkt->comp_status);

		/*
		 * return FC_CTELS_STATUS_OK and leave the decoding of the
		 * ELS/CT fc payload to the caller
		 */
		bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
		bsg_job->reply_len = sizeof(struct fc_bsg_reply);

		if (comp_status != CS_COMPLETE) {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				bsg_reply->reply_payload_rcv_len =
				    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

				ql_log(ql_log_warn, vha, 0x5048,
				    "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n",
				    type, comp_status,
				    bsg_reply->reply_payload_rcv_len);
			} else {
				ql_log(ql_log_warn, vha, 0x5049,
				    "CT pass-through-%s error comp_status=0x%x.\n",
				    type, comp_status);
				res = DID_ERROR << 16;
				bsg_reply->reply_payload_rcv_len = 0;
			}
			ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
			    (uint8_t *)pkt, sizeof(*pkt));
		} else {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			bsg_job->reply_len = 0;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		break;
	}

	sp->done(sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct bsg_job *bsg_job;
	struct fc_bsg_reply *bsg_reply;
	uint16_t comp_status;
	uint32_t fw_status[3];
	int res;
	struct srb_iocb *els;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	case SRB_ELS_DCMD:
		type = "Driver ELS logo";
		if (iocb_type != ELS_IOCB_TYPE) {
			ql_dbg(ql_dbg_user, vha, 0x5047,
			    "Completing %s: (%p) type=%d.\n",
			    type, sp, sp->type);
			sp->done(sp, 0);
			return;
		}
		break;
	case SRB_CT_PTHRU_CMD:
		/*
		 * borrowing sts_entry_24xx.comp_status.
		 * same location as ct_entry_24xx.comp_status
		 */
		res = qla2x00_chk_ms_status(sp->vha, (ms_iocb_entry_t *)pkt,
		    (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp,
		    sp->name);
		sp->done(sp, res);
		return;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	if (iocb_type == ELS_IOCB_TYPE) {
		els = &sp->u.iocb_cmd;
		els->u.els_plogi.fw_status[0] = fw_status[0];
		els->u.els_plogi.fw_status[1] = fw_status[1];
		els->u.els_plogi.fw_status[2] = fw_status[2];
		els->u.els_plogi.comp_status = fw_status[0];
		if (comp_status == CS_COMPLETE) {
			res = DID_OK << 16;
		} else {
			if (comp_status == CS_DATA_UNDERRUN) {
				res = DID_OK << 16;
				els->u.els_plogi.len =
				    le16_to_cpu(((struct els_sts_entry_24xx *)
				    pkt)->total_byte_count);
			} else {
				els->u.els_plogi.len = 0;
				res = DID_ERROR << 16;
			}
		}
		ql_log(ql_log_info, vha, 0x503f,
		    "ELS IOCB Done -%s error hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n",
		    type, sp->handle, comp_status, fw_status[1], fw_status[2],
		    le16_to_cpu(((struct els_sts_entry_24xx *)
		    pkt)->total_byte_count));
		goto els_ct_done;
	}

	/*
	 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job = sp->u.bsg_job;
	bsg_reply = bsg_job->reply;
	bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->total_byte_count));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
			    pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_reply->reply_payload_rcv_len = 0;
		}
		memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply),
		    fw_status, sizeof(fw_status));
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}
els_ct_done:

	sp->done(sp, res);
}

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - %8phC hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, fcport->port_name, sp->handle,
		    fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		vha->hw->exch_starvation = 0;
		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	lio->u.logio.iop[0] = iop[0];
	lio->u.logio.iop[1] = iop[1];
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	case LSC_SCODE_CMD_FAILED:
		if (iop[1] == 0x0606) {
			/*
			 * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
			 * Target side acked.
			 */
			data[0] = MBS_COMMAND_COMPLETE;
			goto logio_done;
		}
		data[0] = MBS_COMMAND_ERROR;
		break;
	case LSC_SCODE_NOXCB:
		vha->hw->exch_starvation++;
		if (vha->hw->exch_starvation > 5) {
			ql_log(ql_log_warn, vha, 0xd046,
			    "Exchange starvation. Resetting RISC\n");

			vha->hw->exch_starvation = 0;

			if (IS_P3P_TYPE(vha->hw))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		/* fall through */
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, fcport->port_name,
	    sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	iocb->u.tmf.data = QLA_SUCCESS;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
		iocb->u.tmf.data = QLA_FUNCTION_FAILED;
	} else if ((le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		if (le32_to_cpu(sts->rsp_data_len) < 4) {
			ql_log(ql_log_warn, fcport->vha, 0x503b,
			    "Async-%s error - hdl=%x not enough response(%d).\n",
			    type, sp->handle, sts->rsp_data_len);
		} else if (sts->data[3]) {
			ql_log(ql_log_warn, fcport->vha, 0x503c,
			    "Async-%s error - hdl=%x response(%x).\n",
			    type, sp->handle, sts->data[3]);
			iocb->u.tmf.data = QLA_FUNCTION_FAILED;
		}
	}

	if (iocb->u.tmf.data != QLA_SUCCESS)
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));

	sp->done(sp, 0);
}

static void
qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
{
	const char func[] = "NVME-IOCB";
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	uint16_t state_flags;
	struct nvmefc_fcp_req *fd;
	uint16_t ret = 0;
	struct srb_iocb *nvme;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	fcport = sp->fcport;
	iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
	state_flags = le16_to_cpu(sts->state_flags);
	fd = iocb->u.nvme.desc;
	nvme = &sp->u.iocb_cmd;

	if (unlikely(nvme->u.nvme.aen_op))
		atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);

	/*
	 * State flags: bits 6 and 0.
	 * If bit 0 is set, we don't care about bit 6; in both cases the
	 * response was DMA'd to the host buffer.
	 * If both are 0, that is the good path case.
	 * If bit 6 is set and bit 0 is clear, we need to copy the response
	 * data from the status IOCB to the response buffer.
	 */
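	/*
	 * Summarized as a table (derived from the branches below):
	 *
	 *	SF_FCP_RSP_DMA	SF_NVME_ERSP	handling
	 *	0		0		good path, no response payload
	 *	1		x		response already DMA'd to host
	 *	0		1		copy response from status IOCB
	 */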
1875 */ 1876 if (!(state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP))) { 1877 iocb->u.nvme.rsp_pyld_len = 0; 1878 } else if ((state_flags & SF_FCP_RSP_DMA)) { 1879 iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); 1880 } else if (state_flags & SF_NVME_ERSP) { 1881 uint32_t *inbuf, *outbuf; 1882 uint16_t iter; 1883 1884 inbuf = (uint32_t *)&sts->nvme_ersp_data; 1885 outbuf = (uint32_t *)fd->rspaddr; 1886 iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); 1887 iter = iocb->u.nvme.rsp_pyld_len >> 2; 1888 for (; iter; iter--) 1889 *outbuf++ = swab32(*inbuf++); 1890 } else { /* unhandled case */ 1891 ql_log(ql_log_warn, fcport->vha, 0x503a, 1892 "NVME-%s error. Unhandled state_flags of %x\n", 1893 sp->name, state_flags); 1894 } 1895 1896 fd->transferred_length = fd->payload_length - 1897 le32_to_cpu(sts->residual_len); 1898 1899 /* 1900 * If transport error then Failure (HBA rejects request) 1901 * otherwise transport will handle. 1902 */ 1903 if (sts->entry_status) { 1904 ql_log(ql_log_warn, fcport->vha, 0x5038, 1905 "NVME-%s error - hdl=%x entry-status(%x).\n", 1906 sp->name, sp->handle, sts->entry_status); 1907 ret = QLA_FUNCTION_FAILED; 1908 } else { 1909 switch (le16_to_cpu(sts->comp_status)) { 1910 case CS_COMPLETE: 1911 ret = 0; 1912 break; 1913 1914 case CS_ABORTED: 1915 case CS_RESET: 1916 case CS_PORT_UNAVAILABLE: 1917 case CS_PORT_LOGGED_OUT: 1918 case CS_PORT_BUSY: 1919 ql_log(ql_log_warn, fcport->vha, 0x5060, 1920 "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n", 1921 sp->name, sp->handle, sts->comp_status, 1922 le32_to_cpu(sts->residual_len), sts->ox_id); 1923 fd->transferred_length = fd->payload_length; 1924 ret = QLA_ABORTED; 1925 break; 1926 1927 default: 1928 ql_log(ql_log_warn, fcport->vha, 0x5060, 1929 "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n", 1930 sp->name, sp->handle, sts->comp_status, 1931 le32_to_cpu(sts->residual_len), sts->ox_id); 1932 ret = QLA_FUNCTION_FAILED; 1933 break; 1934 } 1935 } 1936 sp->done(sp, ret); 1937 } 1938 1939 static void qla_ctrlvp_completed(scsi_qla_host_t *vha, struct req_que *req, 1940 struct vp_ctrl_entry_24xx *vce) 1941 { 1942 const char func[] = "CTRLVP-IOCB"; 1943 srb_t *sp; 1944 int rval = QLA_SUCCESS; 1945 1946 sp = qla2x00_get_sp_from_handle(vha, func, req, vce); 1947 if (!sp) 1948 return; 1949 1950 if (vce->entry_status != 0) { 1951 ql_dbg(ql_dbg_vport, vha, 0x10c4, 1952 "%s: Failed to complete IOCB -- error status (%x)\n", 1953 sp->name, vce->entry_status); 1954 rval = QLA_FUNCTION_FAILED; 1955 } else if (vce->comp_status != cpu_to_le16(CS_COMPLETE)) { 1956 ql_dbg(ql_dbg_vport, vha, 0x10c5, 1957 "%s: Failed to complete IOCB -- completion status (%x) vpidx %x\n", 1958 sp->name, le16_to_cpu(vce->comp_status), 1959 le16_to_cpu(vce->vp_idx_failed)); 1960 rval = QLA_FUNCTION_FAILED; 1961 } else { 1962 ql_dbg(ql_dbg_vport, vha, 0x10c6, 1963 "Done %s.\n", __func__); 1964 } 1965 1966 sp->rc = rval; 1967 sp->done(sp, rval); 1968 } 1969 1970 /** 1971 * qla2x00_process_response_queue() - Process response queue entries. 
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
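
/*
 * Editorial sketch (compiled out, not part of the driver): the
 * ring-pointer advance used by the response-queue loop above.  The
 * queue is a fixed-length circular array; when the index reaches
 * rsp->length it wraps back to the ring base.  struct mini_ring is a
 * pared-down, hypothetical stand-in for struct rsp_que.
 */
#if 0
struct mini_ring {
	response_t *ring;	/* base of the ring buffer */
	response_t *ring_ptr;	/* current entry */
	uint16_t ring_index;	/* current index */
	uint16_t length;	/* number of entries in the ring */
};

static void ring_advance(struct mini_ring *r)
{
	if (++r->ring_index == r->length) {
		r->ring_index = 0;
		r->ring_ptr = r->ring;	/* wrap to the start */
	} else {
		r->ring_ptr++;
	}
}
#endif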
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n",
		    sp->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}

struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};
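
/*
 * Editorial sketch (compiled out, not part of the driver): the 8-byte
 * T10 protection-information tuple that struct scsi_dif_tuple above
 * describes, decoded from raw big-endian wire bytes.  get_be16() and
 * get_be32() are hypothetical helpers standing in for the kernel's
 * get_unaligned_be16()/get_unaligned_be32().
 */
#if 0
static void decode_pi_tuple(const uint8_t b[8], uint16_t *guard,
    uint16_t *app_tag, uint32_t *ref_tag)
{
	*guard = get_be16(b);		/* bytes 0-1: CRC guard */
	*app_tag = get_be16(b + 2);	/* bytes 2-3: application tag */
	*ref_tag = get_be32(b + 4);	/* bytes 4-7: reference tag (LBA) */
}
#endif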
/*
 * Inspect the guard and meta-data tags to classify the type of error
 * the HBA detected.  On error, the ASC/ASCQ fields in the sense buffer
 * are set to ILLEGAL_REQUEST to indicate to the kernel that the HBA
 * detected the failure.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type 3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == T10_PI_APP_ESCAPE) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == T10_PI_REF_ESCAPE))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct t10_pi_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = T10_PI_APP_ESCAPE;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = T10_PI_REF_ESCAPE;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION;
		return 1;
	}

	return 1;
}
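
/*
 * Editorial sketch (compiled out, not part of the driver): the "2TB
 * boundary" arithmetic used in the ignore-sector path above.  The
 * expected ref tag is the low 32 bits of the LBA of the last good
 * block, so unsigned 32-bit subtraction yields the completed block
 * count correctly even when the transfer crosses a 32-bit LBA
 * (2TB with 512-byte sectors) boundary.
 */
#if 0
static uint32_t blocks_completed(uint32_t e_ref_tag, uint64_t start_lba)
{
	/*
	 * Example: start_lba = 0xfffffffe, e_ref_tag = 0x00000001.
	 * 0x1 - 0xfffffffe wraps to 3, plus 1 gives 4: the four blocks
	 * at LBAs 0xfffffffe..0x100000001 completed.
	 */
	return e_ref_tag - (uint32_t)start_lba + 1;
}
#endif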

static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct bsg_job *bsg_job = NULL;
	struct fc_bsg_request *bsg_request;
	struct fc_bsg_reply *bsg_reply;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	/* Free outstanding command slot. */
	req->outstanding_cmds[index] = NULL;
	bsg_job = sp->u.bsg_job;
	bsg_request = bsg_job->request;
	bsg_reply = bsg_job->reply;

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_reply->reply_payload_rcv_len =
			    bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
			    bsg_reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/*
	 * Always return DID_OK here; in this case bsg itself sends the
	 * vendor specific response.
	 */
	sp->done(sp, DID_OK << 16);
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;
	uint16_t state_flags = 0;
	uint16_t retry_delay = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
		state_flags = le16_to_cpu(sts24->state_flags);
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Check for invalid queue pointer */
	if (req == NULL ||
	    que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
		ql_dbg(ql_dbg_io, vha, 0x3059,
		    "Invalid status handle (0x%x): Bad req pointer. req=%p, "
		    "que=%u.\n", sts->handle, req, que);
		return;
	}

	/* Validate handle. */
	if (handle < req->num_outstanding_cmds) {
		sp = req->outstanding_cmds[handle];
		if (!sp) {
			ql_dbg(ql_dbg_io, vha, 0x3075,
			    "%s(%ld): Already returned command for status handle (0x%x).\n",
			    __func__, vha->host_no, sts->handle);
			return;
		}
	} else {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle, out of range (0x%x).\n",
		    sts->handle);

		if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
			if (IS_P3P_TYPE(ha))
				set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
			else
				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			qla2xxx_wake_dpc(vha);
		}
		return;
	}

	if (sp->cmd_type != TYPE_SRB) {
		req->outstanding_cmds[handle] = NULL;
		ql_dbg(ql_dbg_io, vha, 0x3015,
		    "Unknown sp->cmd_type %x %p).\n",
		    sp->cmd_type, sp);
		return;
	}

	/* NVME completion. */
	if (sp->type == SRB_NVME_CMD) {
		qla24xx_nvme_iocb_entry(vha, req, pkt);
		return;
	}

	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
		return;
	}

	/* Task Management completion. */
	if (sp->type == SRB_TM_CMD) {
		qla24xx_tm_iocb_entry(vha, req, pkt);
		return;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
		/* Valid values of the retry delay timer are 0x1-0xffef */
		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
			retry_delay = sts24->retry_delay;
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Check the retry_delay_timer value if we receive a busy or
	 * queue full.
	 */
	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
	    lscsi_status == SAM_STAT_BUSY)
		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * A SCSI status of Task Set Full or Busy means the
			 * task was not completed; any other status here,
			 * without SS_RESIDUAL_UNDER set, indicates dropped
			 * frames.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
			    resid, scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if the SCSI Status is non-zero.  If so,
		 * report the SCSI Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			ql_dbg(ql_dbg_disc, fcport->vha, 0x3021,
			    "Port to be marked lost on fcport=%02x%02x%02x, current "
			    "port state= %s comp_status %x.\n", fcport->d_id.b.domain,
			    fcport->d_id.b.area, fcport->d_id.b.al_pa,
			    port_state_str[atomic_read(&fcport->state)],
			    comp_status);

			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
			qlt_schedule_sess_for_deletion(fcport);
		}

		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len, sp, cp);

	if (rsp->status_srb == NULL)
		sp->done(sp, res);
}
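
/*
 * Editorial sketch (compiled out, not part of the driver): the residual
 * sanity checks applied in the CS_DATA_UNDERRUN case above, reduced to
 * one predicate.  A transfer is trusted only when the firmware's
 * residual matches the target-reported one and the bytes actually moved
 * still satisfy the midlayer's underflow floor (cp->underflow); the
 * SCSI-status qualifiers applied by the real code are omitted here for
 * brevity.
 */
#if 0
static int underrun_ok(uint32_t bufflen, uint32_t fw_resid,
    uint32_t scsi_resid, uint32_t underflow)
{
	if (fw_resid != scsi_resid)
		return 0;			/* dropped frame(s) */
	if (bufflen - fw_resid < underflow)
		return 0;			/* midlayer underflow */
	return 1;				/* residual is credible */
}
#endif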

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Returns 1 to allow further error analysis, 0 otherwise.
 */
static int
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "iocb type %xh with error status %xh, handle %xh, rspq id %d\n",
	    pkt->entry_type, pkt->entry_status, pkt->handle, rsp->id);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	if ((pkt->handle & ~QLA_TGT_HANDLE_MASK) == QLA_TGT_SKIP_HANDLE)
		return 0;

	switch (pkt->entry_type) {
	case NOTIFY_ACK_TYPE:
	case STATUS_TYPE:
	case STATUS_CONT_TYPE:
	case LOGINOUT_PORT_IOCB_TYPE:
	case CT_IOCB_TYPE:
	case ELS_IOCB_TYPE:
	case ABORT_IOCB_TYPE:
	case MBX_IOCB_TYPE:
		sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
		if (sp) {
			sp->done(sp, res);
			return 0;
		}
		break;

	case ABTS_RESP_24XX:
	case CTIO_TYPE7:
	case CTIO_CRC2:
	default:
		return 1;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue (%04x).\n", que);
	return 0;
}
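
/*
 * Editorial sketch (compiled out, not part of the driver): how sense
 * data larger than one IOCB is gathered.  qla2x00_handle_sense() stores
 * the remaining length and destination pointer in the srb, and each
 * status continuation entry above drains up to sizeof(pkt->data) bytes
 * until nothing is left, at which point the command is completed.  The
 * struct and function names are hypothetical.
 */
#if 0
struct sense_cursor {
	uint8_t *ptr;		/* where the next chunk lands */
	uint32_t remaining;	/* bytes still expected */
};

/* Returns the number of bytes consumed from one continuation entry. */
static uint32_t drain_sense_chunk(struct sense_cursor *c,
    const uint8_t *chunk, uint32_t chunk_size)
{
	uint32_t n = c->remaining < chunk_size ? c->remaining : chunk_size;

	memcpy(c->ptr, chunk, n);
	c->ptr += n;
	c->remaining -= n;
	return n;	/* when c->remaining hits 0, complete the command */
}
#endif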

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}
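
/*
 * Editorial sketch (compiled out, not part of the driver): the in_mb
 * bitmap walk performed by qla24xx_mbx_completion() above, generalized.
 * Bit N of the mask selects mailbox register N; registers whose bit is
 * clear are skipped, but the register pointer still advances so reads
 * stay aligned with register numbers.  read_reg() is a hypothetical
 * stand-in for RD_REG_WORD().
 */
#if 0
static void read_selected_mailboxes(uint16_t *out, volatile uint16_t *regs,
    uint32_t mask, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (mask & 1)
			out[i] = read_reg(&regs[i]);
		mask >>= 1;	/* next bit selects the next register */
	}
}
#endif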

static void
qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct abort_entry_24xx *pkt)
{
	const char func[] = "ABT_IOCB";
	srb_t *sp;
	struct srb_iocb *abt;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	abt = &sp->u.iocb_cmd;
	/* Firmware returns the completion status in the nport_handle field. */
	abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
	sp->done(sp, 0);
}

void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
    struct pt_ls4_request *pkt, struct req_que *req)
{
	srb_t *sp;
	const char func[] = "LS4_IOCB";
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	comp_status = le16_to_cpu(pkt->status);
	sp->done(sp, comp_status);
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!ha->flags.fw_started)
		return;

	if (rsp->qpair->cpuid != smp_processor_id())
		qla_cpu_update(rsp->qpair, smp_processor_id());

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			if (qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt))
				goto process_err;

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}
process_err:

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
				/* ensure that the ATIO queue is empty */
				qlt_handle_abts_recv(vha, rsp,
				    (response_t *)pkt);
				break;
			} else {
				qlt_24xx_process_atio_queue(vha, 1);
			}
			/* fall through */
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case CTIO_CRC2:
			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
			break;
		case PT_LS4_REQUEST:
			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
			    rsp->req);
			break;
		case NOTIFY_ACK_TYPE:
			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
				qlt_response_pkt_all_vps(vha, rsp,
				    (response_t *)pkt);
			else
				qla24xxx_nack_iocb_entry(vha, rsp->req,
				    (struct nack_to_isp *)pkt);
			break;
		case MARKER_TYPE:
			/*
			 * Do nothing in this case; this check prevents the
			 * marker from falling into the default case.
			 */
			break;
		case ABORT_IOCB_TYPE:
			qla24xx_abort_iocb_entry(vha, rsp->req,
			    (struct abort_entry_24xx *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla24xx_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_24xx_entry *)pkt);
			break;
		case VP_CTRL_IOCB_TYPE:
			qla_ctrlvp_completed(vha, rsp->req,
			    (struct vp_ctrl_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else {
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
	}
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
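
/*
 * Editorial sketch (compiled out, not part of the driver): the bounded
 * register poll used by qla2xxx_check_risc_status() above, simplified:
 * arm the window, poll a ready bit, re-arm and wait between attempts,
 * and give up after a fixed number of tries.  write_window(),
 * read_window() and wait_us() are hypothetical stand-ins for
 * WRT_REG_DWORD(), RD_REG_DWORD() and udelay().
 */
#if 0
static int poll_window_ready(uint32_t window_val, int attempts)
{
	write_window(window_val);		/* arm the window */
	while (!(read_window() & 0x1)) {	/* BIT_0 set means ready */
		if (!attempts--)
			return QLA_FUNCTION_TIMEOUT;
		write_window(window_val);	/* re-arm and retry */
		wait_us(10);
	}
	return QLA_SUCCESS;
}
#endif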

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (qla2x00_check_reg32_for_disconnect(vha, stat))
			break;
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE_27XX:
		case INTR_ATIO_QUE_UPDATE:{
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
			break;
		}
		case INTR_ATIO_RSP_QUE_UPDATE: {
			unsigned long flags2;
			spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
			qlt_24xx_process_atio_queue(vha, 1);
			spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);

			qla24xx_process_response_queue(vha, rsp);
			break;
		}
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

irqreturn_t
qla2xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	qpair = dev_id;
	if (!qpair) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = qpair->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (unlikely(!ha->flags.disable_msix_handshake)) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}

	queue_work(ha->wq, &qpair->q_work);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static const struct qla_init_msix_entry msix_entries[] = {
	{ "default", qla24xx_msix_default },
	{ "rsp_q", qla24xx_msix_rsp_q },
	{ "atio_q", qla83xx_msix_atio_q },
	{ "qpair_multiq", qla2xxx_msix_rsp_q },
};

static const struct qla_init_msix_entry qla82xx_msix_entries[] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
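
/*
 * Editorial sketch (compiled out, not part of the driver): how a
 * {name, handler} table like msix_entries[] above is consumed.  The
 * vector index selects both the human-readable name and the handler,
 * so adding a vector type is a one-line table edit.  request_one() is
 * a hypothetical wrapper around the kernel's request_irq().
 */
#if 0
static int request_one(const struct qla_init_msix_entry *tbl, int idx,
    unsigned int vector, void *data)
{
	/* Vector idx gets the handler and name registered for that slot. */
	return request_irq(vector, tbl[idx].handler, 0, tbl[idx].name, data);
}
#endif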

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int i, ret;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int min_vecs = QLA_BASE_VECTORS;
	struct irq_affinity desc = {
		.pre_vectors = QLA_BASE_VECTORS,
	};

	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		desc.pre_vectors++;
		min_vecs++;
	}

	if (USER_CTRL_IRQ(ha)) {
		/* user wants to control IRQ setting for target mode */
		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
		    ha->msix_count, PCI_IRQ_MSIX);
	} else
		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
		    ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
		    &desc);

	if (ret < 0) {
		ql_log(ql_log_fatal, vha, 0x00c7,
		    "MSI-X: Failed to enable support, "
		    "giving up -- %d/%d.\n",
		    ha->msix_count, ret);
		goto msix_out;
	} else if (ret < ha->msix_count) {
		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "with %d vectors, using %d vectors.\n",
		    ha->msix_count, ret);
		ha->msix_count = ret;
		/* Recalculate queue values */
		if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
			ha->max_req_queues = ha->msix_count - 1;

			/* ATIO queue needs 1 vector. That's 1 less QPair */
			if (QLA_TGT_MODE_ENABLED())
				ha->max_req_queues--;

			ha->max_rsp_queues = ha->max_req_queues;

			ha->max_qpairs = ha->max_req_queues - 1;
			ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
			    "Adjusted maximum number of queue pairs: %d.\n",
			    ha->max_qpairs);
		}
	}
	ha->msix_entries = kcalloc(ha->msix_count,
	    sizeof(struct qla_msix_entry), GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = pci_irq_vector(ha->pdev, i);
		qentry->entry = i;
		qentry->have_irq = 0;
		qentry->in_use = 0;
		qentry->handle = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < QLA_BASE_VECTORS; i++) {
		qentry = &ha->msix_entries[i];
		qentry->handle = rsp;
		rsp->msix = qentry;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no, msix_entries[i].name);
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, qentry->name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->in_use = 1;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && (ql2xenablemsix != 0) &&
	    IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
		rsp->msix = qentry;
		qentry->handle = rsp;
		scnprintf(qentry->name, sizeof(qentry->name),
		    "qla2xxx%lu_%s", vha->host_no,
		    msix_entries[QLA_ATIO_VECTOR].name);
		qentry->in_use = 1;
		ret = request_irq(qentry->vector,
		    msix_entries[QLA_ATIO_VECTOR].handler,
		    0, qentry->name, rsp);
		qentry->have_irq = 1;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla2x00_free_irqs(vha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
		     ql2xmqsupport))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	return ret;
}
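
/*
 * Editorial sketch (compiled out, not part of the driver): the vector
 * budget applied above when fewer MSI-X vectors are granted than
 * requested.  QLA_BASE_VECTORS covers the default and rsp_q vectors,
 * one more vector goes to the ATIO queue in target mode, and whatever
 * remains bounds the queue pairs.  The function name and values are
 * illustrative only.
 */
#if 0
static int max_qpairs_for(int granted_vectors, int tgt_mode)
{
	int req_queues = granted_vectors - 1;	/* mirrors max_req_queues */

	if (tgt_mode)
		req_queues--;			/* ATIO queue needs a vector */

	return req_queues - 1;			/* mirrors ha->max_qpairs */
}
#endif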

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
	    !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
		goto skip_msi;

	if (ql2xenablemsix == 2)
		goto skip_msix;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}

skip_msix:

	ql_log(ql_log_info, vha, 0x0037,
	    "Falling back to MSI mode -- %d.\n", ret);

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
	    !IS_QLA27XX(ha))
		goto skip_msi;

	ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d, already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:
	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
3646 */ 3647 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3648 goto free_irqs; 3649 rsp = ha->rsp_q_map[0]; 3650 3651 if (ha->flags.msix_enabled) { 3652 for (i = 0; i < ha->msix_count; i++) { 3653 qentry = &ha->msix_entries[i]; 3654 if (qentry->have_irq) { 3655 irq_set_affinity_notifier(qentry->vector, NULL); 3656 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); 3657 } 3658 } 3659 kfree(ha->msix_entries); 3660 ha->msix_entries = NULL; 3661 ha->flags.msix_enabled = 0; 3662 ql_dbg(ql_dbg_init, vha, 0x0042, 3663 "Disabled MSI-X.\n"); 3664 } else { 3665 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 3666 } 3667 3668 free_irqs: 3669 pci_free_irq_vectors(ha->pdev); 3670 } 3671 3672 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 3673 struct qla_msix_entry *msix, int vector_type) 3674 { 3675 const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 3676 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3677 int ret; 3678 3679 scnprintf(msix->name, sizeof(msix->name), 3680 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); 3681 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); 3682 if (ret) { 3683 ql_log(ql_log_fatal, vha, 0x00e6, 3684 "MSI-X: Unable to register handler -- %x/%d.\n", 3685 msix->vector, ret); 3686 return ret; 3687 } 3688 msix->have_irq = 1; 3689 msix->handle = qpair; 3690 return ret; 3691 } 3692