1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <linux/t10-pi.h> 13 #include <scsi/scsi_tcq.h> 14 #include <scsi/scsi_bsg_fc.h> 15 #include <scsi/scsi_eh.h> 16 17 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 18 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 19 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 20 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 21 sts_entry_t *); 22 23 /** 24 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 25 * @irq: 26 * @dev_id: SCSI driver HA context 27 * 28 * Called by system whenever the host adapter generates an interrupt. 29 * 30 * Returns handled flag. 31 */ 32 irqreturn_t 33 qla2100_intr_handler(int irq, void *dev_id) 34 { 35 scsi_qla_host_t *vha; 36 struct qla_hw_data *ha; 37 struct device_reg_2xxx __iomem *reg; 38 int status; 39 unsigned long iter; 40 uint16_t hccr; 41 uint16_t mb[4]; 42 struct rsp_que *rsp; 43 unsigned long flags; 44 45 rsp = (struct rsp_que *) dev_id; 46 if (!rsp) { 47 ql_log(ql_log_info, NULL, 0x505d, 48 "%s: NULL response queue pointer.\n", __func__); 49 return (IRQ_NONE); 50 } 51 52 ha = rsp->hw; 53 reg = &ha->iobase->isp; 54 status = 0; 55 56 spin_lock_irqsave(&ha->hardware_lock, flags); 57 vha = pci_get_drvdata(ha->pdev); 58 for (iter = 50; iter--; ) { 59 hccr = RD_REG_WORD(®->hccr); 60 if (qla2x00_check_reg16_for_disconnect(vha, hccr)) 61 break; 62 if (hccr & HCCR_RISC_PAUSE) { 63 if (pci_channel_offline(ha->pdev)) 64 break; 65 66 /* 67 * Issue a "HARD" reset in order for the RISC interrupt 68 * bit to be cleared. Schedule a big hammer to get 69 * out of the RISC PAUSED state. 70 */ 71 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 72 RD_REG_WORD(®->hccr); 73 74 ha->isp_ops->fw_dump(vha, 1); 75 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 76 break; 77 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) 78 break; 79 80 if (RD_REG_WORD(®->semaphore) & BIT_0) { 81 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 82 RD_REG_WORD(®->hccr); 83 84 /* Get mailbox data. */ 85 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 86 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 87 qla2x00_mbx_completion(vha, mb[0]); 88 status |= MBX_INTERRUPT; 89 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 90 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 91 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 92 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 93 qla2x00_async_event(vha, rsp, mb); 94 } else { 95 /*EMPTY*/ 96 ql_dbg(ql_dbg_async, vha, 0x5025, 97 "Unrecognized interrupt type (%d).\n", 98 mb[0]); 99 } 100 /* Release mailbox registers. */ 101 WRT_REG_WORD(®->semaphore, 0); 102 RD_REG_WORD(®->semaphore); 103 } else { 104 qla2x00_process_response_queue(rsp); 105 106 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 107 RD_REG_WORD(®->hccr); 108 } 109 } 110 qla2x00_handle_mbx_completion(ha, status); 111 spin_unlock_irqrestore(&ha->hardware_lock, flags); 112 113 return (IRQ_HANDLED); 114 } 115 116 bool 117 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 118 { 119 /* Check for PCI disconnection */ 120 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { 121 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 122 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 123 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 124 /* 125 * Schedule this (only once) on the default system 126 * workqueue so that all the adapter workqueues and the 127 * DPC thread can be shutdown cleanly. 128 */ 129 schedule_work(&vha->hw->board_disable); 130 } 131 return true; 132 } else 133 return false; 134 } 135 136 bool 137 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) 138 { 139 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); 140 } 141 142 /** 143 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 144 * @irq: 145 * @dev_id: SCSI driver HA context 146 * 147 * Called by system whenever the host adapter generates an interrupt. 148 * 149 * Returns handled flag. 150 */ 151 irqreturn_t 152 qla2300_intr_handler(int irq, void *dev_id) 153 { 154 scsi_qla_host_t *vha; 155 struct device_reg_2xxx __iomem *reg; 156 int status; 157 unsigned long iter; 158 uint32_t stat; 159 uint16_t hccr; 160 uint16_t mb[4]; 161 struct rsp_que *rsp; 162 struct qla_hw_data *ha; 163 unsigned long flags; 164 165 rsp = (struct rsp_que *) dev_id; 166 if (!rsp) { 167 ql_log(ql_log_info, NULL, 0x5058, 168 "%s: NULL response queue pointer.\n", __func__); 169 return (IRQ_NONE); 170 } 171 172 ha = rsp->hw; 173 reg = &ha->iobase->isp; 174 status = 0; 175 176 spin_lock_irqsave(&ha->hardware_lock, flags); 177 vha = pci_get_drvdata(ha->pdev); 178 for (iter = 50; iter--; ) { 179 stat = RD_REG_DWORD(®->u.isp2300.host_status); 180 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 181 break; 182 if (stat & HSR_RISC_PAUSED) { 183 if (unlikely(pci_channel_offline(ha->pdev))) 184 break; 185 186 hccr = RD_REG_WORD(®->hccr); 187 188 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 189 ql_log(ql_log_warn, vha, 0x5026, 190 "Parity error -- HCCR=%x, Dumping " 191 "firmware.\n", hccr); 192 else 193 ql_log(ql_log_warn, vha, 0x5027, 194 "RISC paused -- HCCR=%x, Dumping " 195 "firmware.\n", hccr); 196 197 /* 198 * Issue a "HARD" reset in order for the RISC 199 * interrupt bit to be cleared. Schedule a big 200 * hammer to get out of the RISC PAUSED state. 201 */ 202 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 203 RD_REG_WORD(®->hccr); 204 205 ha->isp_ops->fw_dump(vha, 1); 206 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 207 break; 208 } else if ((stat & HSR_RISC_INT) == 0) 209 break; 210 211 switch (stat & 0xff) { 212 case 0x1: 213 case 0x2: 214 case 0x10: 215 case 0x11: 216 qla2x00_mbx_completion(vha, MSW(stat)); 217 status |= MBX_INTERRUPT; 218 219 /* Release mailbox registers. */ 220 WRT_REG_WORD(®->semaphore, 0); 221 break; 222 case 0x12: 223 mb[0] = MSW(stat); 224 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 225 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 226 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 227 qla2x00_async_event(vha, rsp, mb); 228 break; 229 case 0x13: 230 qla2x00_process_response_queue(rsp); 231 break; 232 case 0x15: 233 mb[0] = MBA_CMPLT_1_16BIT; 234 mb[1] = MSW(stat); 235 qla2x00_async_event(vha, rsp, mb); 236 break; 237 case 0x16: 238 mb[0] = MBA_SCSI_COMPLETION; 239 mb[1] = MSW(stat); 240 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 241 qla2x00_async_event(vha, rsp, mb); 242 break; 243 default: 244 ql_dbg(ql_dbg_async, vha, 0x5028, 245 "Unrecognized interrupt type (%d).\n", stat & 0xff); 246 break; 247 } 248 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 249 RD_REG_WORD_RELAXED(®->hccr); 250 } 251 qla2x00_handle_mbx_completion(ha, status); 252 spin_unlock_irqrestore(&ha->hardware_lock, flags); 253 254 return (IRQ_HANDLED); 255 } 256 257 /** 258 * qla2x00_mbx_completion() - Process mailbox command completions. 259 * @ha: SCSI driver HA context 260 * @mb0: Mailbox0 register 261 */ 262 static void 263 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 264 { 265 uint16_t cnt; 266 uint32_t mboxes; 267 uint16_t __iomem *wptr; 268 struct qla_hw_data *ha = vha->hw; 269 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 270 271 /* Read all mbox registers? */ 272 mboxes = (1 << ha->mbx_count) - 1; 273 if (!ha->mcp) 274 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 275 else 276 mboxes = ha->mcp->in_mb; 277 278 /* Load return mailbox registers. */ 279 ha->flags.mbox_int = 1; 280 ha->mailbox_out[0] = mb0; 281 mboxes >>= 1; 282 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 283 284 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 285 if (IS_QLA2200(ha) && cnt == 8) 286 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 287 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 288 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 289 else if (mboxes & BIT_0) 290 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 291 292 wptr++; 293 mboxes >>= 1; 294 } 295 } 296 297 static void 298 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 299 { 300 static char *event[] = 301 { "Complete", "Request Notification", "Time Extension" }; 302 int rval; 303 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 304 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 305 uint16_t __iomem *wptr; 306 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 307 308 /* Seed data -- mailbox1 -> mailbox7. */ 309 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 310 wptr = (uint16_t __iomem *)®24->mailbox1; 311 else if (IS_QLA8044(vha->hw)) 312 wptr = (uint16_t __iomem *)®82->mailbox_out[1]; 313 else 314 return; 315 316 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 317 mb[cnt] = RD_REG_WORD(wptr); 318 319 ql_dbg(ql_dbg_async, vha, 0x5021, 320 "Inter-Driver Communication %s -- " 321 "%04x %04x %04x %04x %04x %04x %04x.\n", 322 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 323 mb[4], mb[5], mb[6]); 324 switch (aen) { 325 /* Handle IDC Error completion case. */ 326 case MBA_IDC_COMPLETE: 327 if (mb[1] >> 15) { 328 vha->hw->flags.idc_compl_status = 1; 329 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 330 complete(&vha->hw->dcbx_comp); 331 } 332 break; 333 334 case MBA_IDC_NOTIFY: 335 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 336 timeout = (descr >> 8) & 0xf; 337 ql_dbg(ql_dbg_async, vha, 0x5022, 338 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 339 vha->host_no, event[aen & 0xff], timeout); 340 341 if (!timeout) 342 return; 343 rval = qla2x00_post_idc_ack_work(vha, mb); 344 if (rval != QLA_SUCCESS) 345 ql_log(ql_log_warn, vha, 0x5023, 346 "IDC failed to post ACK.\n"); 347 break; 348 case MBA_IDC_TIME_EXT: 349 vha->hw->idc_extend_tmo = descr; 350 ql_dbg(ql_dbg_async, vha, 0x5087, 351 "%lu Inter-Driver Communication %s -- " 352 "Extend timeout by=%d.\n", 353 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 354 break; 355 } 356 } 357 358 #define LS_UNKNOWN 2 359 const char * 360 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 361 { 362 static const char *const link_speeds[] = { 363 "1", "2", "?", "4", "8", "16", "32", "10" 364 }; 365 #define QLA_LAST_SPEED 7 366 367 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 368 return link_speeds[0]; 369 else if (speed == 0x13) 370 return link_speeds[QLA_LAST_SPEED]; 371 else if (speed < QLA_LAST_SPEED) 372 return link_speeds[speed]; 373 else 374 return link_speeds[LS_UNKNOWN]; 375 } 376 377 static void 378 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 379 { 380 struct qla_hw_data *ha = vha->hw; 381 382 /* 383 * 8200 AEN Interpretation: 384 * mb[0] = AEN code 385 * mb[1] = AEN Reason code 386 * mb[2] = LSW of Peg-Halt Status-1 Register 387 * mb[6] = MSW of Peg-Halt Status-1 Register 388 * mb[3] = LSW of Peg-Halt Status-2 register 389 * mb[7] = MSW of Peg-Halt Status-2 register 390 * mb[4] = IDC Device-State Register value 391 * mb[5] = IDC Driver-Presence Register value 392 */ 393 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 394 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 395 mb[0], mb[1], mb[2], mb[6]); 396 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 397 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 398 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 399 400 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 401 IDC_HEARTBEAT_FAILURE)) { 402 ha->flags.nic_core_hung = 1; 403 ql_log(ql_log_warn, vha, 0x5060, 404 "83XX: F/W Error Reported: Check if reset required.\n"); 405 406 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 407 uint32_t protocol_engine_id, fw_err_code, err_level; 408 409 /* 410 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 411 * - PEG-Halt Status-1 Register: 412 * (LSW = mb[2], MSW = mb[6]) 413 * Bits 0-7 = protocol-engine ID 414 * Bits 8-28 = f/w error code 415 * Bits 29-31 = Error-level 416 * Error-level 0x1 = Non-Fatal error 417 * Error-level 0x2 = Recoverable Fatal error 418 * Error-level 0x4 = UnRecoverable Fatal error 419 * - PEG-Halt Status-2 Register: 420 * (LSW = mb[3], MSW = mb[7]) 421 */ 422 protocol_engine_id = (mb[2] & 0xff); 423 fw_err_code = (((mb[2] & 0xff00) >> 8) | 424 ((mb[6] & 0x1fff) << 8)); 425 err_level = ((mb[6] & 0xe000) >> 13); 426 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 427 "Register: protocol_engine_id=0x%x " 428 "fw_err_code=0x%x err_level=0x%x.\n", 429 protocol_engine_id, fw_err_code, err_level); 430 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 431 "Register: 0x%x%x.\n", mb[7], mb[3]); 432 if (err_level == ERR_LEVEL_NON_FATAL) { 433 ql_log(ql_log_warn, vha, 0x5063, 434 "Not a fatal error, f/w has recovered " 435 "iteself.\n"); 436 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 437 ql_log(ql_log_fatal, vha, 0x5064, 438 "Recoverable Fatal error: Chip reset " 439 "required.\n"); 440 qla83xx_schedule_work(vha, 441 QLA83XX_NIC_CORE_RESET); 442 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 443 ql_log(ql_log_fatal, vha, 0x5065, 444 "Unrecoverable Fatal error: Set FAILED " 445 "state, reboot required.\n"); 446 qla83xx_schedule_work(vha, 447 QLA83XX_NIC_CORE_UNRECOVERABLE); 448 } 449 } 450 451 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 452 uint16_t peg_fw_state, nw_interface_link_up; 453 uint16_t nw_interface_signal_detect, sfp_status; 454 uint16_t htbt_counter, htbt_monitor_enable; 455 uint16_t sfp_additonal_info, sfp_multirate; 456 uint16_t sfp_tx_fault, link_speed, dcbx_status; 457 458 /* 459 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 460 * - PEG-to-FC Status Register: 461 * (LSW = mb[2], MSW = mb[6]) 462 * Bits 0-7 = Peg-Firmware state 463 * Bit 8 = N/W Interface Link-up 464 * Bit 9 = N/W Interface signal detected 465 * Bits 10-11 = SFP Status 466 * SFP Status 0x0 = SFP+ transceiver not expected 467 * SFP Status 0x1 = SFP+ transceiver not present 468 * SFP Status 0x2 = SFP+ transceiver invalid 469 * SFP Status 0x3 = SFP+ transceiver present and 470 * valid 471 * Bits 12-14 = Heartbeat Counter 472 * Bit 15 = Heartbeat Monitor Enable 473 * Bits 16-17 = SFP Additional Info 474 * SFP info 0x0 = Unregocnized transceiver for 475 * Ethernet 476 * SFP info 0x1 = SFP+ brand validation failed 477 * SFP info 0x2 = SFP+ speed validation failed 478 * SFP info 0x3 = SFP+ access error 479 * Bit 18 = SFP Multirate 480 * Bit 19 = SFP Tx Fault 481 * Bits 20-22 = Link Speed 482 * Bits 23-27 = Reserved 483 * Bits 28-30 = DCBX Status 484 * DCBX Status 0x0 = DCBX Disabled 485 * DCBX Status 0x1 = DCBX Enabled 486 * DCBX Status 0x2 = DCBX Exchange error 487 * Bit 31 = Reserved 488 */ 489 peg_fw_state = (mb[2] & 0x00ff); 490 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 491 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 492 sfp_status = ((mb[2] & 0x0c00) >> 10); 493 htbt_counter = ((mb[2] & 0x7000) >> 12); 494 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 495 sfp_additonal_info = (mb[6] & 0x0003); 496 sfp_multirate = ((mb[6] & 0x0004) >> 2); 497 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 498 link_speed = ((mb[6] & 0x0070) >> 4); 499 dcbx_status = ((mb[6] & 0x7000) >> 12); 500 501 ql_log(ql_log_warn, vha, 0x5066, 502 "Peg-to-Fc Status Register:\n" 503 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 504 "nw_interface_signal_detect=0x%x" 505 "\nsfp_statis=0x%x.\n ", peg_fw_state, 506 nw_interface_link_up, nw_interface_signal_detect, 507 sfp_status); 508 ql_log(ql_log_warn, vha, 0x5067, 509 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 510 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", 511 htbt_counter, htbt_monitor_enable, 512 sfp_additonal_info, sfp_multirate); 513 ql_log(ql_log_warn, vha, 0x5068, 514 "sfp_tx_fault=0x%x, link_state=0x%x, " 515 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 516 dcbx_status); 517 518 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 519 } 520 521 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 522 ql_log(ql_log_warn, vha, 0x5069, 523 "Heartbeat Failure encountered, chip reset " 524 "required.\n"); 525 526 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 527 } 528 } 529 530 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 531 ql_log(ql_log_info, vha, 0x506a, 532 "IDC Device-State changed = 0x%x.\n", mb[4]); 533 if (ha->flags.nic_core_reset_owner) 534 return; 535 qla83xx_schedule_work(vha, MBA_IDC_AEN); 536 } 537 } 538 539 int 540 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 541 { 542 struct qla_hw_data *ha = vha->hw; 543 scsi_qla_host_t *vp; 544 uint32_t vp_did; 545 unsigned long flags; 546 int ret = 0; 547 548 if (!ha->num_vhosts) 549 return ret; 550 551 spin_lock_irqsave(&ha->vport_slock, flags); 552 list_for_each_entry(vp, &ha->vp_list, list) { 553 vp_did = vp->d_id.b24; 554 if (vp_did == rscn_entry) { 555 ret = 1; 556 break; 557 } 558 } 559 spin_unlock_irqrestore(&ha->vport_slock, flags); 560 561 return ret; 562 } 563 564 fc_port_t * 565 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 566 { 567 fc_port_t *f, *tf; 568 569 f = tf = NULL; 570 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) 571 if (f->loop_id == loop_id) 572 return f; 573 return NULL; 574 } 575 576 fc_port_t * 577 qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) 578 { 579 fc_port_t *f, *tf; 580 581 f = tf = NULL; 582 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 583 if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { 584 if (incl_deleted) 585 return f; 586 else if (f->deleted == 0) 587 return f; 588 } 589 } 590 return NULL; 591 } 592 593 fc_port_t * 594 qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, 595 u8 incl_deleted) 596 { 597 fc_port_t *f, *tf; 598 599 f = tf = NULL; 600 list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { 601 if (f->d_id.b24 == id->b24) { 602 if (incl_deleted) 603 return f; 604 else if (f->deleted == 0) 605 return f; 606 } 607 } 608 return NULL; 609 } 610 611 /** 612 * qla2x00_async_event() - Process aynchronous events. 613 * @ha: SCSI driver HA context 614 * @mb: Mailbox registers (0 - 3) 615 */ 616 void 617 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 618 { 619 uint16_t handle_cnt; 620 uint16_t cnt, mbx; 621 uint32_t handles[5]; 622 struct qla_hw_data *ha = vha->hw; 623 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 624 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 625 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 626 uint32_t rscn_entry, host_pid; 627 unsigned long flags; 628 fc_port_t *fcport = NULL; 629 630 /* Setup to process RIO completion. */ 631 handle_cnt = 0; 632 if (IS_CNA_CAPABLE(ha)) 633 goto skip_rio; 634 switch (mb[0]) { 635 case MBA_SCSI_COMPLETION: 636 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 637 handle_cnt = 1; 638 break; 639 case MBA_CMPLT_1_16BIT: 640 handles[0] = mb[1]; 641 handle_cnt = 1; 642 mb[0] = MBA_SCSI_COMPLETION; 643 break; 644 case MBA_CMPLT_2_16BIT: 645 handles[0] = mb[1]; 646 handles[1] = mb[2]; 647 handle_cnt = 2; 648 mb[0] = MBA_SCSI_COMPLETION; 649 break; 650 case MBA_CMPLT_3_16BIT: 651 handles[0] = mb[1]; 652 handles[1] = mb[2]; 653 handles[2] = mb[3]; 654 handle_cnt = 3; 655 mb[0] = MBA_SCSI_COMPLETION; 656 break; 657 case MBA_CMPLT_4_16BIT: 658 handles[0] = mb[1]; 659 handles[1] = mb[2]; 660 handles[2] = mb[3]; 661 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 662 handle_cnt = 4; 663 mb[0] = MBA_SCSI_COMPLETION; 664 break; 665 case MBA_CMPLT_5_16BIT: 666 handles[0] = mb[1]; 667 handles[1] = mb[2]; 668 handles[2] = mb[3]; 669 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 670 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 671 handle_cnt = 5; 672 mb[0] = MBA_SCSI_COMPLETION; 673 break; 674 case MBA_CMPLT_2_32BIT: 675 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 676 handles[1] = le32_to_cpu( 677 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 678 RD_MAILBOX_REG(ha, reg, 6)); 679 handle_cnt = 2; 680 mb[0] = MBA_SCSI_COMPLETION; 681 break; 682 default: 683 break; 684 } 685 skip_rio: 686 switch (mb[0]) { 687 case MBA_SCSI_COMPLETION: /* Fast Post */ 688 if (!vha->flags.online) 689 break; 690 691 for (cnt = 0; cnt < handle_cnt; cnt++) 692 qla2x00_process_completed_request(vha, rsp->req, 693 handles[cnt]); 694 break; 695 696 case MBA_RESET: /* Reset */ 697 ql_dbg(ql_dbg_async, vha, 0x5002, 698 "Asynchronous RESET.\n"); 699 700 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 701 break; 702 703 case MBA_SYSTEM_ERR: /* System Error */ 704 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? 705 RD_REG_WORD(®24->mailbox7) : 0; 706 ql_log(ql_log_warn, vha, 0x5003, 707 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 708 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 709 710 ha->isp_ops->fw_dump(vha, 1); 711 712 if (IS_FWI2_CAPABLE(ha)) { 713 if (mb[1] == 0 && mb[2] == 0) { 714 ql_log(ql_log_fatal, vha, 0x5004, 715 "Unrecoverable Hardware Error: adapter " 716 "marked OFFLINE!\n"); 717 vha->flags.online = 0; 718 vha->device_flags |= DFLG_DEV_FAILED; 719 } else { 720 /* Check to see if MPI timeout occurred */ 721 if ((mbx & MBX_3) && (ha->port_no == 0)) 722 set_bit(MPI_RESET_NEEDED, 723 &vha->dpc_flags); 724 725 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 726 } 727 } else if (mb[1] == 0) { 728 ql_log(ql_log_fatal, vha, 0x5005, 729 "Unrecoverable Hardware Error: adapter marked " 730 "OFFLINE!\n"); 731 vha->flags.online = 0; 732 vha->device_flags |= DFLG_DEV_FAILED; 733 } else 734 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 735 break; 736 737 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 738 ql_log(ql_log_warn, vha, 0x5006, 739 "ISP Request Transfer Error (%x).\n", mb[1]); 740 741 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 742 break; 743 744 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 745 ql_log(ql_log_warn, vha, 0x5007, 746 "ISP Response Transfer Error (%x).\n", mb[1]); 747 748 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 749 break; 750 751 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 752 ql_dbg(ql_dbg_async, vha, 0x5008, 753 "Asynchronous WAKEUP_THRES (%x).\n", mb[1]); 754 break; 755 756 case MBA_LOOP_INIT_ERR: 757 ql_log(ql_log_warn, vha, 0x5090, 758 "LOOP INIT ERROR (%x).\n", mb[1]); 759 ha->isp_ops->fw_dump(vha, 1); 760 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 761 break; 762 763 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 764 ql_dbg(ql_dbg_async, vha, 0x5009, 765 "LIP occurred (%x).\n", mb[1]); 766 767 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 768 atomic_set(&vha->loop_state, LOOP_DOWN); 769 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 770 qla2x00_mark_all_devices_lost(vha, 1); 771 } 772 773 if (vha->vp_idx) { 774 atomic_set(&vha->vp_state, VP_FAILED); 775 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 776 } 777 778 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 779 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 780 781 vha->flags.management_server_logged_in = 0; 782 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 783 break; 784 785 case MBA_LOOP_UP: /* Loop Up Event */ 786 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 787 ha->link_data_rate = PORT_SPEED_1GB; 788 else 789 ha->link_data_rate = mb[1]; 790 791 ql_log(ql_log_info, vha, 0x500a, 792 "LOOP UP detected (%s Gbps).\n", 793 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 794 795 vha->flags.management_server_logged_in = 0; 796 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 797 break; 798 799 case MBA_LOOP_DOWN: /* Loop Down Event */ 800 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 801 ? RD_REG_WORD(®24->mailbox4) : 0; 802 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) 803 : mbx; 804 ql_log(ql_log_info, vha, 0x500b, 805 "LOOP DOWN detected (%x %x %x %x).\n", 806 mb[1], mb[2], mb[3], mbx); 807 808 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 809 atomic_set(&vha->loop_state, LOOP_DOWN); 810 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 811 /* 812 * In case of loop down, restore WWPN from 813 * NVRAM in case of FA-WWPN capable ISP 814 * Restore for Physical Port only 815 */ 816 if (!vha->vp_idx) { 817 if (ha->flags.fawwpn_enabled) { 818 void *wwpn = ha->init_cb->port_name; 819 memcpy(vha->port_name, wwpn, WWN_SIZE); 820 fc_host_port_name(vha->host) = 821 wwn_to_u64(vha->port_name); 822 ql_dbg(ql_dbg_init + ql_dbg_verbose, 823 vha, 0x0144, "LOOP DOWN detected," 824 "restore WWPN %016llx\n", 825 wwn_to_u64(vha->port_name)); 826 } 827 828 clear_bit(VP_CONFIG_OK, &vha->vp_flags); 829 } 830 831 vha->device_flags |= DFLG_NO_CABLE; 832 qla2x00_mark_all_devices_lost(vha, 1); 833 } 834 835 if (vha->vp_idx) { 836 atomic_set(&vha->vp_state, VP_FAILED); 837 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 838 } 839 840 vha->flags.management_server_logged_in = 0; 841 ha->link_data_rate = PORT_SPEED_UNKNOWN; 842 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 843 break; 844 845 case MBA_LIP_RESET: /* LIP reset occurred */ 846 ql_dbg(ql_dbg_async, vha, 0x500c, 847 "LIP reset occurred (%x).\n", mb[1]); 848 849 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 850 atomic_set(&vha->loop_state, LOOP_DOWN); 851 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 852 qla2x00_mark_all_devices_lost(vha, 1); 853 } 854 855 if (vha->vp_idx) { 856 atomic_set(&vha->vp_state, VP_FAILED); 857 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 858 } 859 860 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 861 862 ha->operating_mode = LOOP; 863 vha->flags.management_server_logged_in = 0; 864 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 865 break; 866 867 /* case MBA_DCBX_COMPLETE: */ 868 case MBA_POINT_TO_POINT: /* Point-to-Point */ 869 if (IS_QLA2100(ha)) 870 break; 871 872 if (IS_CNA_CAPABLE(ha)) { 873 ql_dbg(ql_dbg_async, vha, 0x500d, 874 "DCBX Completed -- %04x %04x %04x.\n", 875 mb[1], mb[2], mb[3]); 876 if (ha->notify_dcbx_comp && !vha->vp_idx) 877 complete(&ha->dcbx_comp); 878 879 } else 880 ql_dbg(ql_dbg_async, vha, 0x500e, 881 "Asynchronous P2P MODE received.\n"); 882 883 /* 884 * Until there's a transition from loop down to loop up, treat 885 * this as loop down only. 886 */ 887 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 888 atomic_set(&vha->loop_state, LOOP_DOWN); 889 if (!atomic_read(&vha->loop_down_timer)) 890 atomic_set(&vha->loop_down_timer, 891 LOOP_DOWN_TIME); 892 qla2x00_mark_all_devices_lost(vha, 1); 893 } 894 895 if (vha->vp_idx) { 896 atomic_set(&vha->vp_state, VP_FAILED); 897 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 898 } 899 900 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 901 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 902 903 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 904 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 905 906 ha->flags.gpsc_supported = 1; 907 vha->flags.management_server_logged_in = 0; 908 break; 909 910 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 911 if (IS_QLA2100(ha)) 912 break; 913 914 ql_dbg(ql_dbg_async, vha, 0x500f, 915 "Configuration change detected: value=%x.\n", mb[1]); 916 917 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 918 atomic_set(&vha->loop_state, LOOP_DOWN); 919 if (!atomic_read(&vha->loop_down_timer)) 920 atomic_set(&vha->loop_down_timer, 921 LOOP_DOWN_TIME); 922 qla2x00_mark_all_devices_lost(vha, 1); 923 } 924 925 if (vha->vp_idx) { 926 atomic_set(&vha->vp_state, VP_FAILED); 927 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 928 } 929 930 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 931 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 932 break; 933 934 case MBA_PORT_UPDATE: /* Port database update */ 935 /* 936 * Handle only global and vn-port update events 937 * 938 * Relevant inputs: 939 * mb[1] = N_Port handle of changed port 940 * OR 0xffff for global event 941 * mb[2] = New login state 942 * 7 = Port logged out 943 * mb[3] = LSB is vp_idx, 0xff = all vps 944 * 945 * Skip processing if: 946 * Event is global, vp_idx is NOT all vps, 947 * vp_idx does not match 948 * Event is not global, vp_idx does not match 949 */ 950 if (IS_QLA2XXX_MIDTYPE(ha) && 951 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 952 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 953 break; 954 955 if (mb[2] == 0x7) { 956 ql_dbg(ql_dbg_async, vha, 0x5010, 957 "Port %s %04x %04x %04x.\n", 958 mb[1] == 0xffff ? "unavailable" : "logout", 959 mb[1], mb[2], mb[3]); 960 961 if (mb[1] == 0xffff) 962 goto global_port_update; 963 964 /* Port logout */ 965 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 966 if (!fcport) 967 break; 968 if (atomic_read(&fcport->state) != FCS_ONLINE) 969 break; 970 ql_dbg(ql_dbg_async, vha, 0x508a, 971 "Marking port lost loopid=%04x portid=%06x.\n", 972 fcport->loop_id, fcport->d_id.b24); 973 if (qla_ini_mode_enabled(vha)) { 974 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 975 fcport->logout_on_delete = 0; 976 qlt_schedule_sess_for_deletion_lock(fcport); 977 } 978 break; 979 980 global_port_update: 981 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 982 atomic_set(&vha->loop_state, LOOP_DOWN); 983 atomic_set(&vha->loop_down_timer, 984 LOOP_DOWN_TIME); 985 vha->device_flags |= DFLG_NO_CABLE; 986 qla2x00_mark_all_devices_lost(vha, 1); 987 } 988 989 if (vha->vp_idx) { 990 atomic_set(&vha->vp_state, VP_FAILED); 991 fc_vport_set_state(vha->fc_vport, 992 FC_VPORT_FAILED); 993 qla2x00_mark_all_devices_lost(vha, 1); 994 } 995 996 vha->flags.management_server_logged_in = 0; 997 ha->link_data_rate = PORT_SPEED_UNKNOWN; 998 break; 999 } 1000 1001 /* 1002 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 1003 * event etc. earlier indicating loop is down) then process 1004 * it. Otherwise ignore it and Wait for RSCN to come in. 1005 */ 1006 atomic_set(&vha->loop_down_timer, 0); 1007 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 1008 atomic_read(&vha->loop_state) != LOOP_DEAD) { 1009 ql_dbg(ql_dbg_async, vha, 0x5011, 1010 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 1011 mb[1], mb[2], mb[3]); 1012 1013 qlt_async_event(mb[0], vha, mb); 1014 break; 1015 } 1016 1017 ql_dbg(ql_dbg_async, vha, 0x5012, 1018 "Port database changed %04x %04x %04x.\n", 1019 mb[1], mb[2], mb[3]); 1020 1021 /* 1022 * Mark all devices as missing so we will login again. 1023 */ 1024 atomic_set(&vha->loop_state, LOOP_UP); 1025 1026 qla2x00_mark_all_devices_lost(vha, 1); 1027 1028 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1029 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 1030 set_bit(VP_CONFIG_OK, &vha->vp_flags); 1031 1032 qlt_async_event(mb[0], vha, mb); 1033 break; 1034 1035 case MBA_RSCN_UPDATE: /* State Change Registration */ 1036 /* Check if the Vport has issued a SCR */ 1037 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1038 break; 1039 /* Only handle SCNs for our Vport index. */ 1040 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1041 break; 1042 1043 ql_dbg(ql_dbg_async, vha, 0x5013, 1044 "RSCN database changed -- %04x %04x %04x.\n", 1045 mb[1], mb[2], mb[3]); 1046 1047 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1048 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1049 | vha->d_id.b.al_pa; 1050 if (rscn_entry == host_pid) { 1051 ql_dbg(ql_dbg_async, vha, 0x5014, 1052 "Ignoring RSCN update to local host " 1053 "port ID (%06x).\n", host_pid); 1054 break; 1055 } 1056 1057 /* Ignore reserved bits from RSCN-payload. */ 1058 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 1059 1060 /* Skip RSCNs for virtual ports on the same physical port */ 1061 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 1062 break; 1063 1064 atomic_set(&vha->loop_down_timer, 0); 1065 vha->flags.management_server_logged_in = 0; 1066 { 1067 struct event_arg ea; 1068 1069 memset(&ea, 0, sizeof(ea)); 1070 ea.event = FCME_RSCN; 1071 ea.id.b24 = rscn_entry; 1072 ea.id.b.rsvd_1 = rscn_entry >> 24; 1073 qla2x00_fcport_event_handler(vha, &ea); 1074 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1075 } 1076 break; 1077 /* case MBA_RIO_RESPONSE: */ 1078 case MBA_ZIO_RESPONSE: 1079 ql_dbg(ql_dbg_async, vha, 0x5015, 1080 "[R|Z]IO update completion.\n"); 1081 1082 if (IS_FWI2_CAPABLE(ha)) 1083 qla24xx_process_response_queue(vha, rsp); 1084 else 1085 qla2x00_process_response_queue(rsp); 1086 break; 1087 1088 case MBA_DISCARD_RND_FRAME: 1089 ql_dbg(ql_dbg_async, vha, 0x5016, 1090 "Discard RND Frame -- %04x %04x %04x.\n", 1091 mb[1], mb[2], mb[3]); 1092 break; 1093 1094 case MBA_TRACE_NOTIFICATION: 1095 ql_dbg(ql_dbg_async, vha, 0x5017, 1096 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1097 break; 1098 1099 case MBA_ISP84XX_ALERT: 1100 ql_dbg(ql_dbg_async, vha, 0x5018, 1101 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1102 mb[1], mb[2], mb[3]); 1103 1104 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1105 switch (mb[1]) { 1106 case A84_PANIC_RECOVERY: 1107 ql_log(ql_log_info, vha, 0x5019, 1108 "Alert 84XX: panic recovery %04x %04x.\n", 1109 mb[2], mb[3]); 1110 break; 1111 case A84_OP_LOGIN_COMPLETE: 1112 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1113 ql_log(ql_log_info, vha, 0x501a, 1114 "Alert 84XX: firmware version %x.\n", 1115 ha->cs84xx->op_fw_version); 1116 break; 1117 case A84_DIAG_LOGIN_COMPLETE: 1118 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1119 ql_log(ql_log_info, vha, 0x501b, 1120 "Alert 84XX: diagnostic firmware version %x.\n", 1121 ha->cs84xx->diag_fw_version); 1122 break; 1123 case A84_GOLD_LOGIN_COMPLETE: 1124 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1125 ha->cs84xx->fw_update = 1; 1126 ql_log(ql_log_info, vha, 0x501c, 1127 "Alert 84XX: gold firmware version %x.\n", 1128 ha->cs84xx->gold_fw_version); 1129 break; 1130 default: 1131 ql_log(ql_log_warn, vha, 0x501d, 1132 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1133 mb[1], mb[2], mb[3]); 1134 } 1135 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1136 break; 1137 case MBA_DCBX_START: 1138 ql_dbg(ql_dbg_async, vha, 0x501e, 1139 "DCBX Started -- %04x %04x %04x.\n", 1140 mb[1], mb[2], mb[3]); 1141 break; 1142 case MBA_DCBX_PARAM_UPDATE: 1143 ql_dbg(ql_dbg_async, vha, 0x501f, 1144 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1145 mb[1], mb[2], mb[3]); 1146 break; 1147 case MBA_FCF_CONF_ERR: 1148 ql_dbg(ql_dbg_async, vha, 0x5020, 1149 "FCF Configuration Error -- %04x %04x %04x.\n", 1150 mb[1], mb[2], mb[3]); 1151 break; 1152 case MBA_IDC_NOTIFY: 1153 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1154 mb[4] = RD_REG_WORD(®24->mailbox4); 1155 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1156 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1157 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1158 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1159 /* 1160 * Extend loop down timer since port is active. 1161 */ 1162 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1163 atomic_set(&vha->loop_down_timer, 1164 LOOP_DOWN_TIME); 1165 qla2xxx_wake_dpc(vha); 1166 } 1167 } 1168 case MBA_IDC_COMPLETE: 1169 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1170 complete(&ha->lb_portup_comp); 1171 /* Fallthru */ 1172 case MBA_IDC_TIME_EXT: 1173 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1174 IS_QLA8044(ha)) 1175 qla81xx_idc_event(vha, mb[0], mb[1]); 1176 break; 1177 1178 case MBA_IDC_AEN: 1179 mb[4] = RD_REG_WORD(®24->mailbox4); 1180 mb[5] = RD_REG_WORD(®24->mailbox5); 1181 mb[6] = RD_REG_WORD(®24->mailbox6); 1182 mb[7] = RD_REG_WORD(®24->mailbox7); 1183 qla83xx_handle_8200_aen(vha, mb); 1184 break; 1185 1186 case MBA_DPORT_DIAGNOSTICS: 1187 ql_dbg(ql_dbg_async, vha, 0x5052, 1188 "D-Port Diagnostics: %04x result=%s\n", 1189 mb[0], 1190 mb[1] == 0 ? "start" : 1191 mb[1] == 1 ? "done (pass)" : 1192 mb[1] == 2 ? "done (error)" : "other"); 1193 break; 1194 1195 case MBA_TEMPERATURE_ALERT: 1196 ql_dbg(ql_dbg_async, vha, 0x505e, 1197 "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); 1198 if (mb[1] == 0x12) 1199 schedule_work(&ha->board_disable); 1200 break; 1201 1202 default: 1203 ql_dbg(ql_dbg_async, vha, 0x5057, 1204 "Unknown AEN:%04x %04x %04x %04x\n", 1205 mb[0], mb[1], mb[2], mb[3]); 1206 } 1207 1208 qlt_async_event(mb[0], vha, mb); 1209 1210 if (!vha->vp_idx && ha->num_vhosts) 1211 qla2x00_alert_all_vps(rsp, mb); 1212 } 1213 1214 /** 1215 * qla2x00_process_completed_request() - Process a Fast Post response. 1216 * @ha: SCSI driver HA context 1217 * @index: SRB index 1218 */ 1219 void 1220 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1221 struct req_que *req, uint32_t index) 1222 { 1223 srb_t *sp; 1224 struct qla_hw_data *ha = vha->hw; 1225 1226 /* Validate handle. */ 1227 if (index >= req->num_outstanding_cmds) { 1228 ql_log(ql_log_warn, vha, 0x3014, 1229 "Invalid SCSI command index (%x).\n", index); 1230 1231 if (IS_P3P_TYPE(ha)) 1232 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1233 else 1234 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1235 return; 1236 } 1237 1238 sp = req->outstanding_cmds[index]; 1239 if (sp) { 1240 /* Free outstanding command slot. */ 1241 req->outstanding_cmds[index] = NULL; 1242 1243 /* Save ISP completion status */ 1244 sp->done(sp, DID_OK << 16); 1245 } else { 1246 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1247 1248 if (IS_P3P_TYPE(ha)) 1249 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1250 else 1251 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1252 } 1253 } 1254 1255 srb_t * 1256 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1257 struct req_que *req, void *iocb) 1258 { 1259 struct qla_hw_data *ha = vha->hw; 1260 sts_entry_t *pkt = iocb; 1261 srb_t *sp = NULL; 1262 uint16_t index; 1263 1264 index = LSW(pkt->handle); 1265 if (index >= req->num_outstanding_cmds) { 1266 ql_log(ql_log_warn, vha, 0x5031, 1267 "Invalid command index (%x) type %8ph.\n", 1268 index, iocb); 1269 if (IS_P3P_TYPE(ha)) 1270 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1271 else 1272 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1273 goto done; 1274 } 1275 sp = req->outstanding_cmds[index]; 1276 if (!sp) { 1277 ql_log(ql_log_warn, vha, 0x5032, 1278 "Invalid completion handle (%x) -- timed-out.\n", index); 1279 return sp; 1280 } 1281 if (sp->handle != index) { 1282 ql_log(ql_log_warn, vha, 0x5033, 1283 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 1284 return NULL; 1285 } 1286 1287 req->outstanding_cmds[index] = NULL; 1288 1289 done: 1290 return sp; 1291 } 1292 1293 static void 1294 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1295 struct mbx_entry *mbx) 1296 { 1297 const char func[] = "MBX-IOCB"; 1298 const char *type; 1299 fc_port_t *fcport; 1300 srb_t *sp; 1301 struct srb_iocb *lio; 1302 uint16_t *data; 1303 uint16_t status; 1304 1305 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1306 if (!sp) 1307 return; 1308 1309 lio = &sp->u.iocb_cmd; 1310 type = sp->name; 1311 fcport = sp->fcport; 1312 data = lio->u.logio.data; 1313 1314 data[0] = MBS_COMMAND_ERROR; 1315 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1316 QLA_LOGIO_LOGIN_RETRIED : 0; 1317 if (mbx->entry_status) { 1318 ql_dbg(ql_dbg_async, vha, 0x5043, 1319 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1320 "entry-status=%x status=%x state-flag=%x " 1321 "status-flags=%x.\n", type, sp->handle, 1322 fcport->d_id.b.domain, fcport->d_id.b.area, 1323 fcport->d_id.b.al_pa, mbx->entry_status, 1324 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1325 le16_to_cpu(mbx->status_flags)); 1326 1327 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1328 (uint8_t *)mbx, sizeof(*mbx)); 1329 1330 goto logio_done; 1331 } 1332 1333 status = le16_to_cpu(mbx->status); 1334 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1335 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1336 status = 0; 1337 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1338 ql_dbg(ql_dbg_async, vha, 0x5045, 1339 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1340 type, sp->handle, fcport->d_id.b.domain, 1341 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1342 le16_to_cpu(mbx->mb1)); 1343 1344 data[0] = MBS_COMMAND_COMPLETE; 1345 if (sp->type == SRB_LOGIN_CMD) { 1346 fcport->port_type = FCT_TARGET; 1347 if (le16_to_cpu(mbx->mb1) & BIT_0) 1348 fcport->port_type = FCT_INITIATOR; 1349 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1350 fcport->flags |= FCF_FCP2_DEVICE; 1351 } 1352 goto logio_done; 1353 } 1354 1355 data[0] = le16_to_cpu(mbx->mb0); 1356 switch (data[0]) { 1357 case MBS_PORT_ID_USED: 1358 data[1] = le16_to_cpu(mbx->mb1); 1359 break; 1360 case MBS_LOOP_ID_USED: 1361 break; 1362 default: 1363 data[0] = MBS_COMMAND_ERROR; 1364 break; 1365 } 1366 1367 ql_log(ql_log_warn, vha, 0x5046, 1368 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1369 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1370 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1371 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1372 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1373 le16_to_cpu(mbx->mb7)); 1374 1375 logio_done: 1376 sp->done(sp, 0); 1377 } 1378 1379 static void 1380 qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1381 struct mbx_24xx_entry *pkt) 1382 { 1383 const char func[] = "MBX-IOCB2"; 1384 srb_t *sp; 1385 struct srb_iocb *si; 1386 u16 sz, i; 1387 int res; 1388 1389 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1390 if (!sp) 1391 return; 1392 1393 si = &sp->u.iocb_cmd; 1394 sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); 1395 1396 for (i = 0; i < sz; i++) 1397 si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]); 1398 1399 res = (si->u.mbx.in_mb[0] & MBS_MASK); 1400 1401 sp->done(sp, res); 1402 } 1403 1404 static void 1405 qla24xxx_nack_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1406 struct nack_to_isp *pkt) 1407 { 1408 const char func[] = "nack"; 1409 srb_t *sp; 1410 int res = 0; 1411 1412 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1413 if (!sp) 1414 return; 1415 1416 if (pkt->u.isp2x.status != cpu_to_le16(NOTIFY_ACK_SUCCESS)) 1417 res = QLA_FUNCTION_FAILED; 1418 1419 sp->done(sp, res); 1420 } 1421 1422 static void 1423 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1424 sts_entry_t *pkt, int iocb_type) 1425 { 1426 const char func[] = "CT_IOCB"; 1427 const char *type; 1428 srb_t *sp; 1429 struct bsg_job *bsg_job; 1430 struct fc_bsg_reply *bsg_reply; 1431 uint16_t comp_status; 1432 int res = 0; 1433 1434 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1435 if (!sp) 1436 return; 1437 1438 switch (sp->type) { 1439 case SRB_CT_CMD: 1440 bsg_job = sp->u.bsg_job; 1441 bsg_reply = bsg_job->reply; 1442 1443 type = "ct pass-through"; 1444 1445 comp_status = le16_to_cpu(pkt->comp_status); 1446 1447 /* 1448 * return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1449 * fc payload to the caller 1450 */ 1451 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1452 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1453 1454 if (comp_status != CS_COMPLETE) { 1455 if (comp_status == CS_DATA_UNDERRUN) { 1456 res = DID_OK << 16; 1457 bsg_reply->reply_payload_rcv_len = 1458 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1459 1460 ql_log(ql_log_warn, vha, 0x5048, 1461 "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", 1462 type, comp_status, 1463 bsg_reply->reply_payload_rcv_len); 1464 } else { 1465 ql_log(ql_log_warn, vha, 0x5049, 1466 "CT pass-through-%s error comp_status=0x%x.\n", 1467 type, comp_status); 1468 res = DID_ERROR << 16; 1469 bsg_reply->reply_payload_rcv_len = 0; 1470 } 1471 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1472 (uint8_t *)pkt, sizeof(*pkt)); 1473 } else { 1474 res = DID_OK << 16; 1475 bsg_reply->reply_payload_rcv_len = 1476 bsg_job->reply_payload.payload_len; 1477 bsg_job->reply_len = 0; 1478 } 1479 break; 1480 case SRB_CT_PTHRU_CMD: 1481 /* 1482 * borrowing sts_entry_24xx.comp_status. 1483 * same location as ct_entry_24xx.comp_status 1484 */ 1485 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 1486 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1487 sp->name); 1488 break; 1489 } 1490 1491 sp->done(sp, res); 1492 } 1493 1494 static void 1495 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1496 struct sts_entry_24xx *pkt, int iocb_type) 1497 { 1498 const char func[] = "ELS_CT_IOCB"; 1499 const char *type; 1500 srb_t *sp; 1501 struct bsg_job *bsg_job; 1502 struct fc_bsg_reply *bsg_reply; 1503 uint16_t comp_status; 1504 uint32_t fw_status[3]; 1505 uint8_t* fw_sts_ptr; 1506 int res; 1507 1508 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1509 if (!sp) 1510 return; 1511 bsg_job = sp->u.bsg_job; 1512 bsg_reply = bsg_job->reply; 1513 1514 type = NULL; 1515 switch (sp->type) { 1516 case SRB_ELS_CMD_RPT: 1517 case SRB_ELS_CMD_HST: 1518 type = "els"; 1519 break; 1520 case SRB_CT_CMD: 1521 type = "ct pass-through"; 1522 break; 1523 case SRB_ELS_DCMD: 1524 type = "Driver ELS logo"; 1525 ql_dbg(ql_dbg_user, vha, 0x5047, 1526 "Completing %s: (%p) type=%d.\n", type, sp, sp->type); 1527 sp->done(sp, 0); 1528 return; 1529 case SRB_CT_PTHRU_CMD: 1530 /* borrowing sts_entry_24xx.comp_status. 1531 same location as ct_entry_24xx.comp_status 1532 */ 1533 res = qla2x00_chk_ms_status(vha, (ms_iocb_entry_t *)pkt, 1534 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp, 1535 sp->name); 1536 sp->done(sp, res); 1537 return; 1538 default: 1539 ql_dbg(ql_dbg_user, vha, 0x503e, 1540 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 1541 return; 1542 } 1543 1544 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1545 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1546 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1547 1548 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1549 * fc payload to the caller 1550 */ 1551 bsg_reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1552 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1553 1554 if (comp_status != CS_COMPLETE) { 1555 if (comp_status == CS_DATA_UNDERRUN) { 1556 res = DID_OK << 16; 1557 bsg_reply->reply_payload_rcv_len = 1558 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); 1559 1560 ql_dbg(ql_dbg_user, vha, 0x503f, 1561 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1562 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1563 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1564 le16_to_cpu(((struct els_sts_entry_24xx *) 1565 pkt)->total_byte_count)); 1566 fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) + 1567 sizeof(struct fc_bsg_reply); 1568 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1569 } 1570 else { 1571 ql_dbg(ql_dbg_user, vha, 0x5040, 1572 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1573 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1574 type, sp->handle, comp_status, 1575 le16_to_cpu(((struct els_sts_entry_24xx *) 1576 pkt)->error_subcode_1), 1577 le16_to_cpu(((struct els_sts_entry_24xx *) 1578 pkt)->error_subcode_2)); 1579 res = DID_ERROR << 16; 1580 bsg_reply->reply_payload_rcv_len = 0; 1581 fw_sts_ptr = ((uint8_t*)scsi_req(bsg_job->req)->sense) + 1582 sizeof(struct fc_bsg_reply); 1583 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1584 } 1585 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 1586 (uint8_t *)pkt, sizeof(*pkt)); 1587 } 1588 else { 1589 res = DID_OK << 16; 1590 bsg_reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1591 bsg_job->reply_len = 0; 1592 } 1593 1594 sp->done(sp, res); 1595 } 1596 1597 static void 1598 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1599 struct logio_entry_24xx *logio) 1600 { 1601 const char func[] = "LOGIO-IOCB"; 1602 const char *type; 1603 fc_port_t *fcport; 1604 srb_t *sp; 1605 struct srb_iocb *lio; 1606 uint16_t *data; 1607 uint32_t iop[2]; 1608 1609 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1610 if (!sp) 1611 return; 1612 1613 lio = &sp->u.iocb_cmd; 1614 type = sp->name; 1615 fcport = sp->fcport; 1616 data = lio->u.logio.data; 1617 1618 data[0] = MBS_COMMAND_ERROR; 1619 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1620 QLA_LOGIO_LOGIN_RETRIED : 0; 1621 if (logio->entry_status) { 1622 ql_log(ql_log_warn, fcport->vha, 0x5034, 1623 "Async-%s error entry - hdl=%x" 1624 "portid=%02x%02x%02x entry-status=%x.\n", 1625 type, sp->handle, fcport->d_id.b.domain, 1626 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1627 logio->entry_status); 1628 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 1629 (uint8_t *)logio, sizeof(*logio)); 1630 1631 goto logio_done; 1632 } 1633 1634 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1635 ql_dbg(ql_dbg_async, fcport->vha, 0x5036, 1636 "Async-%s complete - hdl=%x portid=%02x%02x%02x " 1637 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1638 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1639 le32_to_cpu(logio->io_parameter[0])); 1640 1641 vha->hw->exch_starvation = 0; 1642 data[0] = MBS_COMMAND_COMPLETE; 1643 if (sp->type != SRB_LOGIN_CMD) 1644 goto logio_done; 1645 1646 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1647 if (iop[0] & BIT_4) { 1648 fcport->port_type = FCT_TARGET; 1649 if (iop[0] & BIT_8) 1650 fcport->flags |= FCF_FCP2_DEVICE; 1651 } else if (iop[0] & BIT_5) 1652 fcport->port_type = FCT_INITIATOR; 1653 1654 if (iop[0] & BIT_7) 1655 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1656 1657 if (logio->io_parameter[7] || logio->io_parameter[8]) 1658 fcport->supported_classes |= FC_COS_CLASS2; 1659 if (logio->io_parameter[9] || logio->io_parameter[10]) 1660 fcport->supported_classes |= FC_COS_CLASS3; 1661 1662 goto logio_done; 1663 } 1664 1665 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1666 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1667 lio->u.logio.iop[0] = iop[0]; 1668 lio->u.logio.iop[1] = iop[1]; 1669 switch (iop[0]) { 1670 case LSC_SCODE_PORTID_USED: 1671 data[0] = MBS_PORT_ID_USED; 1672 data[1] = LSW(iop[1]); 1673 break; 1674 case LSC_SCODE_NPORT_USED: 1675 data[0] = MBS_LOOP_ID_USED; 1676 break; 1677 case LSC_SCODE_NOXCB: 1678 vha->hw->exch_starvation++; 1679 if (vha->hw->exch_starvation > 5) { 1680 ql_log(ql_log_warn, vha, 0xffff, 1681 "Exchange starvation. Resetting RISC\n"); 1682 1683 vha->hw->exch_starvation = 0; 1684 1685 if (IS_P3P_TYPE(vha->hw)) 1686 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1687 else 1688 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1689 qla2xxx_wake_dpc(vha); 1690 } 1691 /* drop through */ 1692 default: 1693 data[0] = MBS_COMMAND_ERROR; 1694 break; 1695 } 1696 1697 ql_dbg(ql_dbg_async, fcport->vha, 0x5037, 1698 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " 1699 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1700 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1701 le16_to_cpu(logio->comp_status), 1702 le32_to_cpu(logio->io_parameter[0]), 1703 le32_to_cpu(logio->io_parameter[1])); 1704 1705 logio_done: 1706 sp->done(sp, 0); 1707 } 1708 1709 static void 1710 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 1711 { 1712 const char func[] = "TMF-IOCB"; 1713 const char *type; 1714 fc_port_t *fcport; 1715 srb_t *sp; 1716 struct srb_iocb *iocb; 1717 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1718 1719 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1720 if (!sp) 1721 return; 1722 1723 iocb = &sp->u.iocb_cmd; 1724 type = sp->name; 1725 fcport = sp->fcport; 1726 iocb->u.tmf.data = QLA_SUCCESS; 1727 1728 if (sts->entry_status) { 1729 ql_log(ql_log_warn, fcport->vha, 0x5038, 1730 "Async-%s error - hdl=%x entry-status(%x).\n", 1731 type, sp->handle, sts->entry_status); 1732 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1733 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 1734 ql_log(ql_log_warn, fcport->vha, 0x5039, 1735 "Async-%s error - hdl=%x completion status(%x).\n", 1736 type, sp->handle, sts->comp_status); 1737 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1738 } else if ((le16_to_cpu(sts->scsi_status) & 1739 SS_RESPONSE_INFO_LEN_VALID)) { 1740 if (le32_to_cpu(sts->rsp_data_len) < 4) { 1741 ql_log(ql_log_warn, fcport->vha, 0x503b, 1742 "Async-%s error - hdl=%x not enough response(%d).\n", 1743 type, sp->handle, sts->rsp_data_len); 1744 } else if (sts->data[3]) { 1745 ql_log(ql_log_warn, fcport->vha, 0x503c, 1746 "Async-%s error - hdl=%x response(%x).\n", 1747 type, sp->handle, sts->data[3]); 1748 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1749 } 1750 } 1751 1752 if (iocb->u.tmf.data != QLA_SUCCESS) 1753 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1754 (uint8_t *)sts, sizeof(*sts)); 1755 1756 sp->done(sp, 0); 1757 } 1758 1759 /** 1760 * qla2x00_process_response_queue() - Process response queue entries. 1761 * @ha: SCSI driver HA context 1762 */ 1763 void 1764 qla2x00_process_response_queue(struct rsp_que *rsp) 1765 { 1766 struct scsi_qla_host *vha; 1767 struct qla_hw_data *ha = rsp->hw; 1768 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1769 sts_entry_t *pkt; 1770 uint16_t handle_cnt; 1771 uint16_t cnt; 1772 1773 vha = pci_get_drvdata(ha->pdev); 1774 1775 if (!vha->flags.online) 1776 return; 1777 1778 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1779 pkt = (sts_entry_t *)rsp->ring_ptr; 1780 1781 rsp->ring_index++; 1782 if (rsp->ring_index == rsp->length) { 1783 rsp->ring_index = 0; 1784 rsp->ring_ptr = rsp->ring; 1785 } else { 1786 rsp->ring_ptr++; 1787 } 1788 1789 if (pkt->entry_status != 0) { 1790 qla2x00_error_entry(vha, rsp, pkt); 1791 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1792 wmb(); 1793 continue; 1794 } 1795 1796 switch (pkt->entry_type) { 1797 case STATUS_TYPE: 1798 qla2x00_status_entry(vha, rsp, pkt); 1799 break; 1800 case STATUS_TYPE_21: 1801 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1802 for (cnt = 0; cnt < handle_cnt; cnt++) { 1803 qla2x00_process_completed_request(vha, rsp->req, 1804 ((sts21_entry_t *)pkt)->handle[cnt]); 1805 } 1806 break; 1807 case STATUS_TYPE_22: 1808 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1809 for (cnt = 0; cnt < handle_cnt; cnt++) { 1810 qla2x00_process_completed_request(vha, rsp->req, 1811 ((sts22_entry_t *)pkt)->handle[cnt]); 1812 } 1813 break; 1814 case STATUS_CONT_TYPE: 1815 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1816 break; 1817 case MBX_IOCB_TYPE: 1818 qla2x00_mbx_iocb_entry(vha, rsp->req, 1819 (struct mbx_entry *)pkt); 1820 break; 1821 case CT_IOCB_TYPE: 1822 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1823 break; 1824 default: 1825 /* Type Not Supported. */ 1826 ql_log(ql_log_warn, vha, 0x504a, 1827 "Received unknown response pkt type %x " 1828 "entry status=%x.\n", 1829 pkt->entry_type, pkt->entry_status); 1830 break; 1831 } 1832 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1833 wmb(); 1834 } 1835 1836 /* Adjust ring index */ 1837 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 1838 } 1839 1840 static inline void 1841 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1842 uint32_t sense_len, struct rsp_que *rsp, int res) 1843 { 1844 struct scsi_qla_host *vha = sp->vha; 1845 struct scsi_cmnd *cp = GET_CMD_SP(sp); 1846 uint32_t track_sense_len; 1847 1848 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1849 sense_len = SCSI_SENSE_BUFFERSIZE; 1850 1851 SET_CMD_SENSE_LEN(sp, sense_len); 1852 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 1853 track_sense_len = sense_len; 1854 1855 if (sense_len > par_sense_len) 1856 sense_len = par_sense_len; 1857 1858 memcpy(cp->sense_buffer, sense_data, sense_len); 1859 1860 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 1861 track_sense_len -= sense_len; 1862 SET_CMD_SENSE_LEN(sp, track_sense_len); 1863 1864 if (track_sense_len != 0) { 1865 rsp->status_srb = sp; 1866 cp->result = res; 1867 } 1868 1869 if (sense_len) { 1870 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 1871 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 1872 sp->vha->host_no, cp->device->id, cp->device->lun, 1873 cp); 1874 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1875 cp->sense_buffer, sense_len); 1876 } 1877 } 1878 1879 struct scsi_dif_tuple { 1880 __be16 guard; /* Checksum */ 1881 __be16 app_tag; /* APPL identifier */ 1882 __be32 ref_tag; /* Target LBA or indirect LBA */ 1883 }; 1884 1885 /* 1886 * Checks the guard or meta-data for the type of error 1887 * detected by the HBA. In case of errors, we set the 1888 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1889 * to indicate to the kernel that the HBA detected error. 1890 */ 1891 static inline int 1892 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1893 { 1894 struct scsi_qla_host *vha = sp->vha; 1895 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1896 uint8_t *ap = &sts24->data[12]; 1897 uint8_t *ep = &sts24->data[20]; 1898 uint32_t e_ref_tag, a_ref_tag; 1899 uint16_t e_app_tag, a_app_tag; 1900 uint16_t e_guard, a_guard; 1901 1902 /* 1903 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1904 * would make guard field appear at offset 2 1905 */ 1906 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1907 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1908 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1909 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1910 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1911 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1912 1913 ql_dbg(ql_dbg_io, vha, 0x3023, 1914 "iocb(s) %p Returned STATUS.\n", sts24); 1915 1916 ql_dbg(ql_dbg_io, vha, 0x3024, 1917 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1918 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1919 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 1920 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1921 a_app_tag, e_app_tag, a_guard, e_guard); 1922 1923 /* 1924 * Ignore sector if: 1925 * For type 3: ref & app tag is all 'f's 1926 * For type 0,1,2: app tag is all 'f's 1927 */ 1928 if ((a_app_tag == 0xffff) && 1929 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1930 (a_ref_tag == 0xffffffff))) { 1931 uint32_t blocks_done, resid; 1932 sector_t lba_s = scsi_get_lba(cmd); 1933 1934 /* 2TB boundary case covered automatically with this */ 1935 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1936 1937 resid = scsi_bufflen(cmd) - (blocks_done * 1938 cmd->device->sector_size); 1939 1940 scsi_set_resid(cmd, resid); 1941 cmd->result = DID_OK << 16; 1942 1943 /* Update protection tag */ 1944 if (scsi_prot_sg_count(cmd)) { 1945 uint32_t i, j = 0, k = 0, num_ent; 1946 struct scatterlist *sg; 1947 struct t10_pi_tuple *spt; 1948 1949 /* Patch the corresponding protection tags */ 1950 scsi_for_each_prot_sg(cmd, sg, 1951 scsi_prot_sg_count(cmd), i) { 1952 num_ent = sg_dma_len(sg) / 8; 1953 if (k + num_ent < blocks_done) { 1954 k += num_ent; 1955 continue; 1956 } 1957 j = blocks_done - k - 1; 1958 k = blocks_done; 1959 break; 1960 } 1961 1962 if (k != blocks_done) { 1963 ql_log(ql_log_warn, vha, 0x302f, 1964 "unexpected tag values tag:lba=%x:%llx)\n", 1965 e_ref_tag, (unsigned long long)lba_s); 1966 return 1; 1967 } 1968 1969 spt = page_address(sg_page(sg)) + sg->offset; 1970 spt += j; 1971 1972 spt->app_tag = 0xffff; 1973 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1974 spt->ref_tag = 0xffffffff; 1975 } 1976 1977 return 0; 1978 } 1979 1980 /* check guard */ 1981 if (e_guard != a_guard) { 1982 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1983 0x10, 0x1); 1984 set_driver_byte(cmd, DRIVER_SENSE); 1985 set_host_byte(cmd, DID_ABORT); 1986 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1987 return 1; 1988 } 1989 1990 /* check ref tag */ 1991 if (e_ref_tag != a_ref_tag) { 1992 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1993 0x10, 0x3); 1994 set_driver_byte(cmd, DRIVER_SENSE); 1995 set_host_byte(cmd, DID_ABORT); 1996 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1997 return 1; 1998 } 1999 2000 /* check appl tag */ 2001 if (e_app_tag != a_app_tag) { 2002 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 2003 0x10, 0x2); 2004 set_driver_byte(cmd, DRIVER_SENSE); 2005 set_host_byte(cmd, DID_ABORT); 2006 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 2007 return 1; 2008 } 2009 2010 return 1; 2011 } 2012 2013 static void 2014 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 2015 struct req_que *req, uint32_t index) 2016 { 2017 struct qla_hw_data *ha = vha->hw; 2018 srb_t *sp; 2019 uint16_t comp_status; 2020 uint16_t scsi_status; 2021 uint16_t thread_id; 2022 uint32_t rval = EXT_STATUS_OK; 2023 struct bsg_job *bsg_job = NULL; 2024 struct fc_bsg_request *bsg_request; 2025 struct fc_bsg_reply *bsg_reply; 2026 sts_entry_t *sts; 2027 struct sts_entry_24xx *sts24; 2028 sts = (sts_entry_t *) pkt; 2029 sts24 = (struct sts_entry_24xx *) pkt; 2030 2031 /* Validate handle. */ 2032 if (index >= req->num_outstanding_cmds) { 2033 ql_log(ql_log_warn, vha, 0x70af, 2034 "Invalid SCSI completion handle 0x%x.\n", index); 2035 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2036 return; 2037 } 2038 2039 sp = req->outstanding_cmds[index]; 2040 if (!sp) { 2041 ql_log(ql_log_warn, vha, 0x70b0, 2042 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 2043 req->id, index); 2044 2045 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2046 return; 2047 } 2048 2049 /* Free outstanding command slot. */ 2050 req->outstanding_cmds[index] = NULL; 2051 bsg_job = sp->u.bsg_job; 2052 bsg_request = bsg_job->request; 2053 bsg_reply = bsg_job->reply; 2054 2055 if (IS_FWI2_CAPABLE(ha)) { 2056 comp_status = le16_to_cpu(sts24->comp_status); 2057 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2058 } else { 2059 comp_status = le16_to_cpu(sts->comp_status); 2060 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2061 } 2062 2063 thread_id = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; 2064 switch (comp_status) { 2065 case CS_COMPLETE: 2066 if (scsi_status == 0) { 2067 bsg_reply->reply_payload_rcv_len = 2068 bsg_job->reply_payload.payload_len; 2069 vha->qla_stats.input_bytes += 2070 bsg_reply->reply_payload_rcv_len; 2071 vha->qla_stats.input_requests++; 2072 rval = EXT_STATUS_OK; 2073 } 2074 goto done; 2075 2076 case CS_DATA_OVERRUN: 2077 ql_dbg(ql_dbg_user, vha, 0x70b1, 2078 "Command completed with date overrun thread_id=%d\n", 2079 thread_id); 2080 rval = EXT_STATUS_DATA_OVERRUN; 2081 break; 2082 2083 case CS_DATA_UNDERRUN: 2084 ql_dbg(ql_dbg_user, vha, 0x70b2, 2085 "Command completed with date underrun thread_id=%d\n", 2086 thread_id); 2087 rval = EXT_STATUS_DATA_UNDERRUN; 2088 break; 2089 case CS_BIDIR_RD_OVERRUN: 2090 ql_dbg(ql_dbg_user, vha, 0x70b3, 2091 "Command completed with read data overrun thread_id=%d\n", 2092 thread_id); 2093 rval = EXT_STATUS_DATA_OVERRUN; 2094 break; 2095 2096 case CS_BIDIR_RD_WR_OVERRUN: 2097 ql_dbg(ql_dbg_user, vha, 0x70b4, 2098 "Command completed with read and write data overrun " 2099 "thread_id=%d\n", thread_id); 2100 rval = EXT_STATUS_DATA_OVERRUN; 2101 break; 2102 2103 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 2104 ql_dbg(ql_dbg_user, vha, 0x70b5, 2105 "Command completed with read data over and write data " 2106 "underrun thread_id=%d\n", thread_id); 2107 rval = EXT_STATUS_DATA_OVERRUN; 2108 break; 2109 2110 case CS_BIDIR_RD_UNDERRUN: 2111 ql_dbg(ql_dbg_user, vha, 0x70b6, 2112 "Command completed with read data data underrun " 2113 "thread_id=%d\n", thread_id); 2114 rval = EXT_STATUS_DATA_UNDERRUN; 2115 break; 2116 2117 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 2118 ql_dbg(ql_dbg_user, vha, 0x70b7, 2119 "Command completed with read data under and write data " 2120 "overrun thread_id=%d\n", thread_id); 2121 rval = EXT_STATUS_DATA_UNDERRUN; 2122 break; 2123 2124 case CS_BIDIR_RD_WR_UNDERRUN: 2125 ql_dbg(ql_dbg_user, vha, 0x70b8, 2126 "Command completed with read and write data underrun " 2127 "thread_id=%d\n", thread_id); 2128 rval = EXT_STATUS_DATA_UNDERRUN; 2129 break; 2130 2131 case CS_BIDIR_DMA: 2132 ql_dbg(ql_dbg_user, vha, 0x70b9, 2133 "Command completed with data DMA error thread_id=%d\n", 2134 thread_id); 2135 rval = EXT_STATUS_DMA_ERR; 2136 break; 2137 2138 case CS_TIMEOUT: 2139 ql_dbg(ql_dbg_user, vha, 0x70ba, 2140 "Command completed with timeout thread_id=%d\n", 2141 thread_id); 2142 rval = EXT_STATUS_TIMEOUT; 2143 break; 2144 default: 2145 ql_dbg(ql_dbg_user, vha, 0x70bb, 2146 "Command completed with completion status=0x%x " 2147 "thread_id=%d\n", comp_status, thread_id); 2148 rval = EXT_STATUS_ERR; 2149 break; 2150 } 2151 bsg_reply->reply_payload_rcv_len = 0; 2152 2153 done: 2154 /* Return the vendor specific reply to API */ 2155 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2156 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2157 /* Always return DID_OK, bsg will send the vendor specific response 2158 * in this case only */ 2159 sp->done(sp, DID_OK << 6); 2160 2161 } 2162 2163 /** 2164 * qla2x00_status_entry() - Process a Status IOCB entry. 2165 * @ha: SCSI driver HA context 2166 * @pkt: Entry pointer 2167 */ 2168 static void 2169 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2170 { 2171 srb_t *sp; 2172 fc_port_t *fcport; 2173 struct scsi_cmnd *cp; 2174 sts_entry_t *sts; 2175 struct sts_entry_24xx *sts24; 2176 uint16_t comp_status; 2177 uint16_t scsi_status; 2178 uint16_t ox_id; 2179 uint8_t lscsi_status; 2180 int32_t resid; 2181 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2182 fw_resid_len; 2183 uint8_t *rsp_info, *sense_data; 2184 struct qla_hw_data *ha = vha->hw; 2185 uint32_t handle; 2186 uint16_t que; 2187 struct req_que *req; 2188 int logit = 1; 2189 int res = 0; 2190 uint16_t state_flags = 0; 2191 uint16_t retry_delay = 0; 2192 uint8_t no_logout = 0; 2193 2194 sts = (sts_entry_t *) pkt; 2195 sts24 = (struct sts_entry_24xx *) pkt; 2196 if (IS_FWI2_CAPABLE(ha)) { 2197 comp_status = le16_to_cpu(sts24->comp_status); 2198 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2199 state_flags = le16_to_cpu(sts24->state_flags); 2200 } else { 2201 comp_status = le16_to_cpu(sts->comp_status); 2202 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2203 } 2204 handle = (uint32_t) LSW(sts->handle); 2205 que = MSW(sts->handle); 2206 req = ha->req_q_map[que]; 2207 2208 /* Check for invalid queue pointer */ 2209 if (req == NULL || 2210 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2211 ql_dbg(ql_dbg_io, vha, 0x3059, 2212 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2213 "que=%u.\n", sts->handle, req, que); 2214 return; 2215 } 2216 2217 /* Validate handle. */ 2218 if (handle < req->num_outstanding_cmds) { 2219 sp = req->outstanding_cmds[handle]; 2220 if (!sp) { 2221 ql_dbg(ql_dbg_io, vha, 0x3075, 2222 "%s(%ld): Already returned command for status handle (0x%x).\n", 2223 __func__, vha->host_no, sts->handle); 2224 return; 2225 } 2226 } else { 2227 ql_dbg(ql_dbg_io, vha, 0x3017, 2228 "Invalid status handle, out of range (0x%x).\n", 2229 sts->handle); 2230 2231 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2232 if (IS_P3P_TYPE(ha)) 2233 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2234 else 2235 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2236 qla2xxx_wake_dpc(vha); 2237 } 2238 return; 2239 } 2240 2241 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2242 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2243 return; 2244 } 2245 2246 /* Task Management completion. */ 2247 if (sp->type == SRB_TM_CMD) { 2248 qla24xx_tm_iocb_entry(vha, req, pkt); 2249 return; 2250 } 2251 2252 /* Fast path completion. */ 2253 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2254 qla2x00_process_completed_request(vha, req, handle); 2255 2256 return; 2257 } 2258 2259 req->outstanding_cmds[handle] = NULL; 2260 cp = GET_CMD_SP(sp); 2261 if (cp == NULL) { 2262 ql_dbg(ql_dbg_io, vha, 0x3018, 2263 "Command already returned (0x%x/%p).\n", 2264 sts->handle, sp); 2265 2266 return; 2267 } 2268 2269 lscsi_status = scsi_status & STATUS_MASK; 2270 2271 fcport = sp->fcport; 2272 2273 ox_id = 0; 2274 sense_len = par_sense_len = rsp_info_len = resid_len = 2275 fw_resid_len = 0; 2276 if (IS_FWI2_CAPABLE(ha)) { 2277 if (scsi_status & SS_SENSE_LEN_VALID) 2278 sense_len = le32_to_cpu(sts24->sense_len); 2279 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2280 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2281 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2282 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2283 if (comp_status == CS_DATA_UNDERRUN) 2284 fw_resid_len = le32_to_cpu(sts24->residual_len); 2285 rsp_info = sts24->data; 2286 sense_data = sts24->data; 2287 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2288 ox_id = le16_to_cpu(sts24->ox_id); 2289 par_sense_len = sizeof(sts24->data); 2290 /* Valid values of the retry delay timer are 0x1-0xffef */ 2291 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) 2292 retry_delay = sts24->retry_delay; 2293 } else { 2294 if (scsi_status & SS_SENSE_LEN_VALID) 2295 sense_len = le16_to_cpu(sts->req_sense_length); 2296 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2297 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2298 resid_len = le32_to_cpu(sts->residual_length); 2299 rsp_info = sts->rsp_info; 2300 sense_data = sts->req_sense_data; 2301 par_sense_len = sizeof(sts->req_sense_data); 2302 } 2303 2304 /* Check for any FCP transport errors. */ 2305 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2306 /* Sense data lies beyond any FCP RESPONSE data. */ 2307 if (IS_FWI2_CAPABLE(ha)) { 2308 sense_data += rsp_info_len; 2309 par_sense_len -= rsp_info_len; 2310 } 2311 if (rsp_info_len > 3 && rsp_info[3]) { 2312 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2313 "FCP I/O protocol failure (0x%x/0x%x).\n", 2314 rsp_info_len, rsp_info[3]); 2315 2316 res = DID_BUS_BUSY << 16; 2317 goto out; 2318 } 2319 } 2320 2321 /* Check for overrun. */ 2322 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 2323 scsi_status & SS_RESIDUAL_OVER) 2324 comp_status = CS_DATA_OVERRUN; 2325 2326 /* 2327 * Check retry_delay_timer value if we receive a busy or 2328 * queue full. 2329 */ 2330 if (lscsi_status == SAM_STAT_TASK_SET_FULL || 2331 lscsi_status == SAM_STAT_BUSY) 2332 qla2x00_set_retry_delay_timestamp(fcport, retry_delay); 2333 2334 /* 2335 * Based on Host and scsi status generate status code for Linux 2336 */ 2337 switch (comp_status) { 2338 case CS_COMPLETE: 2339 case CS_QUEUE_FULL: 2340 if (scsi_status == 0) { 2341 res = DID_OK << 16; 2342 break; 2343 } 2344 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 2345 resid = resid_len; 2346 scsi_set_resid(cp, resid); 2347 2348 if (!lscsi_status && 2349 ((unsigned)(scsi_bufflen(cp) - resid) < 2350 cp->underflow)) { 2351 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 2352 "Mid-layer underflow " 2353 "detected (0x%x of 0x%x bytes).\n", 2354 resid, scsi_bufflen(cp)); 2355 2356 res = DID_ERROR << 16; 2357 break; 2358 } 2359 } 2360 res = DID_OK << 16 | lscsi_status; 2361 2362 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2363 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 2364 "QUEUE FULL detected.\n"); 2365 break; 2366 } 2367 logit = 0; 2368 if (lscsi_status != SS_CHECK_CONDITION) 2369 break; 2370 2371 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2372 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2373 break; 2374 2375 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2376 rsp, res); 2377 break; 2378 2379 case CS_DATA_UNDERRUN: 2380 /* Use F/W calculated residual length. */ 2381 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 2382 scsi_set_resid(cp, resid); 2383 if (scsi_status & SS_RESIDUAL_UNDER) { 2384 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 2385 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 2386 "Dropped frame(s) detected " 2387 "(0x%x of 0x%x bytes).\n", 2388 resid, scsi_bufflen(cp)); 2389 2390 res = DID_ERROR << 16 | lscsi_status; 2391 goto check_scsi_status; 2392 } 2393 2394 if (!lscsi_status && 2395 ((unsigned)(scsi_bufflen(cp) - resid) < 2396 cp->underflow)) { 2397 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 2398 "Mid-layer underflow " 2399 "detected (0x%x of 0x%x bytes).\n", 2400 resid, scsi_bufflen(cp)); 2401 2402 res = DID_ERROR << 16; 2403 break; 2404 } 2405 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 2406 lscsi_status != SAM_STAT_BUSY) { 2407 /* 2408 * scsi status of task set and busy are considered to be 2409 * task not completed. 2410 */ 2411 2412 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 2413 "Dropped frame(s) detected (0x%x " 2414 "of 0x%x bytes).\n", resid, 2415 scsi_bufflen(cp)); 2416 2417 res = DID_ERROR << 16 | lscsi_status; 2418 goto check_scsi_status; 2419 } else { 2420 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 2421 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2422 scsi_status, lscsi_status); 2423 } 2424 2425 res = DID_OK << 16 | lscsi_status; 2426 logit = 0; 2427 2428 check_scsi_status: 2429 /* 2430 * Check to see if SCSI Status is non zero. If so report SCSI 2431 * Status. 2432 */ 2433 if (lscsi_status != 0) { 2434 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2435 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 2436 "QUEUE FULL detected.\n"); 2437 logit = 1; 2438 break; 2439 } 2440 if (lscsi_status != SS_CHECK_CONDITION) 2441 break; 2442 2443 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2444 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2445 break; 2446 2447 qla2x00_handle_sense(sp, sense_data, par_sense_len, 2448 sense_len, rsp, res); 2449 } 2450 break; 2451 2452 case CS_PORT_LOGGED_OUT: 2453 no_logout = 1; 2454 case CS_PORT_CONFIG_CHG: 2455 case CS_PORT_BUSY: 2456 case CS_INCOMPLETE: 2457 case CS_PORT_UNAVAILABLE: 2458 case CS_TIMEOUT: 2459 case CS_RESET: 2460 2461 /* 2462 * We are going to have the fc class block the rport 2463 * while we try to recover so instruct the mid layer 2464 * to requeue until the class decides how to handle this. 2465 */ 2466 res = DID_TRANSPORT_DISRUPTED << 16; 2467 2468 if (comp_status == CS_TIMEOUT) { 2469 if (IS_FWI2_CAPABLE(ha)) 2470 break; 2471 else if ((le16_to_cpu(sts->status_flags) & 2472 SF_LOGOUT_SENT) == 0) 2473 break; 2474 } 2475 2476 if (atomic_read(&fcport->state) == FCS_ONLINE) { 2477 ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, 2478 "Port to be marked lost on fcport=%02x%02x%02x, current " 2479 "port state= %s comp_status %x.\n", fcport->d_id.b.domain, 2480 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2481 port_state_str[atomic_read(&fcport->state)], 2482 comp_status); 2483 2484 if (no_logout) 2485 fcport->logout_on_delete = 0; 2486 2487 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2488 qlt_schedule_sess_for_deletion_lock(fcport); 2489 } 2490 2491 break; 2492 2493 case CS_ABORTED: 2494 res = DID_RESET << 16; 2495 break; 2496 2497 case CS_DIF_ERROR: 2498 logit = qla2x00_handle_dif_error(sp, sts24); 2499 res = cp->result; 2500 break; 2501 2502 case CS_TRANSPORT: 2503 res = DID_ERROR << 16; 2504 2505 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 2506 break; 2507 2508 if (state_flags & BIT_4) 2509 scmd_printk(KERN_WARNING, cp, 2510 "Unsupported device '%s' found.\n", 2511 cp->device->vendor); 2512 break; 2513 2514 default: 2515 res = DID_ERROR << 16; 2516 break; 2517 } 2518 2519 out: 2520 if (logit) 2521 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 2522 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " 2523 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " 2524 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 2525 comp_status, scsi_status, res, vha->host_no, 2526 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 2527 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 2528 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 2529 resid_len, fw_resid_len, sp, cp); 2530 2531 if (rsp->status_srb == NULL) 2532 sp->done(sp, res); 2533 } 2534 2535 /** 2536 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 2537 * @ha: SCSI driver HA context 2538 * @pkt: Entry pointer 2539 * 2540 * Extended sense data. 2541 */ 2542 static void 2543 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 2544 { 2545 uint8_t sense_sz = 0; 2546 struct qla_hw_data *ha = rsp->hw; 2547 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 2548 srb_t *sp = rsp->status_srb; 2549 struct scsi_cmnd *cp; 2550 uint32_t sense_len; 2551 uint8_t *sense_ptr; 2552 2553 if (!sp || !GET_CMD_SENSE_LEN(sp)) 2554 return; 2555 2556 sense_len = GET_CMD_SENSE_LEN(sp); 2557 sense_ptr = GET_CMD_SENSE_PTR(sp); 2558 2559 cp = GET_CMD_SP(sp); 2560 if (cp == NULL) { 2561 ql_log(ql_log_warn, vha, 0x3025, 2562 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 2563 2564 rsp->status_srb = NULL; 2565 return; 2566 } 2567 2568 if (sense_len > sizeof(pkt->data)) 2569 sense_sz = sizeof(pkt->data); 2570 else 2571 sense_sz = sense_len; 2572 2573 /* Move sense data. */ 2574 if (IS_FWI2_CAPABLE(ha)) 2575 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 2576 memcpy(sense_ptr, pkt->data, sense_sz); 2577 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 2578 sense_ptr, sense_sz); 2579 2580 sense_len -= sense_sz; 2581 sense_ptr += sense_sz; 2582 2583 SET_CMD_SENSE_PTR(sp, sense_ptr); 2584 SET_CMD_SENSE_LEN(sp, sense_len); 2585 2586 /* Place command on done queue. */ 2587 if (sense_len == 0) { 2588 rsp->status_srb = NULL; 2589 sp->done(sp, cp->result); 2590 } 2591 } 2592 2593 /** 2594 * qla2x00_error_entry() - Process an error entry. 2595 * @ha: SCSI driver HA context 2596 * @pkt: Entry pointer 2597 */ 2598 static void 2599 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 2600 { 2601 srb_t *sp; 2602 struct qla_hw_data *ha = vha->hw; 2603 const char func[] = "ERROR-IOCB"; 2604 uint16_t que = MSW(pkt->handle); 2605 struct req_que *req = NULL; 2606 int res = DID_ERROR << 16; 2607 2608 ql_dbg(ql_dbg_async, vha, 0x502a, 2609 "type of error status in response: 0x%x\n", pkt->entry_status); 2610 2611 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 2612 goto fatal; 2613 2614 req = ha->req_q_map[que]; 2615 2616 if (pkt->entry_status & RF_BUSY) 2617 res = DID_BUS_BUSY << 16; 2618 2619 if (pkt->entry_type == NOTIFY_ACK_TYPE && 2620 pkt->handle == QLA_TGT_SKIP_HANDLE) 2621 return; 2622 2623 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2624 if (sp) { 2625 sp->done(sp, res); 2626 return; 2627 } 2628 fatal: 2629 ql_log(ql_log_warn, vha, 0x5030, 2630 "Error entry - invalid handle/queue (%04x).\n", que); 2631 } 2632 2633 /** 2634 * qla24xx_mbx_completion() - Process mailbox command completions. 2635 * @ha: SCSI driver HA context 2636 * @mb0: Mailbox0 register 2637 */ 2638 static void 2639 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 2640 { 2641 uint16_t cnt; 2642 uint32_t mboxes; 2643 uint16_t __iomem *wptr; 2644 struct qla_hw_data *ha = vha->hw; 2645 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2646 2647 /* Read all mbox registers? */ 2648 mboxes = (1 << ha->mbx_count) - 1; 2649 if (!ha->mcp) 2650 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 2651 else 2652 mboxes = ha->mcp->in_mb; 2653 2654 /* Load return mailbox registers. */ 2655 ha->flags.mbox_int = 1; 2656 ha->mailbox_out[0] = mb0; 2657 mboxes >>= 1; 2658 wptr = (uint16_t __iomem *)®->mailbox1; 2659 2660 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2661 if (mboxes & BIT_0) 2662 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 2663 2664 mboxes >>= 1; 2665 wptr++; 2666 } 2667 } 2668 2669 static void 2670 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2671 struct abort_entry_24xx *pkt) 2672 { 2673 const char func[] = "ABT_IOCB"; 2674 srb_t *sp; 2675 struct srb_iocb *abt; 2676 2677 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2678 if (!sp) 2679 return; 2680 2681 abt = &sp->u.iocb_cmd; 2682 abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); 2683 sp->done(sp, 0); 2684 } 2685 2686 /** 2687 * qla24xx_process_response_queue() - Process response queue entries. 2688 * @ha: SCSI driver HA context 2689 */ 2690 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2691 struct rsp_que *rsp) 2692 { 2693 struct sts_entry_24xx *pkt; 2694 struct qla_hw_data *ha = vha->hw; 2695 2696 if (!vha->flags.online) 2697 return; 2698 2699 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2700 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2701 2702 rsp->ring_index++; 2703 if (rsp->ring_index == rsp->length) { 2704 rsp->ring_index = 0; 2705 rsp->ring_ptr = rsp->ring; 2706 } else { 2707 rsp->ring_ptr++; 2708 } 2709 2710 if (pkt->entry_status != 0) { 2711 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2712 2713 if (qlt_24xx_process_response_error(vha, pkt)) 2714 goto process_err; 2715 2716 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2717 wmb(); 2718 continue; 2719 } 2720 process_err: 2721 2722 switch (pkt->entry_type) { 2723 case STATUS_TYPE: 2724 qla2x00_status_entry(vha, rsp, pkt); 2725 break; 2726 case STATUS_CONT_TYPE: 2727 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2728 break; 2729 case VP_RPT_ID_IOCB_TYPE: 2730 qla24xx_report_id_acquisition(vha, 2731 (struct vp_rpt_id_entry_24xx *)pkt); 2732 break; 2733 case LOGINOUT_PORT_IOCB_TYPE: 2734 qla24xx_logio_entry(vha, rsp->req, 2735 (struct logio_entry_24xx *)pkt); 2736 break; 2737 case CT_IOCB_TYPE: 2738 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2739 break; 2740 case ELS_IOCB_TYPE: 2741 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2742 break; 2743 case ABTS_RECV_24XX: 2744 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 2745 /* ensure that the ATIO queue is empty */ 2746 qlt_handle_abts_recv(vha, (response_t *)pkt); 2747 break; 2748 } else { 2749 /* drop through */ 2750 qlt_24xx_process_atio_queue(vha, 1); 2751 } 2752 case ABTS_RESP_24XX: 2753 case CTIO_TYPE7: 2754 case CTIO_CRC2: 2755 qlt_response_pkt_all_vps(vha, (response_t *)pkt); 2756 break; 2757 case NOTIFY_ACK_TYPE: 2758 if (pkt->handle == QLA_TGT_SKIP_HANDLE) 2759 qlt_response_pkt_all_vps(vha, (response_t *)pkt); 2760 else 2761 qla24xxx_nack_iocb_entry(vha, rsp->req, 2762 (struct nack_to_isp *)pkt); 2763 break; 2764 case MARKER_TYPE: 2765 /* Do nothing in this case, this check is to prevent it 2766 * from falling into default case 2767 */ 2768 break; 2769 case ABORT_IOCB_TYPE: 2770 qla24xx_abort_iocb_entry(vha, rsp->req, 2771 (struct abort_entry_24xx *)pkt); 2772 break; 2773 case MBX_IOCB_TYPE: 2774 qla24xx_mbx_iocb_entry(vha, rsp->req, 2775 (struct mbx_24xx_entry *)pkt); 2776 break; 2777 default: 2778 /* Type Not Supported. */ 2779 ql_dbg(ql_dbg_async, vha, 0x5042, 2780 "Received unknown response pkt type %x " 2781 "entry status=%x.\n", 2782 pkt->entry_type, pkt->entry_status); 2783 break; 2784 } 2785 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2786 wmb(); 2787 } 2788 2789 /* Adjust ring index */ 2790 if (IS_P3P_TYPE(ha)) { 2791 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2792 WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); 2793 } else { 2794 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2795 } 2796 } 2797 2798 static void 2799 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2800 { 2801 int rval; 2802 uint32_t cnt; 2803 struct qla_hw_data *ha = vha->hw; 2804 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2805 2806 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 2807 !IS_QLA27XX(ha)) 2808 return; 2809 2810 rval = QLA_SUCCESS; 2811 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 2812 RD_REG_DWORD(®->iobase_addr); 2813 WRT_REG_DWORD(®->iobase_window, 0x0001); 2814 for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2815 rval == QLA_SUCCESS; cnt--) { 2816 if (cnt) { 2817 WRT_REG_DWORD(®->iobase_window, 0x0001); 2818 udelay(10); 2819 } else 2820 rval = QLA_FUNCTION_TIMEOUT; 2821 } 2822 if (rval == QLA_SUCCESS) 2823 goto next_test; 2824 2825 rval = QLA_SUCCESS; 2826 WRT_REG_DWORD(®->iobase_window, 0x0003); 2827 for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2828 rval == QLA_SUCCESS; cnt--) { 2829 if (cnt) { 2830 WRT_REG_DWORD(®->iobase_window, 0x0003); 2831 udelay(10); 2832 } else 2833 rval = QLA_FUNCTION_TIMEOUT; 2834 } 2835 if (rval != QLA_SUCCESS) 2836 goto done; 2837 2838 next_test: 2839 if (RD_REG_DWORD(®->iobase_c8) & BIT_3) 2840 ql_log(ql_log_info, vha, 0x504c, 2841 "Additional code -- 0x55AA.\n"); 2842 2843 done: 2844 WRT_REG_DWORD(®->iobase_window, 0x0000); 2845 RD_REG_DWORD(®->iobase_window); 2846 } 2847 2848 /** 2849 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 2850 * @irq: 2851 * @dev_id: SCSI driver HA context 2852 * 2853 * Called by system whenever the host adapter generates an interrupt. 2854 * 2855 * Returns handled flag. 2856 */ 2857 irqreturn_t 2858 qla24xx_intr_handler(int irq, void *dev_id) 2859 { 2860 scsi_qla_host_t *vha; 2861 struct qla_hw_data *ha; 2862 struct device_reg_24xx __iomem *reg; 2863 int status; 2864 unsigned long iter; 2865 uint32_t stat; 2866 uint32_t hccr; 2867 uint16_t mb[8]; 2868 struct rsp_que *rsp; 2869 unsigned long flags; 2870 2871 rsp = (struct rsp_que *) dev_id; 2872 if (!rsp) { 2873 ql_log(ql_log_info, NULL, 0x5059, 2874 "%s: NULL response queue pointer.\n", __func__); 2875 return IRQ_NONE; 2876 } 2877 2878 ha = rsp->hw; 2879 reg = &ha->iobase->isp24; 2880 status = 0; 2881 2882 if (unlikely(pci_channel_offline(ha->pdev))) 2883 return IRQ_HANDLED; 2884 2885 spin_lock_irqsave(&ha->hardware_lock, flags); 2886 vha = pci_get_drvdata(ha->pdev); 2887 for (iter = 50; iter--; ) { 2888 stat = RD_REG_DWORD(®->host_status); 2889 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 2890 break; 2891 if (stat & HSRX_RISC_PAUSED) { 2892 if (unlikely(pci_channel_offline(ha->pdev))) 2893 break; 2894 2895 hccr = RD_REG_DWORD(®->hccr); 2896 2897 ql_log(ql_log_warn, vha, 0x504b, 2898 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2899 hccr); 2900 2901 qla2xxx_check_risc_status(vha); 2902 2903 ha->isp_ops->fw_dump(vha, 1); 2904 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2905 break; 2906 } else if ((stat & HSRX_RISC_INT) == 0) 2907 break; 2908 2909 switch (stat & 0xff) { 2910 case INTR_ROM_MB_SUCCESS: 2911 case INTR_ROM_MB_FAILED: 2912 case INTR_MB_SUCCESS: 2913 case INTR_MB_FAILED: 2914 qla24xx_mbx_completion(vha, MSW(stat)); 2915 status |= MBX_INTERRUPT; 2916 2917 break; 2918 case INTR_ASYNC_EVENT: 2919 mb[0] = MSW(stat); 2920 mb[1] = RD_REG_WORD(®->mailbox1); 2921 mb[2] = RD_REG_WORD(®->mailbox2); 2922 mb[3] = RD_REG_WORD(®->mailbox3); 2923 qla2x00_async_event(vha, rsp, mb); 2924 break; 2925 case INTR_RSP_QUE_UPDATE: 2926 case INTR_RSP_QUE_UPDATE_83XX: 2927 qla24xx_process_response_queue(vha, rsp); 2928 break; 2929 case INTR_ATIO_QUE_UPDATE:{ 2930 unsigned long flags2; 2931 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 2932 qlt_24xx_process_atio_queue(vha, 1); 2933 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); 2934 break; 2935 } 2936 case INTR_ATIO_RSP_QUE_UPDATE: { 2937 unsigned long flags2; 2938 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 2939 qlt_24xx_process_atio_queue(vha, 1); 2940 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); 2941 2942 qla24xx_process_response_queue(vha, rsp); 2943 break; 2944 } 2945 default: 2946 ql_dbg(ql_dbg_async, vha, 0x504f, 2947 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2948 break; 2949 } 2950 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2951 RD_REG_DWORD_RELAXED(®->hccr); 2952 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2953 ndelay(3500); 2954 } 2955 qla2x00_handle_mbx_completion(ha, status); 2956 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2957 2958 return IRQ_HANDLED; 2959 } 2960 2961 static irqreturn_t 2962 qla24xx_msix_rsp_q(int irq, void *dev_id) 2963 { 2964 struct qla_hw_data *ha; 2965 struct rsp_que *rsp; 2966 struct device_reg_24xx __iomem *reg; 2967 struct scsi_qla_host *vha; 2968 unsigned long flags; 2969 uint32_t stat = 0; 2970 2971 rsp = (struct rsp_que *) dev_id; 2972 if (!rsp) { 2973 ql_log(ql_log_info, NULL, 0x505a, 2974 "%s: NULL response queue pointer.\n", __func__); 2975 return IRQ_NONE; 2976 } 2977 ha = rsp->hw; 2978 reg = &ha->iobase->isp24; 2979 2980 spin_lock_irqsave(&ha->hardware_lock, flags); 2981 2982 vha = pci_get_drvdata(ha->pdev); 2983 /* 2984 * Use host_status register to check to PCI disconnection before we 2985 * we process the response queue. 2986 */ 2987 stat = RD_REG_DWORD(®->host_status); 2988 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 2989 goto out; 2990 qla24xx_process_response_queue(vha, rsp); 2991 if (!ha->flags.disable_msix_handshake) { 2992 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2993 RD_REG_DWORD_RELAXED(®->hccr); 2994 } 2995 out: 2996 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2997 2998 return IRQ_HANDLED; 2999 } 3000 3001 static irqreturn_t 3002 qla24xx_msix_default(int irq, void *dev_id) 3003 { 3004 scsi_qla_host_t *vha; 3005 struct qla_hw_data *ha; 3006 struct rsp_que *rsp; 3007 struct device_reg_24xx __iomem *reg; 3008 int status; 3009 uint32_t stat; 3010 uint32_t hccr; 3011 uint16_t mb[8]; 3012 unsigned long flags; 3013 3014 rsp = (struct rsp_que *) dev_id; 3015 if (!rsp) { 3016 ql_log(ql_log_info, NULL, 0x505c, 3017 "%s: NULL response queue pointer.\n", __func__); 3018 return IRQ_NONE; 3019 } 3020 ha = rsp->hw; 3021 reg = &ha->iobase->isp24; 3022 status = 0; 3023 3024 spin_lock_irqsave(&ha->hardware_lock, flags); 3025 vha = pci_get_drvdata(ha->pdev); 3026 do { 3027 stat = RD_REG_DWORD(®->host_status); 3028 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 3029 break; 3030 if (stat & HSRX_RISC_PAUSED) { 3031 if (unlikely(pci_channel_offline(ha->pdev))) 3032 break; 3033 3034 hccr = RD_REG_DWORD(®->hccr); 3035 3036 ql_log(ql_log_info, vha, 0x5050, 3037 "RISC paused -- HCCR=%x, Dumping firmware.\n", 3038 hccr); 3039 3040 qla2xxx_check_risc_status(vha); 3041 3042 ha->isp_ops->fw_dump(vha, 1); 3043 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3044 break; 3045 } else if ((stat & HSRX_RISC_INT) == 0) 3046 break; 3047 3048 switch (stat & 0xff) { 3049 case INTR_ROM_MB_SUCCESS: 3050 case INTR_ROM_MB_FAILED: 3051 case INTR_MB_SUCCESS: 3052 case INTR_MB_FAILED: 3053 qla24xx_mbx_completion(vha, MSW(stat)); 3054 status |= MBX_INTERRUPT; 3055 3056 break; 3057 case INTR_ASYNC_EVENT: 3058 mb[0] = MSW(stat); 3059 mb[1] = RD_REG_WORD(®->mailbox1); 3060 mb[2] = RD_REG_WORD(®->mailbox2); 3061 mb[3] = RD_REG_WORD(®->mailbox3); 3062 qla2x00_async_event(vha, rsp, mb); 3063 break; 3064 case INTR_RSP_QUE_UPDATE: 3065 case INTR_RSP_QUE_UPDATE_83XX: 3066 qla24xx_process_response_queue(vha, rsp); 3067 break; 3068 case INTR_ATIO_QUE_UPDATE:{ 3069 unsigned long flags2; 3070 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 3071 qlt_24xx_process_atio_queue(vha, 1); 3072 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); 3073 break; 3074 } 3075 case INTR_ATIO_RSP_QUE_UPDATE: { 3076 unsigned long flags2; 3077 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 3078 qlt_24xx_process_atio_queue(vha, 1); 3079 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); 3080 3081 qla24xx_process_response_queue(vha, rsp); 3082 break; 3083 } 3084 default: 3085 ql_dbg(ql_dbg_async, vha, 0x5051, 3086 "Unrecognized interrupt type (%d).\n", stat & 0xff); 3087 break; 3088 } 3089 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 3090 } while (0); 3091 qla2x00_handle_mbx_completion(ha, status); 3092 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3093 3094 return IRQ_HANDLED; 3095 } 3096 3097 irqreturn_t 3098 qla2xxx_msix_rsp_q(int irq, void *dev_id) 3099 { 3100 struct qla_hw_data *ha; 3101 struct qla_qpair *qpair; 3102 struct device_reg_24xx __iomem *reg; 3103 unsigned long flags; 3104 3105 qpair = dev_id; 3106 if (!qpair) { 3107 ql_log(ql_log_info, NULL, 0x505b, 3108 "%s: NULL response queue pointer.\n", __func__); 3109 return IRQ_NONE; 3110 } 3111 ha = qpair->hw; 3112 3113 /* Clear the interrupt, if enabled, for this response queue */ 3114 if (unlikely(!ha->flags.disable_msix_handshake)) { 3115 reg = &ha->iobase->isp24; 3116 spin_lock_irqsave(&ha->hardware_lock, flags); 3117 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 3118 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3119 } 3120 3121 queue_work(ha->wq, &qpair->q_work); 3122 3123 return IRQ_HANDLED; 3124 } 3125 3126 /* Interrupt handling helpers. */ 3127 3128 struct qla_init_msix_entry { 3129 const char *name; 3130 irq_handler_t handler; 3131 }; 3132 3133 static const struct qla_init_msix_entry msix_entries[] = { 3134 { "qla2xxx (default)", qla24xx_msix_default }, 3135 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 3136 { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, 3137 { "qla2xxx (qpair_multiq)", qla2xxx_msix_rsp_q }, 3138 }; 3139 3140 static const struct qla_init_msix_entry qla82xx_msix_entries[] = { 3141 { "qla2xxx (default)", qla82xx_msix_default }, 3142 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3143 }; 3144 3145 static int 3146 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3147 { 3148 int i, ret; 3149 struct qla_msix_entry *qentry; 3150 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3151 int min_vecs = QLA_BASE_VECTORS; 3152 struct irq_affinity desc = { 3153 .pre_vectors = QLA_BASE_VECTORS, 3154 }; 3155 3156 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 3157 desc.pre_vectors++; 3158 min_vecs++; 3159 } 3160 3161 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, 3162 ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, 3163 &desc); 3164 3165 if (ret < 0) { 3166 ql_log(ql_log_fatal, vha, 0x00c7, 3167 "MSI-X: Failed to enable support, " 3168 "giving up -- %d/%d.\n", 3169 ha->msix_count, ret); 3170 goto msix_out; 3171 } else if (ret < ha->msix_count) { 3172 ql_log(ql_log_warn, vha, 0x00c6, 3173 "MSI-X: Failed to enable support " 3174 "with %d vectors, using %d vectors.\n", 3175 ha->msix_count, ret); 3176 ha->msix_count = ret; 3177 /* Recalculate queue values */ 3178 if (ha->mqiobase && ql2xmqsupport) { 3179 ha->max_req_queues = ha->msix_count - 1; 3180 3181 /* ATIOQ needs 1 vector. That's 1 less QPair */ 3182 if (QLA_TGT_MODE_ENABLED()) 3183 ha->max_req_queues--; 3184 3185 ha->max_rsp_queues = ha->max_req_queues; 3186 3187 ha->max_qpairs = ha->max_req_queues - 1; 3188 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190, 3189 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs); 3190 } 3191 } 3192 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 3193 ha->msix_count, GFP_KERNEL); 3194 if (!ha->msix_entries) { 3195 ql_log(ql_log_fatal, vha, 0x00c8, 3196 "Failed to allocate memory for ha->msix_entries.\n"); 3197 ret = -ENOMEM; 3198 goto msix_out; 3199 } 3200 ha->flags.msix_enabled = 1; 3201 3202 for (i = 0; i < ha->msix_count; i++) { 3203 qentry = &ha->msix_entries[i]; 3204 qentry->vector = pci_irq_vector(ha->pdev, i); 3205 qentry->entry = i; 3206 qentry->have_irq = 0; 3207 qentry->in_use = 0; 3208 qentry->handle = NULL; 3209 } 3210 3211 /* Enable MSI-X vectors for the base queue */ 3212 for (i = 0; i < QLA_BASE_VECTORS; i++) { 3213 qentry = &ha->msix_entries[i]; 3214 qentry->handle = rsp; 3215 rsp->msix = qentry; 3216 scnprintf(qentry->name, sizeof(qentry->name), 3217 "%s", msix_entries[i].name); 3218 if (IS_P3P_TYPE(ha)) 3219 ret = request_irq(qentry->vector, 3220 qla82xx_msix_entries[i].handler, 3221 0, qla82xx_msix_entries[i].name, rsp); 3222 else 3223 ret = request_irq(qentry->vector, 3224 msix_entries[i].handler, 3225 0, msix_entries[i].name, rsp); 3226 if (ret) 3227 goto msix_register_fail; 3228 qentry->have_irq = 1; 3229 qentry->in_use = 1; 3230 } 3231 3232 /* 3233 * If target mode is enable, also request the vector for the ATIO 3234 * queue. 3235 */ 3236 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 3237 qentry = &ha->msix_entries[QLA_ATIO_VECTOR]; 3238 rsp->msix = qentry; 3239 qentry->handle = rsp; 3240 scnprintf(qentry->name, sizeof(qentry->name), 3241 "%s", msix_entries[QLA_ATIO_VECTOR].name); 3242 qentry->in_use = 1; 3243 ret = request_irq(qentry->vector, 3244 msix_entries[QLA_ATIO_VECTOR].handler, 3245 0, msix_entries[QLA_ATIO_VECTOR].name, rsp); 3246 qentry->have_irq = 1; 3247 } 3248 3249 msix_register_fail: 3250 if (ret) { 3251 ql_log(ql_log_fatal, vha, 0x00cb, 3252 "MSI-X: unable to register handler -- %x/%d.\n", 3253 qentry->vector, ret); 3254 qla2x00_free_irqs(vha); 3255 ha->mqenable = 0; 3256 goto msix_out; 3257 } 3258 3259 /* Enable MSI-X vector for response queue update for queue 0 */ 3260 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3261 if (ha->msixbase && ha->mqiobase && 3262 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3263 ql2xmqsupport)) 3264 ha->mqenable = 1; 3265 } else 3266 if (ha->mqiobase && 3267 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || 3268 ql2xmqsupport)) 3269 ha->mqenable = 1; 3270 ql_dbg(ql_dbg_multiq, vha, 0xc005, 3271 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3272 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3273 ql_dbg(ql_dbg_init, vha, 0x0055, 3274 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3275 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3276 3277 msix_out: 3278 return ret; 3279 } 3280 3281 int 3282 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 3283 { 3284 int ret = QLA_FUNCTION_FAILED; 3285 device_reg_t *reg = ha->iobase; 3286 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3287 3288 /* If possible, enable MSI-X. */ 3289 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3290 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && 3291 !IS_QLA27XX(ha)) 3292 goto skip_msi; 3293 3294 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 3295 (ha->pdev->subsystem_device == 0x7040 || 3296 ha->pdev->subsystem_device == 0x7041 || 3297 ha->pdev->subsystem_device == 0x1705)) { 3298 ql_log(ql_log_warn, vha, 0x0034, 3299 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 3300 ha->pdev->subsystem_vendor, 3301 ha->pdev->subsystem_device); 3302 goto skip_msi; 3303 } 3304 3305 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 3306 ql_log(ql_log_warn, vha, 0x0035, 3307 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 3308 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 3309 goto skip_msix; 3310 } 3311 3312 ret = qla24xx_enable_msix(ha, rsp); 3313 if (!ret) { 3314 ql_dbg(ql_dbg_init, vha, 0x0036, 3315 "MSI-X: Enabled (0x%X, 0x%X).\n", 3316 ha->chip_revision, ha->fw_attributes); 3317 goto clear_risc_ints; 3318 } 3319 3320 skip_msix: 3321 3322 ql_log(ql_log_info, vha, 0x0037, 3323 "Falling back-to MSI mode -%d.\n", ret); 3324 3325 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3326 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 3327 !IS_QLA27XX(ha)) 3328 goto skip_msi; 3329 3330 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); 3331 if (!ret) { 3332 ql_dbg(ql_dbg_init, vha, 0x0038, 3333 "MSI: Enabled.\n"); 3334 ha->flags.msi_enabled = 1; 3335 } else 3336 ql_log(ql_log_warn, vha, 0x0039, 3337 "Falling back-to INTa mode -- %d.\n", ret); 3338 skip_msi: 3339 3340 /* Skip INTx on ISP82xx. */ 3341 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 3342 return QLA_FUNCTION_FAILED; 3343 3344 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 3345 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 3346 QLA2XXX_DRIVER_NAME, rsp); 3347 if (ret) { 3348 ql_log(ql_log_warn, vha, 0x003a, 3349 "Failed to reserve interrupt %d already in use.\n", 3350 ha->pdev->irq); 3351 goto fail; 3352 } else if (!ha->flags.msi_enabled) { 3353 ql_dbg(ql_dbg_init, vha, 0x0125, 3354 "INTa mode: Enabled.\n"); 3355 ha->flags.mr_intr_valid = 1; 3356 } 3357 3358 clear_risc_ints: 3359 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 3360 goto fail; 3361 3362 spin_lock_irq(&ha->hardware_lock); 3363 WRT_REG_WORD(®->isp.semaphore, 0); 3364 spin_unlock_irq(&ha->hardware_lock); 3365 3366 fail: 3367 return ret; 3368 } 3369 3370 void 3371 qla2x00_free_irqs(scsi_qla_host_t *vha) 3372 { 3373 struct qla_hw_data *ha = vha->hw; 3374 struct rsp_que *rsp; 3375 struct qla_msix_entry *qentry; 3376 int i; 3377 3378 /* 3379 * We need to check that ha->rsp_q_map is valid in case we are called 3380 * from a probe failure context. 3381 */ 3382 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3383 goto free_irqs; 3384 rsp = ha->rsp_q_map[0]; 3385 3386 if (ha->flags.msix_enabled) { 3387 for (i = 0; i < ha->msix_count; i++) { 3388 qentry = &ha->msix_entries[i]; 3389 if (qentry->have_irq) { 3390 irq_set_affinity_notifier(qentry->vector, NULL); 3391 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle); 3392 } 3393 } 3394 kfree(ha->msix_entries); 3395 ha->msix_entries = NULL; 3396 ha->flags.msix_enabled = 0; 3397 ql_dbg(ql_dbg_init, vha, 0x0042, 3398 "Disabled MSI-X.\n"); 3399 } else { 3400 free_irq(pci_irq_vector(ha->pdev, 0), rsp); 3401 } 3402 3403 free_irqs: 3404 pci_free_irq_vectors(ha->pdev); 3405 } 3406 3407 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, 3408 struct qla_msix_entry *msix, int vector_type) 3409 { 3410 const struct qla_init_msix_entry *intr = &msix_entries[vector_type]; 3411 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3412 int ret; 3413 3414 scnprintf(msix->name, sizeof(msix->name), 3415 "qla2xxx%lu_qpair%d", vha->host_no, qpair->id); 3416 ret = request_irq(msix->vector, intr->handler, 0, msix->name, qpair); 3417 if (ret) { 3418 ql_log(ql_log_fatal, vha, 0x00e6, 3419 "MSI-X: Unable to register handler -- %x/%d.\n", 3420 msix->vector, ret); 3421 return ret; 3422 } 3423 msix->have_irq = 1; 3424 msix->handle = qpair; 3425 return ret; 3426 } 3427