1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <scsi/scsi_tcq.h> 13 #include <scsi/scsi_bsg_fc.h> 14 #include <scsi/scsi_eh.h> 15 16 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 17 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 18 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 19 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 20 sts_entry_t *); 21 22 /** 23 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 24 * @irq: 25 * @dev_id: SCSI driver HA context 26 * 27 * Called by system whenever the host adapter generates an interrupt. 28 * 29 * Returns handled flag. 30 */ 31 irqreturn_t 32 qla2100_intr_handler(int irq, void *dev_id) 33 { 34 scsi_qla_host_t *vha; 35 struct qla_hw_data *ha; 36 struct device_reg_2xxx __iomem *reg; 37 int status; 38 unsigned long iter; 39 uint16_t hccr; 40 uint16_t mb[4]; 41 struct rsp_que *rsp; 42 unsigned long flags; 43 44 rsp = (struct rsp_que *) dev_id; 45 if (!rsp) { 46 ql_log(ql_log_info, NULL, 0x505d, 47 "%s: NULL response queue pointer.\n", __func__); 48 return (IRQ_NONE); 49 } 50 51 ha = rsp->hw; 52 reg = &ha->iobase->isp; 53 status = 0; 54 55 spin_lock_irqsave(&ha->hardware_lock, flags); 56 vha = pci_get_drvdata(ha->pdev); 57 for (iter = 50; iter--; ) { 58 hccr = RD_REG_WORD(®->hccr); 59 if (qla2x00_check_reg16_for_disconnect(vha, hccr)) 60 break; 61 if (hccr & HCCR_RISC_PAUSE) { 62 if (pci_channel_offline(ha->pdev)) 63 break; 64 65 /* 66 * Issue a "HARD" reset in order for the RISC interrupt 67 * bit to be cleared. Schedule a big hammer to get 68 * out of the RISC PAUSED state. 69 */ 70 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 71 RD_REG_WORD(®->hccr); 72 73 ha->isp_ops->fw_dump(vha, 1); 74 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 75 break; 76 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) 77 break; 78 79 if (RD_REG_WORD(®->semaphore) & BIT_0) { 80 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 81 RD_REG_WORD(®->hccr); 82 83 /* Get mailbox data. */ 84 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 85 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 86 qla2x00_mbx_completion(vha, mb[0]); 87 status |= MBX_INTERRUPT; 88 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 89 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 90 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 91 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 92 qla2x00_async_event(vha, rsp, mb); 93 } else { 94 /*EMPTY*/ 95 ql_dbg(ql_dbg_async, vha, 0x5025, 96 "Unrecognized interrupt type (%d).\n", 97 mb[0]); 98 } 99 /* Release mailbox registers. */ 100 WRT_REG_WORD(®->semaphore, 0); 101 RD_REG_WORD(®->semaphore); 102 } else { 103 qla2x00_process_response_queue(rsp); 104 105 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 106 RD_REG_WORD(®->hccr); 107 } 108 } 109 qla2x00_handle_mbx_completion(ha, status); 110 spin_unlock_irqrestore(&ha->hardware_lock, flags); 111 112 return (IRQ_HANDLED); 113 } 114 115 bool 116 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 117 { 118 /* Check for PCI disconnection */ 119 if (reg == 0xffffffff) { 120 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 121 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 122 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 123 /* 124 * Schedule this (only once) on the default system 125 * workqueue so that all the adapter workqueues and the 126 * DPC thread can be shutdown cleanly. 127 */ 128 schedule_work(&vha->hw->board_disable); 129 } 130 return true; 131 } else 132 return false; 133 } 134 135 bool 136 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) 137 { 138 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); 139 } 140 141 /** 142 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 143 * @irq: 144 * @dev_id: SCSI driver HA context 145 * 146 * Called by system whenever the host adapter generates an interrupt. 147 * 148 * Returns handled flag. 149 */ 150 irqreturn_t 151 qla2300_intr_handler(int irq, void *dev_id) 152 { 153 scsi_qla_host_t *vha; 154 struct device_reg_2xxx __iomem *reg; 155 int status; 156 unsigned long iter; 157 uint32_t stat; 158 uint16_t hccr; 159 uint16_t mb[4]; 160 struct rsp_que *rsp; 161 struct qla_hw_data *ha; 162 unsigned long flags; 163 164 rsp = (struct rsp_que *) dev_id; 165 if (!rsp) { 166 ql_log(ql_log_info, NULL, 0x5058, 167 "%s: NULL response queue pointer.\n", __func__); 168 return (IRQ_NONE); 169 } 170 171 ha = rsp->hw; 172 reg = &ha->iobase->isp; 173 status = 0; 174 175 spin_lock_irqsave(&ha->hardware_lock, flags); 176 vha = pci_get_drvdata(ha->pdev); 177 for (iter = 50; iter--; ) { 178 stat = RD_REG_DWORD(®->u.isp2300.host_status); 179 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 180 break; 181 if (stat & HSR_RISC_PAUSED) { 182 if (unlikely(pci_channel_offline(ha->pdev))) 183 break; 184 185 hccr = RD_REG_WORD(®->hccr); 186 187 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 188 ql_log(ql_log_warn, vha, 0x5026, 189 "Parity error -- HCCR=%x, Dumping " 190 "firmware.\n", hccr); 191 else 192 ql_log(ql_log_warn, vha, 0x5027, 193 "RISC paused -- HCCR=%x, Dumping " 194 "firmware.\n", hccr); 195 196 /* 197 * Issue a "HARD" reset in order for the RISC 198 * interrupt bit to be cleared. Schedule a big 199 * hammer to get out of the RISC PAUSED state. 200 */ 201 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 202 RD_REG_WORD(®->hccr); 203 204 ha->isp_ops->fw_dump(vha, 1); 205 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 206 break; 207 } else if ((stat & HSR_RISC_INT) == 0) 208 break; 209 210 switch (stat & 0xff) { 211 case 0x1: 212 case 0x2: 213 case 0x10: 214 case 0x11: 215 qla2x00_mbx_completion(vha, MSW(stat)); 216 status |= MBX_INTERRUPT; 217 218 /* Release mailbox registers. */ 219 WRT_REG_WORD(®->semaphore, 0); 220 break; 221 case 0x12: 222 mb[0] = MSW(stat); 223 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 224 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 225 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 226 qla2x00_async_event(vha, rsp, mb); 227 break; 228 case 0x13: 229 qla2x00_process_response_queue(rsp); 230 break; 231 case 0x15: 232 mb[0] = MBA_CMPLT_1_16BIT; 233 mb[1] = MSW(stat); 234 qla2x00_async_event(vha, rsp, mb); 235 break; 236 case 0x16: 237 mb[0] = MBA_SCSI_COMPLETION; 238 mb[1] = MSW(stat); 239 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 240 qla2x00_async_event(vha, rsp, mb); 241 break; 242 default: 243 ql_dbg(ql_dbg_async, vha, 0x5028, 244 "Unrecognized interrupt type (%d).\n", stat & 0xff); 245 break; 246 } 247 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 248 RD_REG_WORD_RELAXED(®->hccr); 249 } 250 qla2x00_handle_mbx_completion(ha, status); 251 spin_unlock_irqrestore(&ha->hardware_lock, flags); 252 253 return (IRQ_HANDLED); 254 } 255 256 /** 257 * qla2x00_mbx_completion() - Process mailbox command completions. 258 * @ha: SCSI driver HA context 259 * @mb0: Mailbox0 register 260 */ 261 static void 262 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 263 { 264 uint16_t cnt; 265 uint32_t mboxes; 266 uint16_t __iomem *wptr; 267 struct qla_hw_data *ha = vha->hw; 268 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 269 270 /* Read all mbox registers? */ 271 mboxes = (1 << ha->mbx_count) - 1; 272 if (!ha->mcp) 273 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 274 else 275 mboxes = ha->mcp->in_mb; 276 277 /* Load return mailbox registers. */ 278 ha->flags.mbox_int = 1; 279 ha->mailbox_out[0] = mb0; 280 mboxes >>= 1; 281 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 282 283 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 284 if (IS_QLA2200(ha) && cnt == 8) 285 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 286 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 287 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 288 else if (mboxes & BIT_0) 289 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 290 291 wptr++; 292 mboxes >>= 1; 293 } 294 } 295 296 static void 297 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 298 { 299 static char *event[] = 300 { "Complete", "Request Notification", "Time Extension" }; 301 int rval; 302 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 303 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 304 uint16_t __iomem *wptr; 305 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 306 307 /* Seed data -- mailbox1 -> mailbox7. */ 308 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 309 wptr = (uint16_t __iomem *)®24->mailbox1; 310 else if (IS_QLA8044(vha->hw)) 311 wptr = (uint16_t __iomem *)®82->mailbox_out[1]; 312 else 313 return; 314 315 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 316 mb[cnt] = RD_REG_WORD(wptr); 317 318 ql_dbg(ql_dbg_async, vha, 0x5021, 319 "Inter-Driver Communication %s -- " 320 "%04x %04x %04x %04x %04x %04x %04x.\n", 321 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 322 mb[4], mb[5], mb[6]); 323 switch (aen) { 324 /* Handle IDC Error completion case. */ 325 case MBA_IDC_COMPLETE: 326 if (mb[1] >> 15) { 327 vha->hw->flags.idc_compl_status = 1; 328 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 329 complete(&vha->hw->dcbx_comp); 330 } 331 break; 332 333 case MBA_IDC_NOTIFY: 334 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 335 timeout = (descr >> 8) & 0xf; 336 ql_dbg(ql_dbg_async, vha, 0x5022, 337 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 338 vha->host_no, event[aen & 0xff], timeout); 339 340 if (!timeout) 341 return; 342 rval = qla2x00_post_idc_ack_work(vha, mb); 343 if (rval != QLA_SUCCESS) 344 ql_log(ql_log_warn, vha, 0x5023, 345 "IDC failed to post ACK.\n"); 346 break; 347 case MBA_IDC_TIME_EXT: 348 vha->hw->idc_extend_tmo = descr; 349 ql_dbg(ql_dbg_async, vha, 0x5087, 350 "%lu Inter-Driver Communication %s -- " 351 "Extend timeout by=%d.\n", 352 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 353 break; 354 } 355 } 356 357 #define LS_UNKNOWN 2 358 const char * 359 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 360 { 361 static const char *const link_speeds[] = { 362 "1", "2", "?", "4", "8", "16", "32", "10" 363 }; 364 #define QLA_LAST_SPEED 7 365 366 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 367 return link_speeds[0]; 368 else if (speed == 0x13) 369 return link_speeds[QLA_LAST_SPEED]; 370 else if (speed < QLA_LAST_SPEED) 371 return link_speeds[speed]; 372 else 373 return link_speeds[LS_UNKNOWN]; 374 } 375 376 static void 377 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 378 { 379 struct qla_hw_data *ha = vha->hw; 380 381 /* 382 * 8200 AEN Interpretation: 383 * mb[0] = AEN code 384 * mb[1] = AEN Reason code 385 * mb[2] = LSW of Peg-Halt Status-1 Register 386 * mb[6] = MSW of Peg-Halt Status-1 Register 387 * mb[3] = LSW of Peg-Halt Status-2 register 388 * mb[7] = MSW of Peg-Halt Status-2 register 389 * mb[4] = IDC Device-State Register value 390 * mb[5] = IDC Driver-Presence Register value 391 */ 392 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 393 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 394 mb[0], mb[1], mb[2], mb[6]); 395 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 396 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 397 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 398 399 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 400 IDC_HEARTBEAT_FAILURE)) { 401 ha->flags.nic_core_hung = 1; 402 ql_log(ql_log_warn, vha, 0x5060, 403 "83XX: F/W Error Reported: Check if reset required.\n"); 404 405 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 406 uint32_t protocol_engine_id, fw_err_code, err_level; 407 408 /* 409 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 410 * - PEG-Halt Status-1 Register: 411 * (LSW = mb[2], MSW = mb[6]) 412 * Bits 0-7 = protocol-engine ID 413 * Bits 8-28 = f/w error code 414 * Bits 29-31 = Error-level 415 * Error-level 0x1 = Non-Fatal error 416 * Error-level 0x2 = Recoverable Fatal error 417 * Error-level 0x4 = UnRecoverable Fatal error 418 * - PEG-Halt Status-2 Register: 419 * (LSW = mb[3], MSW = mb[7]) 420 */ 421 protocol_engine_id = (mb[2] & 0xff); 422 fw_err_code = (((mb[2] & 0xff00) >> 8) | 423 ((mb[6] & 0x1fff) << 8)); 424 err_level = ((mb[6] & 0xe000) >> 13); 425 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 426 "Register: protocol_engine_id=0x%x " 427 "fw_err_code=0x%x err_level=0x%x.\n", 428 protocol_engine_id, fw_err_code, err_level); 429 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 430 "Register: 0x%x%x.\n", mb[7], mb[3]); 431 if (err_level == ERR_LEVEL_NON_FATAL) { 432 ql_log(ql_log_warn, vha, 0x5063, 433 "Not a fatal error, f/w has recovered " 434 "iteself.\n"); 435 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 436 ql_log(ql_log_fatal, vha, 0x5064, 437 "Recoverable Fatal error: Chip reset " 438 "required.\n"); 439 qla83xx_schedule_work(vha, 440 QLA83XX_NIC_CORE_RESET); 441 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 442 ql_log(ql_log_fatal, vha, 0x5065, 443 "Unrecoverable Fatal error: Set FAILED " 444 "state, reboot required.\n"); 445 qla83xx_schedule_work(vha, 446 QLA83XX_NIC_CORE_UNRECOVERABLE); 447 } 448 } 449 450 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 451 uint16_t peg_fw_state, nw_interface_link_up; 452 uint16_t nw_interface_signal_detect, sfp_status; 453 uint16_t htbt_counter, htbt_monitor_enable; 454 uint16_t sfp_additonal_info, sfp_multirate; 455 uint16_t sfp_tx_fault, link_speed, dcbx_status; 456 457 /* 458 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 459 * - PEG-to-FC Status Register: 460 * (LSW = mb[2], MSW = mb[6]) 461 * Bits 0-7 = Peg-Firmware state 462 * Bit 8 = N/W Interface Link-up 463 * Bit 9 = N/W Interface signal detected 464 * Bits 10-11 = SFP Status 465 * SFP Status 0x0 = SFP+ transceiver not expected 466 * SFP Status 0x1 = SFP+ transceiver not present 467 * SFP Status 0x2 = SFP+ transceiver invalid 468 * SFP Status 0x3 = SFP+ transceiver present and 469 * valid 470 * Bits 12-14 = Heartbeat Counter 471 * Bit 15 = Heartbeat Monitor Enable 472 * Bits 16-17 = SFP Additional Info 473 * SFP info 0x0 = Unregocnized transceiver for 474 * Ethernet 475 * SFP info 0x1 = SFP+ brand validation failed 476 * SFP info 0x2 = SFP+ speed validation failed 477 * SFP info 0x3 = SFP+ access error 478 * Bit 18 = SFP Multirate 479 * Bit 19 = SFP Tx Fault 480 * Bits 20-22 = Link Speed 481 * Bits 23-27 = Reserved 482 * Bits 28-30 = DCBX Status 483 * DCBX Status 0x0 = DCBX Disabled 484 * DCBX Status 0x1 = DCBX Enabled 485 * DCBX Status 0x2 = DCBX Exchange error 486 * Bit 31 = Reserved 487 */ 488 peg_fw_state = (mb[2] & 0x00ff); 489 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 490 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 491 sfp_status = ((mb[2] & 0x0c00) >> 10); 492 htbt_counter = ((mb[2] & 0x7000) >> 12); 493 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 494 sfp_additonal_info = (mb[6] & 0x0003); 495 sfp_multirate = ((mb[6] & 0x0004) >> 2); 496 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 497 link_speed = ((mb[6] & 0x0070) >> 4); 498 dcbx_status = ((mb[6] & 0x7000) >> 12); 499 500 ql_log(ql_log_warn, vha, 0x5066, 501 "Peg-to-Fc Status Register:\n" 502 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 503 "nw_interface_signal_detect=0x%x" 504 "\nsfp_statis=0x%x.\n ", peg_fw_state, 505 nw_interface_link_up, nw_interface_signal_detect, 506 sfp_status); 507 ql_log(ql_log_warn, vha, 0x5067, 508 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 509 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", 510 htbt_counter, htbt_monitor_enable, 511 sfp_additonal_info, sfp_multirate); 512 ql_log(ql_log_warn, vha, 0x5068, 513 "sfp_tx_fault=0x%x, link_state=0x%x, " 514 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 515 dcbx_status); 516 517 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 518 } 519 520 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 521 ql_log(ql_log_warn, vha, 0x5069, 522 "Heartbeat Failure encountered, chip reset " 523 "required.\n"); 524 525 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 526 } 527 } 528 529 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 530 ql_log(ql_log_info, vha, 0x506a, 531 "IDC Device-State changed = 0x%x.\n", mb[4]); 532 if (ha->flags.nic_core_reset_owner) 533 return; 534 qla83xx_schedule_work(vha, MBA_IDC_AEN); 535 } 536 } 537 538 int 539 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 540 { 541 struct qla_hw_data *ha = vha->hw; 542 scsi_qla_host_t *vp; 543 uint32_t vp_did; 544 unsigned long flags; 545 int ret = 0; 546 547 if (!ha->num_vhosts) 548 return ret; 549 550 spin_lock_irqsave(&ha->vport_slock, flags); 551 list_for_each_entry(vp, &ha->vp_list, list) { 552 vp_did = vp->d_id.b24; 553 if (vp_did == rscn_entry) { 554 ret = 1; 555 break; 556 } 557 } 558 spin_unlock_irqrestore(&ha->vport_slock, flags); 559 560 return ret; 561 } 562 563 /** 564 * qla2x00_async_event() - Process aynchronous events. 565 * @ha: SCSI driver HA context 566 * @mb: Mailbox registers (0 - 3) 567 */ 568 void 569 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 570 { 571 uint16_t handle_cnt; 572 uint16_t cnt, mbx; 573 uint32_t handles[5]; 574 struct qla_hw_data *ha = vha->hw; 575 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 576 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 577 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 578 uint32_t rscn_entry, host_pid, tmp_pid; 579 unsigned long flags; 580 fc_port_t *fcport = NULL; 581 582 /* Setup to process RIO completion. */ 583 handle_cnt = 0; 584 if (IS_CNA_CAPABLE(ha)) 585 goto skip_rio; 586 switch (mb[0]) { 587 case MBA_SCSI_COMPLETION: 588 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 589 handle_cnt = 1; 590 break; 591 case MBA_CMPLT_1_16BIT: 592 handles[0] = mb[1]; 593 handle_cnt = 1; 594 mb[0] = MBA_SCSI_COMPLETION; 595 break; 596 case MBA_CMPLT_2_16BIT: 597 handles[0] = mb[1]; 598 handles[1] = mb[2]; 599 handle_cnt = 2; 600 mb[0] = MBA_SCSI_COMPLETION; 601 break; 602 case MBA_CMPLT_3_16BIT: 603 handles[0] = mb[1]; 604 handles[1] = mb[2]; 605 handles[2] = mb[3]; 606 handle_cnt = 3; 607 mb[0] = MBA_SCSI_COMPLETION; 608 break; 609 case MBA_CMPLT_4_16BIT: 610 handles[0] = mb[1]; 611 handles[1] = mb[2]; 612 handles[2] = mb[3]; 613 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 614 handle_cnt = 4; 615 mb[0] = MBA_SCSI_COMPLETION; 616 break; 617 case MBA_CMPLT_5_16BIT: 618 handles[0] = mb[1]; 619 handles[1] = mb[2]; 620 handles[2] = mb[3]; 621 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 622 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 623 handle_cnt = 5; 624 mb[0] = MBA_SCSI_COMPLETION; 625 break; 626 case MBA_CMPLT_2_32BIT: 627 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 628 handles[1] = le32_to_cpu( 629 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 630 RD_MAILBOX_REG(ha, reg, 6)); 631 handle_cnt = 2; 632 mb[0] = MBA_SCSI_COMPLETION; 633 break; 634 default: 635 break; 636 } 637 skip_rio: 638 switch (mb[0]) { 639 case MBA_SCSI_COMPLETION: /* Fast Post */ 640 if (!vha->flags.online) 641 break; 642 643 for (cnt = 0; cnt < handle_cnt; cnt++) 644 qla2x00_process_completed_request(vha, rsp->req, 645 handles[cnt]); 646 break; 647 648 case MBA_RESET: /* Reset */ 649 ql_dbg(ql_dbg_async, vha, 0x5002, 650 "Asynchronous RESET.\n"); 651 652 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 653 break; 654 655 case MBA_SYSTEM_ERR: /* System Error */ 656 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? 657 RD_REG_WORD(®24->mailbox7) : 0; 658 ql_log(ql_log_warn, vha, 0x5003, 659 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 660 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 661 662 ha->isp_ops->fw_dump(vha, 1); 663 664 if (IS_FWI2_CAPABLE(ha)) { 665 if (mb[1] == 0 && mb[2] == 0) { 666 ql_log(ql_log_fatal, vha, 0x5004, 667 "Unrecoverable Hardware Error: adapter " 668 "marked OFFLINE!\n"); 669 vha->flags.online = 0; 670 vha->device_flags |= DFLG_DEV_FAILED; 671 } else { 672 /* Check to see if MPI timeout occurred */ 673 if ((mbx & MBX_3) && (ha->port_no == 0)) 674 set_bit(MPI_RESET_NEEDED, 675 &vha->dpc_flags); 676 677 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 678 } 679 } else if (mb[1] == 0) { 680 ql_log(ql_log_fatal, vha, 0x5005, 681 "Unrecoverable Hardware Error: adapter marked " 682 "OFFLINE!\n"); 683 vha->flags.online = 0; 684 vha->device_flags |= DFLG_DEV_FAILED; 685 } else 686 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 687 break; 688 689 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 690 ql_log(ql_log_warn, vha, 0x5006, 691 "ISP Request Transfer Error (%x).\n", mb[1]); 692 693 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 694 break; 695 696 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 697 ql_log(ql_log_warn, vha, 0x5007, 698 "ISP Response Transfer Error.\n"); 699 700 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 701 break; 702 703 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 704 ql_dbg(ql_dbg_async, vha, 0x5008, 705 "Asynchronous WAKEUP_THRES.\n"); 706 707 break; 708 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 709 ql_dbg(ql_dbg_async, vha, 0x5009, 710 "LIP occurred (%x).\n", mb[1]); 711 712 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 713 atomic_set(&vha->loop_state, LOOP_DOWN); 714 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 715 qla2x00_mark_all_devices_lost(vha, 1); 716 } 717 718 if (vha->vp_idx) { 719 atomic_set(&vha->vp_state, VP_FAILED); 720 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 721 } 722 723 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 724 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 725 726 vha->flags.management_server_logged_in = 0; 727 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 728 break; 729 730 case MBA_LOOP_UP: /* Loop Up Event */ 731 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 732 ha->link_data_rate = PORT_SPEED_1GB; 733 else 734 ha->link_data_rate = mb[1]; 735 736 ql_log(ql_log_info, vha, 0x500a, 737 "LOOP UP detected (%s Gbps).\n", 738 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 739 740 vha->flags.management_server_logged_in = 0; 741 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 742 break; 743 744 case MBA_LOOP_DOWN: /* Loop Down Event */ 745 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 746 ? RD_REG_WORD(®24->mailbox4) : 0; 747 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) 748 : mbx; 749 ql_log(ql_log_info, vha, 0x500b, 750 "LOOP DOWN detected (%x %x %x %x).\n", 751 mb[1], mb[2], mb[3], mbx); 752 753 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 754 atomic_set(&vha->loop_state, LOOP_DOWN); 755 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 756 /* 757 * In case of loop down, restore WWPN from 758 * NVRAM in case of FA-WWPN capable ISP 759 */ 760 if (ha->flags.fawwpn_enabled) { 761 void *wwpn = ha->init_cb->port_name; 762 763 memcpy(vha->port_name, wwpn, WWN_SIZE); 764 } 765 766 vha->device_flags |= DFLG_NO_CABLE; 767 qla2x00_mark_all_devices_lost(vha, 1); 768 } 769 770 if (vha->vp_idx) { 771 atomic_set(&vha->vp_state, VP_FAILED); 772 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 773 } 774 775 vha->flags.management_server_logged_in = 0; 776 ha->link_data_rate = PORT_SPEED_UNKNOWN; 777 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 778 break; 779 780 case MBA_LIP_RESET: /* LIP reset occurred */ 781 ql_dbg(ql_dbg_async, vha, 0x500c, 782 "LIP reset occurred (%x).\n", mb[1]); 783 784 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 785 atomic_set(&vha->loop_state, LOOP_DOWN); 786 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 787 qla2x00_mark_all_devices_lost(vha, 1); 788 } 789 790 if (vha->vp_idx) { 791 atomic_set(&vha->vp_state, VP_FAILED); 792 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 793 } 794 795 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 796 797 ha->operating_mode = LOOP; 798 vha->flags.management_server_logged_in = 0; 799 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 800 break; 801 802 /* case MBA_DCBX_COMPLETE: */ 803 case MBA_POINT_TO_POINT: /* Point-to-Point */ 804 if (IS_QLA2100(ha)) 805 break; 806 807 if (IS_CNA_CAPABLE(ha)) { 808 ql_dbg(ql_dbg_async, vha, 0x500d, 809 "DCBX Completed -- %04x %04x %04x.\n", 810 mb[1], mb[2], mb[3]); 811 if (ha->notify_dcbx_comp && !vha->vp_idx) 812 complete(&ha->dcbx_comp); 813 814 } else 815 ql_dbg(ql_dbg_async, vha, 0x500e, 816 "Asynchronous P2P MODE received.\n"); 817 818 /* 819 * Until there's a transition from loop down to loop up, treat 820 * this as loop down only. 821 */ 822 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 823 atomic_set(&vha->loop_state, LOOP_DOWN); 824 if (!atomic_read(&vha->loop_down_timer)) 825 atomic_set(&vha->loop_down_timer, 826 LOOP_DOWN_TIME); 827 qla2x00_mark_all_devices_lost(vha, 1); 828 } 829 830 if (vha->vp_idx) { 831 atomic_set(&vha->vp_state, VP_FAILED); 832 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 833 } 834 835 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 836 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 837 838 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 839 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 840 841 ha->flags.gpsc_supported = 1; 842 vha->flags.management_server_logged_in = 0; 843 break; 844 845 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 846 if (IS_QLA2100(ha)) 847 break; 848 849 ql_dbg(ql_dbg_async, vha, 0x500f, 850 "Configuration change detected: value=%x.\n", mb[1]); 851 852 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 853 atomic_set(&vha->loop_state, LOOP_DOWN); 854 if (!atomic_read(&vha->loop_down_timer)) 855 atomic_set(&vha->loop_down_timer, 856 LOOP_DOWN_TIME); 857 qla2x00_mark_all_devices_lost(vha, 1); 858 } 859 860 if (vha->vp_idx) { 861 atomic_set(&vha->vp_state, VP_FAILED); 862 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 863 } 864 865 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 866 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 867 break; 868 869 case MBA_PORT_UPDATE: /* Port database update */ 870 /* 871 * Handle only global and vn-port update events 872 * 873 * Relevant inputs: 874 * mb[1] = N_Port handle of changed port 875 * OR 0xffff for global event 876 * mb[2] = New login state 877 * 7 = Port logged out 878 * mb[3] = LSB is vp_idx, 0xff = all vps 879 * 880 * Skip processing if: 881 * Event is global, vp_idx is NOT all vps, 882 * vp_idx does not match 883 * Event is not global, vp_idx does not match 884 */ 885 if (IS_QLA2XXX_MIDTYPE(ha) && 886 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 887 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 888 break; 889 890 /* Global event -- port logout or port unavailable. */ 891 if (mb[1] == 0xffff && mb[2] == 0x7) { 892 ql_dbg(ql_dbg_async, vha, 0x5010, 893 "Port unavailable %04x %04x %04x.\n", 894 mb[1], mb[2], mb[3]); 895 ql_log(ql_log_warn, vha, 0x505e, 896 "Link is offline.\n"); 897 898 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 899 atomic_set(&vha->loop_state, LOOP_DOWN); 900 atomic_set(&vha->loop_down_timer, 901 LOOP_DOWN_TIME); 902 vha->device_flags |= DFLG_NO_CABLE; 903 qla2x00_mark_all_devices_lost(vha, 1); 904 } 905 906 if (vha->vp_idx) { 907 atomic_set(&vha->vp_state, VP_FAILED); 908 fc_vport_set_state(vha->fc_vport, 909 FC_VPORT_FAILED); 910 qla2x00_mark_all_devices_lost(vha, 1); 911 } 912 913 vha->flags.management_server_logged_in = 0; 914 ha->link_data_rate = PORT_SPEED_UNKNOWN; 915 break; 916 } 917 918 /* 919 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 920 * event etc. earlier indicating loop is down) then process 921 * it. Otherwise ignore it and Wait for RSCN to come in. 922 */ 923 atomic_set(&vha->loop_down_timer, 0); 924 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 925 atomic_read(&vha->loop_state) != LOOP_DEAD) { 926 ql_dbg(ql_dbg_async, vha, 0x5011, 927 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 928 mb[1], mb[2], mb[3]); 929 930 qlt_async_event(mb[0], vha, mb); 931 break; 932 } 933 934 ql_dbg(ql_dbg_async, vha, 0x5012, 935 "Port database changed %04x %04x %04x.\n", 936 mb[1], mb[2], mb[3]); 937 938 /* 939 * Mark all devices as missing so we will login again. 940 */ 941 atomic_set(&vha->loop_state, LOOP_UP); 942 943 qla2x00_mark_all_devices_lost(vha, 1); 944 945 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) 946 set_bit(SCR_PENDING, &vha->dpc_flags); 947 948 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 949 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 950 951 qlt_async_event(mb[0], vha, mb); 952 break; 953 954 case MBA_RSCN_UPDATE: /* State Change Registration */ 955 /* Check if the Vport has issued a SCR */ 956 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 957 break; 958 /* Only handle SCNs for our Vport index. */ 959 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 960 break; 961 962 ql_dbg(ql_dbg_async, vha, 0x5013, 963 "RSCN database changed -- %04x %04x %04x.\n", 964 mb[1], mb[2], mb[3]); 965 966 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 967 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 968 | vha->d_id.b.al_pa; 969 if (rscn_entry == host_pid) { 970 ql_dbg(ql_dbg_async, vha, 0x5014, 971 "Ignoring RSCN update to local host " 972 "port ID (%06x).\n", host_pid); 973 break; 974 } 975 976 /* Ignore reserved bits from RSCN-payload. */ 977 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 978 979 /* Skip RSCNs for virtual ports on the same physical port */ 980 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 981 break; 982 983 /* 984 * Search for the rport related to this RSCN entry and mark it 985 * as lost. 986 */ 987 list_for_each_entry(fcport, &vha->vp_fcports, list) { 988 if (atomic_read(&fcport->state) != FCS_ONLINE) 989 continue; 990 tmp_pid = fcport->d_id.b24; 991 if (fcport->d_id.b24 == rscn_entry) { 992 qla2x00_mark_device_lost(vha, fcport, 0, 0); 993 break; 994 } 995 } 996 997 atomic_set(&vha->loop_down_timer, 0); 998 vha->flags.management_server_logged_in = 0; 999 1000 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1001 set_bit(RSCN_UPDATE, &vha->dpc_flags); 1002 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1003 break; 1004 1005 /* case MBA_RIO_RESPONSE: */ 1006 case MBA_ZIO_RESPONSE: 1007 ql_dbg(ql_dbg_async, vha, 0x5015, 1008 "[R|Z]IO update completion.\n"); 1009 1010 if (IS_FWI2_CAPABLE(ha)) 1011 qla24xx_process_response_queue(vha, rsp); 1012 else 1013 qla2x00_process_response_queue(rsp); 1014 break; 1015 1016 case MBA_DISCARD_RND_FRAME: 1017 ql_dbg(ql_dbg_async, vha, 0x5016, 1018 "Discard RND Frame -- %04x %04x %04x.\n", 1019 mb[1], mb[2], mb[3]); 1020 break; 1021 1022 case MBA_TRACE_NOTIFICATION: 1023 ql_dbg(ql_dbg_async, vha, 0x5017, 1024 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1025 break; 1026 1027 case MBA_ISP84XX_ALERT: 1028 ql_dbg(ql_dbg_async, vha, 0x5018, 1029 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1030 mb[1], mb[2], mb[3]); 1031 1032 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1033 switch (mb[1]) { 1034 case A84_PANIC_RECOVERY: 1035 ql_log(ql_log_info, vha, 0x5019, 1036 "Alert 84XX: panic recovery %04x %04x.\n", 1037 mb[2], mb[3]); 1038 break; 1039 case A84_OP_LOGIN_COMPLETE: 1040 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1041 ql_log(ql_log_info, vha, 0x501a, 1042 "Alert 84XX: firmware version %x.\n", 1043 ha->cs84xx->op_fw_version); 1044 break; 1045 case A84_DIAG_LOGIN_COMPLETE: 1046 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1047 ql_log(ql_log_info, vha, 0x501b, 1048 "Alert 84XX: diagnostic firmware version %x.\n", 1049 ha->cs84xx->diag_fw_version); 1050 break; 1051 case A84_GOLD_LOGIN_COMPLETE: 1052 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1053 ha->cs84xx->fw_update = 1; 1054 ql_log(ql_log_info, vha, 0x501c, 1055 "Alert 84XX: gold firmware version %x.\n", 1056 ha->cs84xx->gold_fw_version); 1057 break; 1058 default: 1059 ql_log(ql_log_warn, vha, 0x501d, 1060 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1061 mb[1], mb[2], mb[3]); 1062 } 1063 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1064 break; 1065 case MBA_DCBX_START: 1066 ql_dbg(ql_dbg_async, vha, 0x501e, 1067 "DCBX Started -- %04x %04x %04x.\n", 1068 mb[1], mb[2], mb[3]); 1069 break; 1070 case MBA_DCBX_PARAM_UPDATE: 1071 ql_dbg(ql_dbg_async, vha, 0x501f, 1072 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1073 mb[1], mb[2], mb[3]); 1074 break; 1075 case MBA_FCF_CONF_ERR: 1076 ql_dbg(ql_dbg_async, vha, 0x5020, 1077 "FCF Configuration Error -- %04x %04x %04x.\n", 1078 mb[1], mb[2], mb[3]); 1079 break; 1080 case MBA_IDC_NOTIFY: 1081 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1082 mb[4] = RD_REG_WORD(®24->mailbox4); 1083 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1084 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1085 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1086 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1087 /* 1088 * Extend loop down timer since port is active. 1089 */ 1090 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1091 atomic_set(&vha->loop_down_timer, 1092 LOOP_DOWN_TIME); 1093 qla2xxx_wake_dpc(vha); 1094 } 1095 } 1096 case MBA_IDC_COMPLETE: 1097 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1098 complete(&ha->lb_portup_comp); 1099 /* Fallthru */ 1100 case MBA_IDC_TIME_EXT: 1101 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1102 IS_QLA8044(ha)) 1103 qla81xx_idc_event(vha, mb[0], mb[1]); 1104 break; 1105 1106 case MBA_IDC_AEN: 1107 mb[4] = RD_REG_WORD(®24->mailbox4); 1108 mb[5] = RD_REG_WORD(®24->mailbox5); 1109 mb[6] = RD_REG_WORD(®24->mailbox6); 1110 mb[7] = RD_REG_WORD(®24->mailbox7); 1111 qla83xx_handle_8200_aen(vha, mb); 1112 break; 1113 1114 case MBA_DPORT_DIAGNOSTICS: 1115 ql_dbg(ql_dbg_async, vha, 0x5052, 1116 "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1], 1117 mb[1] == 0 ? "start" : 1118 mb[1] == 1 ? "done (ok)" : 1119 mb[1] == 2 ? "done (error)" : "other"); 1120 break; 1121 1122 default: 1123 ql_dbg(ql_dbg_async, vha, 0x5057, 1124 "Unknown AEN:%04x %04x %04x %04x\n", 1125 mb[0], mb[1], mb[2], mb[3]); 1126 } 1127 1128 qlt_async_event(mb[0], vha, mb); 1129 1130 if (!vha->vp_idx && ha->num_vhosts) 1131 qla2x00_alert_all_vps(rsp, mb); 1132 } 1133 1134 /** 1135 * qla2x00_process_completed_request() - Process a Fast Post response. 1136 * @ha: SCSI driver HA context 1137 * @index: SRB index 1138 */ 1139 void 1140 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1141 struct req_que *req, uint32_t index) 1142 { 1143 srb_t *sp; 1144 struct qla_hw_data *ha = vha->hw; 1145 1146 /* Validate handle. */ 1147 if (index >= req->num_outstanding_cmds) { 1148 ql_log(ql_log_warn, vha, 0x3014, 1149 "Invalid SCSI command index (%x).\n", index); 1150 1151 if (IS_P3P_TYPE(ha)) 1152 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1153 else 1154 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1155 return; 1156 } 1157 1158 sp = req->outstanding_cmds[index]; 1159 if (sp) { 1160 /* Free outstanding command slot. */ 1161 req->outstanding_cmds[index] = NULL; 1162 1163 /* Save ISP completion status */ 1164 sp->done(ha, sp, DID_OK << 16); 1165 } else { 1166 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1167 1168 if (IS_P3P_TYPE(ha)) 1169 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1170 else 1171 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1172 } 1173 } 1174 1175 srb_t * 1176 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1177 struct req_que *req, void *iocb) 1178 { 1179 struct qla_hw_data *ha = vha->hw; 1180 sts_entry_t *pkt = iocb; 1181 srb_t *sp = NULL; 1182 uint16_t index; 1183 1184 index = LSW(pkt->handle); 1185 if (index >= req->num_outstanding_cmds) { 1186 ql_log(ql_log_warn, vha, 0x5031, 1187 "Invalid command index (%x).\n", index); 1188 if (IS_P3P_TYPE(ha)) 1189 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1190 else 1191 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1192 goto done; 1193 } 1194 sp = req->outstanding_cmds[index]; 1195 if (!sp) { 1196 ql_log(ql_log_warn, vha, 0x5032, 1197 "Invalid completion handle (%x) -- timed-out.\n", index); 1198 return sp; 1199 } 1200 if (sp->handle != index) { 1201 ql_log(ql_log_warn, vha, 0x5033, 1202 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 1203 return NULL; 1204 } 1205 1206 req->outstanding_cmds[index] = NULL; 1207 1208 done: 1209 return sp; 1210 } 1211 1212 static void 1213 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1214 struct mbx_entry *mbx) 1215 { 1216 const char func[] = "MBX-IOCB"; 1217 const char *type; 1218 fc_port_t *fcport; 1219 srb_t *sp; 1220 struct srb_iocb *lio; 1221 uint16_t *data; 1222 uint16_t status; 1223 1224 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1225 if (!sp) 1226 return; 1227 1228 lio = &sp->u.iocb_cmd; 1229 type = sp->name; 1230 fcport = sp->fcport; 1231 data = lio->u.logio.data; 1232 1233 data[0] = MBS_COMMAND_ERROR; 1234 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1235 QLA_LOGIO_LOGIN_RETRIED : 0; 1236 if (mbx->entry_status) { 1237 ql_dbg(ql_dbg_async, vha, 0x5043, 1238 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1239 "entry-status=%x status=%x state-flag=%x " 1240 "status-flags=%x.\n", type, sp->handle, 1241 fcport->d_id.b.domain, fcport->d_id.b.area, 1242 fcport->d_id.b.al_pa, mbx->entry_status, 1243 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1244 le16_to_cpu(mbx->status_flags)); 1245 1246 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1247 (uint8_t *)mbx, sizeof(*mbx)); 1248 1249 goto logio_done; 1250 } 1251 1252 status = le16_to_cpu(mbx->status); 1253 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1254 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1255 status = 0; 1256 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1257 ql_dbg(ql_dbg_async, vha, 0x5045, 1258 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1259 type, sp->handle, fcport->d_id.b.domain, 1260 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1261 le16_to_cpu(mbx->mb1)); 1262 1263 data[0] = MBS_COMMAND_COMPLETE; 1264 if (sp->type == SRB_LOGIN_CMD) { 1265 fcport->port_type = FCT_TARGET; 1266 if (le16_to_cpu(mbx->mb1) & BIT_0) 1267 fcport->port_type = FCT_INITIATOR; 1268 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1269 fcport->flags |= FCF_FCP2_DEVICE; 1270 } 1271 goto logio_done; 1272 } 1273 1274 data[0] = le16_to_cpu(mbx->mb0); 1275 switch (data[0]) { 1276 case MBS_PORT_ID_USED: 1277 data[1] = le16_to_cpu(mbx->mb1); 1278 break; 1279 case MBS_LOOP_ID_USED: 1280 break; 1281 default: 1282 data[0] = MBS_COMMAND_ERROR; 1283 break; 1284 } 1285 1286 ql_log(ql_log_warn, vha, 0x5046, 1287 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1288 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1289 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1290 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1291 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1292 le16_to_cpu(mbx->mb7)); 1293 1294 logio_done: 1295 sp->done(vha, sp, 0); 1296 } 1297 1298 static void 1299 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1300 sts_entry_t *pkt, int iocb_type) 1301 { 1302 const char func[] = "CT_IOCB"; 1303 const char *type; 1304 srb_t *sp; 1305 struct fc_bsg_job *bsg_job; 1306 uint16_t comp_status; 1307 int res; 1308 1309 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1310 if (!sp) 1311 return; 1312 1313 bsg_job = sp->u.bsg_job; 1314 1315 type = "ct pass-through"; 1316 1317 comp_status = le16_to_cpu(pkt->comp_status); 1318 1319 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1320 * fc payload to the caller 1321 */ 1322 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1323 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1324 1325 if (comp_status != CS_COMPLETE) { 1326 if (comp_status == CS_DATA_UNDERRUN) { 1327 res = DID_OK << 16; 1328 bsg_job->reply->reply_payload_rcv_len = 1329 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1330 1331 ql_log(ql_log_warn, vha, 0x5048, 1332 "CT pass-through-%s error " 1333 "comp_status-status=0x%x total_byte = 0x%x.\n", 1334 type, comp_status, 1335 bsg_job->reply->reply_payload_rcv_len); 1336 } else { 1337 ql_log(ql_log_warn, vha, 0x5049, 1338 "CT pass-through-%s error " 1339 "comp_status-status=0x%x.\n", type, comp_status); 1340 res = DID_ERROR << 16; 1341 bsg_job->reply->reply_payload_rcv_len = 0; 1342 } 1343 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1344 (uint8_t *)pkt, sizeof(*pkt)); 1345 } else { 1346 res = DID_OK << 16; 1347 bsg_job->reply->reply_payload_rcv_len = 1348 bsg_job->reply_payload.payload_len; 1349 bsg_job->reply_len = 0; 1350 } 1351 1352 sp->done(vha, sp, res); 1353 } 1354 1355 static void 1356 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1357 struct sts_entry_24xx *pkt, int iocb_type) 1358 { 1359 const char func[] = "ELS_CT_IOCB"; 1360 const char *type; 1361 srb_t *sp; 1362 struct fc_bsg_job *bsg_job; 1363 uint16_t comp_status; 1364 uint32_t fw_status[3]; 1365 uint8_t* fw_sts_ptr; 1366 int res; 1367 1368 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1369 if (!sp) 1370 return; 1371 bsg_job = sp->u.bsg_job; 1372 1373 type = NULL; 1374 switch (sp->type) { 1375 case SRB_ELS_CMD_RPT: 1376 case SRB_ELS_CMD_HST: 1377 type = "els"; 1378 break; 1379 case SRB_CT_CMD: 1380 type = "ct pass-through"; 1381 break; 1382 default: 1383 ql_dbg(ql_dbg_user, vha, 0x503e, 1384 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 1385 return; 1386 } 1387 1388 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1389 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1390 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1391 1392 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1393 * fc payload to the caller 1394 */ 1395 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1396 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1397 1398 if (comp_status != CS_COMPLETE) { 1399 if (comp_status == CS_DATA_UNDERRUN) { 1400 res = DID_OK << 16; 1401 bsg_job->reply->reply_payload_rcv_len = 1402 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); 1403 1404 ql_dbg(ql_dbg_user, vha, 0x503f, 1405 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1406 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1407 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1408 le16_to_cpu(((struct els_sts_entry_24xx *) 1409 pkt)->total_byte_count)); 1410 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1411 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1412 } 1413 else { 1414 ql_dbg(ql_dbg_user, vha, 0x5040, 1415 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1416 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1417 type, sp->handle, comp_status, 1418 le16_to_cpu(((struct els_sts_entry_24xx *) 1419 pkt)->error_subcode_1), 1420 le16_to_cpu(((struct els_sts_entry_24xx *) 1421 pkt)->error_subcode_2)); 1422 res = DID_ERROR << 16; 1423 bsg_job->reply->reply_payload_rcv_len = 0; 1424 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1425 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1426 } 1427 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 1428 (uint8_t *)pkt, sizeof(*pkt)); 1429 } 1430 else { 1431 res = DID_OK << 16; 1432 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1433 bsg_job->reply_len = 0; 1434 } 1435 1436 sp->done(vha, sp, res); 1437 } 1438 1439 static void 1440 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1441 struct logio_entry_24xx *logio) 1442 { 1443 const char func[] = "LOGIO-IOCB"; 1444 const char *type; 1445 fc_port_t *fcport; 1446 srb_t *sp; 1447 struct srb_iocb *lio; 1448 uint16_t *data; 1449 uint32_t iop[2]; 1450 1451 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1452 if (!sp) 1453 return; 1454 1455 lio = &sp->u.iocb_cmd; 1456 type = sp->name; 1457 fcport = sp->fcport; 1458 data = lio->u.logio.data; 1459 1460 data[0] = MBS_COMMAND_ERROR; 1461 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1462 QLA_LOGIO_LOGIN_RETRIED : 0; 1463 if (logio->entry_status) { 1464 ql_log(ql_log_warn, fcport->vha, 0x5034, 1465 "Async-%s error entry - hdl=%x" 1466 "portid=%02x%02x%02x entry-status=%x.\n", 1467 type, sp->handle, fcport->d_id.b.domain, 1468 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1469 logio->entry_status); 1470 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 1471 (uint8_t *)logio, sizeof(*logio)); 1472 1473 goto logio_done; 1474 } 1475 1476 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1477 ql_dbg(ql_dbg_async, fcport->vha, 0x5036, 1478 "Async-%s complete - hdl=%x portid=%02x%02x%02x " 1479 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1480 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1481 le32_to_cpu(logio->io_parameter[0])); 1482 1483 data[0] = MBS_COMMAND_COMPLETE; 1484 if (sp->type != SRB_LOGIN_CMD) 1485 goto logio_done; 1486 1487 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1488 if (iop[0] & BIT_4) { 1489 fcport->port_type = FCT_TARGET; 1490 if (iop[0] & BIT_8) 1491 fcport->flags |= FCF_FCP2_DEVICE; 1492 } else if (iop[0] & BIT_5) 1493 fcport->port_type = FCT_INITIATOR; 1494 1495 if (iop[0] & BIT_7) 1496 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1497 1498 if (logio->io_parameter[7] || logio->io_parameter[8]) 1499 fcport->supported_classes |= FC_COS_CLASS2; 1500 if (logio->io_parameter[9] || logio->io_parameter[10]) 1501 fcport->supported_classes |= FC_COS_CLASS3; 1502 1503 goto logio_done; 1504 } 1505 1506 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1507 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1508 switch (iop[0]) { 1509 case LSC_SCODE_PORTID_USED: 1510 data[0] = MBS_PORT_ID_USED; 1511 data[1] = LSW(iop[1]); 1512 break; 1513 case LSC_SCODE_NPORT_USED: 1514 data[0] = MBS_LOOP_ID_USED; 1515 break; 1516 default: 1517 data[0] = MBS_COMMAND_ERROR; 1518 break; 1519 } 1520 1521 ql_dbg(ql_dbg_async, fcport->vha, 0x5037, 1522 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " 1523 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1524 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1525 le16_to_cpu(logio->comp_status), 1526 le32_to_cpu(logio->io_parameter[0]), 1527 le32_to_cpu(logio->io_parameter[1])); 1528 1529 logio_done: 1530 sp->done(vha, sp, 0); 1531 } 1532 1533 static void 1534 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 1535 { 1536 const char func[] = "TMF-IOCB"; 1537 const char *type; 1538 fc_port_t *fcport; 1539 srb_t *sp; 1540 struct srb_iocb *iocb; 1541 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1542 1543 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1544 if (!sp) 1545 return; 1546 1547 iocb = &sp->u.iocb_cmd; 1548 type = sp->name; 1549 fcport = sp->fcport; 1550 iocb->u.tmf.data = QLA_SUCCESS; 1551 1552 if (sts->entry_status) { 1553 ql_log(ql_log_warn, fcport->vha, 0x5038, 1554 "Async-%s error - hdl=%x entry-status(%x).\n", 1555 type, sp->handle, sts->entry_status); 1556 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1557 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1558 ql_log(ql_log_warn, fcport->vha, 0x5039, 1559 "Async-%s error - hdl=%x completion status(%x).\n", 1560 type, sp->handle, sts->comp_status); 1561 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1562 } else if ((le16_to_cpu(sts->scsi_status) & 1563 SS_RESPONSE_INFO_LEN_VALID)) { 1564 if (le32_to_cpu(sts->rsp_data_len) < 4) { 1565 ql_log(ql_log_warn, fcport->vha, 0x503b, 1566 "Async-%s error - hdl=%x not enough response(%d).\n", 1567 type, sp->handle, sts->rsp_data_len); 1568 } else if (sts->data[3]) { 1569 ql_log(ql_log_warn, fcport->vha, 0x503c, 1570 "Async-%s error - hdl=%x response(%x).\n", 1571 type, sp->handle, sts->data[3]); 1572 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1573 } 1574 } 1575 1576 if (iocb->u.tmf.data != QLA_SUCCESS) 1577 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1578 (uint8_t *)sts, sizeof(*sts)); 1579 1580 sp->done(vha, sp, 0); 1581 } 1582 1583 /** 1584 * qla2x00_process_response_queue() - Process response queue entries. 1585 * @ha: SCSI driver HA context 1586 */ 1587 void 1588 qla2x00_process_response_queue(struct rsp_que *rsp) 1589 { 1590 struct scsi_qla_host *vha; 1591 struct qla_hw_data *ha = rsp->hw; 1592 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1593 sts_entry_t *pkt; 1594 uint16_t handle_cnt; 1595 uint16_t cnt; 1596 1597 vha = pci_get_drvdata(ha->pdev); 1598 1599 if (!vha->flags.online) 1600 return; 1601 1602 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1603 pkt = (sts_entry_t *)rsp->ring_ptr; 1604 1605 rsp->ring_index++; 1606 if (rsp->ring_index == rsp->length) { 1607 rsp->ring_index = 0; 1608 rsp->ring_ptr = rsp->ring; 1609 } else { 1610 rsp->ring_ptr++; 1611 } 1612 1613 if (pkt->entry_status != 0) { 1614 qla2x00_error_entry(vha, rsp, pkt); 1615 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1616 wmb(); 1617 continue; 1618 } 1619 1620 switch (pkt->entry_type) { 1621 case STATUS_TYPE: 1622 qla2x00_status_entry(vha, rsp, pkt); 1623 break; 1624 case STATUS_TYPE_21: 1625 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1626 for (cnt = 0; cnt < handle_cnt; cnt++) { 1627 qla2x00_process_completed_request(vha, rsp->req, 1628 ((sts21_entry_t *)pkt)->handle[cnt]); 1629 } 1630 break; 1631 case STATUS_TYPE_22: 1632 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1633 for (cnt = 0; cnt < handle_cnt; cnt++) { 1634 qla2x00_process_completed_request(vha, rsp->req, 1635 ((sts22_entry_t *)pkt)->handle[cnt]); 1636 } 1637 break; 1638 case STATUS_CONT_TYPE: 1639 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1640 break; 1641 case MBX_IOCB_TYPE: 1642 qla2x00_mbx_iocb_entry(vha, rsp->req, 1643 (struct mbx_entry *)pkt); 1644 break; 1645 case CT_IOCB_TYPE: 1646 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1647 break; 1648 default: 1649 /* Type Not Supported. */ 1650 ql_log(ql_log_warn, vha, 0x504a, 1651 "Received unknown response pkt type %x " 1652 "entry status=%x.\n", 1653 pkt->entry_type, pkt->entry_status); 1654 break; 1655 } 1656 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1657 wmb(); 1658 } 1659 1660 /* Adjust ring index */ 1661 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 1662 } 1663 1664 static inline void 1665 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1666 uint32_t sense_len, struct rsp_que *rsp, int res) 1667 { 1668 struct scsi_qla_host *vha = sp->fcport->vha; 1669 struct scsi_cmnd *cp = GET_CMD_SP(sp); 1670 uint32_t track_sense_len; 1671 1672 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1673 sense_len = SCSI_SENSE_BUFFERSIZE; 1674 1675 SET_CMD_SENSE_LEN(sp, sense_len); 1676 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 1677 track_sense_len = sense_len; 1678 1679 if (sense_len > par_sense_len) 1680 sense_len = par_sense_len; 1681 1682 memcpy(cp->sense_buffer, sense_data, sense_len); 1683 1684 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 1685 track_sense_len -= sense_len; 1686 SET_CMD_SENSE_LEN(sp, track_sense_len); 1687 1688 if (track_sense_len != 0) { 1689 rsp->status_srb = sp; 1690 cp->result = res; 1691 } 1692 1693 if (sense_len) { 1694 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 1695 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 1696 sp->fcport->vha->host_no, cp->device->id, cp->device->lun, 1697 cp); 1698 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1699 cp->sense_buffer, sense_len); 1700 } 1701 } 1702 1703 struct scsi_dif_tuple { 1704 __be16 guard; /* Checksum */ 1705 __be16 app_tag; /* APPL identifier */ 1706 __be32 ref_tag; /* Target LBA or indirect LBA */ 1707 }; 1708 1709 /* 1710 * Checks the guard or meta-data for the type of error 1711 * detected by the HBA. In case of errors, we set the 1712 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1713 * to indicate to the kernel that the HBA detected error. 1714 */ 1715 static inline int 1716 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1717 { 1718 struct scsi_qla_host *vha = sp->fcport->vha; 1719 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1720 uint8_t *ap = &sts24->data[12]; 1721 uint8_t *ep = &sts24->data[20]; 1722 uint32_t e_ref_tag, a_ref_tag; 1723 uint16_t e_app_tag, a_app_tag; 1724 uint16_t e_guard, a_guard; 1725 1726 /* 1727 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1728 * would make guard field appear at offset 2 1729 */ 1730 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1731 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1732 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1733 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1734 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1735 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1736 1737 ql_dbg(ql_dbg_io, vha, 0x3023, 1738 "iocb(s) %p Returned STATUS.\n", sts24); 1739 1740 ql_dbg(ql_dbg_io, vha, 0x3024, 1741 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1742 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1743 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 1744 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1745 a_app_tag, e_app_tag, a_guard, e_guard); 1746 1747 /* 1748 * Ignore sector if: 1749 * For type 3: ref & app tag is all 'f's 1750 * For type 0,1,2: app tag is all 'f's 1751 */ 1752 if ((a_app_tag == 0xffff) && 1753 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1754 (a_ref_tag == 0xffffffff))) { 1755 uint32_t blocks_done, resid; 1756 sector_t lba_s = scsi_get_lba(cmd); 1757 1758 /* 2TB boundary case covered automatically with this */ 1759 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1760 1761 resid = scsi_bufflen(cmd) - (blocks_done * 1762 cmd->device->sector_size); 1763 1764 scsi_set_resid(cmd, resid); 1765 cmd->result = DID_OK << 16; 1766 1767 /* Update protection tag */ 1768 if (scsi_prot_sg_count(cmd)) { 1769 uint32_t i, j = 0, k = 0, num_ent; 1770 struct scatterlist *sg; 1771 struct sd_dif_tuple *spt; 1772 1773 /* Patch the corresponding protection tags */ 1774 scsi_for_each_prot_sg(cmd, sg, 1775 scsi_prot_sg_count(cmd), i) { 1776 num_ent = sg_dma_len(sg) / 8; 1777 if (k + num_ent < blocks_done) { 1778 k += num_ent; 1779 continue; 1780 } 1781 j = blocks_done - k - 1; 1782 k = blocks_done; 1783 break; 1784 } 1785 1786 if (k != blocks_done) { 1787 ql_log(ql_log_warn, vha, 0x302f, 1788 "unexpected tag values tag:lba=%x:%llx)\n", 1789 e_ref_tag, (unsigned long long)lba_s); 1790 return 1; 1791 } 1792 1793 spt = page_address(sg_page(sg)) + sg->offset; 1794 spt += j; 1795 1796 spt->app_tag = 0xffff; 1797 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1798 spt->ref_tag = 0xffffffff; 1799 } 1800 1801 return 0; 1802 } 1803 1804 /* check guard */ 1805 if (e_guard != a_guard) { 1806 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1807 0x10, 0x1); 1808 set_driver_byte(cmd, DRIVER_SENSE); 1809 set_host_byte(cmd, DID_ABORT); 1810 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1811 return 1; 1812 } 1813 1814 /* check ref tag */ 1815 if (e_ref_tag != a_ref_tag) { 1816 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1817 0x10, 0x3); 1818 set_driver_byte(cmd, DRIVER_SENSE); 1819 set_host_byte(cmd, DID_ABORT); 1820 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1821 return 1; 1822 } 1823 1824 /* check appl tag */ 1825 if (e_app_tag != a_app_tag) { 1826 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1827 0x10, 0x2); 1828 set_driver_byte(cmd, DRIVER_SENSE); 1829 set_host_byte(cmd, DID_ABORT); 1830 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1831 return 1; 1832 } 1833 1834 return 1; 1835 } 1836 1837 static void 1838 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 1839 struct req_que *req, uint32_t index) 1840 { 1841 struct qla_hw_data *ha = vha->hw; 1842 srb_t *sp; 1843 uint16_t comp_status; 1844 uint16_t scsi_status; 1845 uint16_t thread_id; 1846 uint32_t rval = EXT_STATUS_OK; 1847 struct fc_bsg_job *bsg_job = NULL; 1848 sts_entry_t *sts; 1849 struct sts_entry_24xx *sts24; 1850 sts = (sts_entry_t *) pkt; 1851 sts24 = (struct sts_entry_24xx *) pkt; 1852 1853 /* Validate handle. */ 1854 if (index >= req->num_outstanding_cmds) { 1855 ql_log(ql_log_warn, vha, 0x70af, 1856 "Invalid SCSI completion handle 0x%x.\n", index); 1857 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1858 return; 1859 } 1860 1861 sp = req->outstanding_cmds[index]; 1862 if (sp) { 1863 /* Free outstanding command slot. */ 1864 req->outstanding_cmds[index] = NULL; 1865 bsg_job = sp->u.bsg_job; 1866 } else { 1867 ql_log(ql_log_warn, vha, 0x70b0, 1868 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 1869 req->id, index); 1870 1871 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1872 return; 1873 } 1874 1875 if (IS_FWI2_CAPABLE(ha)) { 1876 comp_status = le16_to_cpu(sts24->comp_status); 1877 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1878 } else { 1879 comp_status = le16_to_cpu(sts->comp_status); 1880 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1881 } 1882 1883 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1884 switch (comp_status) { 1885 case CS_COMPLETE: 1886 if (scsi_status == 0) { 1887 bsg_job->reply->reply_payload_rcv_len = 1888 bsg_job->reply_payload.payload_len; 1889 vha->qla_stats.input_bytes += 1890 bsg_job->reply->reply_payload_rcv_len; 1891 vha->qla_stats.input_requests++; 1892 rval = EXT_STATUS_OK; 1893 } 1894 goto done; 1895 1896 case CS_DATA_OVERRUN: 1897 ql_dbg(ql_dbg_user, vha, 0x70b1, 1898 "Command completed with date overrun thread_id=%d\n", 1899 thread_id); 1900 rval = EXT_STATUS_DATA_OVERRUN; 1901 break; 1902 1903 case CS_DATA_UNDERRUN: 1904 ql_dbg(ql_dbg_user, vha, 0x70b2, 1905 "Command completed with date underrun thread_id=%d\n", 1906 thread_id); 1907 rval = EXT_STATUS_DATA_UNDERRUN; 1908 break; 1909 case CS_BIDIR_RD_OVERRUN: 1910 ql_dbg(ql_dbg_user, vha, 0x70b3, 1911 "Command completed with read data overrun thread_id=%d\n", 1912 thread_id); 1913 rval = EXT_STATUS_DATA_OVERRUN; 1914 break; 1915 1916 case CS_BIDIR_RD_WR_OVERRUN: 1917 ql_dbg(ql_dbg_user, vha, 0x70b4, 1918 "Command completed with read and write data overrun " 1919 "thread_id=%d\n", thread_id); 1920 rval = EXT_STATUS_DATA_OVERRUN; 1921 break; 1922 1923 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 1924 ql_dbg(ql_dbg_user, vha, 0x70b5, 1925 "Command completed with read data over and write data " 1926 "underrun thread_id=%d\n", thread_id); 1927 rval = EXT_STATUS_DATA_OVERRUN; 1928 break; 1929 1930 case CS_BIDIR_RD_UNDERRUN: 1931 ql_dbg(ql_dbg_user, vha, 0x70b6, 1932 "Command completed with read data data underrun " 1933 "thread_id=%d\n", thread_id); 1934 rval = EXT_STATUS_DATA_UNDERRUN; 1935 break; 1936 1937 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 1938 ql_dbg(ql_dbg_user, vha, 0x70b7, 1939 "Command completed with read data under and write data " 1940 "overrun thread_id=%d\n", thread_id); 1941 rval = EXT_STATUS_DATA_UNDERRUN; 1942 break; 1943 1944 case CS_BIDIR_RD_WR_UNDERRUN: 1945 ql_dbg(ql_dbg_user, vha, 0x70b8, 1946 "Command completed with read and write data underrun " 1947 "thread_id=%d\n", thread_id); 1948 rval = EXT_STATUS_DATA_UNDERRUN; 1949 break; 1950 1951 case CS_BIDIR_DMA: 1952 ql_dbg(ql_dbg_user, vha, 0x70b9, 1953 "Command completed with data DMA error thread_id=%d\n", 1954 thread_id); 1955 rval = EXT_STATUS_DMA_ERR; 1956 break; 1957 1958 case CS_TIMEOUT: 1959 ql_dbg(ql_dbg_user, vha, 0x70ba, 1960 "Command completed with timeout thread_id=%d\n", 1961 thread_id); 1962 rval = EXT_STATUS_TIMEOUT; 1963 break; 1964 default: 1965 ql_dbg(ql_dbg_user, vha, 0x70bb, 1966 "Command completed with completion status=0x%x " 1967 "thread_id=%d\n", comp_status, thread_id); 1968 rval = EXT_STATUS_ERR; 1969 break; 1970 } 1971 bsg_job->reply->reply_payload_rcv_len = 0; 1972 1973 done: 1974 /* Return the vendor specific reply to API */ 1975 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1976 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1977 /* Always return DID_OK, bsg will send the vendor specific response 1978 * in this case only */ 1979 sp->done(vha, sp, (DID_OK << 6)); 1980 1981 } 1982 1983 /** 1984 * qla2x00_status_entry() - Process a Status IOCB entry. 1985 * @ha: SCSI driver HA context 1986 * @pkt: Entry pointer 1987 */ 1988 static void 1989 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 1990 { 1991 srb_t *sp; 1992 fc_port_t *fcport; 1993 struct scsi_cmnd *cp; 1994 sts_entry_t *sts; 1995 struct sts_entry_24xx *sts24; 1996 uint16_t comp_status; 1997 uint16_t scsi_status; 1998 uint16_t ox_id; 1999 uint8_t lscsi_status; 2000 int32_t resid; 2001 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2002 fw_resid_len; 2003 uint8_t *rsp_info, *sense_data; 2004 struct qla_hw_data *ha = vha->hw; 2005 uint32_t handle; 2006 uint16_t que; 2007 struct req_que *req; 2008 int logit = 1; 2009 int res = 0; 2010 uint16_t state_flags = 0; 2011 uint16_t retry_delay = 0; 2012 2013 sts = (sts_entry_t *) pkt; 2014 sts24 = (struct sts_entry_24xx *) pkt; 2015 if (IS_FWI2_CAPABLE(ha)) { 2016 comp_status = le16_to_cpu(sts24->comp_status); 2017 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2018 state_flags = le16_to_cpu(sts24->state_flags); 2019 } else { 2020 comp_status = le16_to_cpu(sts->comp_status); 2021 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2022 } 2023 handle = (uint32_t) LSW(sts->handle); 2024 que = MSW(sts->handle); 2025 req = ha->req_q_map[que]; 2026 2027 /* Check for invalid queue pointer */ 2028 if (req == NULL || 2029 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2030 ql_dbg(ql_dbg_io, vha, 0x3059, 2031 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2032 "que=%u.\n", sts->handle, req, que); 2033 return; 2034 } 2035 2036 /* Validate handle. */ 2037 if (handle < req->num_outstanding_cmds) 2038 sp = req->outstanding_cmds[handle]; 2039 else 2040 sp = NULL; 2041 2042 if (sp == NULL) { 2043 ql_dbg(ql_dbg_io, vha, 0x3017, 2044 "Invalid status handle (0x%x).\n", sts->handle); 2045 2046 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2047 if (IS_P3P_TYPE(ha)) 2048 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2049 else 2050 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2051 qla2xxx_wake_dpc(vha); 2052 } 2053 return; 2054 } 2055 2056 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2057 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2058 return; 2059 } 2060 2061 /* Task Management completion. */ 2062 if (sp->type == SRB_TM_CMD) { 2063 qla24xx_tm_iocb_entry(vha, req, pkt); 2064 return; 2065 } 2066 2067 /* Fast path completion. */ 2068 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2069 qla2x00_process_completed_request(vha, req, handle); 2070 2071 return; 2072 } 2073 2074 req->outstanding_cmds[handle] = NULL; 2075 cp = GET_CMD_SP(sp); 2076 if (cp == NULL) { 2077 ql_dbg(ql_dbg_io, vha, 0x3018, 2078 "Command already returned (0x%x/%p).\n", 2079 sts->handle, sp); 2080 2081 return; 2082 } 2083 2084 lscsi_status = scsi_status & STATUS_MASK; 2085 2086 fcport = sp->fcport; 2087 2088 ox_id = 0; 2089 sense_len = par_sense_len = rsp_info_len = resid_len = 2090 fw_resid_len = 0; 2091 if (IS_FWI2_CAPABLE(ha)) { 2092 if (scsi_status & SS_SENSE_LEN_VALID) 2093 sense_len = le32_to_cpu(sts24->sense_len); 2094 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2095 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2096 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2097 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2098 if (comp_status == CS_DATA_UNDERRUN) 2099 fw_resid_len = le32_to_cpu(sts24->residual_len); 2100 rsp_info = sts24->data; 2101 sense_data = sts24->data; 2102 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2103 ox_id = le16_to_cpu(sts24->ox_id); 2104 par_sense_len = sizeof(sts24->data); 2105 /* Valid values of the retry delay timer are 0x1-0xffef */ 2106 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) 2107 retry_delay = sts24->retry_delay; 2108 } else { 2109 if (scsi_status & SS_SENSE_LEN_VALID) 2110 sense_len = le16_to_cpu(sts->req_sense_length); 2111 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2112 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2113 resid_len = le32_to_cpu(sts->residual_length); 2114 rsp_info = sts->rsp_info; 2115 sense_data = sts->req_sense_data; 2116 par_sense_len = sizeof(sts->req_sense_data); 2117 } 2118 2119 /* Check for any FCP transport errors. */ 2120 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2121 /* Sense data lies beyond any FCP RESPONSE data. */ 2122 if (IS_FWI2_CAPABLE(ha)) { 2123 sense_data += rsp_info_len; 2124 par_sense_len -= rsp_info_len; 2125 } 2126 if (rsp_info_len > 3 && rsp_info[3]) { 2127 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2128 "FCP I/O protocol failure (0x%x/0x%x).\n", 2129 rsp_info_len, rsp_info[3]); 2130 2131 res = DID_BUS_BUSY << 16; 2132 goto out; 2133 } 2134 } 2135 2136 /* Check for overrun. */ 2137 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 2138 scsi_status & SS_RESIDUAL_OVER) 2139 comp_status = CS_DATA_OVERRUN; 2140 2141 /* 2142 * Check retry_delay_timer value if we receive a busy or 2143 * queue full. 2144 */ 2145 if (lscsi_status == SAM_STAT_TASK_SET_FULL || 2146 lscsi_status == SAM_STAT_BUSY) 2147 qla2x00_set_retry_delay_timestamp(fcport, retry_delay); 2148 2149 /* 2150 * Based on Host and scsi status generate status code for Linux 2151 */ 2152 switch (comp_status) { 2153 case CS_COMPLETE: 2154 case CS_QUEUE_FULL: 2155 if (scsi_status == 0) { 2156 res = DID_OK << 16; 2157 break; 2158 } 2159 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 2160 resid = resid_len; 2161 scsi_set_resid(cp, resid); 2162 2163 if (!lscsi_status && 2164 ((unsigned)(scsi_bufflen(cp) - resid) < 2165 cp->underflow)) { 2166 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 2167 "Mid-layer underflow " 2168 "detected (0x%x of 0x%x bytes).\n", 2169 resid, scsi_bufflen(cp)); 2170 2171 res = DID_ERROR << 16; 2172 break; 2173 } 2174 } 2175 res = DID_OK << 16 | lscsi_status; 2176 2177 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2178 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 2179 "QUEUE FULL detected.\n"); 2180 break; 2181 } 2182 logit = 0; 2183 if (lscsi_status != SS_CHECK_CONDITION) 2184 break; 2185 2186 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2187 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2188 break; 2189 2190 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2191 rsp, res); 2192 break; 2193 2194 case CS_DATA_UNDERRUN: 2195 /* Use F/W calculated residual length. */ 2196 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 2197 scsi_set_resid(cp, resid); 2198 if (scsi_status & SS_RESIDUAL_UNDER) { 2199 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 2200 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 2201 "Dropped frame(s) detected " 2202 "(0x%x of 0x%x bytes).\n", 2203 resid, scsi_bufflen(cp)); 2204 2205 res = DID_ERROR << 16 | lscsi_status; 2206 goto check_scsi_status; 2207 } 2208 2209 if (!lscsi_status && 2210 ((unsigned)(scsi_bufflen(cp) - resid) < 2211 cp->underflow)) { 2212 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 2213 "Mid-layer underflow " 2214 "detected (0x%x of 0x%x bytes).\n", 2215 resid, scsi_bufflen(cp)); 2216 2217 res = DID_ERROR << 16; 2218 break; 2219 } 2220 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 2221 lscsi_status != SAM_STAT_BUSY) { 2222 /* 2223 * scsi status of task set and busy are considered to be 2224 * task not completed. 2225 */ 2226 2227 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 2228 "Dropped frame(s) detected (0x%x " 2229 "of 0x%x bytes).\n", resid, 2230 scsi_bufflen(cp)); 2231 2232 res = DID_ERROR << 16 | lscsi_status; 2233 goto check_scsi_status; 2234 } else { 2235 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 2236 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2237 scsi_status, lscsi_status); 2238 } 2239 2240 res = DID_OK << 16 | lscsi_status; 2241 logit = 0; 2242 2243 check_scsi_status: 2244 /* 2245 * Check to see if SCSI Status is non zero. If so report SCSI 2246 * Status. 2247 */ 2248 if (lscsi_status != 0) { 2249 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2250 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 2251 "QUEUE FULL detected.\n"); 2252 logit = 1; 2253 break; 2254 } 2255 if (lscsi_status != SS_CHECK_CONDITION) 2256 break; 2257 2258 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2259 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2260 break; 2261 2262 qla2x00_handle_sense(sp, sense_data, par_sense_len, 2263 sense_len, rsp, res); 2264 } 2265 break; 2266 2267 case CS_PORT_LOGGED_OUT: 2268 case CS_PORT_CONFIG_CHG: 2269 case CS_PORT_BUSY: 2270 case CS_INCOMPLETE: 2271 case CS_PORT_UNAVAILABLE: 2272 case CS_TIMEOUT: 2273 case CS_RESET: 2274 2275 /* 2276 * We are going to have the fc class block the rport 2277 * while we try to recover so instruct the mid layer 2278 * to requeue until the class decides how to handle this. 2279 */ 2280 res = DID_TRANSPORT_DISRUPTED << 16; 2281 2282 if (comp_status == CS_TIMEOUT) { 2283 if (IS_FWI2_CAPABLE(ha)) 2284 break; 2285 else if ((le16_to_cpu(sts->status_flags) & 2286 SF_LOGOUT_SENT) == 0) 2287 break; 2288 } 2289 2290 ql_dbg(ql_dbg_io, fcport->vha, 0x3021, 2291 "Port to be marked lost on fcport=%02x%02x%02x, current " 2292 "port state= %s.\n", fcport->d_id.b.domain, 2293 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2294 port_state_str[atomic_read(&fcport->state)]); 2295 2296 if (atomic_read(&fcport->state) == FCS_ONLINE) 2297 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2298 break; 2299 2300 case CS_ABORTED: 2301 res = DID_RESET << 16; 2302 break; 2303 2304 case CS_DIF_ERROR: 2305 logit = qla2x00_handle_dif_error(sp, sts24); 2306 res = cp->result; 2307 break; 2308 2309 case CS_TRANSPORT: 2310 res = DID_ERROR << 16; 2311 2312 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 2313 break; 2314 2315 if (state_flags & BIT_4) 2316 scmd_printk(KERN_WARNING, cp, 2317 "Unsupported device '%s' found.\n", 2318 cp->device->vendor); 2319 break; 2320 2321 default: 2322 res = DID_ERROR << 16; 2323 break; 2324 } 2325 2326 out: 2327 if (logit) 2328 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 2329 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " 2330 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " 2331 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 2332 comp_status, scsi_status, res, vha->host_no, 2333 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 2334 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 2335 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 2336 resid_len, fw_resid_len); 2337 2338 if (rsp->status_srb == NULL) 2339 sp->done(ha, sp, res); 2340 } 2341 2342 /** 2343 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 2344 * @ha: SCSI driver HA context 2345 * @pkt: Entry pointer 2346 * 2347 * Extended sense data. 2348 */ 2349 static void 2350 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 2351 { 2352 uint8_t sense_sz = 0; 2353 struct qla_hw_data *ha = rsp->hw; 2354 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 2355 srb_t *sp = rsp->status_srb; 2356 struct scsi_cmnd *cp; 2357 uint32_t sense_len; 2358 uint8_t *sense_ptr; 2359 2360 if (!sp || !GET_CMD_SENSE_LEN(sp)) 2361 return; 2362 2363 sense_len = GET_CMD_SENSE_LEN(sp); 2364 sense_ptr = GET_CMD_SENSE_PTR(sp); 2365 2366 cp = GET_CMD_SP(sp); 2367 if (cp == NULL) { 2368 ql_log(ql_log_warn, vha, 0x3025, 2369 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 2370 2371 rsp->status_srb = NULL; 2372 return; 2373 } 2374 2375 if (sense_len > sizeof(pkt->data)) 2376 sense_sz = sizeof(pkt->data); 2377 else 2378 sense_sz = sense_len; 2379 2380 /* Move sense data. */ 2381 if (IS_FWI2_CAPABLE(ha)) 2382 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 2383 memcpy(sense_ptr, pkt->data, sense_sz); 2384 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 2385 sense_ptr, sense_sz); 2386 2387 sense_len -= sense_sz; 2388 sense_ptr += sense_sz; 2389 2390 SET_CMD_SENSE_PTR(sp, sense_ptr); 2391 SET_CMD_SENSE_LEN(sp, sense_len); 2392 2393 /* Place command on done queue. */ 2394 if (sense_len == 0) { 2395 rsp->status_srb = NULL; 2396 sp->done(ha, sp, cp->result); 2397 } 2398 } 2399 2400 /** 2401 * qla2x00_error_entry() - Process an error entry. 2402 * @ha: SCSI driver HA context 2403 * @pkt: Entry pointer 2404 */ 2405 static void 2406 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 2407 { 2408 srb_t *sp; 2409 struct qla_hw_data *ha = vha->hw; 2410 const char func[] = "ERROR-IOCB"; 2411 uint16_t que = MSW(pkt->handle); 2412 struct req_que *req = NULL; 2413 int res = DID_ERROR << 16; 2414 2415 ql_dbg(ql_dbg_async, vha, 0x502a, 2416 "type of error status in response: 0x%x\n", pkt->entry_status); 2417 2418 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 2419 goto fatal; 2420 2421 req = ha->req_q_map[que]; 2422 2423 if (pkt->entry_status & RF_BUSY) 2424 res = DID_BUS_BUSY << 16; 2425 2426 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2427 if (sp) { 2428 sp->done(ha, sp, res); 2429 return; 2430 } 2431 fatal: 2432 ql_log(ql_log_warn, vha, 0x5030, 2433 "Error entry - invalid handle/queue.\n"); 2434 2435 if (IS_P3P_TYPE(ha)) 2436 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2437 else 2438 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2439 qla2xxx_wake_dpc(vha); 2440 } 2441 2442 /** 2443 * qla24xx_mbx_completion() - Process mailbox command completions. 2444 * @ha: SCSI driver HA context 2445 * @mb0: Mailbox0 register 2446 */ 2447 static void 2448 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 2449 { 2450 uint16_t cnt; 2451 uint32_t mboxes; 2452 uint16_t __iomem *wptr; 2453 struct qla_hw_data *ha = vha->hw; 2454 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2455 2456 /* Read all mbox registers? */ 2457 mboxes = (1 << ha->mbx_count) - 1; 2458 if (!ha->mcp) 2459 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 2460 else 2461 mboxes = ha->mcp->in_mb; 2462 2463 /* Load return mailbox registers. */ 2464 ha->flags.mbox_int = 1; 2465 ha->mailbox_out[0] = mb0; 2466 mboxes >>= 1; 2467 wptr = (uint16_t __iomem *)®->mailbox1; 2468 2469 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2470 if (mboxes & BIT_0) 2471 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 2472 2473 mboxes >>= 1; 2474 wptr++; 2475 } 2476 } 2477 2478 static void 2479 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2480 struct abort_entry_24xx *pkt) 2481 { 2482 const char func[] = "ABT_IOCB"; 2483 srb_t *sp; 2484 struct srb_iocb *abt; 2485 2486 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2487 if (!sp) 2488 return; 2489 2490 abt = &sp->u.iocb_cmd; 2491 abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); 2492 sp->done(vha, sp, 0); 2493 } 2494 2495 /** 2496 * qla24xx_process_response_queue() - Process response queue entries. 2497 * @ha: SCSI driver HA context 2498 */ 2499 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2500 struct rsp_que *rsp) 2501 { 2502 struct sts_entry_24xx *pkt; 2503 struct qla_hw_data *ha = vha->hw; 2504 2505 if (!vha->flags.online) 2506 return; 2507 2508 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2509 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2510 2511 rsp->ring_index++; 2512 if (rsp->ring_index == rsp->length) { 2513 rsp->ring_index = 0; 2514 rsp->ring_ptr = rsp->ring; 2515 } else { 2516 rsp->ring_ptr++; 2517 } 2518 2519 if (pkt->entry_status != 0) { 2520 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2521 2522 if (qlt_24xx_process_response_error(vha, pkt)) 2523 goto process_err; 2524 2525 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2526 wmb(); 2527 continue; 2528 } 2529 process_err: 2530 2531 switch (pkt->entry_type) { 2532 case STATUS_TYPE: 2533 qla2x00_status_entry(vha, rsp, pkt); 2534 break; 2535 case STATUS_CONT_TYPE: 2536 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2537 break; 2538 case VP_RPT_ID_IOCB_TYPE: 2539 qla24xx_report_id_acquisition(vha, 2540 (struct vp_rpt_id_entry_24xx *)pkt); 2541 break; 2542 case LOGINOUT_PORT_IOCB_TYPE: 2543 qla24xx_logio_entry(vha, rsp->req, 2544 (struct logio_entry_24xx *)pkt); 2545 break; 2546 case CT_IOCB_TYPE: 2547 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2548 break; 2549 case ELS_IOCB_TYPE: 2550 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2551 break; 2552 case ABTS_RECV_24XX: 2553 /* ensure that the ATIO queue is empty */ 2554 qlt_24xx_process_atio_queue(vha); 2555 case ABTS_RESP_24XX: 2556 case CTIO_TYPE7: 2557 case NOTIFY_ACK_TYPE: 2558 case CTIO_CRC2: 2559 qlt_response_pkt_all_vps(vha, (response_t *)pkt); 2560 break; 2561 case MARKER_TYPE: 2562 /* Do nothing in this case, this check is to prevent it 2563 * from falling into default case 2564 */ 2565 break; 2566 case ABORT_IOCB_TYPE: 2567 qla24xx_abort_iocb_entry(vha, rsp->req, 2568 (struct abort_entry_24xx *)pkt); 2569 break; 2570 default: 2571 /* Type Not Supported. */ 2572 ql_dbg(ql_dbg_async, vha, 0x5042, 2573 "Received unknown response pkt type %x " 2574 "entry status=%x.\n", 2575 pkt->entry_type, pkt->entry_status); 2576 break; 2577 } 2578 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2579 wmb(); 2580 } 2581 2582 /* Adjust ring index */ 2583 if (IS_P3P_TYPE(ha)) { 2584 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2585 WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); 2586 } else 2587 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2588 } 2589 2590 static void 2591 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2592 { 2593 int rval; 2594 uint32_t cnt; 2595 struct qla_hw_data *ha = vha->hw; 2596 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2597 2598 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 2599 !IS_QLA27XX(ha)) 2600 return; 2601 2602 rval = QLA_SUCCESS; 2603 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 2604 RD_REG_DWORD(®->iobase_addr); 2605 WRT_REG_DWORD(®->iobase_window, 0x0001); 2606 for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2607 rval == QLA_SUCCESS; cnt--) { 2608 if (cnt) { 2609 WRT_REG_DWORD(®->iobase_window, 0x0001); 2610 udelay(10); 2611 } else 2612 rval = QLA_FUNCTION_TIMEOUT; 2613 } 2614 if (rval == QLA_SUCCESS) 2615 goto next_test; 2616 2617 rval = QLA_SUCCESS; 2618 WRT_REG_DWORD(®->iobase_window, 0x0003); 2619 for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2620 rval == QLA_SUCCESS; cnt--) { 2621 if (cnt) { 2622 WRT_REG_DWORD(®->iobase_window, 0x0003); 2623 udelay(10); 2624 } else 2625 rval = QLA_FUNCTION_TIMEOUT; 2626 } 2627 if (rval != QLA_SUCCESS) 2628 goto done; 2629 2630 next_test: 2631 if (RD_REG_DWORD(®->iobase_c8) & BIT_3) 2632 ql_log(ql_log_info, vha, 0x504c, 2633 "Additional code -- 0x55AA.\n"); 2634 2635 done: 2636 WRT_REG_DWORD(®->iobase_window, 0x0000); 2637 RD_REG_DWORD(®->iobase_window); 2638 } 2639 2640 /** 2641 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 2642 * @irq: 2643 * @dev_id: SCSI driver HA context 2644 * 2645 * Called by system whenever the host adapter generates an interrupt. 2646 * 2647 * Returns handled flag. 2648 */ 2649 irqreturn_t 2650 qla24xx_intr_handler(int irq, void *dev_id) 2651 { 2652 scsi_qla_host_t *vha; 2653 struct qla_hw_data *ha; 2654 struct device_reg_24xx __iomem *reg; 2655 int status; 2656 unsigned long iter; 2657 uint32_t stat; 2658 uint32_t hccr; 2659 uint16_t mb[8]; 2660 struct rsp_que *rsp; 2661 unsigned long flags; 2662 2663 rsp = (struct rsp_que *) dev_id; 2664 if (!rsp) { 2665 ql_log(ql_log_info, NULL, 0x5059, 2666 "%s: NULL response queue pointer.\n", __func__); 2667 return IRQ_NONE; 2668 } 2669 2670 ha = rsp->hw; 2671 reg = &ha->iobase->isp24; 2672 status = 0; 2673 2674 if (unlikely(pci_channel_offline(ha->pdev))) 2675 return IRQ_HANDLED; 2676 2677 spin_lock_irqsave(&ha->hardware_lock, flags); 2678 vha = pci_get_drvdata(ha->pdev); 2679 for (iter = 50; iter--; ) { 2680 stat = RD_REG_DWORD(®->host_status); 2681 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 2682 break; 2683 if (stat & HSRX_RISC_PAUSED) { 2684 if (unlikely(pci_channel_offline(ha->pdev))) 2685 break; 2686 2687 hccr = RD_REG_DWORD(®->hccr); 2688 2689 ql_log(ql_log_warn, vha, 0x504b, 2690 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2691 hccr); 2692 2693 qla2xxx_check_risc_status(vha); 2694 2695 ha->isp_ops->fw_dump(vha, 1); 2696 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2697 break; 2698 } else if ((stat & HSRX_RISC_INT) == 0) 2699 break; 2700 2701 switch (stat & 0xff) { 2702 case INTR_ROM_MB_SUCCESS: 2703 case INTR_ROM_MB_FAILED: 2704 case INTR_MB_SUCCESS: 2705 case INTR_MB_FAILED: 2706 qla24xx_mbx_completion(vha, MSW(stat)); 2707 status |= MBX_INTERRUPT; 2708 2709 break; 2710 case INTR_ASYNC_EVENT: 2711 mb[0] = MSW(stat); 2712 mb[1] = RD_REG_WORD(®->mailbox1); 2713 mb[2] = RD_REG_WORD(®->mailbox2); 2714 mb[3] = RD_REG_WORD(®->mailbox3); 2715 qla2x00_async_event(vha, rsp, mb); 2716 break; 2717 case INTR_RSP_QUE_UPDATE: 2718 case INTR_RSP_QUE_UPDATE_83XX: 2719 qla24xx_process_response_queue(vha, rsp); 2720 break; 2721 case INTR_ATIO_QUE_UPDATE: 2722 qlt_24xx_process_atio_queue(vha); 2723 break; 2724 case INTR_ATIO_RSP_QUE_UPDATE: 2725 qlt_24xx_process_atio_queue(vha); 2726 qla24xx_process_response_queue(vha, rsp); 2727 break; 2728 default: 2729 ql_dbg(ql_dbg_async, vha, 0x504f, 2730 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2731 break; 2732 } 2733 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2734 RD_REG_DWORD_RELAXED(®->hccr); 2735 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2736 ndelay(3500); 2737 } 2738 qla2x00_handle_mbx_completion(ha, status); 2739 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2740 2741 return IRQ_HANDLED; 2742 } 2743 2744 static irqreturn_t 2745 qla24xx_msix_rsp_q(int irq, void *dev_id) 2746 { 2747 struct qla_hw_data *ha; 2748 struct rsp_que *rsp; 2749 struct device_reg_24xx __iomem *reg; 2750 struct scsi_qla_host *vha; 2751 unsigned long flags; 2752 uint32_t stat = 0; 2753 2754 rsp = (struct rsp_que *) dev_id; 2755 if (!rsp) { 2756 ql_log(ql_log_info, NULL, 0x505a, 2757 "%s: NULL response queue pointer.\n", __func__); 2758 return IRQ_NONE; 2759 } 2760 ha = rsp->hw; 2761 reg = &ha->iobase->isp24; 2762 2763 spin_lock_irqsave(&ha->hardware_lock, flags); 2764 2765 vha = pci_get_drvdata(ha->pdev); 2766 /* 2767 * Use host_status register to check to PCI disconnection before we 2768 * we process the response queue. 2769 */ 2770 stat = RD_REG_DWORD(®->host_status); 2771 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 2772 goto out; 2773 qla24xx_process_response_queue(vha, rsp); 2774 if (!ha->flags.disable_msix_handshake) { 2775 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2776 RD_REG_DWORD_RELAXED(®->hccr); 2777 } 2778 out: 2779 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2780 2781 return IRQ_HANDLED; 2782 } 2783 2784 static irqreturn_t 2785 qla25xx_msix_rsp_q(int irq, void *dev_id) 2786 { 2787 struct qla_hw_data *ha; 2788 scsi_qla_host_t *vha; 2789 struct rsp_que *rsp; 2790 struct device_reg_24xx __iomem *reg; 2791 unsigned long flags; 2792 uint32_t hccr = 0; 2793 2794 rsp = (struct rsp_que *) dev_id; 2795 if (!rsp) { 2796 ql_log(ql_log_info, NULL, 0x505b, 2797 "%s: NULL response queue pointer.\n", __func__); 2798 return IRQ_NONE; 2799 } 2800 ha = rsp->hw; 2801 vha = pci_get_drvdata(ha->pdev); 2802 2803 /* Clear the interrupt, if enabled, for this response queue */ 2804 if (!ha->flags.disable_msix_handshake) { 2805 reg = &ha->iobase->isp24; 2806 spin_lock_irqsave(&ha->hardware_lock, flags); 2807 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2808 hccr = RD_REG_DWORD_RELAXED(®->hccr); 2809 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2810 } 2811 if (qla2x00_check_reg32_for_disconnect(vha, hccr)) 2812 goto out; 2813 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2814 2815 out: 2816 return IRQ_HANDLED; 2817 } 2818 2819 static irqreturn_t 2820 qla24xx_msix_default(int irq, void *dev_id) 2821 { 2822 scsi_qla_host_t *vha; 2823 struct qla_hw_data *ha; 2824 struct rsp_que *rsp; 2825 struct device_reg_24xx __iomem *reg; 2826 int status; 2827 uint32_t stat; 2828 uint32_t hccr; 2829 uint16_t mb[8]; 2830 unsigned long flags; 2831 2832 rsp = (struct rsp_que *) dev_id; 2833 if (!rsp) { 2834 ql_log(ql_log_info, NULL, 0x505c, 2835 "%s: NULL response queue pointer.\n", __func__); 2836 return IRQ_NONE; 2837 } 2838 ha = rsp->hw; 2839 reg = &ha->iobase->isp24; 2840 status = 0; 2841 2842 spin_lock_irqsave(&ha->hardware_lock, flags); 2843 vha = pci_get_drvdata(ha->pdev); 2844 do { 2845 stat = RD_REG_DWORD(®->host_status); 2846 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 2847 break; 2848 if (stat & HSRX_RISC_PAUSED) { 2849 if (unlikely(pci_channel_offline(ha->pdev))) 2850 break; 2851 2852 hccr = RD_REG_DWORD(®->hccr); 2853 2854 ql_log(ql_log_info, vha, 0x5050, 2855 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2856 hccr); 2857 2858 qla2xxx_check_risc_status(vha); 2859 2860 ha->isp_ops->fw_dump(vha, 1); 2861 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2862 break; 2863 } else if ((stat & HSRX_RISC_INT) == 0) 2864 break; 2865 2866 switch (stat & 0xff) { 2867 case INTR_ROM_MB_SUCCESS: 2868 case INTR_ROM_MB_FAILED: 2869 case INTR_MB_SUCCESS: 2870 case INTR_MB_FAILED: 2871 qla24xx_mbx_completion(vha, MSW(stat)); 2872 status |= MBX_INTERRUPT; 2873 2874 break; 2875 case INTR_ASYNC_EVENT: 2876 mb[0] = MSW(stat); 2877 mb[1] = RD_REG_WORD(®->mailbox1); 2878 mb[2] = RD_REG_WORD(®->mailbox2); 2879 mb[3] = RD_REG_WORD(®->mailbox3); 2880 qla2x00_async_event(vha, rsp, mb); 2881 break; 2882 case INTR_RSP_QUE_UPDATE: 2883 case INTR_RSP_QUE_UPDATE_83XX: 2884 qla24xx_process_response_queue(vha, rsp); 2885 break; 2886 case INTR_ATIO_QUE_UPDATE: 2887 qlt_24xx_process_atio_queue(vha); 2888 break; 2889 case INTR_ATIO_RSP_QUE_UPDATE: 2890 qlt_24xx_process_atio_queue(vha); 2891 qla24xx_process_response_queue(vha, rsp); 2892 break; 2893 default: 2894 ql_dbg(ql_dbg_async, vha, 0x5051, 2895 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2896 break; 2897 } 2898 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2899 } while (0); 2900 qla2x00_handle_mbx_completion(ha, status); 2901 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2902 2903 return IRQ_HANDLED; 2904 } 2905 2906 /* Interrupt handling helpers. */ 2907 2908 struct qla_init_msix_entry { 2909 const char *name; 2910 irq_handler_t handler; 2911 }; 2912 2913 static struct qla_init_msix_entry msix_entries[3] = { 2914 { "qla2xxx (default)", qla24xx_msix_default }, 2915 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2916 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2917 }; 2918 2919 static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 2920 { "qla2xxx (default)", qla82xx_msix_default }, 2921 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 2922 }; 2923 2924 static struct qla_init_msix_entry qla83xx_msix_entries[3] = { 2925 { "qla2xxx (default)", qla24xx_msix_default }, 2926 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2927 { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, 2928 }; 2929 2930 static void 2931 qla24xx_disable_msix(struct qla_hw_data *ha) 2932 { 2933 int i; 2934 struct qla_msix_entry *qentry; 2935 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2936 2937 for (i = 0; i < ha->msix_count; i++) { 2938 qentry = &ha->msix_entries[i]; 2939 if (qentry->have_irq) 2940 free_irq(qentry->vector, qentry->rsp); 2941 } 2942 pci_disable_msix(ha->pdev); 2943 kfree(ha->msix_entries); 2944 ha->msix_entries = NULL; 2945 ha->flags.msix_enabled = 0; 2946 ql_dbg(ql_dbg_init, vha, 0x0042, 2947 "Disabled the MSI.\n"); 2948 } 2949 2950 static int 2951 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 2952 { 2953 #define MIN_MSIX_COUNT 2 2954 #define ATIO_VECTOR 2 2955 int i, ret; 2956 struct msix_entry *entries; 2957 struct qla_msix_entry *qentry; 2958 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2959 2960 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2961 GFP_KERNEL); 2962 if (!entries) { 2963 ql_log(ql_log_warn, vha, 0x00bc, 2964 "Failed to allocate memory for msix_entry.\n"); 2965 return -ENOMEM; 2966 } 2967 2968 for (i = 0; i < ha->msix_count; i++) 2969 entries[i].entry = i; 2970 2971 ret = pci_enable_msix_range(ha->pdev, 2972 entries, MIN_MSIX_COUNT, ha->msix_count); 2973 if (ret < 0) { 2974 ql_log(ql_log_fatal, vha, 0x00c7, 2975 "MSI-X: Failed to enable support, " 2976 "giving up -- %d/%d.\n", 2977 ha->msix_count, ret); 2978 goto msix_out; 2979 } else if (ret < ha->msix_count) { 2980 ql_log(ql_log_warn, vha, 0x00c6, 2981 "MSI-X: Failed to enable support " 2982 "-- %d/%d\n Retry with %d vectors.\n", 2983 ha->msix_count, ret, ret); 2984 } 2985 ha->msix_count = ret; 2986 ha->max_rsp_queues = ha->msix_count - 1; 2987 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2988 ha->msix_count, GFP_KERNEL); 2989 if (!ha->msix_entries) { 2990 ql_log(ql_log_fatal, vha, 0x00c8, 2991 "Failed to allocate memory for ha->msix_entries.\n"); 2992 ret = -ENOMEM; 2993 goto msix_out; 2994 } 2995 ha->flags.msix_enabled = 1; 2996 2997 for (i = 0; i < ha->msix_count; i++) { 2998 qentry = &ha->msix_entries[i]; 2999 qentry->vector = entries[i].vector; 3000 qentry->entry = entries[i].entry; 3001 qentry->have_irq = 0; 3002 qentry->rsp = NULL; 3003 } 3004 3005 /* Enable MSI-X vectors for the base queue */ 3006 for (i = 0; i < 2; i++) { 3007 qentry = &ha->msix_entries[i]; 3008 if (IS_P3P_TYPE(ha)) 3009 ret = request_irq(qentry->vector, 3010 qla82xx_msix_entries[i].handler, 3011 0, qla82xx_msix_entries[i].name, rsp); 3012 else 3013 ret = request_irq(qentry->vector, 3014 msix_entries[i].handler, 3015 0, msix_entries[i].name, rsp); 3016 if (ret) 3017 goto msix_register_fail; 3018 qentry->have_irq = 1; 3019 qentry->rsp = rsp; 3020 rsp->msix = qentry; 3021 } 3022 3023 /* 3024 * If target mode is enable, also request the vector for the ATIO 3025 * queue. 3026 */ 3027 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 3028 qentry = &ha->msix_entries[ATIO_VECTOR]; 3029 ret = request_irq(qentry->vector, 3030 qla83xx_msix_entries[ATIO_VECTOR].handler, 3031 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); 3032 qentry->have_irq = 1; 3033 qentry->rsp = rsp; 3034 rsp->msix = qentry; 3035 } 3036 3037 msix_register_fail: 3038 if (ret) { 3039 ql_log(ql_log_fatal, vha, 0x00cb, 3040 "MSI-X: unable to register handler -- %x/%d.\n", 3041 qentry->vector, ret); 3042 qla24xx_disable_msix(ha); 3043 ha->mqenable = 0; 3044 goto msix_out; 3045 } 3046 3047 /* Enable MSI-X vector for response queue update for queue 0 */ 3048 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3049 if (ha->msixbase && ha->mqiobase && 3050 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3051 ha->mqenable = 1; 3052 } else 3053 if (ha->mqiobase 3054 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3055 ha->mqenable = 1; 3056 ql_dbg(ql_dbg_multiq, vha, 0xc005, 3057 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3058 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3059 ql_dbg(ql_dbg_init, vha, 0x0055, 3060 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3061 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3062 3063 msix_out: 3064 kfree(entries); 3065 return ret; 3066 } 3067 3068 int 3069 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 3070 { 3071 int ret = QLA_FUNCTION_FAILED; 3072 device_reg_t *reg = ha->iobase; 3073 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3074 3075 /* If possible, enable MSI-X. */ 3076 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3077 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && 3078 !IS_QLA27XX(ha)) 3079 goto skip_msi; 3080 3081 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 3082 (ha->pdev->subsystem_device == 0x7040 || 3083 ha->pdev->subsystem_device == 0x7041 || 3084 ha->pdev->subsystem_device == 0x1705)) { 3085 ql_log(ql_log_warn, vha, 0x0034, 3086 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 3087 ha->pdev->subsystem_vendor, 3088 ha->pdev->subsystem_device); 3089 goto skip_msi; 3090 } 3091 3092 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 3093 ql_log(ql_log_warn, vha, 0x0035, 3094 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 3095 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 3096 goto skip_msix; 3097 } 3098 3099 ret = qla24xx_enable_msix(ha, rsp); 3100 if (!ret) { 3101 ql_dbg(ql_dbg_init, vha, 0x0036, 3102 "MSI-X: Enabled (0x%X, 0x%X).\n", 3103 ha->chip_revision, ha->fw_attributes); 3104 goto clear_risc_ints; 3105 } 3106 3107 skip_msix: 3108 3109 ql_log(ql_log_info, vha, 0x0037, 3110 "Falling back-to MSI mode -%d.\n", ret); 3111 3112 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3113 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 3114 !IS_QLA27XX(ha)) 3115 goto skip_msi; 3116 3117 ret = pci_enable_msi(ha->pdev); 3118 if (!ret) { 3119 ql_dbg(ql_dbg_init, vha, 0x0038, 3120 "MSI: Enabled.\n"); 3121 ha->flags.msi_enabled = 1; 3122 } else 3123 ql_log(ql_log_warn, vha, 0x0039, 3124 "Falling back-to INTa mode -- %d.\n", ret); 3125 skip_msi: 3126 3127 /* Skip INTx on ISP82xx. */ 3128 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 3129 return QLA_FUNCTION_FAILED; 3130 3131 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 3132 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 3133 QLA2XXX_DRIVER_NAME, rsp); 3134 if (ret) { 3135 ql_log(ql_log_warn, vha, 0x003a, 3136 "Failed to reserve interrupt %d already in use.\n", 3137 ha->pdev->irq); 3138 goto fail; 3139 } else if (!ha->flags.msi_enabled) { 3140 ql_dbg(ql_dbg_init, vha, 0x0125, 3141 "INTa mode: Enabled.\n"); 3142 ha->flags.mr_intr_valid = 1; 3143 } 3144 3145 clear_risc_ints: 3146 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 3147 goto fail; 3148 3149 spin_lock_irq(&ha->hardware_lock); 3150 WRT_REG_WORD(®->isp.semaphore, 0); 3151 spin_unlock_irq(&ha->hardware_lock); 3152 3153 fail: 3154 return ret; 3155 } 3156 3157 void 3158 qla2x00_free_irqs(scsi_qla_host_t *vha) 3159 { 3160 struct qla_hw_data *ha = vha->hw; 3161 struct rsp_que *rsp; 3162 3163 /* 3164 * We need to check that ha->rsp_q_map is valid in case we are called 3165 * from a probe failure context. 3166 */ 3167 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3168 return; 3169 rsp = ha->rsp_q_map[0]; 3170 3171 if (ha->flags.msix_enabled) 3172 qla24xx_disable_msix(ha); 3173 else if (ha->flags.msi_enabled) { 3174 free_irq(ha->pdev->irq, rsp); 3175 pci_disable_msi(ha->pdev); 3176 } else 3177 free_irq(ha->pdev->irq, rsp); 3178 } 3179 3180 3181 int qla25xx_request_irq(struct rsp_que *rsp) 3182 { 3183 struct qla_hw_data *ha = rsp->hw; 3184 struct qla_init_msix_entry *intr = &msix_entries[2]; 3185 struct qla_msix_entry *msix = rsp->msix; 3186 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3187 int ret; 3188 3189 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 3190 if (ret) { 3191 ql_log(ql_log_fatal, vha, 0x00e6, 3192 "MSI-X: Unable to register handler -- %x/%d.\n", 3193 msix->vector, ret); 3194 return ret; 3195 } 3196 msix->have_irq = 1; 3197 msix->rsp = rsp; 3198 return ret; 3199 } 3200