1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2013 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <scsi/scsi_tcq.h> 13 #include <scsi/scsi_bsg_fc.h> 14 #include <scsi/scsi_eh.h> 15 16 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 17 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 18 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 19 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 20 sts_entry_t *); 21 22 /** 23 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 24 * @irq: 25 * @dev_id: SCSI driver HA context 26 * 27 * Called by system whenever the host adapter generates an interrupt. 28 * 29 * Returns handled flag. 30 */ 31 irqreturn_t 32 qla2100_intr_handler(int irq, void *dev_id) 33 { 34 scsi_qla_host_t *vha; 35 struct qla_hw_data *ha; 36 struct device_reg_2xxx __iomem *reg; 37 int status; 38 unsigned long iter; 39 uint16_t hccr; 40 uint16_t mb[4]; 41 struct rsp_que *rsp; 42 unsigned long flags; 43 44 rsp = (struct rsp_que *) dev_id; 45 if (!rsp) { 46 ql_log(ql_log_info, NULL, 0x505d, 47 "%s: NULL response queue pointer.\n", __func__); 48 return (IRQ_NONE); 49 } 50 51 ha = rsp->hw; 52 reg = &ha->iobase->isp; 53 status = 0; 54 55 spin_lock_irqsave(&ha->hardware_lock, flags); 56 vha = pci_get_drvdata(ha->pdev); 57 for (iter = 50; iter--; ) { 58 hccr = RD_REG_WORD(®->hccr); 59 /* Check for PCI disconnection */ 60 if (hccr == 0xffff) { 61 /* 62 * Schedule this on the default system workqueue so that 63 * all the adapter workqueues and the DPC thread can be 64 * shutdown cleanly. 65 */ 66 schedule_work(&ha->board_disable); 67 break; 68 } 69 if (hccr & HCCR_RISC_PAUSE) { 70 if (pci_channel_offline(ha->pdev)) 71 break; 72 73 /* 74 * Issue a "HARD" reset in order for the RISC interrupt 75 * bit to be cleared. Schedule a big hammer to get 76 * out of the RISC PAUSED state. 77 */ 78 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 79 RD_REG_WORD(®->hccr); 80 81 ha->isp_ops->fw_dump(vha, 1); 82 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 83 break; 84 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) 85 break; 86 87 if (RD_REG_WORD(®->semaphore) & BIT_0) { 88 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 89 RD_REG_WORD(®->hccr); 90 91 /* Get mailbox data. */ 92 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 93 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 94 qla2x00_mbx_completion(vha, mb[0]); 95 status |= MBX_INTERRUPT; 96 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 97 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 98 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 99 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 100 qla2x00_async_event(vha, rsp, mb); 101 } else { 102 /*EMPTY*/ 103 ql_dbg(ql_dbg_async, vha, 0x5025, 104 "Unrecognized interrupt type (%d).\n", 105 mb[0]); 106 } 107 /* Release mailbox registers. */ 108 WRT_REG_WORD(®->semaphore, 0); 109 RD_REG_WORD(®->semaphore); 110 } else { 111 qla2x00_process_response_queue(rsp); 112 113 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 114 RD_REG_WORD(®->hccr); 115 } 116 } 117 qla2x00_handle_mbx_completion(ha, status); 118 spin_unlock_irqrestore(&ha->hardware_lock, flags); 119 120 return (IRQ_HANDLED); 121 } 122 123 bool 124 qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 125 { 126 /* Check for PCI disconnection */ 127 if (reg == 0xffffffff) { 128 /* 129 * Schedule this on the default system workqueue so that all the 130 * adapter workqueues and the DPC thread can be shutdown 131 * cleanly. 132 */ 133 schedule_work(&vha->hw->board_disable); 134 return true; 135 } else 136 return false; 137 } 138 139 /** 140 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 141 * @irq: 142 * @dev_id: SCSI driver HA context 143 * 144 * Called by system whenever the host adapter generates an interrupt. 145 * 146 * Returns handled flag. 147 */ 148 irqreturn_t 149 qla2300_intr_handler(int irq, void *dev_id) 150 { 151 scsi_qla_host_t *vha; 152 struct device_reg_2xxx __iomem *reg; 153 int status; 154 unsigned long iter; 155 uint32_t stat; 156 uint16_t hccr; 157 uint16_t mb[4]; 158 struct rsp_que *rsp; 159 struct qla_hw_data *ha; 160 unsigned long flags; 161 162 rsp = (struct rsp_que *) dev_id; 163 if (!rsp) { 164 ql_log(ql_log_info, NULL, 0x5058, 165 "%s: NULL response queue pointer.\n", __func__); 166 return (IRQ_NONE); 167 } 168 169 ha = rsp->hw; 170 reg = &ha->iobase->isp; 171 status = 0; 172 173 spin_lock_irqsave(&ha->hardware_lock, flags); 174 vha = pci_get_drvdata(ha->pdev); 175 for (iter = 50; iter--; ) { 176 stat = RD_REG_DWORD(®->u.isp2300.host_status); 177 if (qla2x00_check_reg_for_disconnect(vha, stat)) 178 break; 179 if (stat & HSR_RISC_PAUSED) { 180 if (unlikely(pci_channel_offline(ha->pdev))) 181 break; 182 183 hccr = RD_REG_WORD(®->hccr); 184 185 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 186 ql_log(ql_log_warn, vha, 0x5026, 187 "Parity error -- HCCR=%x, Dumping " 188 "firmware.\n", hccr); 189 else 190 ql_log(ql_log_warn, vha, 0x5027, 191 "RISC paused -- HCCR=%x, Dumping " 192 "firmware.\n", hccr); 193 194 /* 195 * Issue a "HARD" reset in order for the RISC 196 * interrupt bit to be cleared. Schedule a big 197 * hammer to get out of the RISC PAUSED state. 198 */ 199 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 200 RD_REG_WORD(®->hccr); 201 202 ha->isp_ops->fw_dump(vha, 1); 203 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 204 break; 205 } else if ((stat & HSR_RISC_INT) == 0) 206 break; 207 208 switch (stat & 0xff) { 209 case 0x1: 210 case 0x2: 211 case 0x10: 212 case 0x11: 213 qla2x00_mbx_completion(vha, MSW(stat)); 214 status |= MBX_INTERRUPT; 215 216 /* Release mailbox registers. */ 217 WRT_REG_WORD(®->semaphore, 0); 218 break; 219 case 0x12: 220 mb[0] = MSW(stat); 221 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 222 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 223 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 224 qla2x00_async_event(vha, rsp, mb); 225 break; 226 case 0x13: 227 qla2x00_process_response_queue(rsp); 228 break; 229 case 0x15: 230 mb[0] = MBA_CMPLT_1_16BIT; 231 mb[1] = MSW(stat); 232 qla2x00_async_event(vha, rsp, mb); 233 break; 234 case 0x16: 235 mb[0] = MBA_SCSI_COMPLETION; 236 mb[1] = MSW(stat); 237 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 238 qla2x00_async_event(vha, rsp, mb); 239 break; 240 default: 241 ql_dbg(ql_dbg_async, vha, 0x5028, 242 "Unrecognized interrupt type (%d).\n", stat & 0xff); 243 break; 244 } 245 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 246 RD_REG_WORD_RELAXED(®->hccr); 247 } 248 qla2x00_handle_mbx_completion(ha, status); 249 spin_unlock_irqrestore(&ha->hardware_lock, flags); 250 251 return (IRQ_HANDLED); 252 } 253 254 /** 255 * qla2x00_mbx_completion() - Process mailbox command completions. 256 * @ha: SCSI driver HA context 257 * @mb0: Mailbox0 register 258 */ 259 static void 260 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 261 { 262 uint16_t cnt; 263 uint32_t mboxes; 264 uint16_t __iomem *wptr; 265 struct qla_hw_data *ha = vha->hw; 266 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 267 268 /* Read all mbox registers? */ 269 mboxes = (1 << ha->mbx_count) - 1; 270 if (!ha->mcp) 271 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 272 else 273 mboxes = ha->mcp->in_mb; 274 275 /* Load return mailbox registers. */ 276 ha->flags.mbox_int = 1; 277 ha->mailbox_out[0] = mb0; 278 mboxes >>= 1; 279 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 280 281 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 282 if (IS_QLA2200(ha) && cnt == 8) 283 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 284 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 285 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 286 else if (mboxes & BIT_0) 287 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 288 289 wptr++; 290 mboxes >>= 1; 291 } 292 } 293 294 static void 295 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 296 { 297 static char *event[] = 298 { "Complete", "Request Notification", "Time Extension" }; 299 int rval; 300 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 301 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 302 uint16_t __iomem *wptr; 303 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 304 305 /* Seed data -- mailbox1 -> mailbox7. */ 306 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 307 wptr = (uint16_t __iomem *)®24->mailbox1; 308 else if (IS_QLA8044(vha->hw)) 309 wptr = (uint16_t __iomem *)®82->mailbox_out[1]; 310 else 311 return; 312 313 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 314 mb[cnt] = RD_REG_WORD(wptr); 315 316 ql_dbg(ql_dbg_async, vha, 0x5021, 317 "Inter-Driver Communication %s -- " 318 "%04x %04x %04x %04x %04x %04x %04x.\n", 319 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 320 mb[4], mb[5], mb[6]); 321 switch (aen) { 322 /* Handle IDC Error completion case. */ 323 case MBA_IDC_COMPLETE: 324 if (mb[1] >> 15) { 325 vha->hw->flags.idc_compl_status = 1; 326 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 327 complete(&vha->hw->dcbx_comp); 328 } 329 break; 330 331 case MBA_IDC_NOTIFY: 332 /* Acknowledgement needed? [Notify && non-zero timeout]. */ 333 timeout = (descr >> 8) & 0xf; 334 ql_dbg(ql_dbg_async, vha, 0x5022, 335 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 336 vha->host_no, event[aen & 0xff], timeout); 337 338 if (!timeout) 339 return; 340 rval = qla2x00_post_idc_ack_work(vha, mb); 341 if (rval != QLA_SUCCESS) 342 ql_log(ql_log_warn, vha, 0x5023, 343 "IDC failed to post ACK.\n"); 344 break; 345 case MBA_IDC_TIME_EXT: 346 vha->hw->idc_extend_tmo = descr; 347 ql_dbg(ql_dbg_async, vha, 0x5087, 348 "%lu Inter-Driver Communication %s -- " 349 "Extend timeout by=%d.\n", 350 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 351 break; 352 } 353 } 354 355 #define LS_UNKNOWN 2 356 const char * 357 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 358 { 359 static const char *const link_speeds[] = { 360 "1", "2", "?", "4", "8", "16", "32", "10" 361 }; 362 #define QLA_LAST_SPEED 7 363 364 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 365 return link_speeds[0]; 366 else if (speed == 0x13) 367 return link_speeds[QLA_LAST_SPEED]; 368 else if (speed < QLA_LAST_SPEED) 369 return link_speeds[speed]; 370 else 371 return link_speeds[LS_UNKNOWN]; 372 } 373 374 static void 375 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 376 { 377 struct qla_hw_data *ha = vha->hw; 378 379 /* 380 * 8200 AEN Interpretation: 381 * mb[0] = AEN code 382 * mb[1] = AEN Reason code 383 * mb[2] = LSW of Peg-Halt Status-1 Register 384 * mb[6] = MSW of Peg-Halt Status-1 Register 385 * mb[3] = LSW of Peg-Halt Status-2 register 386 * mb[7] = MSW of Peg-Halt Status-2 register 387 * mb[4] = IDC Device-State Register value 388 * mb[5] = IDC Driver-Presence Register value 389 */ 390 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 391 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 392 mb[0], mb[1], mb[2], mb[6]); 393 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 394 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 395 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 396 397 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 398 IDC_HEARTBEAT_FAILURE)) { 399 ha->flags.nic_core_hung = 1; 400 ql_log(ql_log_warn, vha, 0x5060, 401 "83XX: F/W Error Reported: Check if reset required.\n"); 402 403 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 404 uint32_t protocol_engine_id, fw_err_code, err_level; 405 406 /* 407 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 408 * - PEG-Halt Status-1 Register: 409 * (LSW = mb[2], MSW = mb[6]) 410 * Bits 0-7 = protocol-engine ID 411 * Bits 8-28 = f/w error code 412 * Bits 29-31 = Error-level 413 * Error-level 0x1 = Non-Fatal error 414 * Error-level 0x2 = Recoverable Fatal error 415 * Error-level 0x4 = UnRecoverable Fatal error 416 * - PEG-Halt Status-2 Register: 417 * (LSW = mb[3], MSW = mb[7]) 418 */ 419 protocol_engine_id = (mb[2] & 0xff); 420 fw_err_code = (((mb[2] & 0xff00) >> 8) | 421 ((mb[6] & 0x1fff) << 8)); 422 err_level = ((mb[6] & 0xe000) >> 13); 423 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 424 "Register: protocol_engine_id=0x%x " 425 "fw_err_code=0x%x err_level=0x%x.\n", 426 protocol_engine_id, fw_err_code, err_level); 427 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 428 "Register: 0x%x%x.\n", mb[7], mb[3]); 429 if (err_level == ERR_LEVEL_NON_FATAL) { 430 ql_log(ql_log_warn, vha, 0x5063, 431 "Not a fatal error, f/w has recovered " 432 "iteself.\n"); 433 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 434 ql_log(ql_log_fatal, vha, 0x5064, 435 "Recoverable Fatal error: Chip reset " 436 "required.\n"); 437 qla83xx_schedule_work(vha, 438 QLA83XX_NIC_CORE_RESET); 439 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 440 ql_log(ql_log_fatal, vha, 0x5065, 441 "Unrecoverable Fatal error: Set FAILED " 442 "state, reboot required.\n"); 443 qla83xx_schedule_work(vha, 444 QLA83XX_NIC_CORE_UNRECOVERABLE); 445 } 446 } 447 448 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 449 uint16_t peg_fw_state, nw_interface_link_up; 450 uint16_t nw_interface_signal_detect, sfp_status; 451 uint16_t htbt_counter, htbt_monitor_enable; 452 uint16_t sfp_additonal_info, sfp_multirate; 453 uint16_t sfp_tx_fault, link_speed, dcbx_status; 454 455 /* 456 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 457 * - PEG-to-FC Status Register: 458 * (LSW = mb[2], MSW = mb[6]) 459 * Bits 0-7 = Peg-Firmware state 460 * Bit 8 = N/W Interface Link-up 461 * Bit 9 = N/W Interface signal detected 462 * Bits 10-11 = SFP Status 463 * SFP Status 0x0 = SFP+ transceiver not expected 464 * SFP Status 0x1 = SFP+ transceiver not present 465 * SFP Status 0x2 = SFP+ transceiver invalid 466 * SFP Status 0x3 = SFP+ transceiver present and 467 * valid 468 * Bits 12-14 = Heartbeat Counter 469 * Bit 15 = Heartbeat Monitor Enable 470 * Bits 16-17 = SFP Additional Info 471 * SFP info 0x0 = Unregocnized transceiver for 472 * Ethernet 473 * SFP info 0x1 = SFP+ brand validation failed 474 * SFP info 0x2 = SFP+ speed validation failed 475 * SFP info 0x3 = SFP+ access error 476 * Bit 18 = SFP Multirate 477 * Bit 19 = SFP Tx Fault 478 * Bits 20-22 = Link Speed 479 * Bits 23-27 = Reserved 480 * Bits 28-30 = DCBX Status 481 * DCBX Status 0x0 = DCBX Disabled 482 * DCBX Status 0x1 = DCBX Enabled 483 * DCBX Status 0x2 = DCBX Exchange error 484 * Bit 31 = Reserved 485 */ 486 peg_fw_state = (mb[2] & 0x00ff); 487 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 488 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 489 sfp_status = ((mb[2] & 0x0c00) >> 10); 490 htbt_counter = ((mb[2] & 0x7000) >> 12); 491 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 492 sfp_additonal_info = (mb[6] & 0x0003); 493 sfp_multirate = ((mb[6] & 0x0004) >> 2); 494 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 495 link_speed = ((mb[6] & 0x0070) >> 4); 496 dcbx_status = ((mb[6] & 0x7000) >> 12); 497 498 ql_log(ql_log_warn, vha, 0x5066, 499 "Peg-to-Fc Status Register:\n" 500 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 501 "nw_interface_signal_detect=0x%x" 502 "\nsfp_statis=0x%x.\n ", peg_fw_state, 503 nw_interface_link_up, nw_interface_signal_detect, 504 sfp_status); 505 ql_log(ql_log_warn, vha, 0x5067, 506 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 507 "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n ", 508 htbt_counter, htbt_monitor_enable, 509 sfp_additonal_info, sfp_multirate); 510 ql_log(ql_log_warn, vha, 0x5068, 511 "sfp_tx_fault=0x%x, link_state=0x%x, " 512 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 513 dcbx_status); 514 515 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 516 } 517 518 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 519 ql_log(ql_log_warn, vha, 0x5069, 520 "Heartbeat Failure encountered, chip reset " 521 "required.\n"); 522 523 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 524 } 525 } 526 527 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 528 ql_log(ql_log_info, vha, 0x506a, 529 "IDC Device-State changed = 0x%x.\n", mb[4]); 530 if (ha->flags.nic_core_reset_owner) 531 return; 532 qla83xx_schedule_work(vha, MBA_IDC_AEN); 533 } 534 } 535 536 int 537 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 538 { 539 struct qla_hw_data *ha = vha->hw; 540 scsi_qla_host_t *vp; 541 uint32_t vp_did; 542 unsigned long flags; 543 int ret = 0; 544 545 if (!ha->num_vhosts) 546 return ret; 547 548 spin_lock_irqsave(&ha->vport_slock, flags); 549 list_for_each_entry(vp, &ha->vp_list, list) { 550 vp_did = vp->d_id.b24; 551 if (vp_did == rscn_entry) { 552 ret = 1; 553 break; 554 } 555 } 556 spin_unlock_irqrestore(&ha->vport_slock, flags); 557 558 return ret; 559 } 560 561 /** 562 * qla2x00_async_event() - Process aynchronous events. 563 * @ha: SCSI driver HA context 564 * @mb: Mailbox registers (0 - 3) 565 */ 566 void 567 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 568 { 569 uint16_t handle_cnt; 570 uint16_t cnt, mbx; 571 uint32_t handles[5]; 572 struct qla_hw_data *ha = vha->hw; 573 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 574 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 575 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 576 uint32_t rscn_entry, host_pid; 577 unsigned long flags; 578 579 /* Setup to process RIO completion. */ 580 handle_cnt = 0; 581 if (IS_CNA_CAPABLE(ha)) 582 goto skip_rio; 583 switch (mb[0]) { 584 case MBA_SCSI_COMPLETION: 585 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 586 handle_cnt = 1; 587 break; 588 case MBA_CMPLT_1_16BIT: 589 handles[0] = mb[1]; 590 handle_cnt = 1; 591 mb[0] = MBA_SCSI_COMPLETION; 592 break; 593 case MBA_CMPLT_2_16BIT: 594 handles[0] = mb[1]; 595 handles[1] = mb[2]; 596 handle_cnt = 2; 597 mb[0] = MBA_SCSI_COMPLETION; 598 break; 599 case MBA_CMPLT_3_16BIT: 600 handles[0] = mb[1]; 601 handles[1] = mb[2]; 602 handles[2] = mb[3]; 603 handle_cnt = 3; 604 mb[0] = MBA_SCSI_COMPLETION; 605 break; 606 case MBA_CMPLT_4_16BIT: 607 handles[0] = mb[1]; 608 handles[1] = mb[2]; 609 handles[2] = mb[3]; 610 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 611 handle_cnt = 4; 612 mb[0] = MBA_SCSI_COMPLETION; 613 break; 614 case MBA_CMPLT_5_16BIT: 615 handles[0] = mb[1]; 616 handles[1] = mb[2]; 617 handles[2] = mb[3]; 618 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 619 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 620 handle_cnt = 5; 621 mb[0] = MBA_SCSI_COMPLETION; 622 break; 623 case MBA_CMPLT_2_32BIT: 624 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 625 handles[1] = le32_to_cpu( 626 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 627 RD_MAILBOX_REG(ha, reg, 6)); 628 handle_cnt = 2; 629 mb[0] = MBA_SCSI_COMPLETION; 630 break; 631 default: 632 break; 633 } 634 skip_rio: 635 switch (mb[0]) { 636 case MBA_SCSI_COMPLETION: /* Fast Post */ 637 if (!vha->flags.online) 638 break; 639 640 for (cnt = 0; cnt < handle_cnt; cnt++) 641 qla2x00_process_completed_request(vha, rsp->req, 642 handles[cnt]); 643 break; 644 645 case MBA_RESET: /* Reset */ 646 ql_dbg(ql_dbg_async, vha, 0x5002, 647 "Asynchronous RESET.\n"); 648 649 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 650 break; 651 652 case MBA_SYSTEM_ERR: /* System Error */ 653 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? 654 RD_REG_WORD(®24->mailbox7) : 0; 655 ql_log(ql_log_warn, vha, 0x5003, 656 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 657 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 658 659 ha->isp_ops->fw_dump(vha, 1); 660 661 if (IS_FWI2_CAPABLE(ha)) { 662 if (mb[1] == 0 && mb[2] == 0) { 663 ql_log(ql_log_fatal, vha, 0x5004, 664 "Unrecoverable Hardware Error: adapter " 665 "marked OFFLINE!\n"); 666 vha->flags.online = 0; 667 vha->device_flags |= DFLG_DEV_FAILED; 668 } else { 669 /* Check to see if MPI timeout occurred */ 670 if ((mbx & MBX_3) && (ha->port_no == 0)) 671 set_bit(MPI_RESET_NEEDED, 672 &vha->dpc_flags); 673 674 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 675 } 676 } else if (mb[1] == 0) { 677 ql_log(ql_log_fatal, vha, 0x5005, 678 "Unrecoverable Hardware Error: adapter marked " 679 "OFFLINE!\n"); 680 vha->flags.online = 0; 681 vha->device_flags |= DFLG_DEV_FAILED; 682 } else 683 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 684 break; 685 686 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 687 ql_log(ql_log_warn, vha, 0x5006, 688 "ISP Request Transfer Error (%x).\n", mb[1]); 689 690 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 691 break; 692 693 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 694 ql_log(ql_log_warn, vha, 0x5007, 695 "ISP Response Transfer Error.\n"); 696 697 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 698 break; 699 700 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 701 ql_dbg(ql_dbg_async, vha, 0x5008, 702 "Asynchronous WAKEUP_THRES.\n"); 703 704 break; 705 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 706 ql_dbg(ql_dbg_async, vha, 0x5009, 707 "LIP occurred (%x).\n", mb[1]); 708 709 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 710 atomic_set(&vha->loop_state, LOOP_DOWN); 711 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 712 qla2x00_mark_all_devices_lost(vha, 1); 713 } 714 715 if (vha->vp_idx) { 716 atomic_set(&vha->vp_state, VP_FAILED); 717 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 718 } 719 720 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 721 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 722 723 vha->flags.management_server_logged_in = 0; 724 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 725 break; 726 727 case MBA_LOOP_UP: /* Loop Up Event */ 728 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 729 ha->link_data_rate = PORT_SPEED_1GB; 730 else 731 ha->link_data_rate = mb[1]; 732 733 ql_dbg(ql_dbg_async, vha, 0x500a, 734 "LOOP UP detected (%s Gbps).\n", 735 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 736 737 vha->flags.management_server_logged_in = 0; 738 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 739 break; 740 741 case MBA_LOOP_DOWN: /* Loop Down Event */ 742 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 743 ? RD_REG_WORD(®24->mailbox4) : 0; 744 mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) 745 : mbx; 746 ql_dbg(ql_dbg_async, vha, 0x500b, 747 "LOOP DOWN detected (%x %x %x %x).\n", 748 mb[1], mb[2], mb[3], mbx); 749 750 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 751 atomic_set(&vha->loop_state, LOOP_DOWN); 752 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 753 vha->device_flags |= DFLG_NO_CABLE; 754 qla2x00_mark_all_devices_lost(vha, 1); 755 } 756 757 if (vha->vp_idx) { 758 atomic_set(&vha->vp_state, VP_FAILED); 759 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 760 } 761 762 vha->flags.management_server_logged_in = 0; 763 ha->link_data_rate = PORT_SPEED_UNKNOWN; 764 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 765 break; 766 767 case MBA_LIP_RESET: /* LIP reset occurred */ 768 ql_dbg(ql_dbg_async, vha, 0x500c, 769 "LIP reset occurred (%x).\n", mb[1]); 770 771 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 772 atomic_set(&vha->loop_state, LOOP_DOWN); 773 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 774 qla2x00_mark_all_devices_lost(vha, 1); 775 } 776 777 if (vha->vp_idx) { 778 atomic_set(&vha->vp_state, VP_FAILED); 779 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 780 } 781 782 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 783 784 ha->operating_mode = LOOP; 785 vha->flags.management_server_logged_in = 0; 786 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 787 break; 788 789 /* case MBA_DCBX_COMPLETE: */ 790 case MBA_POINT_TO_POINT: /* Point-to-Point */ 791 if (IS_QLA2100(ha)) 792 break; 793 794 if (IS_CNA_CAPABLE(ha)) { 795 ql_dbg(ql_dbg_async, vha, 0x500d, 796 "DCBX Completed -- %04x %04x %04x.\n", 797 mb[1], mb[2], mb[3]); 798 if (ha->notify_dcbx_comp && !vha->vp_idx) 799 complete(&ha->dcbx_comp); 800 801 } else 802 ql_dbg(ql_dbg_async, vha, 0x500e, 803 "Asynchronous P2P MODE received.\n"); 804 805 /* 806 * Until there's a transition from loop down to loop up, treat 807 * this as loop down only. 808 */ 809 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 810 atomic_set(&vha->loop_state, LOOP_DOWN); 811 if (!atomic_read(&vha->loop_down_timer)) 812 atomic_set(&vha->loop_down_timer, 813 LOOP_DOWN_TIME); 814 qla2x00_mark_all_devices_lost(vha, 1); 815 } 816 817 if (vha->vp_idx) { 818 atomic_set(&vha->vp_state, VP_FAILED); 819 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 820 } 821 822 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 823 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 824 825 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 826 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 827 828 ha->flags.gpsc_supported = 1; 829 vha->flags.management_server_logged_in = 0; 830 break; 831 832 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 833 if (IS_QLA2100(ha)) 834 break; 835 836 ql_dbg(ql_dbg_async, vha, 0x500f, 837 "Configuration change detected: value=%x.\n", mb[1]); 838 839 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 840 atomic_set(&vha->loop_state, LOOP_DOWN); 841 if (!atomic_read(&vha->loop_down_timer)) 842 atomic_set(&vha->loop_down_timer, 843 LOOP_DOWN_TIME); 844 qla2x00_mark_all_devices_lost(vha, 1); 845 } 846 847 if (vha->vp_idx) { 848 atomic_set(&vha->vp_state, VP_FAILED); 849 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 850 } 851 852 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 853 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 854 break; 855 856 case MBA_PORT_UPDATE: /* Port database update */ 857 /* 858 * Handle only global and vn-port update events 859 * 860 * Relevant inputs: 861 * mb[1] = N_Port handle of changed port 862 * OR 0xffff for global event 863 * mb[2] = New login state 864 * 7 = Port logged out 865 * mb[3] = LSB is vp_idx, 0xff = all vps 866 * 867 * Skip processing if: 868 * Event is global, vp_idx is NOT all vps, 869 * vp_idx does not match 870 * Event is not global, vp_idx does not match 871 */ 872 if (IS_QLA2XXX_MIDTYPE(ha) && 873 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 874 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 875 break; 876 877 /* Global event -- port logout or port unavailable. */ 878 if (mb[1] == 0xffff && mb[2] == 0x7) { 879 ql_dbg(ql_dbg_async, vha, 0x5010, 880 "Port unavailable %04x %04x %04x.\n", 881 mb[1], mb[2], mb[3]); 882 ql_log(ql_log_warn, vha, 0x505e, 883 "Link is offline.\n"); 884 885 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 886 atomic_set(&vha->loop_state, LOOP_DOWN); 887 atomic_set(&vha->loop_down_timer, 888 LOOP_DOWN_TIME); 889 vha->device_flags |= DFLG_NO_CABLE; 890 qla2x00_mark_all_devices_lost(vha, 1); 891 } 892 893 if (vha->vp_idx) { 894 atomic_set(&vha->vp_state, VP_FAILED); 895 fc_vport_set_state(vha->fc_vport, 896 FC_VPORT_FAILED); 897 qla2x00_mark_all_devices_lost(vha, 1); 898 } 899 900 vha->flags.management_server_logged_in = 0; 901 ha->link_data_rate = PORT_SPEED_UNKNOWN; 902 break; 903 } 904 905 /* 906 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 907 * event etc. earlier indicating loop is down) then process 908 * it. Otherwise ignore it and Wait for RSCN to come in. 909 */ 910 atomic_set(&vha->loop_down_timer, 0); 911 if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) { 912 ql_dbg(ql_dbg_async, vha, 0x5011, 913 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 914 mb[1], mb[2], mb[3]); 915 916 qlt_async_event(mb[0], vha, mb); 917 break; 918 } 919 920 ql_dbg(ql_dbg_async, vha, 0x5012, 921 "Port database changed %04x %04x %04x.\n", 922 mb[1], mb[2], mb[3]); 923 ql_log(ql_log_warn, vha, 0x505f, 924 "Link is operational (%s Gbps).\n", 925 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 926 927 /* 928 * Mark all devices as missing so we will login again. 929 */ 930 atomic_set(&vha->loop_state, LOOP_UP); 931 932 qla2x00_mark_all_devices_lost(vha, 1); 933 934 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) 935 set_bit(SCR_PENDING, &vha->dpc_flags); 936 937 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 938 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 939 940 qlt_async_event(mb[0], vha, mb); 941 break; 942 943 case MBA_RSCN_UPDATE: /* State Change Registration */ 944 /* Check if the Vport has issued a SCR */ 945 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 946 break; 947 /* Only handle SCNs for our Vport index. */ 948 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 949 break; 950 951 ql_dbg(ql_dbg_async, vha, 0x5013, 952 "RSCN database changed -- %04x %04x %04x.\n", 953 mb[1], mb[2], mb[3]); 954 955 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 956 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 957 | vha->d_id.b.al_pa; 958 if (rscn_entry == host_pid) { 959 ql_dbg(ql_dbg_async, vha, 0x5014, 960 "Ignoring RSCN update to local host " 961 "port ID (%06x).\n", host_pid); 962 break; 963 } 964 965 /* Ignore reserved bits from RSCN-payload. */ 966 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 967 968 /* Skip RSCNs for virtual ports on the same physical port */ 969 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 970 break; 971 972 atomic_set(&vha->loop_down_timer, 0); 973 vha->flags.management_server_logged_in = 0; 974 975 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 976 set_bit(RSCN_UPDATE, &vha->dpc_flags); 977 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 978 break; 979 980 /* case MBA_RIO_RESPONSE: */ 981 case MBA_ZIO_RESPONSE: 982 ql_dbg(ql_dbg_async, vha, 0x5015, 983 "[R|Z]IO update completion.\n"); 984 985 if (IS_FWI2_CAPABLE(ha)) 986 qla24xx_process_response_queue(vha, rsp); 987 else 988 qla2x00_process_response_queue(rsp); 989 break; 990 991 case MBA_DISCARD_RND_FRAME: 992 ql_dbg(ql_dbg_async, vha, 0x5016, 993 "Discard RND Frame -- %04x %04x %04x.\n", 994 mb[1], mb[2], mb[3]); 995 break; 996 997 case MBA_TRACE_NOTIFICATION: 998 ql_dbg(ql_dbg_async, vha, 0x5017, 999 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1000 break; 1001 1002 case MBA_ISP84XX_ALERT: 1003 ql_dbg(ql_dbg_async, vha, 0x5018, 1004 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1005 mb[1], mb[2], mb[3]); 1006 1007 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1008 switch (mb[1]) { 1009 case A84_PANIC_RECOVERY: 1010 ql_log(ql_log_info, vha, 0x5019, 1011 "Alert 84XX: panic recovery %04x %04x.\n", 1012 mb[2], mb[3]); 1013 break; 1014 case A84_OP_LOGIN_COMPLETE: 1015 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1016 ql_log(ql_log_info, vha, 0x501a, 1017 "Alert 84XX: firmware version %x.\n", 1018 ha->cs84xx->op_fw_version); 1019 break; 1020 case A84_DIAG_LOGIN_COMPLETE: 1021 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1022 ql_log(ql_log_info, vha, 0x501b, 1023 "Alert 84XX: diagnostic firmware version %x.\n", 1024 ha->cs84xx->diag_fw_version); 1025 break; 1026 case A84_GOLD_LOGIN_COMPLETE: 1027 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1028 ha->cs84xx->fw_update = 1; 1029 ql_log(ql_log_info, vha, 0x501c, 1030 "Alert 84XX: gold firmware version %x.\n", 1031 ha->cs84xx->gold_fw_version); 1032 break; 1033 default: 1034 ql_log(ql_log_warn, vha, 0x501d, 1035 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1036 mb[1], mb[2], mb[3]); 1037 } 1038 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1039 break; 1040 case MBA_DCBX_START: 1041 ql_dbg(ql_dbg_async, vha, 0x501e, 1042 "DCBX Started -- %04x %04x %04x.\n", 1043 mb[1], mb[2], mb[3]); 1044 break; 1045 case MBA_DCBX_PARAM_UPDATE: 1046 ql_dbg(ql_dbg_async, vha, 0x501f, 1047 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1048 mb[1], mb[2], mb[3]); 1049 break; 1050 case MBA_FCF_CONF_ERR: 1051 ql_dbg(ql_dbg_async, vha, 0x5020, 1052 "FCF Configuration Error -- %04x %04x %04x.\n", 1053 mb[1], mb[2], mb[3]); 1054 break; 1055 case MBA_IDC_NOTIFY: 1056 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1057 mb[4] = RD_REG_WORD(®24->mailbox4); 1058 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1059 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1060 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1061 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1062 /* 1063 * Extend loop down timer since port is active. 1064 */ 1065 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1066 atomic_set(&vha->loop_down_timer, 1067 LOOP_DOWN_TIME); 1068 qla2xxx_wake_dpc(vha); 1069 } 1070 } 1071 case MBA_IDC_COMPLETE: 1072 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1073 complete(&ha->lb_portup_comp); 1074 /* Fallthru */ 1075 case MBA_IDC_TIME_EXT: 1076 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1077 IS_QLA8044(ha)) 1078 qla81xx_idc_event(vha, mb[0], mb[1]); 1079 break; 1080 1081 case MBA_IDC_AEN: 1082 mb[4] = RD_REG_WORD(®24->mailbox4); 1083 mb[5] = RD_REG_WORD(®24->mailbox5); 1084 mb[6] = RD_REG_WORD(®24->mailbox6); 1085 mb[7] = RD_REG_WORD(®24->mailbox7); 1086 qla83xx_handle_8200_aen(vha, mb); 1087 break; 1088 1089 default: 1090 ql_dbg(ql_dbg_async, vha, 0x5057, 1091 "Unknown AEN:%04x %04x %04x %04x\n", 1092 mb[0], mb[1], mb[2], mb[3]); 1093 } 1094 1095 qlt_async_event(mb[0], vha, mb); 1096 1097 if (!vha->vp_idx && ha->num_vhosts) 1098 qla2x00_alert_all_vps(rsp, mb); 1099 } 1100 1101 /** 1102 * qla2x00_process_completed_request() - Process a Fast Post response. 1103 * @ha: SCSI driver HA context 1104 * @index: SRB index 1105 */ 1106 void 1107 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1108 struct req_que *req, uint32_t index) 1109 { 1110 srb_t *sp; 1111 struct qla_hw_data *ha = vha->hw; 1112 1113 /* Validate handle. */ 1114 if (index >= req->num_outstanding_cmds) { 1115 ql_log(ql_log_warn, vha, 0x3014, 1116 "Invalid SCSI command index (%x).\n", index); 1117 1118 if (IS_P3P_TYPE(ha)) 1119 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1120 else 1121 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1122 return; 1123 } 1124 1125 sp = req->outstanding_cmds[index]; 1126 if (sp) { 1127 /* Free outstanding command slot. */ 1128 req->outstanding_cmds[index] = NULL; 1129 1130 /* Save ISP completion status */ 1131 sp->done(ha, sp, DID_OK << 16); 1132 } else { 1133 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1134 1135 if (IS_P3P_TYPE(ha)) 1136 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1137 else 1138 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1139 } 1140 } 1141 1142 srb_t * 1143 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1144 struct req_que *req, void *iocb) 1145 { 1146 struct qla_hw_data *ha = vha->hw; 1147 sts_entry_t *pkt = iocb; 1148 srb_t *sp = NULL; 1149 uint16_t index; 1150 1151 index = LSW(pkt->handle); 1152 if (index >= req->num_outstanding_cmds) { 1153 ql_log(ql_log_warn, vha, 0x5031, 1154 "Invalid command index (%x).\n", index); 1155 if (IS_P3P_TYPE(ha)) 1156 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1157 else 1158 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1159 goto done; 1160 } 1161 sp = req->outstanding_cmds[index]; 1162 if (!sp) { 1163 ql_log(ql_log_warn, vha, 0x5032, 1164 "Invalid completion handle (%x) -- timed-out.\n", index); 1165 return sp; 1166 } 1167 if (sp->handle != index) { 1168 ql_log(ql_log_warn, vha, 0x5033, 1169 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 1170 return NULL; 1171 } 1172 1173 req->outstanding_cmds[index] = NULL; 1174 1175 done: 1176 return sp; 1177 } 1178 1179 static void 1180 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1181 struct mbx_entry *mbx) 1182 { 1183 const char func[] = "MBX-IOCB"; 1184 const char *type; 1185 fc_port_t *fcport; 1186 srb_t *sp; 1187 struct srb_iocb *lio; 1188 uint16_t *data; 1189 uint16_t status; 1190 1191 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1192 if (!sp) 1193 return; 1194 1195 lio = &sp->u.iocb_cmd; 1196 type = sp->name; 1197 fcport = sp->fcport; 1198 data = lio->u.logio.data; 1199 1200 data[0] = MBS_COMMAND_ERROR; 1201 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1202 QLA_LOGIO_LOGIN_RETRIED : 0; 1203 if (mbx->entry_status) { 1204 ql_dbg(ql_dbg_async, vha, 0x5043, 1205 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1206 "entry-status=%x status=%x state-flag=%x " 1207 "status-flags=%x.\n", type, sp->handle, 1208 fcport->d_id.b.domain, fcport->d_id.b.area, 1209 fcport->d_id.b.al_pa, mbx->entry_status, 1210 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1211 le16_to_cpu(mbx->status_flags)); 1212 1213 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1214 (uint8_t *)mbx, sizeof(*mbx)); 1215 1216 goto logio_done; 1217 } 1218 1219 status = le16_to_cpu(mbx->status); 1220 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1221 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1222 status = 0; 1223 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1224 ql_dbg(ql_dbg_async, vha, 0x5045, 1225 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1226 type, sp->handle, fcport->d_id.b.domain, 1227 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1228 le16_to_cpu(mbx->mb1)); 1229 1230 data[0] = MBS_COMMAND_COMPLETE; 1231 if (sp->type == SRB_LOGIN_CMD) { 1232 fcport->port_type = FCT_TARGET; 1233 if (le16_to_cpu(mbx->mb1) & BIT_0) 1234 fcport->port_type = FCT_INITIATOR; 1235 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1236 fcport->flags |= FCF_FCP2_DEVICE; 1237 } 1238 goto logio_done; 1239 } 1240 1241 data[0] = le16_to_cpu(mbx->mb0); 1242 switch (data[0]) { 1243 case MBS_PORT_ID_USED: 1244 data[1] = le16_to_cpu(mbx->mb1); 1245 break; 1246 case MBS_LOOP_ID_USED: 1247 break; 1248 default: 1249 data[0] = MBS_COMMAND_ERROR; 1250 break; 1251 } 1252 1253 ql_log(ql_log_warn, vha, 0x5046, 1254 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1255 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1256 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1257 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1258 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1259 le16_to_cpu(mbx->mb7)); 1260 1261 logio_done: 1262 sp->done(vha, sp, 0); 1263 } 1264 1265 static void 1266 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1267 sts_entry_t *pkt, int iocb_type) 1268 { 1269 const char func[] = "CT_IOCB"; 1270 const char *type; 1271 srb_t *sp; 1272 struct fc_bsg_job *bsg_job; 1273 uint16_t comp_status; 1274 int res; 1275 1276 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1277 if (!sp) 1278 return; 1279 1280 bsg_job = sp->u.bsg_job; 1281 1282 type = "ct pass-through"; 1283 1284 comp_status = le16_to_cpu(pkt->comp_status); 1285 1286 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1287 * fc payload to the caller 1288 */ 1289 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1290 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1291 1292 if (comp_status != CS_COMPLETE) { 1293 if (comp_status == CS_DATA_UNDERRUN) { 1294 res = DID_OK << 16; 1295 bsg_job->reply->reply_payload_rcv_len = 1296 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1297 1298 ql_log(ql_log_warn, vha, 0x5048, 1299 "CT pass-through-%s error " 1300 "comp_status-status=0x%x total_byte = 0x%x.\n", 1301 type, comp_status, 1302 bsg_job->reply->reply_payload_rcv_len); 1303 } else { 1304 ql_log(ql_log_warn, vha, 0x5049, 1305 "CT pass-through-%s error " 1306 "comp_status-status=0x%x.\n", type, comp_status); 1307 res = DID_ERROR << 16; 1308 bsg_job->reply->reply_payload_rcv_len = 0; 1309 } 1310 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1311 (uint8_t *)pkt, sizeof(*pkt)); 1312 } else { 1313 res = DID_OK << 16; 1314 bsg_job->reply->reply_payload_rcv_len = 1315 bsg_job->reply_payload.payload_len; 1316 bsg_job->reply_len = 0; 1317 } 1318 1319 sp->done(vha, sp, res); 1320 } 1321 1322 static void 1323 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1324 struct sts_entry_24xx *pkt, int iocb_type) 1325 { 1326 const char func[] = "ELS_CT_IOCB"; 1327 const char *type; 1328 srb_t *sp; 1329 struct fc_bsg_job *bsg_job; 1330 uint16_t comp_status; 1331 uint32_t fw_status[3]; 1332 uint8_t* fw_sts_ptr; 1333 int res; 1334 1335 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1336 if (!sp) 1337 return; 1338 bsg_job = sp->u.bsg_job; 1339 1340 type = NULL; 1341 switch (sp->type) { 1342 case SRB_ELS_CMD_RPT: 1343 case SRB_ELS_CMD_HST: 1344 type = "els"; 1345 break; 1346 case SRB_CT_CMD: 1347 type = "ct pass-through"; 1348 break; 1349 default: 1350 ql_dbg(ql_dbg_user, vha, 0x503e, 1351 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 1352 return; 1353 } 1354 1355 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1356 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1357 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1358 1359 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1360 * fc payload to the caller 1361 */ 1362 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1363 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1364 1365 if (comp_status != CS_COMPLETE) { 1366 if (comp_status == CS_DATA_UNDERRUN) { 1367 res = DID_OK << 16; 1368 bsg_job->reply->reply_payload_rcv_len = 1369 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); 1370 1371 ql_dbg(ql_dbg_user, vha, 0x503f, 1372 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1373 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1374 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1375 le16_to_cpu(((struct els_sts_entry_24xx *) 1376 pkt)->total_byte_count)); 1377 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1378 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1379 } 1380 else { 1381 ql_dbg(ql_dbg_user, vha, 0x5040, 1382 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1383 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1384 type, sp->handle, comp_status, 1385 le16_to_cpu(((struct els_sts_entry_24xx *) 1386 pkt)->error_subcode_1), 1387 le16_to_cpu(((struct els_sts_entry_24xx *) 1388 pkt)->error_subcode_2)); 1389 res = DID_ERROR << 16; 1390 bsg_job->reply->reply_payload_rcv_len = 0; 1391 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1392 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1393 } 1394 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 1395 (uint8_t *)pkt, sizeof(*pkt)); 1396 } 1397 else { 1398 res = DID_OK << 16; 1399 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1400 bsg_job->reply_len = 0; 1401 } 1402 1403 sp->done(vha, sp, res); 1404 } 1405 1406 static void 1407 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1408 struct logio_entry_24xx *logio) 1409 { 1410 const char func[] = "LOGIO-IOCB"; 1411 const char *type; 1412 fc_port_t *fcport; 1413 srb_t *sp; 1414 struct srb_iocb *lio; 1415 uint16_t *data; 1416 uint32_t iop[2]; 1417 1418 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1419 if (!sp) 1420 return; 1421 1422 lio = &sp->u.iocb_cmd; 1423 type = sp->name; 1424 fcport = sp->fcport; 1425 data = lio->u.logio.data; 1426 1427 data[0] = MBS_COMMAND_ERROR; 1428 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1429 QLA_LOGIO_LOGIN_RETRIED : 0; 1430 if (logio->entry_status) { 1431 ql_log(ql_log_warn, fcport->vha, 0x5034, 1432 "Async-%s error entry - hdl=%x" 1433 "portid=%02x%02x%02x entry-status=%x.\n", 1434 type, sp->handle, fcport->d_id.b.domain, 1435 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1436 logio->entry_status); 1437 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 1438 (uint8_t *)logio, sizeof(*logio)); 1439 1440 goto logio_done; 1441 } 1442 1443 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1444 ql_dbg(ql_dbg_async, fcport->vha, 0x5036, 1445 "Async-%s complete - hdl=%x portid=%02x%02x%02x " 1446 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1447 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1448 le32_to_cpu(logio->io_parameter[0])); 1449 1450 data[0] = MBS_COMMAND_COMPLETE; 1451 if (sp->type != SRB_LOGIN_CMD) 1452 goto logio_done; 1453 1454 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1455 if (iop[0] & BIT_4) { 1456 fcport->port_type = FCT_TARGET; 1457 if (iop[0] & BIT_8) 1458 fcport->flags |= FCF_FCP2_DEVICE; 1459 } else if (iop[0] & BIT_5) 1460 fcport->port_type = FCT_INITIATOR; 1461 1462 if (iop[0] & BIT_7) 1463 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1464 1465 if (logio->io_parameter[7] || logio->io_parameter[8]) 1466 fcport->supported_classes |= FC_COS_CLASS2; 1467 if (logio->io_parameter[9] || logio->io_parameter[10]) 1468 fcport->supported_classes |= FC_COS_CLASS3; 1469 1470 goto logio_done; 1471 } 1472 1473 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1474 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1475 switch (iop[0]) { 1476 case LSC_SCODE_PORTID_USED: 1477 data[0] = MBS_PORT_ID_USED; 1478 data[1] = LSW(iop[1]); 1479 break; 1480 case LSC_SCODE_NPORT_USED: 1481 data[0] = MBS_LOOP_ID_USED; 1482 break; 1483 default: 1484 data[0] = MBS_COMMAND_ERROR; 1485 break; 1486 } 1487 1488 ql_dbg(ql_dbg_async, fcport->vha, 0x5037, 1489 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " 1490 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1491 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1492 le16_to_cpu(logio->comp_status), 1493 le32_to_cpu(logio->io_parameter[0]), 1494 le32_to_cpu(logio->io_parameter[1])); 1495 1496 logio_done: 1497 sp->done(vha, sp, 0); 1498 } 1499 1500 static void 1501 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 1502 { 1503 const char func[] = "TMF-IOCB"; 1504 const char *type; 1505 fc_port_t *fcport; 1506 srb_t *sp; 1507 struct srb_iocb *iocb; 1508 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1509 1510 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1511 if (!sp) 1512 return; 1513 1514 iocb = &sp->u.iocb_cmd; 1515 type = sp->name; 1516 fcport = sp->fcport; 1517 iocb->u.tmf.data = QLA_SUCCESS; 1518 1519 if (sts->entry_status) { 1520 ql_log(ql_log_warn, fcport->vha, 0x5038, 1521 "Async-%s error - hdl=%x entry-status(%x).\n", 1522 type, sp->handle, sts->entry_status); 1523 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1524 } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) { 1525 ql_log(ql_log_warn, fcport->vha, 0x5039, 1526 "Async-%s error - hdl=%x completion status(%x).\n", 1527 type, sp->handle, sts->comp_status); 1528 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1529 } else if ((le16_to_cpu(sts->scsi_status) & 1530 SS_RESPONSE_INFO_LEN_VALID)) { 1531 if (le32_to_cpu(sts->rsp_data_len) < 4) { 1532 ql_log(ql_log_warn, fcport->vha, 0x503b, 1533 "Async-%s error - hdl=%x not enough response(%d).\n", 1534 type, sp->handle, sts->rsp_data_len); 1535 } else if (sts->data[3]) { 1536 ql_log(ql_log_warn, fcport->vha, 0x503c, 1537 "Async-%s error - hdl=%x response(%x).\n", 1538 type, sp->handle, sts->data[3]); 1539 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1540 } 1541 } 1542 1543 if (iocb->u.tmf.data != QLA_SUCCESS) 1544 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1545 (uint8_t *)sts, sizeof(*sts)); 1546 1547 sp->done(vha, sp, 0); 1548 } 1549 1550 /** 1551 * qla2x00_process_response_queue() - Process response queue entries. 1552 * @ha: SCSI driver HA context 1553 */ 1554 void 1555 qla2x00_process_response_queue(struct rsp_que *rsp) 1556 { 1557 struct scsi_qla_host *vha; 1558 struct qla_hw_data *ha = rsp->hw; 1559 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1560 sts_entry_t *pkt; 1561 uint16_t handle_cnt; 1562 uint16_t cnt; 1563 1564 vha = pci_get_drvdata(ha->pdev); 1565 1566 if (!vha->flags.online) 1567 return; 1568 1569 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1570 pkt = (sts_entry_t *)rsp->ring_ptr; 1571 1572 rsp->ring_index++; 1573 if (rsp->ring_index == rsp->length) { 1574 rsp->ring_index = 0; 1575 rsp->ring_ptr = rsp->ring; 1576 } else { 1577 rsp->ring_ptr++; 1578 } 1579 1580 if (pkt->entry_status != 0) { 1581 qla2x00_error_entry(vha, rsp, pkt); 1582 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1583 wmb(); 1584 continue; 1585 } 1586 1587 switch (pkt->entry_type) { 1588 case STATUS_TYPE: 1589 qla2x00_status_entry(vha, rsp, pkt); 1590 break; 1591 case STATUS_TYPE_21: 1592 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1593 for (cnt = 0; cnt < handle_cnt; cnt++) { 1594 qla2x00_process_completed_request(vha, rsp->req, 1595 ((sts21_entry_t *)pkt)->handle[cnt]); 1596 } 1597 break; 1598 case STATUS_TYPE_22: 1599 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1600 for (cnt = 0; cnt < handle_cnt; cnt++) { 1601 qla2x00_process_completed_request(vha, rsp->req, 1602 ((sts22_entry_t *)pkt)->handle[cnt]); 1603 } 1604 break; 1605 case STATUS_CONT_TYPE: 1606 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1607 break; 1608 case MBX_IOCB_TYPE: 1609 qla2x00_mbx_iocb_entry(vha, rsp->req, 1610 (struct mbx_entry *)pkt); 1611 break; 1612 case CT_IOCB_TYPE: 1613 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1614 break; 1615 default: 1616 /* Type Not Supported. */ 1617 ql_log(ql_log_warn, vha, 0x504a, 1618 "Received unknown response pkt type %x " 1619 "entry status=%x.\n", 1620 pkt->entry_type, pkt->entry_status); 1621 break; 1622 } 1623 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1624 wmb(); 1625 } 1626 1627 /* Adjust ring index */ 1628 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 1629 } 1630 1631 static inline void 1632 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1633 uint32_t sense_len, struct rsp_que *rsp, int res) 1634 { 1635 struct scsi_qla_host *vha = sp->fcport->vha; 1636 struct scsi_cmnd *cp = GET_CMD_SP(sp); 1637 uint32_t track_sense_len; 1638 1639 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1640 sense_len = SCSI_SENSE_BUFFERSIZE; 1641 1642 SET_CMD_SENSE_LEN(sp, sense_len); 1643 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 1644 track_sense_len = sense_len; 1645 1646 if (sense_len > par_sense_len) 1647 sense_len = par_sense_len; 1648 1649 memcpy(cp->sense_buffer, sense_data, sense_len); 1650 1651 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 1652 track_sense_len -= sense_len; 1653 SET_CMD_SENSE_LEN(sp, track_sense_len); 1654 1655 if (track_sense_len != 0) { 1656 rsp->status_srb = sp; 1657 cp->result = res; 1658 } 1659 1660 if (sense_len) { 1661 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 1662 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n", 1663 sp->fcport->vha->host_no, cp->device->id, cp->device->lun, 1664 cp); 1665 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1666 cp->sense_buffer, sense_len); 1667 } 1668 } 1669 1670 struct scsi_dif_tuple { 1671 __be16 guard; /* Checksum */ 1672 __be16 app_tag; /* APPL identifier */ 1673 __be32 ref_tag; /* Target LBA or indirect LBA */ 1674 }; 1675 1676 /* 1677 * Checks the guard or meta-data for the type of error 1678 * detected by the HBA. In case of errors, we set the 1679 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1680 * to indicate to the kernel that the HBA detected error. 1681 */ 1682 static inline int 1683 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1684 { 1685 struct scsi_qla_host *vha = sp->fcport->vha; 1686 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1687 uint8_t *ap = &sts24->data[12]; 1688 uint8_t *ep = &sts24->data[20]; 1689 uint32_t e_ref_tag, a_ref_tag; 1690 uint16_t e_app_tag, a_app_tag; 1691 uint16_t e_guard, a_guard; 1692 1693 /* 1694 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1695 * would make guard field appear at offset 2 1696 */ 1697 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1698 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1699 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1700 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1701 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1702 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1703 1704 ql_dbg(ql_dbg_io, vha, 0x3023, 1705 "iocb(s) %p Returned STATUS.\n", sts24); 1706 1707 ql_dbg(ql_dbg_io, vha, 0x3024, 1708 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1709 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1710 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 1711 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1712 a_app_tag, e_app_tag, a_guard, e_guard); 1713 1714 /* 1715 * Ignore sector if: 1716 * For type 3: ref & app tag is all 'f's 1717 * For type 0,1,2: app tag is all 'f's 1718 */ 1719 if ((a_app_tag == 0xffff) && 1720 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1721 (a_ref_tag == 0xffffffff))) { 1722 uint32_t blocks_done, resid; 1723 sector_t lba_s = scsi_get_lba(cmd); 1724 1725 /* 2TB boundary case covered automatically with this */ 1726 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1727 1728 resid = scsi_bufflen(cmd) - (blocks_done * 1729 cmd->device->sector_size); 1730 1731 scsi_set_resid(cmd, resid); 1732 cmd->result = DID_OK << 16; 1733 1734 /* Update protection tag */ 1735 if (scsi_prot_sg_count(cmd)) { 1736 uint32_t i, j = 0, k = 0, num_ent; 1737 struct scatterlist *sg; 1738 struct sd_dif_tuple *spt; 1739 1740 /* Patch the corresponding protection tags */ 1741 scsi_for_each_prot_sg(cmd, sg, 1742 scsi_prot_sg_count(cmd), i) { 1743 num_ent = sg_dma_len(sg) / 8; 1744 if (k + num_ent < blocks_done) { 1745 k += num_ent; 1746 continue; 1747 } 1748 j = blocks_done - k - 1; 1749 k = blocks_done; 1750 break; 1751 } 1752 1753 if (k != blocks_done) { 1754 ql_log(ql_log_warn, vha, 0x302f, 1755 "unexpected tag values tag:lba=%x:%llx)\n", 1756 e_ref_tag, (unsigned long long)lba_s); 1757 return 1; 1758 } 1759 1760 spt = page_address(sg_page(sg)) + sg->offset; 1761 spt += j; 1762 1763 spt->app_tag = 0xffff; 1764 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1765 spt->ref_tag = 0xffffffff; 1766 } 1767 1768 return 0; 1769 } 1770 1771 /* check guard */ 1772 if (e_guard != a_guard) { 1773 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1774 0x10, 0x1); 1775 set_driver_byte(cmd, DRIVER_SENSE); 1776 set_host_byte(cmd, DID_ABORT); 1777 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1778 return 1; 1779 } 1780 1781 /* check ref tag */ 1782 if (e_ref_tag != a_ref_tag) { 1783 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1784 0x10, 0x3); 1785 set_driver_byte(cmd, DRIVER_SENSE); 1786 set_host_byte(cmd, DID_ABORT); 1787 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1788 return 1; 1789 } 1790 1791 /* check appl tag */ 1792 if (e_app_tag != a_app_tag) { 1793 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1794 0x10, 0x2); 1795 set_driver_byte(cmd, DRIVER_SENSE); 1796 set_host_byte(cmd, DID_ABORT); 1797 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1798 return 1; 1799 } 1800 1801 return 1; 1802 } 1803 1804 static void 1805 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 1806 struct req_que *req, uint32_t index) 1807 { 1808 struct qla_hw_data *ha = vha->hw; 1809 srb_t *sp; 1810 uint16_t comp_status; 1811 uint16_t scsi_status; 1812 uint16_t thread_id; 1813 uint32_t rval = EXT_STATUS_OK; 1814 struct fc_bsg_job *bsg_job = NULL; 1815 sts_entry_t *sts; 1816 struct sts_entry_24xx *sts24; 1817 sts = (sts_entry_t *) pkt; 1818 sts24 = (struct sts_entry_24xx *) pkt; 1819 1820 /* Validate handle. */ 1821 if (index >= req->num_outstanding_cmds) { 1822 ql_log(ql_log_warn, vha, 0x70af, 1823 "Invalid SCSI completion handle 0x%x.\n", index); 1824 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1825 return; 1826 } 1827 1828 sp = req->outstanding_cmds[index]; 1829 if (sp) { 1830 /* Free outstanding command slot. */ 1831 req->outstanding_cmds[index] = NULL; 1832 bsg_job = sp->u.bsg_job; 1833 } else { 1834 ql_log(ql_log_warn, vha, 0x70b0, 1835 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 1836 req->id, index); 1837 1838 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1839 return; 1840 } 1841 1842 if (IS_FWI2_CAPABLE(ha)) { 1843 comp_status = le16_to_cpu(sts24->comp_status); 1844 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1845 } else { 1846 comp_status = le16_to_cpu(sts->comp_status); 1847 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1848 } 1849 1850 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1851 switch (comp_status) { 1852 case CS_COMPLETE: 1853 if (scsi_status == 0) { 1854 bsg_job->reply->reply_payload_rcv_len = 1855 bsg_job->reply_payload.payload_len; 1856 vha->qla_stats.input_bytes += 1857 bsg_job->reply->reply_payload_rcv_len; 1858 vha->qla_stats.input_requests++; 1859 rval = EXT_STATUS_OK; 1860 } 1861 goto done; 1862 1863 case CS_DATA_OVERRUN: 1864 ql_dbg(ql_dbg_user, vha, 0x70b1, 1865 "Command completed with date overrun thread_id=%d\n", 1866 thread_id); 1867 rval = EXT_STATUS_DATA_OVERRUN; 1868 break; 1869 1870 case CS_DATA_UNDERRUN: 1871 ql_dbg(ql_dbg_user, vha, 0x70b2, 1872 "Command completed with date underrun thread_id=%d\n", 1873 thread_id); 1874 rval = EXT_STATUS_DATA_UNDERRUN; 1875 break; 1876 case CS_BIDIR_RD_OVERRUN: 1877 ql_dbg(ql_dbg_user, vha, 0x70b3, 1878 "Command completed with read data overrun thread_id=%d\n", 1879 thread_id); 1880 rval = EXT_STATUS_DATA_OVERRUN; 1881 break; 1882 1883 case CS_BIDIR_RD_WR_OVERRUN: 1884 ql_dbg(ql_dbg_user, vha, 0x70b4, 1885 "Command completed with read and write data overrun " 1886 "thread_id=%d\n", thread_id); 1887 rval = EXT_STATUS_DATA_OVERRUN; 1888 break; 1889 1890 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 1891 ql_dbg(ql_dbg_user, vha, 0x70b5, 1892 "Command completed with read data over and write data " 1893 "underrun thread_id=%d\n", thread_id); 1894 rval = EXT_STATUS_DATA_OVERRUN; 1895 break; 1896 1897 case CS_BIDIR_RD_UNDERRUN: 1898 ql_dbg(ql_dbg_user, vha, 0x70b6, 1899 "Command completed with read data data underrun " 1900 "thread_id=%d\n", thread_id); 1901 rval = EXT_STATUS_DATA_UNDERRUN; 1902 break; 1903 1904 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 1905 ql_dbg(ql_dbg_user, vha, 0x70b7, 1906 "Command completed with read data under and write data " 1907 "overrun thread_id=%d\n", thread_id); 1908 rval = EXT_STATUS_DATA_UNDERRUN; 1909 break; 1910 1911 case CS_BIDIR_RD_WR_UNDERRUN: 1912 ql_dbg(ql_dbg_user, vha, 0x70b8, 1913 "Command completed with read and write data underrun " 1914 "thread_id=%d\n", thread_id); 1915 rval = EXT_STATUS_DATA_UNDERRUN; 1916 break; 1917 1918 case CS_BIDIR_DMA: 1919 ql_dbg(ql_dbg_user, vha, 0x70b9, 1920 "Command completed with data DMA error thread_id=%d\n", 1921 thread_id); 1922 rval = EXT_STATUS_DMA_ERR; 1923 break; 1924 1925 case CS_TIMEOUT: 1926 ql_dbg(ql_dbg_user, vha, 0x70ba, 1927 "Command completed with timeout thread_id=%d\n", 1928 thread_id); 1929 rval = EXT_STATUS_TIMEOUT; 1930 break; 1931 default: 1932 ql_dbg(ql_dbg_user, vha, 0x70bb, 1933 "Command completed with completion status=0x%x " 1934 "thread_id=%d\n", comp_status, thread_id); 1935 rval = EXT_STATUS_ERR; 1936 break; 1937 } 1938 bsg_job->reply->reply_payload_rcv_len = 0; 1939 1940 done: 1941 /* Return the vendor specific reply to API */ 1942 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 1943 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1944 /* Always return DID_OK, bsg will send the vendor specific response 1945 * in this case only */ 1946 sp->done(vha, sp, (DID_OK << 6)); 1947 1948 } 1949 1950 /** 1951 * qla2x00_status_entry() - Process a Status IOCB entry. 1952 * @ha: SCSI driver HA context 1953 * @pkt: Entry pointer 1954 */ 1955 static void 1956 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 1957 { 1958 srb_t *sp; 1959 fc_port_t *fcport; 1960 struct scsi_cmnd *cp; 1961 sts_entry_t *sts; 1962 struct sts_entry_24xx *sts24; 1963 uint16_t comp_status; 1964 uint16_t scsi_status; 1965 uint16_t ox_id; 1966 uint8_t lscsi_status; 1967 int32_t resid; 1968 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 1969 fw_resid_len; 1970 uint8_t *rsp_info, *sense_data; 1971 struct qla_hw_data *ha = vha->hw; 1972 uint32_t handle; 1973 uint16_t que; 1974 struct req_que *req; 1975 int logit = 1; 1976 int res = 0; 1977 uint16_t state_flags = 0; 1978 1979 sts = (sts_entry_t *) pkt; 1980 sts24 = (struct sts_entry_24xx *) pkt; 1981 if (IS_FWI2_CAPABLE(ha)) { 1982 comp_status = le16_to_cpu(sts24->comp_status); 1983 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1984 state_flags = le16_to_cpu(sts24->state_flags); 1985 } else { 1986 comp_status = le16_to_cpu(sts->comp_status); 1987 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1988 } 1989 handle = (uint32_t) LSW(sts->handle); 1990 que = MSW(sts->handle); 1991 req = ha->req_q_map[que]; 1992 1993 /* Check for invalid queue pointer */ 1994 if (req == NULL || 1995 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 1996 ql_dbg(ql_dbg_io, vha, 0x3059, 1997 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 1998 "que=%u.\n", sts->handle, req, que); 1999 return; 2000 } 2001 2002 /* Validate handle. */ 2003 if (handle < req->num_outstanding_cmds) 2004 sp = req->outstanding_cmds[handle]; 2005 else 2006 sp = NULL; 2007 2008 if (sp == NULL) { 2009 ql_dbg(ql_dbg_io, vha, 0x3017, 2010 "Invalid status handle (0x%x).\n", sts->handle); 2011 2012 if (IS_P3P_TYPE(ha)) 2013 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2014 else 2015 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2016 qla2xxx_wake_dpc(vha); 2017 return; 2018 } 2019 2020 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2021 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2022 return; 2023 } 2024 2025 /* Task Management completion. */ 2026 if (sp->type == SRB_TM_CMD) { 2027 qla24xx_tm_iocb_entry(vha, req, pkt); 2028 return; 2029 } 2030 2031 /* Fast path completion. */ 2032 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2033 qla2x00_process_completed_request(vha, req, handle); 2034 2035 return; 2036 } 2037 2038 req->outstanding_cmds[handle] = NULL; 2039 cp = GET_CMD_SP(sp); 2040 if (cp == NULL) { 2041 ql_dbg(ql_dbg_io, vha, 0x3018, 2042 "Command already returned (0x%x/%p).\n", 2043 sts->handle, sp); 2044 2045 return; 2046 } 2047 2048 lscsi_status = scsi_status & STATUS_MASK; 2049 2050 fcport = sp->fcport; 2051 2052 ox_id = 0; 2053 sense_len = par_sense_len = rsp_info_len = resid_len = 2054 fw_resid_len = 0; 2055 if (IS_FWI2_CAPABLE(ha)) { 2056 if (scsi_status & SS_SENSE_LEN_VALID) 2057 sense_len = le32_to_cpu(sts24->sense_len); 2058 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2059 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2060 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2061 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2062 if (comp_status == CS_DATA_UNDERRUN) 2063 fw_resid_len = le32_to_cpu(sts24->residual_len); 2064 rsp_info = sts24->data; 2065 sense_data = sts24->data; 2066 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2067 ox_id = le16_to_cpu(sts24->ox_id); 2068 par_sense_len = sizeof(sts24->data); 2069 } else { 2070 if (scsi_status & SS_SENSE_LEN_VALID) 2071 sense_len = le16_to_cpu(sts->req_sense_length); 2072 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2073 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2074 resid_len = le32_to_cpu(sts->residual_length); 2075 rsp_info = sts->rsp_info; 2076 sense_data = sts->req_sense_data; 2077 par_sense_len = sizeof(sts->req_sense_data); 2078 } 2079 2080 /* Check for any FCP transport errors. */ 2081 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2082 /* Sense data lies beyond any FCP RESPONSE data. */ 2083 if (IS_FWI2_CAPABLE(ha)) { 2084 sense_data += rsp_info_len; 2085 par_sense_len -= rsp_info_len; 2086 } 2087 if (rsp_info_len > 3 && rsp_info[3]) { 2088 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2089 "FCP I/O protocol failure (0x%x/0x%x).\n", 2090 rsp_info_len, rsp_info[3]); 2091 2092 res = DID_BUS_BUSY << 16; 2093 goto out; 2094 } 2095 } 2096 2097 /* Check for overrun. */ 2098 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 2099 scsi_status & SS_RESIDUAL_OVER) 2100 comp_status = CS_DATA_OVERRUN; 2101 2102 /* 2103 * Based on Host and scsi status generate status code for Linux 2104 */ 2105 switch (comp_status) { 2106 case CS_COMPLETE: 2107 case CS_QUEUE_FULL: 2108 if (scsi_status == 0) { 2109 res = DID_OK << 16; 2110 break; 2111 } 2112 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 2113 resid = resid_len; 2114 scsi_set_resid(cp, resid); 2115 2116 if (!lscsi_status && 2117 ((unsigned)(scsi_bufflen(cp) - resid) < 2118 cp->underflow)) { 2119 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 2120 "Mid-layer underflow " 2121 "detected (0x%x of 0x%x bytes).\n", 2122 resid, scsi_bufflen(cp)); 2123 2124 res = DID_ERROR << 16; 2125 break; 2126 } 2127 } 2128 res = DID_OK << 16 | lscsi_status; 2129 2130 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2131 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 2132 "QUEUE FULL detected.\n"); 2133 break; 2134 } 2135 logit = 0; 2136 if (lscsi_status != SS_CHECK_CONDITION) 2137 break; 2138 2139 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2140 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2141 break; 2142 2143 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2144 rsp, res); 2145 break; 2146 2147 case CS_DATA_UNDERRUN: 2148 /* Use F/W calculated residual length. */ 2149 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 2150 scsi_set_resid(cp, resid); 2151 if (scsi_status & SS_RESIDUAL_UNDER) { 2152 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 2153 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 2154 "Dropped frame(s) detected " 2155 "(0x%x of 0x%x bytes).\n", 2156 resid, scsi_bufflen(cp)); 2157 2158 res = DID_ERROR << 16 | lscsi_status; 2159 goto check_scsi_status; 2160 } 2161 2162 if (!lscsi_status && 2163 ((unsigned)(scsi_bufflen(cp) - resid) < 2164 cp->underflow)) { 2165 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 2166 "Mid-layer underflow " 2167 "detected (0x%x of 0x%x bytes).\n", 2168 resid, scsi_bufflen(cp)); 2169 2170 res = DID_ERROR << 16; 2171 break; 2172 } 2173 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 2174 lscsi_status != SAM_STAT_BUSY) { 2175 /* 2176 * scsi status of task set and busy are considered to be 2177 * task not completed. 2178 */ 2179 2180 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 2181 "Dropped frame(s) detected (0x%x " 2182 "of 0x%x bytes).\n", resid, 2183 scsi_bufflen(cp)); 2184 2185 res = DID_ERROR << 16 | lscsi_status; 2186 goto check_scsi_status; 2187 } else { 2188 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 2189 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2190 scsi_status, lscsi_status); 2191 } 2192 2193 res = DID_OK << 16 | lscsi_status; 2194 logit = 0; 2195 2196 check_scsi_status: 2197 /* 2198 * Check to see if SCSI Status is non zero. If so report SCSI 2199 * Status. 2200 */ 2201 if (lscsi_status != 0) { 2202 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2203 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 2204 "QUEUE FULL detected.\n"); 2205 logit = 1; 2206 break; 2207 } 2208 if (lscsi_status != SS_CHECK_CONDITION) 2209 break; 2210 2211 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2212 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2213 break; 2214 2215 qla2x00_handle_sense(sp, sense_data, par_sense_len, 2216 sense_len, rsp, res); 2217 } 2218 break; 2219 2220 case CS_PORT_LOGGED_OUT: 2221 case CS_PORT_CONFIG_CHG: 2222 case CS_PORT_BUSY: 2223 case CS_INCOMPLETE: 2224 case CS_PORT_UNAVAILABLE: 2225 case CS_TIMEOUT: 2226 case CS_RESET: 2227 2228 /* 2229 * We are going to have the fc class block the rport 2230 * while we try to recover so instruct the mid layer 2231 * to requeue until the class decides how to handle this. 2232 */ 2233 res = DID_TRANSPORT_DISRUPTED << 16; 2234 2235 if (comp_status == CS_TIMEOUT) { 2236 if (IS_FWI2_CAPABLE(ha)) 2237 break; 2238 else if ((le16_to_cpu(sts->status_flags) & 2239 SF_LOGOUT_SENT) == 0) 2240 break; 2241 } 2242 2243 ql_dbg(ql_dbg_io, fcport->vha, 0x3021, 2244 "Port to be marked lost on fcport=%02x%02x%02x, current " 2245 "port state= %s.\n", fcport->d_id.b.domain, 2246 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2247 port_state_str[atomic_read(&fcport->state)]); 2248 2249 if (atomic_read(&fcport->state) == FCS_ONLINE) 2250 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2251 break; 2252 2253 case CS_ABORTED: 2254 res = DID_RESET << 16; 2255 break; 2256 2257 case CS_DIF_ERROR: 2258 logit = qla2x00_handle_dif_error(sp, sts24); 2259 res = cp->result; 2260 break; 2261 2262 case CS_TRANSPORT: 2263 res = DID_ERROR << 16; 2264 2265 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 2266 break; 2267 2268 if (state_flags & BIT_4) 2269 scmd_printk(KERN_WARNING, cp, 2270 "Unsupported device '%s' found.\n", 2271 cp->device->vendor); 2272 break; 2273 2274 default: 2275 res = DID_ERROR << 16; 2276 break; 2277 } 2278 2279 out: 2280 if (logit) 2281 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 2282 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d " 2283 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " 2284 "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n", 2285 comp_status, scsi_status, res, vha->host_no, 2286 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 2287 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 2288 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 2289 resid_len, fw_resid_len); 2290 2291 if (rsp->status_srb == NULL) 2292 sp->done(ha, sp, res); 2293 } 2294 2295 /** 2296 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 2297 * @ha: SCSI driver HA context 2298 * @pkt: Entry pointer 2299 * 2300 * Extended sense data. 2301 */ 2302 static void 2303 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 2304 { 2305 uint8_t sense_sz = 0; 2306 struct qla_hw_data *ha = rsp->hw; 2307 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 2308 srb_t *sp = rsp->status_srb; 2309 struct scsi_cmnd *cp; 2310 uint32_t sense_len; 2311 uint8_t *sense_ptr; 2312 2313 if (!sp || !GET_CMD_SENSE_LEN(sp)) 2314 return; 2315 2316 sense_len = GET_CMD_SENSE_LEN(sp); 2317 sense_ptr = GET_CMD_SENSE_PTR(sp); 2318 2319 cp = GET_CMD_SP(sp); 2320 if (cp == NULL) { 2321 ql_log(ql_log_warn, vha, 0x3025, 2322 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 2323 2324 rsp->status_srb = NULL; 2325 return; 2326 } 2327 2328 if (sense_len > sizeof(pkt->data)) 2329 sense_sz = sizeof(pkt->data); 2330 else 2331 sense_sz = sense_len; 2332 2333 /* Move sense data. */ 2334 if (IS_FWI2_CAPABLE(ha)) 2335 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 2336 memcpy(sense_ptr, pkt->data, sense_sz); 2337 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 2338 sense_ptr, sense_sz); 2339 2340 sense_len -= sense_sz; 2341 sense_ptr += sense_sz; 2342 2343 SET_CMD_SENSE_PTR(sp, sense_ptr); 2344 SET_CMD_SENSE_LEN(sp, sense_len); 2345 2346 /* Place command on done queue. */ 2347 if (sense_len == 0) { 2348 rsp->status_srb = NULL; 2349 sp->done(ha, sp, cp->result); 2350 } 2351 } 2352 2353 /** 2354 * qla2x00_error_entry() - Process an error entry. 2355 * @ha: SCSI driver HA context 2356 * @pkt: Entry pointer 2357 */ 2358 static void 2359 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 2360 { 2361 srb_t *sp; 2362 struct qla_hw_data *ha = vha->hw; 2363 const char func[] = "ERROR-IOCB"; 2364 uint16_t que = MSW(pkt->handle); 2365 struct req_que *req = NULL; 2366 int res = DID_ERROR << 16; 2367 2368 ql_dbg(ql_dbg_async, vha, 0x502a, 2369 "type of error status in response: 0x%x\n", pkt->entry_status); 2370 2371 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 2372 goto fatal; 2373 2374 req = ha->req_q_map[que]; 2375 2376 if (pkt->entry_status & RF_BUSY) 2377 res = DID_BUS_BUSY << 16; 2378 2379 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2380 if (sp) { 2381 sp->done(ha, sp, res); 2382 return; 2383 } 2384 fatal: 2385 ql_log(ql_log_warn, vha, 0x5030, 2386 "Error entry - invalid handle/queue.\n"); 2387 2388 if (IS_P3P_TYPE(ha)) 2389 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2390 else 2391 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2392 qla2xxx_wake_dpc(vha); 2393 } 2394 2395 /** 2396 * qla24xx_mbx_completion() - Process mailbox command completions. 2397 * @ha: SCSI driver HA context 2398 * @mb0: Mailbox0 register 2399 */ 2400 static void 2401 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 2402 { 2403 uint16_t cnt; 2404 uint32_t mboxes; 2405 uint16_t __iomem *wptr; 2406 struct qla_hw_data *ha = vha->hw; 2407 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2408 2409 /* Read all mbox registers? */ 2410 mboxes = (1 << ha->mbx_count) - 1; 2411 if (!ha->mcp) 2412 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 2413 else 2414 mboxes = ha->mcp->in_mb; 2415 2416 /* Load return mailbox registers. */ 2417 ha->flags.mbox_int = 1; 2418 ha->mailbox_out[0] = mb0; 2419 mboxes >>= 1; 2420 wptr = (uint16_t __iomem *)®->mailbox1; 2421 2422 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2423 if (mboxes & BIT_0) 2424 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 2425 2426 mboxes >>= 1; 2427 wptr++; 2428 } 2429 } 2430 2431 static void 2432 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2433 struct abort_entry_24xx *pkt) 2434 { 2435 const char func[] = "ABT_IOCB"; 2436 srb_t *sp; 2437 struct srb_iocb *abt; 2438 2439 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2440 if (!sp) 2441 return; 2442 2443 abt = &sp->u.iocb_cmd; 2444 abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); 2445 sp->done(vha, sp, 0); 2446 } 2447 2448 /** 2449 * qla24xx_process_response_queue() - Process response queue entries. 2450 * @ha: SCSI driver HA context 2451 */ 2452 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2453 struct rsp_que *rsp) 2454 { 2455 struct sts_entry_24xx *pkt; 2456 struct qla_hw_data *ha = vha->hw; 2457 2458 if (!vha->flags.online) 2459 return; 2460 2461 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2462 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2463 2464 rsp->ring_index++; 2465 if (rsp->ring_index == rsp->length) { 2466 rsp->ring_index = 0; 2467 rsp->ring_ptr = rsp->ring; 2468 } else { 2469 rsp->ring_ptr++; 2470 } 2471 2472 if (pkt->entry_status != 0) { 2473 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2474 2475 (void)qlt_24xx_process_response_error(vha, pkt); 2476 2477 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2478 wmb(); 2479 continue; 2480 } 2481 2482 switch (pkt->entry_type) { 2483 case STATUS_TYPE: 2484 qla2x00_status_entry(vha, rsp, pkt); 2485 break; 2486 case STATUS_CONT_TYPE: 2487 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2488 break; 2489 case VP_RPT_ID_IOCB_TYPE: 2490 qla24xx_report_id_acquisition(vha, 2491 (struct vp_rpt_id_entry_24xx *)pkt); 2492 break; 2493 case LOGINOUT_PORT_IOCB_TYPE: 2494 qla24xx_logio_entry(vha, rsp->req, 2495 (struct logio_entry_24xx *)pkt); 2496 break; 2497 case CT_IOCB_TYPE: 2498 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2499 break; 2500 case ELS_IOCB_TYPE: 2501 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2502 break; 2503 case ABTS_RECV_24XX: 2504 /* ensure that the ATIO queue is empty */ 2505 qlt_24xx_process_atio_queue(vha); 2506 case ABTS_RESP_24XX: 2507 case CTIO_TYPE7: 2508 case NOTIFY_ACK_TYPE: 2509 qlt_response_pkt_all_vps(vha, (response_t *)pkt); 2510 break; 2511 case MARKER_TYPE: 2512 /* Do nothing in this case, this check is to prevent it 2513 * from falling into default case 2514 */ 2515 break; 2516 case ABORT_IOCB_TYPE: 2517 qla24xx_abort_iocb_entry(vha, rsp->req, 2518 (struct abort_entry_24xx *)pkt); 2519 break; 2520 default: 2521 /* Type Not Supported. */ 2522 ql_dbg(ql_dbg_async, vha, 0x5042, 2523 "Received unknown response pkt type %x " 2524 "entry status=%x.\n", 2525 pkt->entry_type, pkt->entry_status); 2526 break; 2527 } 2528 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2529 wmb(); 2530 } 2531 2532 /* Adjust ring index */ 2533 if (IS_P3P_TYPE(ha)) { 2534 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2535 WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); 2536 } else 2537 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2538 } 2539 2540 static void 2541 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2542 { 2543 int rval; 2544 uint32_t cnt; 2545 struct qla_hw_data *ha = vha->hw; 2546 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2547 2548 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 2549 !IS_QLA27XX(ha)) 2550 return; 2551 2552 rval = QLA_SUCCESS; 2553 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 2554 RD_REG_DWORD(®->iobase_addr); 2555 WRT_REG_DWORD(®->iobase_window, 0x0001); 2556 for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2557 rval == QLA_SUCCESS; cnt--) { 2558 if (cnt) { 2559 WRT_REG_DWORD(®->iobase_window, 0x0001); 2560 udelay(10); 2561 } else 2562 rval = QLA_FUNCTION_TIMEOUT; 2563 } 2564 if (rval == QLA_SUCCESS) 2565 goto next_test; 2566 2567 rval = QLA_SUCCESS; 2568 WRT_REG_DWORD(®->iobase_window, 0x0003); 2569 for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2570 rval == QLA_SUCCESS; cnt--) { 2571 if (cnt) { 2572 WRT_REG_DWORD(®->iobase_window, 0x0003); 2573 udelay(10); 2574 } else 2575 rval = QLA_FUNCTION_TIMEOUT; 2576 } 2577 if (rval != QLA_SUCCESS) 2578 goto done; 2579 2580 next_test: 2581 if (RD_REG_DWORD(®->iobase_c8) & BIT_3) 2582 ql_log(ql_log_info, vha, 0x504c, 2583 "Additional code -- 0x55AA.\n"); 2584 2585 done: 2586 WRT_REG_DWORD(®->iobase_window, 0x0000); 2587 RD_REG_DWORD(®->iobase_window); 2588 } 2589 2590 /** 2591 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 2592 * @irq: 2593 * @dev_id: SCSI driver HA context 2594 * 2595 * Called by system whenever the host adapter generates an interrupt. 2596 * 2597 * Returns handled flag. 2598 */ 2599 irqreturn_t 2600 qla24xx_intr_handler(int irq, void *dev_id) 2601 { 2602 scsi_qla_host_t *vha; 2603 struct qla_hw_data *ha; 2604 struct device_reg_24xx __iomem *reg; 2605 int status; 2606 unsigned long iter; 2607 uint32_t stat; 2608 uint32_t hccr; 2609 uint16_t mb[8]; 2610 struct rsp_que *rsp; 2611 unsigned long flags; 2612 2613 rsp = (struct rsp_que *) dev_id; 2614 if (!rsp) { 2615 ql_log(ql_log_info, NULL, 0x5059, 2616 "%s: NULL response queue pointer.\n", __func__); 2617 return IRQ_NONE; 2618 } 2619 2620 ha = rsp->hw; 2621 reg = &ha->iobase->isp24; 2622 status = 0; 2623 2624 if (unlikely(pci_channel_offline(ha->pdev))) 2625 return IRQ_HANDLED; 2626 2627 spin_lock_irqsave(&ha->hardware_lock, flags); 2628 vha = pci_get_drvdata(ha->pdev); 2629 for (iter = 50; iter--; ) { 2630 stat = RD_REG_DWORD(®->host_status); 2631 if (qla2x00_check_reg_for_disconnect(vha, stat)) 2632 break; 2633 if (stat & HSRX_RISC_PAUSED) { 2634 if (unlikely(pci_channel_offline(ha->pdev))) 2635 break; 2636 2637 hccr = RD_REG_DWORD(®->hccr); 2638 2639 ql_log(ql_log_warn, vha, 0x504b, 2640 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2641 hccr); 2642 2643 qla2xxx_check_risc_status(vha); 2644 2645 ha->isp_ops->fw_dump(vha, 1); 2646 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2647 break; 2648 } else if ((stat & HSRX_RISC_INT) == 0) 2649 break; 2650 2651 switch (stat & 0xff) { 2652 case INTR_ROM_MB_SUCCESS: 2653 case INTR_ROM_MB_FAILED: 2654 case INTR_MB_SUCCESS: 2655 case INTR_MB_FAILED: 2656 qla24xx_mbx_completion(vha, MSW(stat)); 2657 status |= MBX_INTERRUPT; 2658 2659 break; 2660 case INTR_ASYNC_EVENT: 2661 mb[0] = MSW(stat); 2662 mb[1] = RD_REG_WORD(®->mailbox1); 2663 mb[2] = RD_REG_WORD(®->mailbox2); 2664 mb[3] = RD_REG_WORD(®->mailbox3); 2665 qla2x00_async_event(vha, rsp, mb); 2666 break; 2667 case INTR_RSP_QUE_UPDATE: 2668 case INTR_RSP_QUE_UPDATE_83XX: 2669 qla24xx_process_response_queue(vha, rsp); 2670 break; 2671 case INTR_ATIO_QUE_UPDATE: 2672 qlt_24xx_process_atio_queue(vha); 2673 break; 2674 case INTR_ATIO_RSP_QUE_UPDATE: 2675 qlt_24xx_process_atio_queue(vha); 2676 qla24xx_process_response_queue(vha, rsp); 2677 break; 2678 default: 2679 ql_dbg(ql_dbg_async, vha, 0x504f, 2680 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2681 break; 2682 } 2683 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2684 RD_REG_DWORD_RELAXED(®->hccr); 2685 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2686 ndelay(3500); 2687 } 2688 qla2x00_handle_mbx_completion(ha, status); 2689 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2690 2691 return IRQ_HANDLED; 2692 } 2693 2694 static irqreturn_t 2695 qla24xx_msix_rsp_q(int irq, void *dev_id) 2696 { 2697 struct qla_hw_data *ha; 2698 struct rsp_que *rsp; 2699 struct device_reg_24xx __iomem *reg; 2700 struct scsi_qla_host *vha; 2701 unsigned long flags; 2702 uint32_t stat = 0; 2703 2704 rsp = (struct rsp_que *) dev_id; 2705 if (!rsp) { 2706 ql_log(ql_log_info, NULL, 0x505a, 2707 "%s: NULL response queue pointer.\n", __func__); 2708 return IRQ_NONE; 2709 } 2710 ha = rsp->hw; 2711 reg = &ha->iobase->isp24; 2712 2713 spin_lock_irqsave(&ha->hardware_lock, flags); 2714 2715 vha = pci_get_drvdata(ha->pdev); 2716 /* 2717 * Use host_status register to check to PCI disconnection before we 2718 * we process the response queue. 2719 */ 2720 stat = RD_REG_DWORD(®->host_status); 2721 if (qla2x00_check_reg_for_disconnect(vha, stat)) 2722 goto out; 2723 qla24xx_process_response_queue(vha, rsp); 2724 if (!ha->flags.disable_msix_handshake) { 2725 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2726 RD_REG_DWORD_RELAXED(®->hccr); 2727 } 2728 out: 2729 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2730 2731 return IRQ_HANDLED; 2732 } 2733 2734 static irqreturn_t 2735 qla25xx_msix_rsp_q(int irq, void *dev_id) 2736 { 2737 struct qla_hw_data *ha; 2738 scsi_qla_host_t *vha; 2739 struct rsp_que *rsp; 2740 struct device_reg_24xx __iomem *reg; 2741 unsigned long flags; 2742 uint32_t hccr = 0; 2743 2744 rsp = (struct rsp_que *) dev_id; 2745 if (!rsp) { 2746 ql_log(ql_log_info, NULL, 0x505b, 2747 "%s: NULL response queue pointer.\n", __func__); 2748 return IRQ_NONE; 2749 } 2750 ha = rsp->hw; 2751 vha = pci_get_drvdata(ha->pdev); 2752 2753 /* Clear the interrupt, if enabled, for this response queue */ 2754 if (!ha->flags.disable_msix_handshake) { 2755 reg = &ha->iobase->isp24; 2756 spin_lock_irqsave(&ha->hardware_lock, flags); 2757 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2758 hccr = RD_REG_DWORD_RELAXED(®->hccr); 2759 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2760 } 2761 if (qla2x00_check_reg_for_disconnect(vha, hccr)) 2762 goto out; 2763 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2764 2765 out: 2766 return IRQ_HANDLED; 2767 } 2768 2769 static irqreturn_t 2770 qla24xx_msix_default(int irq, void *dev_id) 2771 { 2772 scsi_qla_host_t *vha; 2773 struct qla_hw_data *ha; 2774 struct rsp_que *rsp; 2775 struct device_reg_24xx __iomem *reg; 2776 int status; 2777 uint32_t stat; 2778 uint32_t hccr; 2779 uint16_t mb[8]; 2780 unsigned long flags; 2781 2782 rsp = (struct rsp_que *) dev_id; 2783 if (!rsp) { 2784 ql_log(ql_log_info, NULL, 0x505c, 2785 "%s: NULL response queue pointer.\n", __func__); 2786 return IRQ_NONE; 2787 } 2788 ha = rsp->hw; 2789 reg = &ha->iobase->isp24; 2790 status = 0; 2791 2792 spin_lock_irqsave(&ha->hardware_lock, flags); 2793 vha = pci_get_drvdata(ha->pdev); 2794 do { 2795 stat = RD_REG_DWORD(®->host_status); 2796 if (qla2x00_check_reg_for_disconnect(vha, stat)) 2797 break; 2798 if (stat & HSRX_RISC_PAUSED) { 2799 if (unlikely(pci_channel_offline(ha->pdev))) 2800 break; 2801 2802 hccr = RD_REG_DWORD(®->hccr); 2803 2804 ql_log(ql_log_info, vha, 0x5050, 2805 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2806 hccr); 2807 2808 qla2xxx_check_risc_status(vha); 2809 2810 ha->isp_ops->fw_dump(vha, 1); 2811 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2812 break; 2813 } else if ((stat & HSRX_RISC_INT) == 0) 2814 break; 2815 2816 switch (stat & 0xff) { 2817 case INTR_ROM_MB_SUCCESS: 2818 case INTR_ROM_MB_FAILED: 2819 case INTR_MB_SUCCESS: 2820 case INTR_MB_FAILED: 2821 qla24xx_mbx_completion(vha, MSW(stat)); 2822 status |= MBX_INTERRUPT; 2823 2824 break; 2825 case INTR_ASYNC_EVENT: 2826 mb[0] = MSW(stat); 2827 mb[1] = RD_REG_WORD(®->mailbox1); 2828 mb[2] = RD_REG_WORD(®->mailbox2); 2829 mb[3] = RD_REG_WORD(®->mailbox3); 2830 qla2x00_async_event(vha, rsp, mb); 2831 break; 2832 case INTR_RSP_QUE_UPDATE: 2833 case INTR_RSP_QUE_UPDATE_83XX: 2834 qla24xx_process_response_queue(vha, rsp); 2835 break; 2836 case INTR_ATIO_QUE_UPDATE: 2837 qlt_24xx_process_atio_queue(vha); 2838 break; 2839 case INTR_ATIO_RSP_QUE_UPDATE: 2840 qlt_24xx_process_atio_queue(vha); 2841 qla24xx_process_response_queue(vha, rsp); 2842 break; 2843 default: 2844 ql_dbg(ql_dbg_async, vha, 0x5051, 2845 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2846 break; 2847 } 2848 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2849 } while (0); 2850 qla2x00_handle_mbx_completion(ha, status); 2851 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2852 2853 return IRQ_HANDLED; 2854 } 2855 2856 /* Interrupt handling helpers. */ 2857 2858 struct qla_init_msix_entry { 2859 const char *name; 2860 irq_handler_t handler; 2861 }; 2862 2863 static struct qla_init_msix_entry msix_entries[3] = { 2864 { "qla2xxx (default)", qla24xx_msix_default }, 2865 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2866 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2867 }; 2868 2869 static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 2870 { "qla2xxx (default)", qla82xx_msix_default }, 2871 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 2872 }; 2873 2874 static struct qla_init_msix_entry qla83xx_msix_entries[3] = { 2875 { "qla2xxx (default)", qla24xx_msix_default }, 2876 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2877 { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, 2878 }; 2879 2880 static void 2881 qla24xx_disable_msix(struct qla_hw_data *ha) 2882 { 2883 int i; 2884 struct qla_msix_entry *qentry; 2885 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2886 2887 for (i = 0; i < ha->msix_count; i++) { 2888 qentry = &ha->msix_entries[i]; 2889 if (qentry->have_irq) 2890 free_irq(qentry->vector, qentry->rsp); 2891 } 2892 pci_disable_msix(ha->pdev); 2893 kfree(ha->msix_entries); 2894 ha->msix_entries = NULL; 2895 ha->flags.msix_enabled = 0; 2896 ql_dbg(ql_dbg_init, vha, 0x0042, 2897 "Disabled the MSI.\n"); 2898 } 2899 2900 static int 2901 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 2902 { 2903 #define MIN_MSIX_COUNT 2 2904 #define ATIO_VECTOR 2 2905 int i, ret; 2906 struct msix_entry *entries; 2907 struct qla_msix_entry *qentry; 2908 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 2909 2910 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 2911 GFP_KERNEL); 2912 if (!entries) { 2913 ql_log(ql_log_warn, vha, 0x00bc, 2914 "Failed to allocate memory for msix_entry.\n"); 2915 return -ENOMEM; 2916 } 2917 2918 for (i = 0; i < ha->msix_count; i++) 2919 entries[i].entry = i; 2920 2921 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2922 if (ret) { 2923 if (ret < MIN_MSIX_COUNT) 2924 goto msix_failed; 2925 2926 ql_log(ql_log_warn, vha, 0x00c6, 2927 "MSI-X: Failed to enable support " 2928 "-- %d/%d\n Retry with %d vectors.\n", 2929 ha->msix_count, ret, ret); 2930 ha->msix_count = ret; 2931 ret = pci_enable_msix(ha->pdev, entries, ha->msix_count); 2932 if (ret) { 2933 msix_failed: 2934 ql_log(ql_log_fatal, vha, 0x00c7, 2935 "MSI-X: Failed to enable support, " 2936 "giving up -- %d/%d.\n", 2937 ha->msix_count, ret); 2938 goto msix_out; 2939 } 2940 ha->max_rsp_queues = ha->msix_count - 1; 2941 } 2942 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 2943 ha->msix_count, GFP_KERNEL); 2944 if (!ha->msix_entries) { 2945 ql_log(ql_log_fatal, vha, 0x00c8, 2946 "Failed to allocate memory for ha->msix_entries.\n"); 2947 ret = -ENOMEM; 2948 goto msix_out; 2949 } 2950 ha->flags.msix_enabled = 1; 2951 2952 for (i = 0; i < ha->msix_count; i++) { 2953 qentry = &ha->msix_entries[i]; 2954 qentry->vector = entries[i].vector; 2955 qentry->entry = entries[i].entry; 2956 qentry->have_irq = 0; 2957 qentry->rsp = NULL; 2958 } 2959 2960 /* Enable MSI-X vectors for the base queue */ 2961 for (i = 0; i < 2; i++) { 2962 qentry = &ha->msix_entries[i]; 2963 if (IS_P3P_TYPE(ha)) 2964 ret = request_irq(qentry->vector, 2965 qla82xx_msix_entries[i].handler, 2966 0, qla82xx_msix_entries[i].name, rsp); 2967 else 2968 ret = request_irq(qentry->vector, 2969 msix_entries[i].handler, 2970 0, msix_entries[i].name, rsp); 2971 if (ret) 2972 goto msix_register_fail; 2973 qentry->have_irq = 1; 2974 qentry->rsp = rsp; 2975 rsp->msix = qentry; 2976 } 2977 2978 /* 2979 * If target mode is enable, also request the vector for the ATIO 2980 * queue. 2981 */ 2982 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 2983 qentry = &ha->msix_entries[ATIO_VECTOR]; 2984 ret = request_irq(qentry->vector, 2985 qla83xx_msix_entries[ATIO_VECTOR].handler, 2986 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); 2987 qentry->have_irq = 1; 2988 qentry->rsp = rsp; 2989 rsp->msix = qentry; 2990 } 2991 2992 msix_register_fail: 2993 if (ret) { 2994 ql_log(ql_log_fatal, vha, 0x00cb, 2995 "MSI-X: unable to register handler -- %x/%d.\n", 2996 qentry->vector, ret); 2997 qla24xx_disable_msix(ha); 2998 ha->mqenable = 0; 2999 goto msix_out; 3000 } 3001 3002 /* Enable MSI-X vector for response queue update for queue 0 */ 3003 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3004 if (ha->msixbase && ha->mqiobase && 3005 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3006 ha->mqenable = 1; 3007 } else 3008 if (ha->mqiobase 3009 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3010 ha->mqenable = 1; 3011 ql_dbg(ql_dbg_multiq, vha, 0xc005, 3012 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3013 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3014 ql_dbg(ql_dbg_init, vha, 0x0055, 3015 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3016 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3017 3018 msix_out: 3019 kfree(entries); 3020 return ret; 3021 } 3022 3023 int 3024 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 3025 { 3026 int ret = QLA_FUNCTION_FAILED; 3027 device_reg_t *reg = ha->iobase; 3028 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3029 3030 /* If possible, enable MSI-X. */ 3031 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3032 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && 3033 !IS_QLA27XX(ha)) 3034 goto skip_msi; 3035 3036 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 3037 (ha->pdev->subsystem_device == 0x7040 || 3038 ha->pdev->subsystem_device == 0x7041 || 3039 ha->pdev->subsystem_device == 0x1705)) { 3040 ql_log(ql_log_warn, vha, 0x0034, 3041 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 3042 ha->pdev->subsystem_vendor, 3043 ha->pdev->subsystem_device); 3044 goto skip_msi; 3045 } 3046 3047 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 3048 ql_log(ql_log_warn, vha, 0x0035, 3049 "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", 3050 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 3051 goto skip_msix; 3052 } 3053 3054 ret = qla24xx_enable_msix(ha, rsp); 3055 if (!ret) { 3056 ql_dbg(ql_dbg_init, vha, 0x0036, 3057 "MSI-X: Enabled (0x%X, 0x%X).\n", 3058 ha->chip_revision, ha->fw_attributes); 3059 goto clear_risc_ints; 3060 } 3061 3062 skip_msix: 3063 3064 ql_log(ql_log_info, vha, 0x0037, 3065 "Falling back-to MSI mode -%d.\n", ret); 3066 3067 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3068 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 3069 !IS_QLA27XX(ha)) 3070 goto skip_msi; 3071 3072 ret = pci_enable_msi(ha->pdev); 3073 if (!ret) { 3074 ql_dbg(ql_dbg_init, vha, 0x0038, 3075 "MSI: Enabled.\n"); 3076 ha->flags.msi_enabled = 1; 3077 } else 3078 ql_log(ql_log_warn, vha, 0x0039, 3079 "Falling back-to INTa mode -- %d.\n", ret); 3080 skip_msi: 3081 3082 /* Skip INTx on ISP82xx. */ 3083 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 3084 return QLA_FUNCTION_FAILED; 3085 3086 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 3087 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 3088 QLA2XXX_DRIVER_NAME, rsp); 3089 if (ret) { 3090 ql_log(ql_log_warn, vha, 0x003a, 3091 "Failed to reserve interrupt %d already in use.\n", 3092 ha->pdev->irq); 3093 goto fail; 3094 } else if (!ha->flags.msi_enabled) { 3095 ql_dbg(ql_dbg_init, vha, 0x0125, 3096 "INTa mode: Enabled.\n"); 3097 ha->flags.mr_intr_valid = 1; 3098 } 3099 3100 clear_risc_ints: 3101 3102 spin_lock_irq(&ha->hardware_lock); 3103 if (!IS_FWI2_CAPABLE(ha)) 3104 WRT_REG_WORD(®->isp.semaphore, 0); 3105 spin_unlock_irq(&ha->hardware_lock); 3106 3107 fail: 3108 return ret; 3109 } 3110 3111 void 3112 qla2x00_free_irqs(scsi_qla_host_t *vha) 3113 { 3114 struct qla_hw_data *ha = vha->hw; 3115 struct rsp_que *rsp; 3116 3117 /* 3118 * We need to check that ha->rsp_q_map is valid in case we are called 3119 * from a probe failure context. 3120 */ 3121 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3122 return; 3123 rsp = ha->rsp_q_map[0]; 3124 3125 if (ha->flags.msix_enabled) 3126 qla24xx_disable_msix(ha); 3127 else if (ha->flags.msi_enabled) { 3128 free_irq(ha->pdev->irq, rsp); 3129 pci_disable_msi(ha->pdev); 3130 } else 3131 free_irq(ha->pdev->irq, rsp); 3132 } 3133 3134 3135 int qla25xx_request_irq(struct rsp_que *rsp) 3136 { 3137 struct qla_hw_data *ha = rsp->hw; 3138 struct qla_init_msix_entry *intr = &msix_entries[2]; 3139 struct qla_msix_entry *msix = rsp->msix; 3140 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3141 int ret; 3142 3143 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 3144 if (ret) { 3145 ql_log(ql_log_fatal, vha, 0x00e6, 3146 "MSI-X: Unable to register handler -- %x/%d.\n", 3147 msix->vector, ret); 3148 return ret; 3149 } 3150 msix->have_irq = 1; 3151 msix->rsp = rsp; 3152 return ret; 3153 } 3154