1 /* 2 * QLogic Fibre Channel HBA Driver 3 * Copyright (c) 2003-2014 QLogic Corporation 4 * 5 * See LICENSE.qla2xxx for copyright and licensing details. 6 */ 7 #include "qla_def.h" 8 #include "qla_target.h" 9 10 #include <linux/delay.h> 11 #include <linux/slab.h> 12 #include <scsi/scsi_tcq.h> 13 #include <scsi/scsi_bsg_fc.h> 14 #include <scsi/scsi_eh.h> 15 16 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); 17 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); 18 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); 19 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, 20 sts_entry_t *); 21 static void qla_irq_affinity_notify(struct irq_affinity_notify *, 22 const cpumask_t *); 23 static void qla_irq_affinity_release(struct kref *); 24 25 26 /** 27 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. 28 * @irq: 29 * @dev_id: SCSI driver HA context 30 * 31 * Called by system whenever the host adapter generates an interrupt. 32 * 33 * Returns handled flag. 34 */ 35 irqreturn_t 36 qla2100_intr_handler(int irq, void *dev_id) 37 { 38 scsi_qla_host_t *vha; 39 struct qla_hw_data *ha; 40 struct device_reg_2xxx __iomem *reg; 41 int status; 42 unsigned long iter; 43 uint16_t hccr; 44 uint16_t mb[4]; 45 struct rsp_que *rsp; 46 unsigned long flags; 47 48 rsp = (struct rsp_que *) dev_id; 49 if (!rsp) { 50 ql_log(ql_log_info, NULL, 0x505d, 51 "%s: NULL response queue pointer.\n", __func__); 52 return (IRQ_NONE); 53 } 54 55 ha = rsp->hw; 56 reg = &ha->iobase->isp; 57 status = 0; 58 59 spin_lock_irqsave(&ha->hardware_lock, flags); 60 vha = pci_get_drvdata(ha->pdev); 61 for (iter = 50; iter--; ) { 62 hccr = RD_REG_WORD(®->hccr); 63 if (qla2x00_check_reg16_for_disconnect(vha, hccr)) 64 break; 65 if (hccr & HCCR_RISC_PAUSE) { 66 if (pci_channel_offline(ha->pdev)) 67 break; 68 69 /* 70 * Issue a "HARD" reset in order for the RISC interrupt 71 * bit to be cleared. Schedule a big hammer to get 72 * out of the RISC PAUSED state. 73 */ 74 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 75 RD_REG_WORD(®->hccr); 76 77 ha->isp_ops->fw_dump(vha, 1); 78 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 79 break; 80 } else if ((RD_REG_WORD(®->istatus) & ISR_RISC_INT) == 0) 81 break; 82 83 if (RD_REG_WORD(®->semaphore) & BIT_0) { 84 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 85 RD_REG_WORD(®->hccr); 86 87 /* Get mailbox data. */ 88 mb[0] = RD_MAILBOX_REG(ha, reg, 0); 89 if (mb[0] > 0x3fff && mb[0] < 0x8000) { 90 qla2x00_mbx_completion(vha, mb[0]); 91 status |= MBX_INTERRUPT; 92 } else if (mb[0] > 0x7fff && mb[0] < 0xc000) { 93 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 94 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 95 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 96 qla2x00_async_event(vha, rsp, mb); 97 } else { 98 /*EMPTY*/ 99 ql_dbg(ql_dbg_async, vha, 0x5025, 100 "Unrecognized interrupt type (%d).\n", 101 mb[0]); 102 } 103 /* Release mailbox registers. 
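 * Writing zero to the semaphore register hands the mailbox registers
 * back to the RISC; the read-back that follows is presumably only
 * there to flush the posted PCI write.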
*/ 104 WRT_REG_WORD(®->semaphore, 0); 105 RD_REG_WORD(®->semaphore); 106 } else { 107 qla2x00_process_response_queue(rsp); 108 109 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 110 RD_REG_WORD(®->hccr); 111 } 112 } 113 qla2x00_handle_mbx_completion(ha, status); 114 spin_unlock_irqrestore(&ha->hardware_lock, flags); 115 116 return (IRQ_HANDLED); 117 } 118 119 bool 120 qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) 121 { 122 /* Check for PCI disconnection */ 123 if (reg == 0xffffffff && !pci_channel_offline(vha->hw->pdev)) { 124 if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && 125 !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && 126 !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { 127 /* 128 * Schedule this (only once) on the default system 129 * workqueue so that all the adapter workqueues and the 130 * DPC thread can be shutdown cleanly. 131 */ 132 schedule_work(&vha->hw->board_disable); 133 } 134 return true; 135 } else 136 return false; 137 } 138 139 bool 140 qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg) 141 { 142 return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg); 143 } 144 145 /** 146 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. 147 * @irq: 148 * @dev_id: SCSI driver HA context 149 * 150 * Called by system whenever the host adapter generates an interrupt. 151 * 152 * Returns handled flag. 153 */ 154 irqreturn_t 155 qla2300_intr_handler(int irq, void *dev_id) 156 { 157 scsi_qla_host_t *vha; 158 struct device_reg_2xxx __iomem *reg; 159 int status; 160 unsigned long iter; 161 uint32_t stat; 162 uint16_t hccr; 163 uint16_t mb[4]; 164 struct rsp_que *rsp; 165 struct qla_hw_data *ha; 166 unsigned long flags; 167 168 rsp = (struct rsp_que *) dev_id; 169 if (!rsp) { 170 ql_log(ql_log_info, NULL, 0x5058, 171 "%s: NULL response queue pointer.\n", __func__); 172 return (IRQ_NONE); 173 } 174 175 ha = rsp->hw; 176 reg = &ha->iobase->isp; 177 status = 0; 178 179 spin_lock_irqsave(&ha->hardware_lock, flags); 180 vha = pci_get_drvdata(ha->pdev); 181 for (iter = 50; iter--; ) { 182 stat = RD_REG_DWORD(®->u.isp2300.host_status); 183 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 184 break; 185 if (stat & HSR_RISC_PAUSED) { 186 if (unlikely(pci_channel_offline(ha->pdev))) 187 break; 188 189 hccr = RD_REG_WORD(®->hccr); 190 191 if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) 192 ql_log(ql_log_warn, vha, 0x5026, 193 "Parity error -- HCCR=%x, Dumping " 194 "firmware.\n", hccr); 195 else 196 ql_log(ql_log_warn, vha, 0x5027, 197 "RISC paused -- HCCR=%x, Dumping " 198 "firmware.\n", hccr); 199 200 /* 201 * Issue a "HARD" reset in order for the RISC 202 * interrupt bit to be cleared. Schedule a big 203 * hammer to get out of the RISC PAUSED state. 204 */ 205 WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); 206 RD_REG_WORD(®->hccr); 207 208 ha->isp_ops->fw_dump(vha, 1); 209 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 210 break; 211 } else if ((stat & HSR_RISC_INT) == 0) 212 break; 213 214 switch (stat & 0xff) { 215 case 0x1: 216 case 0x2: 217 case 0x10: 218 case 0x11: 219 qla2x00_mbx_completion(vha, MSW(stat)); 220 status |= MBX_INTERRUPT; 221 222 /* Release mailbox registers. 
*/ 223 WRT_REG_WORD(®->semaphore, 0); 224 break; 225 case 0x12: 226 mb[0] = MSW(stat); 227 mb[1] = RD_MAILBOX_REG(ha, reg, 1); 228 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 229 mb[3] = RD_MAILBOX_REG(ha, reg, 3); 230 qla2x00_async_event(vha, rsp, mb); 231 break; 232 case 0x13: 233 qla2x00_process_response_queue(rsp); 234 break; 235 case 0x15: 236 mb[0] = MBA_CMPLT_1_16BIT; 237 mb[1] = MSW(stat); 238 qla2x00_async_event(vha, rsp, mb); 239 break; 240 case 0x16: 241 mb[0] = MBA_SCSI_COMPLETION; 242 mb[1] = MSW(stat); 243 mb[2] = RD_MAILBOX_REG(ha, reg, 2); 244 qla2x00_async_event(vha, rsp, mb); 245 break; 246 default: 247 ql_dbg(ql_dbg_async, vha, 0x5028, 248 "Unrecognized interrupt type (%d).\n", stat & 0xff); 249 break; 250 } 251 WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); 252 RD_REG_WORD_RELAXED(®->hccr); 253 } 254 qla2x00_handle_mbx_completion(ha, status); 255 spin_unlock_irqrestore(&ha->hardware_lock, flags); 256 257 return (IRQ_HANDLED); 258 } 259 260 /** 261 * qla2x00_mbx_completion() - Process mailbox command completions. 262 * @ha: SCSI driver HA context 263 * @mb0: Mailbox0 register 264 */ 265 static void 266 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 267 { 268 uint16_t cnt; 269 uint32_t mboxes; 270 uint16_t __iomem *wptr; 271 struct qla_hw_data *ha = vha->hw; 272 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 273 274 /* Read all mbox registers? */ 275 mboxes = (1 << ha->mbx_count) - 1; 276 if (!ha->mcp) 277 ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n"); 278 else 279 mboxes = ha->mcp->in_mb; 280 281 /* Load return mailbox registers. */ 282 ha->flags.mbox_int = 1; 283 ha->mailbox_out[0] = mb0; 284 mboxes >>= 1; 285 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); 286 287 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 288 if (IS_QLA2200(ha) && cnt == 8) 289 wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); 290 if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) 291 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); 292 else if (mboxes & BIT_0) 293 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 294 295 wptr++; 296 mboxes >>= 1; 297 } 298 } 299 300 static void 301 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) 302 { 303 static char *event[] = 304 { "Complete", "Request Notification", "Time Extension" }; 305 int rval; 306 struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; 307 struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; 308 uint16_t __iomem *wptr; 309 uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; 310 311 /* Seed data -- mailbox1 -> mailbox7. */ 312 if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) 313 wptr = (uint16_t __iomem *)®24->mailbox1; 314 else if (IS_QLA8044(vha->hw)) 315 wptr = (uint16_t __iomem *)®82->mailbox_out[1]; 316 else 317 return; 318 319 for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) 320 mb[cnt] = RD_REG_WORD(wptr); 321 322 ql_dbg(ql_dbg_async, vha, 0x5021, 323 "Inter-Driver Communication %s -- " 324 "%04x %04x %04x %04x %04x %04x %04x.\n", 325 event[aen & 0xff], mb[0], mb[1], mb[2], mb[3], 326 mb[4], mb[5], mb[6]); 327 switch (aen) { 328 /* Handle IDC Error completion case. */ 329 case MBA_IDC_COMPLETE: 330 if (mb[1] >> 15) { 331 vha->hw->flags.idc_compl_status = 1; 332 if (vha->hw->notify_dcbx_comp && !vha->vp_idx) 333 complete(&vha->hw->dcbx_comp); 334 } 335 break; 336 337 case MBA_IDC_NOTIFY: 338 /* Acknowledgement needed? [Notify && non-zero timeout]. 
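 * The ACK timeout sits in bits 8-11 of the descriptor, so for example
 * descr == 0x0503 yields timeout == 5 and an IDC ACK is posted below,
 * while timeout == 0 means no acknowledgement is expected.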
*/ 339 timeout = (descr >> 8) & 0xf; 340 ql_dbg(ql_dbg_async, vha, 0x5022, 341 "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n", 342 vha->host_no, event[aen & 0xff], timeout); 343 344 if (!timeout) 345 return; 346 rval = qla2x00_post_idc_ack_work(vha, mb); 347 if (rval != QLA_SUCCESS) 348 ql_log(ql_log_warn, vha, 0x5023, 349 "IDC failed to post ACK.\n"); 350 break; 351 case MBA_IDC_TIME_EXT: 352 vha->hw->idc_extend_tmo = descr; 353 ql_dbg(ql_dbg_async, vha, 0x5087, 354 "%lu Inter-Driver Communication %s -- " 355 "Extend timeout by=%d.\n", 356 vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo); 357 break; 358 } 359 } 360 361 #define LS_UNKNOWN 2 362 const char * 363 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) 364 { 365 static const char *const link_speeds[] = { 366 "1", "2", "?", "4", "8", "16", "32", "10" 367 }; 368 #define QLA_LAST_SPEED 7 369 370 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 371 return link_speeds[0]; 372 else if (speed == 0x13) 373 return link_speeds[QLA_LAST_SPEED]; 374 else if (speed < QLA_LAST_SPEED) 375 return link_speeds[speed]; 376 else 377 return link_speeds[LS_UNKNOWN]; 378 } 379 380 static void 381 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) 382 { 383 struct qla_hw_data *ha = vha->hw; 384 385 /* 386 * 8200 AEN Interpretation: 387 * mb[0] = AEN code 388 * mb[1] = AEN Reason code 389 * mb[2] = LSW of Peg-Halt Status-1 Register 390 * mb[6] = MSW of Peg-Halt Status-1 Register 391 * mb[3] = LSW of Peg-Halt Status-2 register 392 * mb[7] = MSW of Peg-Halt Status-2 register 393 * mb[4] = IDC Device-State Register value 394 * mb[5] = IDC Driver-Presence Register value 395 */ 396 ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: " 397 "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n", 398 mb[0], mb[1], mb[2], mb[6]); 399 ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x " 400 "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x " 401 "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]); 402 403 if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE | 404 IDC_HEARTBEAT_FAILURE)) { 405 ha->flags.nic_core_hung = 1; 406 ql_log(ql_log_warn, vha, 0x5060, 407 "83XX: F/W Error Reported: Check if reset required.\n"); 408 409 if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) { 410 uint32_t protocol_engine_id, fw_err_code, err_level; 411 412 /* 413 * IDC_PEG_HALT_STATUS_CHANGE interpretation: 414 * - PEG-Halt Status-1 Register: 415 * (LSW = mb[2], MSW = mb[6]) 416 * Bits 0-7 = protocol-engine ID 417 * Bits 8-28 = f/w error code 418 * Bits 29-31 = Error-level 419 * Error-level 0x1 = Non-Fatal error 420 * Error-level 0x2 = Recoverable Fatal error 421 * Error-level 0x4 = UnRecoverable Fatal error 422 * - PEG-Halt Status-2 Register: 423 * (LSW = mb[3], MSW = mb[7]) 424 */ 425 protocol_engine_id = (mb[2] & 0xff); 426 fw_err_code = (((mb[2] & 0xff00) >> 8) | 427 ((mb[6] & 0x1fff) << 8)); 428 err_level = ((mb[6] & 0xe000) >> 13); 429 ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 " 430 "Register: protocol_engine_id=0x%x " 431 "fw_err_code=0x%x err_level=0x%x.\n", 432 protocol_engine_id, fw_err_code, err_level); 433 ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 " 434 "Register: 0x%x%x.\n", mb[7], mb[3]); 435 if (err_level == ERR_LEVEL_NON_FATAL) { 436 ql_log(ql_log_warn, vha, 0x5063, 437 "Not a fatal error, f/w has recovered " 438 "iteself.\n"); 439 } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) { 440 ql_log(ql_log_fatal, vha, 0x5064, 441 "Recoverable Fatal error: Chip 
reset " 442 "required.\n"); 443 qla83xx_schedule_work(vha, 444 QLA83XX_NIC_CORE_RESET); 445 } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) { 446 ql_log(ql_log_fatal, vha, 0x5065, 447 "Unrecoverable Fatal error: Set FAILED " 448 "state, reboot required.\n"); 449 qla83xx_schedule_work(vha, 450 QLA83XX_NIC_CORE_UNRECOVERABLE); 451 } 452 } 453 454 if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) { 455 uint16_t peg_fw_state, nw_interface_link_up; 456 uint16_t nw_interface_signal_detect, sfp_status; 457 uint16_t htbt_counter, htbt_monitor_enable; 458 uint16_t sfp_additional_info, sfp_multirate; 459 uint16_t sfp_tx_fault, link_speed, dcbx_status; 460 461 /* 462 * IDC_NIC_FW_REPORTED_FAILURE interpretation: 463 * - PEG-to-FC Status Register: 464 * (LSW = mb[2], MSW = mb[6]) 465 * Bits 0-7 = Peg-Firmware state 466 * Bit 8 = N/W Interface Link-up 467 * Bit 9 = N/W Interface signal detected 468 * Bits 10-11 = SFP Status 469 * SFP Status 0x0 = SFP+ transceiver not expected 470 * SFP Status 0x1 = SFP+ transceiver not present 471 * SFP Status 0x2 = SFP+ transceiver invalid 472 * SFP Status 0x3 = SFP+ transceiver present and 473 * valid 474 * Bits 12-14 = Heartbeat Counter 475 * Bit 15 = Heartbeat Monitor Enable 476 * Bits 16-17 = SFP Additional Info 477 * SFP info 0x0 = Unrecognized transceiver for 478 * Ethernet 479 * SFP info 0x1 = SFP+ brand validation failed 480 * SFP info 0x2 = SFP+ speed validation failed 481 * SFP info 0x3 = SFP+ access error 482 * Bit 18 = SFP Multirate 483 * Bit 19 = SFP Tx Fault 484 * Bits 20-22 = Link Speed 485 * Bits 23-27 = Reserved 486 * Bits 28-30 = DCBX Status 487 * DCBX Status 0x0 = DCBX Disabled 488 * DCBX Status 0x1 = DCBX Enabled 489 * DCBX Status 0x2 = DCBX Exchange error 490 * Bit 31 = Reserved 491 */ 492 peg_fw_state = (mb[2] & 0x00ff); 493 nw_interface_link_up = ((mb[2] & 0x0100) >> 8); 494 nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9); 495 sfp_status = ((mb[2] & 0x0c00) >> 10); 496 htbt_counter = ((mb[2] & 0x7000) >> 12); 497 htbt_monitor_enable = ((mb[2] & 0x8000) >> 15); 498 sfp_additional_info = (mb[6] & 0x0003); 499 sfp_multirate = ((mb[6] & 0x0004) >> 2); 500 sfp_tx_fault = ((mb[6] & 0x0008) >> 3); 501 link_speed = ((mb[6] & 0x0070) >> 4); 502 dcbx_status = ((mb[6] & 0x7000) >> 12); 503 504 ql_log(ql_log_warn, vha, 0x5066, 505 "Peg-to-Fc Status Register:\n" 506 "peg_fw_state=0x%x, nw_interface_link_up=0x%x, " 507 "nw_interface_signal_detect=0x%x" 508 "\nsfp_status=0x%x.\n ", peg_fw_state, 509 nw_interface_link_up, nw_interface_signal_detect, 510 sfp_status); 511 ql_log(ql_log_warn, vha, 0x5067, 512 "htbt_counter=0x%x, htbt_monitor_enable=0x%x, " 513 "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n ", 514 htbt_counter, htbt_monitor_enable, 515 sfp_additional_info, sfp_multirate); 516 ql_log(ql_log_warn, vha, 0x5068, 517 "sfp_tx_fault=0x%x, link_speed=0x%x, " 518 "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed, 519 dcbx_status); 520 521 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 522 } 523 524 if (mb[1] & IDC_HEARTBEAT_FAILURE) { 525 ql_log(ql_log_warn, vha, 0x5069, 526 "Heartbeat Failure encountered, chip reset " 527 "required.\n"); 528 529 qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET); 530 } 531 } 532 533 if (mb[1] & IDC_DEVICE_STATE_CHANGE) { 534 ql_log(ql_log_info, vha, 0x506a, 535 "IDC Device-State changed = 0x%x.\n", mb[4]); 536 if (ha->flags.nic_core_reset_owner) 537 return; 538 qla83xx_schedule_work(vha, MBA_IDC_AEN); 539 } 540 } 541 542 int 543 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) 544 { 545 struct qla_hw_data
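/*
 * Helper for the RSCN handling in qla2x00_async_event(): walk
 * ha->vp_list under vport_slock and return 1 if rscn_entry matches the
 * 24-bit port ID of one of this adapter's virtual ports, 0 otherwise.
 */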
*ha = vha->hw; 546 scsi_qla_host_t *vp; 547 uint32_t vp_did; 548 unsigned long flags; 549 int ret = 0; 550 551 if (!ha->num_vhosts) 552 return ret; 553 554 spin_lock_irqsave(&ha->vport_slock, flags); 555 list_for_each_entry(vp, &ha->vp_list, list) { 556 vp_did = vp->d_id.b24; 557 if (vp_did == rscn_entry) { 558 ret = 1; 559 break; 560 } 561 } 562 spin_unlock_irqrestore(&ha->vport_slock, flags); 563 564 return ret; 565 } 566 567 static inline fc_port_t * 568 qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) 569 { 570 fc_port_t *fcport; 571 572 list_for_each_entry(fcport, &vha->vp_fcports, list) 573 if (fcport->loop_id == loop_id) 574 return fcport; 575 return NULL; 576 } 577 578 /** 579 * qla2x00_async_event() - Process aynchronous events. 580 * @ha: SCSI driver HA context 581 * @mb: Mailbox registers (0 - 3) 582 */ 583 void 584 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 585 { 586 uint16_t handle_cnt; 587 uint16_t cnt, mbx; 588 uint32_t handles[5]; 589 struct qla_hw_data *ha = vha->hw; 590 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 591 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; 592 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; 593 uint32_t rscn_entry, host_pid; 594 unsigned long flags; 595 fc_port_t *fcport = NULL; 596 597 /* Setup to process RIO completion. */ 598 handle_cnt = 0; 599 if (IS_CNA_CAPABLE(ha)) 600 goto skip_rio; 601 switch (mb[0]) { 602 case MBA_SCSI_COMPLETION: 603 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 604 handle_cnt = 1; 605 break; 606 case MBA_CMPLT_1_16BIT: 607 handles[0] = mb[1]; 608 handle_cnt = 1; 609 mb[0] = MBA_SCSI_COMPLETION; 610 break; 611 case MBA_CMPLT_2_16BIT: 612 handles[0] = mb[1]; 613 handles[1] = mb[2]; 614 handle_cnt = 2; 615 mb[0] = MBA_SCSI_COMPLETION; 616 break; 617 case MBA_CMPLT_3_16BIT: 618 handles[0] = mb[1]; 619 handles[1] = mb[2]; 620 handles[2] = mb[3]; 621 handle_cnt = 3; 622 mb[0] = MBA_SCSI_COMPLETION; 623 break; 624 case MBA_CMPLT_4_16BIT: 625 handles[0] = mb[1]; 626 handles[1] = mb[2]; 627 handles[2] = mb[3]; 628 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 629 handle_cnt = 4; 630 mb[0] = MBA_SCSI_COMPLETION; 631 break; 632 case MBA_CMPLT_5_16BIT: 633 handles[0] = mb[1]; 634 handles[1] = mb[2]; 635 handles[2] = mb[3]; 636 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6); 637 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7); 638 handle_cnt = 5; 639 mb[0] = MBA_SCSI_COMPLETION; 640 break; 641 case MBA_CMPLT_2_32BIT: 642 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); 643 handles[1] = le32_to_cpu( 644 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | 645 RD_MAILBOX_REG(ha, reg, 6)); 646 handle_cnt = 2; 647 mb[0] = MBA_SCSI_COMPLETION; 648 break; 649 default: 650 break; 651 } 652 skip_rio: 653 switch (mb[0]) { 654 case MBA_SCSI_COMPLETION: /* Fast Post */ 655 if (!vha->flags.online) 656 break; 657 658 for (cnt = 0; cnt < handle_cnt; cnt++) 659 qla2x00_process_completed_request(vha, rsp->req, 660 handles[cnt]); 661 break; 662 663 case MBA_RESET: /* Reset */ 664 ql_dbg(ql_dbg_async, vha, 0x5002, 665 "Asynchronous RESET.\n"); 666 667 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 668 break; 669 670 case MBA_SYSTEM_ERR: /* System Error */ 671 mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? 
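/*
 * mailbox7 carries extra system-error detail on ISPs that provide it;
 * its MBX_3 bit is checked below to flag an MPI timeout on port 0.
 */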
672 RD_REG_WORD(®24->mailbox7) : 0; 673 ql_log(ql_log_warn, vha, 0x5003, 674 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " 675 "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); 676 677 ha->isp_ops->fw_dump(vha, 1); 678 679 if (IS_FWI2_CAPABLE(ha)) { 680 if (mb[1] == 0 && mb[2] == 0) { 681 ql_log(ql_log_fatal, vha, 0x5004, 682 "Unrecoverable Hardware Error: adapter " 683 "marked OFFLINE!\n"); 684 vha->flags.online = 0; 685 vha->device_flags |= DFLG_DEV_FAILED; 686 } else { 687 /* Check to see if MPI timeout occurred */ 688 if ((mbx & MBX_3) && (ha->port_no == 0)) 689 set_bit(MPI_RESET_NEEDED, 690 &vha->dpc_flags); 691 692 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 693 } 694 } else if (mb[1] == 0) { 695 ql_log(ql_log_fatal, vha, 0x5005, 696 "Unrecoverable Hardware Error: adapter marked " 697 "OFFLINE!\n"); 698 vha->flags.online = 0; 699 vha->device_flags |= DFLG_DEV_FAILED; 700 } else 701 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 702 break; 703 704 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */ 705 ql_log(ql_log_warn, vha, 0x5006, 706 "ISP Request Transfer Error (%x).\n", mb[1]); 707 708 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 709 break; 710 711 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */ 712 ql_log(ql_log_warn, vha, 0x5007, 713 "ISP Response Transfer Error.\n"); 714 715 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 716 break; 717 718 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 719 ql_dbg(ql_dbg_async, vha, 0x5008, 720 "Asynchronous WAKEUP_THRES.\n"); 721 722 break; 723 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 724 ql_dbg(ql_dbg_async, vha, 0x5009, 725 "LIP occurred (%x).\n", mb[1]); 726 727 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 728 atomic_set(&vha->loop_state, LOOP_DOWN); 729 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 730 qla2x00_mark_all_devices_lost(vha, 1); 731 } 732 733 if (vha->vp_idx) { 734 atomic_set(&vha->vp_state, VP_FAILED); 735 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 736 } 737 738 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 739 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 740 741 vha->flags.management_server_logged_in = 0; 742 qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]); 743 break; 744 745 case MBA_LOOP_UP: /* Loop Up Event */ 746 if (IS_QLA2100(ha) || IS_QLA2200(ha)) 747 ha->link_data_rate = PORT_SPEED_1GB; 748 else 749 ha->link_data_rate = mb[1]; 750 751 ql_log(ql_log_info, vha, 0x500a, 752 "LOOP UP detected (%s Gbps).\n", 753 qla2x00_get_link_speed_str(ha, ha->link_data_rate)); 754 755 vha->flags.management_server_logged_in = 0; 756 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 757 break; 758 759 case MBA_LOOP_DOWN: /* Loop Down Event */ 760 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) 761 ? RD_REG_WORD(®24->mailbox4) : 0; 762 mbx = (IS_P3P_TYPE(ha)) ? 
RD_REG_WORD(®82->mailbox_out[4]) 763 : mbx; 764 ql_log(ql_log_info, vha, 0x500b, 765 "LOOP DOWN detected (%x %x %x %x).\n", 766 mb[1], mb[2], mb[3], mbx); 767 768 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 769 atomic_set(&vha->loop_state, LOOP_DOWN); 770 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 771 /* 772 * In case of loop down, restore WWPN from 773 * NVRAM in case of FA-WWPN capable ISP 774 * Restore for Physical Port only 775 */ 776 if (!vha->vp_idx) { 777 if (ha->flags.fawwpn_enabled) { 778 void *wwpn = ha->init_cb->port_name; 779 memcpy(vha->port_name, wwpn, WWN_SIZE); 780 fc_host_port_name(vha->host) = 781 wwn_to_u64(vha->port_name); 782 ql_dbg(ql_dbg_init + ql_dbg_verbose, 783 vha, 0x0144, "LOOP DOWN detected," 784 "restore WWPN %016llx\n", 785 wwn_to_u64(vha->port_name)); 786 } 787 788 clear_bit(VP_CONFIG_OK, &vha->vp_flags); 789 } 790 791 vha->device_flags |= DFLG_NO_CABLE; 792 qla2x00_mark_all_devices_lost(vha, 1); 793 } 794 795 if (vha->vp_idx) { 796 atomic_set(&vha->vp_state, VP_FAILED); 797 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 798 } 799 800 vha->flags.management_server_logged_in = 0; 801 ha->link_data_rate = PORT_SPEED_UNKNOWN; 802 qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); 803 break; 804 805 case MBA_LIP_RESET: /* LIP reset occurred */ 806 ql_dbg(ql_dbg_async, vha, 0x500c, 807 "LIP reset occurred (%x).\n", mb[1]); 808 809 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 810 atomic_set(&vha->loop_state, LOOP_DOWN); 811 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 812 qla2x00_mark_all_devices_lost(vha, 1); 813 } 814 815 if (vha->vp_idx) { 816 atomic_set(&vha->vp_state, VP_FAILED); 817 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 818 } 819 820 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 821 822 ha->operating_mode = LOOP; 823 vha->flags.management_server_logged_in = 0; 824 qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]); 825 break; 826 827 /* case MBA_DCBX_COMPLETE: */ 828 case MBA_POINT_TO_POINT: /* Point-to-Point */ 829 if (IS_QLA2100(ha)) 830 break; 831 832 if (IS_CNA_CAPABLE(ha)) { 833 ql_dbg(ql_dbg_async, vha, 0x500d, 834 "DCBX Completed -- %04x %04x %04x.\n", 835 mb[1], mb[2], mb[3]); 836 if (ha->notify_dcbx_comp && !vha->vp_idx) 837 complete(&ha->dcbx_comp); 838 839 } else 840 ql_dbg(ql_dbg_async, vha, 0x500e, 841 "Asynchronous P2P MODE received.\n"); 842 843 /* 844 * Until there's a transition from loop down to loop up, treat 845 * this as loop down only. 
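 * Loop state is raised again once the firmware reports the new
 * topology through a PORT UPDATE (the MBA_PORT_UPDATE case below sets
 * LOOP_UP and schedules a loop resync).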
846 */ 847 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 848 atomic_set(&vha->loop_state, LOOP_DOWN); 849 if (!atomic_read(&vha->loop_down_timer)) 850 atomic_set(&vha->loop_down_timer, 851 LOOP_DOWN_TIME); 852 qla2x00_mark_all_devices_lost(vha, 1); 853 } 854 855 if (vha->vp_idx) { 856 atomic_set(&vha->vp_state, VP_FAILED); 857 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 858 } 859 860 if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))) 861 set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); 862 863 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags); 864 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags); 865 866 ha->flags.gpsc_supported = 1; 867 vha->flags.management_server_logged_in = 0; 868 break; 869 870 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */ 871 if (IS_QLA2100(ha)) 872 break; 873 874 ql_dbg(ql_dbg_async, vha, 0x500f, 875 "Configuration change detected: value=%x.\n", mb[1]); 876 877 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 878 atomic_set(&vha->loop_state, LOOP_DOWN); 879 if (!atomic_read(&vha->loop_down_timer)) 880 atomic_set(&vha->loop_down_timer, 881 LOOP_DOWN_TIME); 882 qla2x00_mark_all_devices_lost(vha, 1); 883 } 884 885 if (vha->vp_idx) { 886 atomic_set(&vha->vp_state, VP_FAILED); 887 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); 888 } 889 890 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 891 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 892 break; 893 894 case MBA_PORT_UPDATE: /* Port database update */ 895 /* 896 * Handle only global and vn-port update events 897 * 898 * Relevant inputs: 899 * mb[1] = N_Port handle of changed port 900 * OR 0xffff for global event 901 * mb[2] = New login state 902 * 7 = Port logged out 903 * mb[3] = LSB is vp_idx, 0xff = all vps 904 * 905 * Skip processing if: 906 * Event is global, vp_idx is NOT all vps, 907 * vp_idx does not match 908 * Event is not global, vp_idx does not match 909 */ 910 if (IS_QLA2XXX_MIDTYPE(ha) && 911 ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) || 912 (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff)) 913 break; 914 915 if (mb[2] == 0x7) { 916 ql_dbg(ql_dbg_async, vha, 0x5010, 917 "Port %s %04x %04x %04x.\n", 918 mb[1] == 0xffff ? "unavailable" : "logout", 919 mb[1], mb[2], mb[3]); 920 921 if (mb[1] == 0xffff) 922 goto global_port_update; 923 924 /* Port logout */ 925 fcport = qla2x00_find_fcport_by_loopid(vha, mb[1]); 926 if (!fcport) 927 break; 928 if (atomic_read(&fcport->state) != FCS_ONLINE) 929 break; 930 ql_dbg(ql_dbg_async, vha, 0x508a, 931 "Marking port lost loopid=%04x portid=%06x.\n", 932 fcport->loop_id, fcport->d_id.b24); 933 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 934 break; 935 936 global_port_update: 937 /* Port unavailable. */ 938 ql_log(ql_log_warn, vha, 0x505e, 939 "Link is offline.\n"); 940 941 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 942 atomic_set(&vha->loop_state, LOOP_DOWN); 943 atomic_set(&vha->loop_down_timer, 944 LOOP_DOWN_TIME); 945 vha->device_flags |= DFLG_NO_CABLE; 946 qla2x00_mark_all_devices_lost(vha, 1); 947 } 948 949 if (vha->vp_idx) { 950 atomic_set(&vha->vp_state, VP_FAILED); 951 fc_vport_set_state(vha->fc_vport, 952 FC_VPORT_FAILED); 953 qla2x00_mark_all_devices_lost(vha, 1); 954 } 955 956 vha->flags.management_server_logged_in = 0; 957 ha->link_data_rate = PORT_SPEED_UNKNOWN; 958 break; 959 } 960 961 /* 962 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET 963 * event etc. earlier indicating loop is down) then process 964 * it. Otherwise ignore it and Wait for RSCN to come in. 
965 */ 966 atomic_set(&vha->loop_down_timer, 0); 967 if (atomic_read(&vha->loop_state) != LOOP_DOWN && 968 atomic_read(&vha->loop_state) != LOOP_DEAD) { 969 ql_dbg(ql_dbg_async, vha, 0x5011, 970 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 971 mb[1], mb[2], mb[3]); 972 973 qlt_async_event(mb[0], vha, mb); 974 break; 975 } 976 977 ql_dbg(ql_dbg_async, vha, 0x5012, 978 "Port database changed %04x %04x %04x.\n", 979 mb[1], mb[2], mb[3]); 980 981 /* 982 * Mark all devices as missing so we will login again. 983 */ 984 atomic_set(&vha->loop_state, LOOP_UP); 985 986 qla2x00_mark_all_devices_lost(vha, 1); 987 988 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha)) 989 set_bit(SCR_PENDING, &vha->dpc_flags); 990 991 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 992 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 993 set_bit(VP_CONFIG_OK, &vha->vp_flags); 994 995 qlt_async_event(mb[0], vha, mb); 996 break; 997 998 case MBA_RSCN_UPDATE: /* State Change Registration */ 999 /* Check if the Vport has issued a SCR */ 1000 if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags)) 1001 break; 1002 /* Only handle SCNs for our Vport index. */ 1003 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) 1004 break; 1005 1006 ql_dbg(ql_dbg_async, vha, 0x5013, 1007 "RSCN database changed -- %04x %04x %04x.\n", 1008 mb[1], mb[2], mb[3]); 1009 1010 rscn_entry = ((mb[1] & 0xff) << 16) | mb[2]; 1011 host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8) 1012 | vha->d_id.b.al_pa; 1013 if (rscn_entry == host_pid) { 1014 ql_dbg(ql_dbg_async, vha, 0x5014, 1015 "Ignoring RSCN update to local host " 1016 "port ID (%06x).\n", host_pid); 1017 break; 1018 } 1019 1020 /* Ignore reserved bits from RSCN-payload. */ 1021 rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2]; 1022 1023 /* Skip RSCNs for virtual ports on the same physical port */ 1024 if (qla2x00_is_a_vp_did(vha, rscn_entry)) 1025 break; 1026 1027 /* 1028 * Search for the rport related to this RSCN entry and mark it 1029 * as lost. 
1030 */ 1031 list_for_each_entry(fcport, &vha->vp_fcports, list) { 1032 if (atomic_read(&fcport->state) != FCS_ONLINE) 1033 continue; 1034 if (fcport->d_id.b24 == rscn_entry) { 1035 qla2x00_mark_device_lost(vha, fcport, 0, 0); 1036 break; 1037 } 1038 } 1039 1040 atomic_set(&vha->loop_down_timer, 0); 1041 vha->flags.management_server_logged_in = 0; 1042 1043 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 1044 set_bit(RSCN_UPDATE, &vha->dpc_flags); 1045 qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); 1046 break; 1047 1048 /* case MBA_RIO_RESPONSE: */ 1049 case MBA_ZIO_RESPONSE: 1050 ql_dbg(ql_dbg_async, vha, 0x5015, 1051 "[R|Z]IO update completion.\n"); 1052 1053 if (IS_FWI2_CAPABLE(ha)) 1054 qla24xx_process_response_queue(vha, rsp); 1055 else 1056 qla2x00_process_response_queue(rsp); 1057 break; 1058 1059 case MBA_DISCARD_RND_FRAME: 1060 ql_dbg(ql_dbg_async, vha, 0x5016, 1061 "Discard RND Frame -- %04x %04x %04x.\n", 1062 mb[1], mb[2], mb[3]); 1063 break; 1064 1065 case MBA_TRACE_NOTIFICATION: 1066 ql_dbg(ql_dbg_async, vha, 0x5017, 1067 "Trace Notification -- %04x %04x.\n", mb[1], mb[2]); 1068 break; 1069 1070 case MBA_ISP84XX_ALERT: 1071 ql_dbg(ql_dbg_async, vha, 0x5018, 1072 "ISP84XX Alert Notification -- %04x %04x %04x.\n", 1073 mb[1], mb[2], mb[3]); 1074 1075 spin_lock_irqsave(&ha->cs84xx->access_lock, flags); 1076 switch (mb[1]) { 1077 case A84_PANIC_RECOVERY: 1078 ql_log(ql_log_info, vha, 0x5019, 1079 "Alert 84XX: panic recovery %04x %04x.\n", 1080 mb[2], mb[3]); 1081 break; 1082 case A84_OP_LOGIN_COMPLETE: 1083 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2]; 1084 ql_log(ql_log_info, vha, 0x501a, 1085 "Alert 84XX: firmware version %x.\n", 1086 ha->cs84xx->op_fw_version); 1087 break; 1088 case A84_DIAG_LOGIN_COMPLETE: 1089 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1090 ql_log(ql_log_info, vha, 0x501b, 1091 "Alert 84XX: diagnostic firmware version %x.\n", 1092 ha->cs84xx->diag_fw_version); 1093 break; 1094 case A84_GOLD_LOGIN_COMPLETE: 1095 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2]; 1096 ha->cs84xx->fw_update = 1; 1097 ql_log(ql_log_info, vha, 0x501c, 1098 "Alert 84XX: gold firmware version %x.\n", 1099 ha->cs84xx->gold_fw_version); 1100 break; 1101 default: 1102 ql_log(ql_log_warn, vha, 0x501d, 1103 "Alert 84xx: Invalid Alert %04x %04x %04x.\n", 1104 mb[1], mb[2], mb[3]); 1105 } 1106 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags); 1107 break; 1108 case MBA_DCBX_START: 1109 ql_dbg(ql_dbg_async, vha, 0x501e, 1110 "DCBX Started -- %04x %04x %04x.\n", 1111 mb[1], mb[2], mb[3]); 1112 break; 1113 case MBA_DCBX_PARAM_UPDATE: 1114 ql_dbg(ql_dbg_async, vha, 0x501f, 1115 "DCBX Parameters Updated -- %04x %04x %04x.\n", 1116 mb[1], mb[2], mb[3]); 1117 break; 1118 case MBA_FCF_CONF_ERR: 1119 ql_dbg(ql_dbg_async, vha, 0x5020, 1120 "FCF Configuration Error -- %04x %04x %04x.\n", 1121 mb[1], mb[2], mb[3]); 1122 break; 1123 case MBA_IDC_NOTIFY: 1124 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { 1125 mb[4] = RD_REG_WORD(®24->mailbox4); 1126 if (((mb[2] & 0x7fff) == MBC_PORT_RESET || 1127 (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && 1128 (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { 1129 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 1130 /* 1131 * Extend loop down timer since port is active. 
1132 */ 1133 if (atomic_read(&vha->loop_state) == LOOP_DOWN) 1134 atomic_set(&vha->loop_down_timer, 1135 LOOP_DOWN_TIME); 1136 qla2xxx_wake_dpc(vha); 1137 } 1138 } 1139 case MBA_IDC_COMPLETE: 1140 if (ha->notify_lb_portup_comp && !vha->vp_idx) 1141 complete(&ha->lb_portup_comp); 1142 /* Fallthru */ 1143 case MBA_IDC_TIME_EXT: 1144 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || 1145 IS_QLA8044(ha)) 1146 qla81xx_idc_event(vha, mb[0], mb[1]); 1147 break; 1148 1149 case MBA_IDC_AEN: 1150 mb[4] = RD_REG_WORD(®24->mailbox4); 1151 mb[5] = RD_REG_WORD(®24->mailbox5); 1152 mb[6] = RD_REG_WORD(®24->mailbox6); 1153 mb[7] = RD_REG_WORD(®24->mailbox7); 1154 qla83xx_handle_8200_aen(vha, mb); 1155 break; 1156 1157 case MBA_DPORT_DIAGNOSTICS: 1158 ql_dbg(ql_dbg_async, vha, 0x5052, 1159 "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1], 1160 mb[1] == 0 ? "start" : 1161 mb[1] == 1 ? "done (ok)" : 1162 mb[1] == 2 ? "done (error)" : "other"); 1163 break; 1164 1165 default: 1166 ql_dbg(ql_dbg_async, vha, 0x5057, 1167 "Unknown AEN:%04x %04x %04x %04x\n", 1168 mb[0], mb[1], mb[2], mb[3]); 1169 } 1170 1171 qlt_async_event(mb[0], vha, mb); 1172 1173 if (!vha->vp_idx && ha->num_vhosts) 1174 qla2x00_alert_all_vps(rsp, mb); 1175 } 1176 1177 /** 1178 * qla2x00_process_completed_request() - Process a Fast Post response. 1179 * @ha: SCSI driver HA context 1180 * @index: SRB index 1181 */ 1182 void 1183 qla2x00_process_completed_request(struct scsi_qla_host *vha, 1184 struct req_que *req, uint32_t index) 1185 { 1186 srb_t *sp; 1187 struct qla_hw_data *ha = vha->hw; 1188 1189 /* Validate handle. */ 1190 if (index >= req->num_outstanding_cmds) { 1191 ql_log(ql_log_warn, vha, 0x3014, 1192 "Invalid SCSI command index (%x).\n", index); 1193 1194 if (IS_P3P_TYPE(ha)) 1195 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1196 else 1197 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1198 return; 1199 } 1200 1201 sp = req->outstanding_cmds[index]; 1202 if (sp) { 1203 /* Free outstanding command slot. 
*/ 1204 req->outstanding_cmds[index] = NULL; 1205 1206 /* Save ISP completion status */ 1207 sp->done(ha, sp, DID_OK << 16); 1208 } else { 1209 ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n"); 1210 1211 if (IS_P3P_TYPE(ha)) 1212 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1213 else 1214 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1215 } 1216 } 1217 1218 srb_t * 1219 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, 1220 struct req_que *req, void *iocb) 1221 { 1222 struct qla_hw_data *ha = vha->hw; 1223 sts_entry_t *pkt = iocb; 1224 srb_t *sp = NULL; 1225 uint16_t index; 1226 1227 index = LSW(pkt->handle); 1228 if (index >= req->num_outstanding_cmds) { 1229 ql_log(ql_log_warn, vha, 0x5031, 1230 "Invalid command index (%x).\n", index); 1231 if (IS_P3P_TYPE(ha)) 1232 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 1233 else 1234 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1235 goto done; 1236 } 1237 sp = req->outstanding_cmds[index]; 1238 if (!sp) { 1239 ql_log(ql_log_warn, vha, 0x5032, 1240 "Invalid completion handle (%x) -- timed-out.\n", index); 1241 return sp; 1242 } 1243 if (sp->handle != index) { 1244 ql_log(ql_log_warn, vha, 0x5033, 1245 "SRB handle (%x) mismatch %x.\n", sp->handle, index); 1246 return NULL; 1247 } 1248 1249 req->outstanding_cmds[index] = NULL; 1250 1251 done: 1252 return sp; 1253 } 1254 1255 static void 1256 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 1257 struct mbx_entry *mbx) 1258 { 1259 const char func[] = "MBX-IOCB"; 1260 const char *type; 1261 fc_port_t *fcport; 1262 srb_t *sp; 1263 struct srb_iocb *lio; 1264 uint16_t *data; 1265 uint16_t status; 1266 1267 sp = qla2x00_get_sp_from_handle(vha, func, req, mbx); 1268 if (!sp) 1269 return; 1270 1271 lio = &sp->u.iocb_cmd; 1272 type = sp->name; 1273 fcport = sp->fcport; 1274 data = lio->u.logio.data; 1275 1276 data[0] = MBS_COMMAND_ERROR; 1277 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 
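/*
 * Default to a command error plus the login-retried flag; data[0] and
 * data[1] are overwritten below once the IOCB status is decoded and
 * are handed back to the caller through lio->u.logio.data.
 */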
1278 QLA_LOGIO_LOGIN_RETRIED : 0; 1279 if (mbx->entry_status) { 1280 ql_dbg(ql_dbg_async, vha, 0x5043, 1281 "Async-%s error entry - hdl=%x portid=%02x%02x%02x " 1282 "entry-status=%x status=%x state-flag=%x " 1283 "status-flags=%x.\n", type, sp->handle, 1284 fcport->d_id.b.domain, fcport->d_id.b.area, 1285 fcport->d_id.b.al_pa, mbx->entry_status, 1286 le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags), 1287 le16_to_cpu(mbx->status_flags)); 1288 1289 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, 1290 (uint8_t *)mbx, sizeof(*mbx)); 1291 1292 goto logio_done; 1293 } 1294 1295 status = le16_to_cpu(mbx->status); 1296 if (status == 0x30 && sp->type == SRB_LOGIN_CMD && 1297 le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) 1298 status = 0; 1299 if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) { 1300 ql_dbg(ql_dbg_async, vha, 0x5045, 1301 "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n", 1302 type, sp->handle, fcport->d_id.b.domain, 1303 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1304 le16_to_cpu(mbx->mb1)); 1305 1306 data[0] = MBS_COMMAND_COMPLETE; 1307 if (sp->type == SRB_LOGIN_CMD) { 1308 fcport->port_type = FCT_TARGET; 1309 if (le16_to_cpu(mbx->mb1) & BIT_0) 1310 fcport->port_type = FCT_INITIATOR; 1311 else if (le16_to_cpu(mbx->mb1) & BIT_1) 1312 fcport->flags |= FCF_FCP2_DEVICE; 1313 } 1314 goto logio_done; 1315 } 1316 1317 data[0] = le16_to_cpu(mbx->mb0); 1318 switch (data[0]) { 1319 case MBS_PORT_ID_USED: 1320 data[1] = le16_to_cpu(mbx->mb1); 1321 break; 1322 case MBS_LOOP_ID_USED: 1323 break; 1324 default: 1325 data[0] = MBS_COMMAND_ERROR; 1326 break; 1327 } 1328 1329 ql_log(ql_log_warn, vha, 0x5046, 1330 "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x " 1331 "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle, 1332 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, 1333 status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1), 1334 le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6), 1335 le16_to_cpu(mbx->mb7)); 1336 1337 logio_done: 1338 sp->done(vha, sp, 0); 1339 } 1340 1341 static void 1342 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1343 sts_entry_t *pkt, int iocb_type) 1344 { 1345 const char func[] = "CT_IOCB"; 1346 const char *type; 1347 srb_t *sp; 1348 struct fc_bsg_job *bsg_job; 1349 uint16_t comp_status; 1350 int res; 1351 1352 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1353 if (!sp) 1354 return; 1355 1356 bsg_job = sp->u.bsg_job; 1357 1358 type = "ct pass-through"; 1359 1360 comp_status = le16_to_cpu(pkt->comp_status); 1361 1362 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1363 * fc payload to the caller 1364 */ 1365 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1366 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 1367 1368 if (comp_status != CS_COMPLETE) { 1369 if (comp_status == CS_DATA_UNDERRUN) { 1370 res = DID_OK << 16; 1371 bsg_job->reply->reply_payload_rcv_len = 1372 le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); 1373 1374 ql_log(ql_log_warn, vha, 0x5048, 1375 "CT pass-through-%s error " 1376 "comp_status-status=0x%x total_byte = 0x%x.\n", 1377 type, comp_status, 1378 bsg_job->reply->reply_payload_rcv_len); 1379 } else { 1380 ql_log(ql_log_warn, vha, 0x5049, 1381 "CT pass-through-%s error " 1382 "comp_status-status=0x%x.\n", type, comp_status); 1383 res = DID_ERROR << 16; 1384 bsg_job->reply->reply_payload_rcv_len = 0; 1385 } 1386 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, 1387 (uint8_t *)pkt, sizeof(*pkt)); 1388 } else { 1389 
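/* Successful completion: report the full payload as received. */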
res = DID_OK << 16; 1390 bsg_job->reply->reply_payload_rcv_len = 1391 bsg_job->reply_payload.payload_len; 1392 bsg_job->reply_len = 0; 1393 } 1394 1395 sp->done(vha, sp, res); 1396 } 1397 1398 static void 1399 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, 1400 struct sts_entry_24xx *pkt, int iocb_type) 1401 { 1402 const char func[] = "ELS_CT_IOCB"; 1403 const char *type; 1404 srb_t *sp; 1405 struct fc_bsg_job *bsg_job; 1406 uint16_t comp_status; 1407 uint32_t fw_status[3]; 1408 uint8_t* fw_sts_ptr; 1409 int res; 1410 1411 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 1412 if (!sp) 1413 return; 1414 bsg_job = sp->u.bsg_job; 1415 1416 type = NULL; 1417 switch (sp->type) { 1418 case SRB_ELS_CMD_RPT: 1419 case SRB_ELS_CMD_HST: 1420 type = "els"; 1421 break; 1422 case SRB_CT_CMD: 1423 type = "ct pass-through"; 1424 break; 1425 case SRB_ELS_DCMD: 1426 type = "Driver ELS logo"; 1427 ql_dbg(ql_dbg_user, vha, 0x5047, 1428 "Completing %s: (%p) type=%d.\n", type, sp, sp->type); 1429 sp->done(vha, sp, 0); 1430 return; 1431 default: 1432 ql_dbg(ql_dbg_user, vha, 0x503e, 1433 "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); 1434 return; 1435 } 1436 1437 comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); 1438 fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1); 1439 fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2); 1440 1441 /* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT 1442 * fc payload to the caller 1443 */ 1444 bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK; 1445 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status); 1446 1447 if (comp_status != CS_COMPLETE) { 1448 if (comp_status == CS_DATA_UNDERRUN) { 1449 res = DID_OK << 16; 1450 bsg_job->reply->reply_payload_rcv_len = 1451 le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); 1452 1453 ql_dbg(ql_dbg_user, vha, 0x503f, 1454 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1455 "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", 1456 type, sp->handle, comp_status, fw_status[1], fw_status[2], 1457 le16_to_cpu(((struct els_sts_entry_24xx *) 1458 pkt)->total_byte_count)); 1459 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1460 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1461 } 1462 else { 1463 ql_dbg(ql_dbg_user, vha, 0x5040, 1464 "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " 1465 "error subcode 1=0x%x error subcode 2=0x%x.\n", 1466 type, sp->handle, comp_status, 1467 le16_to_cpu(((struct els_sts_entry_24xx *) 1468 pkt)->error_subcode_1), 1469 le16_to_cpu(((struct els_sts_entry_24xx *) 1470 pkt)->error_subcode_2)); 1471 res = DID_ERROR << 16; 1472 bsg_job->reply->reply_payload_rcv_len = 0; 1473 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply); 1474 memcpy( fw_sts_ptr, fw_status, sizeof(fw_status)); 1475 } 1476 ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, 1477 (uint8_t *)pkt, sizeof(*pkt)); 1478 } 1479 else { 1480 res = DID_OK << 16; 1481 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len; 1482 bsg_job->reply_len = 0; 1483 } 1484 1485 sp->done(vha, sp, res); 1486 } 1487 1488 static void 1489 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, 1490 struct logio_entry_24xx *logio) 1491 { 1492 const char func[] = "LOGIO-IOCB"; 1493 const char *type; 1494 fc_port_t *fcport; 1495 srb_t *sp; 1496 struct srb_iocb *lio; 1497 uint16_t *data; 1498 
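/*
 * data[] is the two-word login status handed back through
 * lio->u.logio.data; iop[] holds the first two IOCB I/O parameters
 * from which the MBS_* style status is derived below.
 */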
uint32_t iop[2]; 1499 1500 sp = qla2x00_get_sp_from_handle(vha, func, req, logio); 1501 if (!sp) 1502 return; 1503 1504 lio = &sp->u.iocb_cmd; 1505 type = sp->name; 1506 fcport = sp->fcport; 1507 data = lio->u.logio.data; 1508 1509 data[0] = MBS_COMMAND_ERROR; 1510 data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ? 1511 QLA_LOGIO_LOGIN_RETRIED : 0; 1512 if (logio->entry_status) { 1513 ql_log(ql_log_warn, fcport->vha, 0x5034, 1514 "Async-%s error entry - hdl=%x" 1515 "portid=%02x%02x%02x entry-status=%x.\n", 1516 type, sp->handle, fcport->d_id.b.domain, 1517 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1518 logio->entry_status); 1519 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, 1520 (uint8_t *)logio, sizeof(*logio)); 1521 1522 goto logio_done; 1523 } 1524 1525 if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { 1526 ql_dbg(ql_dbg_async, fcport->vha, 0x5036, 1527 "Async-%s complete - hdl=%x portid=%02x%02x%02x " 1528 "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1529 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1530 le32_to_cpu(logio->io_parameter[0])); 1531 1532 data[0] = MBS_COMMAND_COMPLETE; 1533 if (sp->type != SRB_LOGIN_CMD) 1534 goto logio_done; 1535 1536 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1537 if (iop[0] & BIT_4) { 1538 fcport->port_type = FCT_TARGET; 1539 if (iop[0] & BIT_8) 1540 fcport->flags |= FCF_FCP2_DEVICE; 1541 } else if (iop[0] & BIT_5) 1542 fcport->port_type = FCT_INITIATOR; 1543 1544 if (iop[0] & BIT_7) 1545 fcport->flags |= FCF_CONF_COMP_SUPPORTED; 1546 1547 if (logio->io_parameter[7] || logio->io_parameter[8]) 1548 fcport->supported_classes |= FC_COS_CLASS2; 1549 if (logio->io_parameter[9] || logio->io_parameter[10]) 1550 fcport->supported_classes |= FC_COS_CLASS3; 1551 1552 goto logio_done; 1553 } 1554 1555 iop[0] = le32_to_cpu(logio->io_parameter[0]); 1556 iop[1] = le32_to_cpu(logio->io_parameter[1]); 1557 switch (iop[0]) { 1558 case LSC_SCODE_PORTID_USED: 1559 data[0] = MBS_PORT_ID_USED; 1560 data[1] = LSW(iop[1]); 1561 break; 1562 case LSC_SCODE_NPORT_USED: 1563 data[0] = MBS_LOOP_ID_USED; 1564 break; 1565 default: 1566 data[0] = MBS_COMMAND_ERROR; 1567 break; 1568 } 1569 1570 ql_dbg(ql_dbg_async, fcport->vha, 0x5037, 1571 "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " 1572 "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, 1573 fcport->d_id.b.area, fcport->d_id.b.al_pa, 1574 le16_to_cpu(logio->comp_status), 1575 le32_to_cpu(logio->io_parameter[0]), 1576 le32_to_cpu(logio->io_parameter[1])); 1577 1578 logio_done: 1579 sp->done(vha, sp, 0); 1580 } 1581 1582 static void 1583 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) 1584 { 1585 const char func[] = "TMF-IOCB"; 1586 const char *type; 1587 fc_port_t *fcport; 1588 srb_t *sp; 1589 struct srb_iocb *iocb; 1590 struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; 1591 1592 sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); 1593 if (!sp) 1594 return; 1595 1596 iocb = &sp->u.iocb_cmd; 1597 type = sp->name; 1598 fcport = sp->fcport; 1599 iocb->u.tmf.data = QLA_SUCCESS; 1600 1601 if (sts->entry_status) { 1602 ql_log(ql_log_warn, fcport->vha, 0x5038, 1603 "Async-%s error - hdl=%x entry-status(%x).\n", 1604 type, sp->handle, sts->entry_status); 1605 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1606 } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { 1607 ql_log(ql_log_warn, fcport->vha, 0x5039, 1608 "Async-%s error - hdl=%x completion status(%x).\n", 1609 type, sp->handle, sts->comp_status); 1610 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 
1611 } else if ((le16_to_cpu(sts->scsi_status) & 1612 SS_RESPONSE_INFO_LEN_VALID)) { 1613 if (le32_to_cpu(sts->rsp_data_len) < 4) { 1614 ql_log(ql_log_warn, fcport->vha, 0x503b, 1615 "Async-%s error - hdl=%x not enough response(%d).\n", 1616 type, sp->handle, sts->rsp_data_len); 1617 } else if (sts->data[3]) { 1618 ql_log(ql_log_warn, fcport->vha, 0x503c, 1619 "Async-%s error - hdl=%x response(%x).\n", 1620 type, sp->handle, sts->data[3]); 1621 iocb->u.tmf.data = QLA_FUNCTION_FAILED; 1622 } 1623 } 1624 1625 if (iocb->u.tmf.data != QLA_SUCCESS) 1626 ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, 1627 (uint8_t *)sts, sizeof(*sts)); 1628 1629 sp->done(vha, sp, 0); 1630 } 1631 1632 /** 1633 * qla2x00_process_response_queue() - Process response queue entries. 1634 * @ha: SCSI driver HA context 1635 */ 1636 void 1637 qla2x00_process_response_queue(struct rsp_que *rsp) 1638 { 1639 struct scsi_qla_host *vha; 1640 struct qla_hw_data *ha = rsp->hw; 1641 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; 1642 sts_entry_t *pkt; 1643 uint16_t handle_cnt; 1644 uint16_t cnt; 1645 1646 vha = pci_get_drvdata(ha->pdev); 1647 1648 if (!vha->flags.online) 1649 return; 1650 1651 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 1652 pkt = (sts_entry_t *)rsp->ring_ptr; 1653 1654 rsp->ring_index++; 1655 if (rsp->ring_index == rsp->length) { 1656 rsp->ring_index = 0; 1657 rsp->ring_ptr = rsp->ring; 1658 } else { 1659 rsp->ring_ptr++; 1660 } 1661 1662 if (pkt->entry_status != 0) { 1663 qla2x00_error_entry(vha, rsp, pkt); 1664 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1665 wmb(); 1666 continue; 1667 } 1668 1669 switch (pkt->entry_type) { 1670 case STATUS_TYPE: 1671 qla2x00_status_entry(vha, rsp, pkt); 1672 break; 1673 case STATUS_TYPE_21: 1674 handle_cnt = ((sts21_entry_t *)pkt)->handle_count; 1675 for (cnt = 0; cnt < handle_cnt; cnt++) { 1676 qla2x00_process_completed_request(vha, rsp->req, 1677 ((sts21_entry_t *)pkt)->handle[cnt]); 1678 } 1679 break; 1680 case STATUS_TYPE_22: 1681 handle_cnt = ((sts22_entry_t *)pkt)->handle_count; 1682 for (cnt = 0; cnt < handle_cnt; cnt++) { 1683 qla2x00_process_completed_request(vha, rsp->req, 1684 ((sts22_entry_t *)pkt)->handle[cnt]); 1685 } 1686 break; 1687 case STATUS_CONT_TYPE: 1688 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 1689 break; 1690 case MBX_IOCB_TYPE: 1691 qla2x00_mbx_iocb_entry(vha, rsp->req, 1692 (struct mbx_entry *)pkt); 1693 break; 1694 case CT_IOCB_TYPE: 1695 qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 1696 break; 1697 default: 1698 /* Type Not Supported. 
*/ 1699 ql_log(ql_log_warn, vha, 0x504a, 1700 "Received unknown response pkt type %x " 1701 "entry status=%x.\n", 1702 pkt->entry_type, pkt->entry_status); 1703 break; 1704 } 1705 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 1706 wmb(); 1707 } 1708 1709 /* Adjust ring index */ 1710 WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); 1711 } 1712 1713 static inline void 1714 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, 1715 uint32_t sense_len, struct rsp_que *rsp, int res) 1716 { 1717 struct scsi_qla_host *vha = sp->fcport->vha; 1718 struct scsi_cmnd *cp = GET_CMD_SP(sp); 1719 uint32_t track_sense_len; 1720 1721 if (sense_len >= SCSI_SENSE_BUFFERSIZE) 1722 sense_len = SCSI_SENSE_BUFFERSIZE; 1723 1724 SET_CMD_SENSE_LEN(sp, sense_len); 1725 SET_CMD_SENSE_PTR(sp, cp->sense_buffer); 1726 track_sense_len = sense_len; 1727 1728 if (sense_len > par_sense_len) 1729 sense_len = par_sense_len; 1730 1731 memcpy(cp->sense_buffer, sense_data, sense_len); 1732 1733 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len); 1734 track_sense_len -= sense_len; 1735 SET_CMD_SENSE_LEN(sp, track_sense_len); 1736 1737 if (track_sense_len != 0) { 1738 rsp->status_srb = sp; 1739 cp->result = res; 1740 } 1741 1742 if (sense_len) { 1743 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, 1744 "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", 1745 sp->fcport->vha->host_no, cp->device->id, cp->device->lun, 1746 cp); 1747 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, 1748 cp->sense_buffer, sense_len); 1749 } 1750 } 1751 1752 struct scsi_dif_tuple { 1753 __be16 guard; /* Checksum */ 1754 __be16 app_tag; /* APPL identifier */ 1755 __be32 ref_tag; /* Target LBA or indirect LBA */ 1756 }; 1757 1758 /* 1759 * Checks the guard or meta-data for the type of error 1760 * detected by the HBA. In case of errors, we set the 1761 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST 1762 * to indicate to the kernel that the HBA detected error. 
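 * (ASC 0x10 with ASCQ 0x1, 0x2 or 0x3 for guard, application-tag and
 * reference-tag check failures respectively, matching the checks at
 * the end of this function.)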
1763 */ 1764 static inline int 1765 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) 1766 { 1767 struct scsi_qla_host *vha = sp->fcport->vha; 1768 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 1769 uint8_t *ap = &sts24->data[12]; 1770 uint8_t *ep = &sts24->data[20]; 1771 uint32_t e_ref_tag, a_ref_tag; 1772 uint16_t e_app_tag, a_app_tag; 1773 uint16_t e_guard, a_guard; 1774 1775 /* 1776 * swab32 of the "data" field in the beginning of qla2x00_status_entry() 1777 * would make guard field appear at offset 2 1778 */ 1779 a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); 1780 a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); 1781 a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); 1782 e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); 1783 e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); 1784 e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); 1785 1786 ql_dbg(ql_dbg_io, vha, 0x3023, 1787 "iocb(s) %p Returned STATUS.\n", sts24); 1788 1789 ql_dbg(ql_dbg_io, vha, 0x3024, 1790 "DIF ERROR in cmd 0x%x lba 0x%llx act ref" 1791 " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app" 1792 " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n", 1793 cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag, 1794 a_app_tag, e_app_tag, a_guard, e_guard); 1795 1796 /* 1797 * Ignore sector if: 1798 * For type 3: ref & app tag is all 'f's 1799 * For type 0,1,2: app tag is all 'f's 1800 */ 1801 if ((a_app_tag == 0xffff) && 1802 ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || 1803 (a_ref_tag == 0xffffffff))) { 1804 uint32_t blocks_done, resid; 1805 sector_t lba_s = scsi_get_lba(cmd); 1806 1807 /* 2TB boundary case covered automatically with this */ 1808 blocks_done = e_ref_tag - (uint32_t)lba_s + 1; 1809 1810 resid = scsi_bufflen(cmd) - (blocks_done * 1811 cmd->device->sector_size); 1812 1813 scsi_set_resid(cmd, resid); 1814 cmd->result = DID_OK << 16; 1815 1816 /* Update protection tag */ 1817 if (scsi_prot_sg_count(cmd)) { 1818 uint32_t i, j = 0, k = 0, num_ent; 1819 struct scatterlist *sg; 1820 struct sd_dif_tuple *spt; 1821 1822 /* Patch the corresponding protection tags */ 1823 scsi_for_each_prot_sg(cmd, sg, 1824 scsi_prot_sg_count(cmd), i) { 1825 num_ent = sg_dma_len(sg) / 8; 1826 if (k + num_ent < blocks_done) { 1827 k += num_ent; 1828 continue; 1829 } 1830 j = blocks_done - k - 1; 1831 k = blocks_done; 1832 break; 1833 } 1834 1835 if (k != blocks_done) { 1836 ql_log(ql_log_warn, vha, 0x302f, 1837 "unexpected tag values tag:lba=%x:%llx)\n", 1838 e_ref_tag, (unsigned long long)lba_s); 1839 return 1; 1840 } 1841 1842 spt = page_address(sg_page(sg)) + sg->offset; 1843 spt += j; 1844 1845 spt->app_tag = 0xffff; 1846 if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) 1847 spt->ref_tag = 0xffffffff; 1848 } 1849 1850 return 0; 1851 } 1852 1853 /* check guard */ 1854 if (e_guard != a_guard) { 1855 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1856 0x10, 0x1); 1857 set_driver_byte(cmd, DRIVER_SENSE); 1858 set_host_byte(cmd, DID_ABORT); 1859 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1860 return 1; 1861 } 1862 1863 /* check ref tag */ 1864 if (e_ref_tag != a_ref_tag) { 1865 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1866 0x10, 0x3); 1867 set_driver_byte(cmd, DRIVER_SENSE); 1868 set_host_byte(cmd, DID_ABORT); 1869 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1870 return 1; 1871 } 1872 1873 /* check appl tag */ 1874 if (e_app_tag != a_app_tag) { 1875 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, 1876 0x10, 0x2); 1877 set_driver_byte(cmd, DRIVER_SENSE); 1878 
set_host_byte(cmd, DID_ABORT); 1879 cmd->result |= SAM_STAT_CHECK_CONDITION << 1; 1880 return 1; 1881 } 1882 1883 return 1; 1884 } 1885 1886 static void 1887 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, 1888 struct req_que *req, uint32_t index) 1889 { 1890 struct qla_hw_data *ha = vha->hw; 1891 srb_t *sp; 1892 uint16_t comp_status; 1893 uint16_t scsi_status; 1894 uint16_t thread_id; 1895 uint32_t rval = EXT_STATUS_OK; 1896 struct fc_bsg_job *bsg_job = NULL; 1897 sts_entry_t *sts; 1898 struct sts_entry_24xx *sts24; 1899 sts = (sts_entry_t *) pkt; 1900 sts24 = (struct sts_entry_24xx *) pkt; 1901 1902 /* Validate handle. */ 1903 if (index >= req->num_outstanding_cmds) { 1904 ql_log(ql_log_warn, vha, 0x70af, 1905 "Invalid SCSI completion handle 0x%x.\n", index); 1906 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1907 return; 1908 } 1909 1910 sp = req->outstanding_cmds[index]; 1911 if (sp) { 1912 /* Free outstanding command slot. */ 1913 req->outstanding_cmds[index] = NULL; 1914 bsg_job = sp->u.bsg_job; 1915 } else { 1916 ql_log(ql_log_warn, vha, 0x70b0, 1917 "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n", 1918 req->id, index); 1919 1920 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 1921 return; 1922 } 1923 1924 if (IS_FWI2_CAPABLE(ha)) { 1925 comp_status = le16_to_cpu(sts24->comp_status); 1926 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 1927 } else { 1928 comp_status = le16_to_cpu(sts->comp_status); 1929 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 1930 } 1931 1932 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 1933 switch (comp_status) { 1934 case CS_COMPLETE: 1935 if (scsi_status == 0) { 1936 bsg_job->reply->reply_payload_rcv_len = 1937 bsg_job->reply_payload.payload_len; 1938 vha->qla_stats.input_bytes += 1939 bsg_job->reply->reply_payload_rcv_len; 1940 vha->qla_stats.input_requests++; 1941 rval = EXT_STATUS_OK; 1942 } 1943 goto done; 1944 1945 case CS_DATA_OVERRUN: 1946 ql_dbg(ql_dbg_user, vha, 0x70b1, 1947 "Command completed with data overrun thread_id=%d\n", 1948 thread_id); 1949 rval = EXT_STATUS_DATA_OVERRUN; 1950 break; 1951 1952 case CS_DATA_UNDERRUN: 1953 ql_dbg(ql_dbg_user, vha, 0x70b2, 1954 "Command completed with data underrun thread_id=%d\n", 1955 thread_id); 1956 rval = EXT_STATUS_DATA_UNDERRUN; 1957 break; 1958 case CS_BIDIR_RD_OVERRUN: 1959 ql_dbg(ql_dbg_user, vha, 0x70b3, 1960 "Command completed with read data overrun thread_id=%d\n", 1961 thread_id); 1962 rval = EXT_STATUS_DATA_OVERRUN; 1963 break; 1964 1965 case CS_BIDIR_RD_WR_OVERRUN: 1966 ql_dbg(ql_dbg_user, vha, 0x70b4, 1967 "Command completed with read and write data overrun " 1968 "thread_id=%d\n", thread_id); 1969 rval = EXT_STATUS_DATA_OVERRUN; 1970 break; 1971 1972 case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN: 1973 ql_dbg(ql_dbg_user, vha, 0x70b5, 1974 "Command completed with read data over and write data " 1975 "underrun thread_id=%d\n", thread_id); 1976 rval = EXT_STATUS_DATA_OVERRUN; 1977 break; 1978 1979 case CS_BIDIR_RD_UNDERRUN: 1980 ql_dbg(ql_dbg_user, vha, 0x70b6, 1981 "Command completed with read data underrun " 1982 "thread_id=%d\n", thread_id); 1983 rval = EXT_STATUS_DATA_UNDERRUN; 1984 break; 1985 1986 case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN: 1987 ql_dbg(ql_dbg_user, vha, 0x70b7, 1988 "Command completed with read data under and write data " 1989 "overrun thread_id=%d\n", thread_id); 1990 rval = EXT_STATUS_DATA_UNDERRUN; 1991 break; 1992 1993 case CS_BIDIR_RD_WR_UNDERRUN: 1994 ql_dbg(ql_dbg_user, vha, 0x70b8, 1995 "Command completed with read and
write data underrun " 1996 "thread_id=%d\n", thread_id); 1997 rval = EXT_STATUS_DATA_UNDERRUN; 1998 break; 1999 2000 case CS_BIDIR_DMA: 2001 ql_dbg(ql_dbg_user, vha, 0x70b9, 2002 "Command completed with data DMA error thread_id=%d\n", 2003 thread_id); 2004 rval = EXT_STATUS_DMA_ERR; 2005 break; 2006 2007 case CS_TIMEOUT: 2008 ql_dbg(ql_dbg_user, vha, 0x70ba, 2009 "Command completed with timeout thread_id=%d\n", 2010 thread_id); 2011 rval = EXT_STATUS_TIMEOUT; 2012 break; 2013 default: 2014 ql_dbg(ql_dbg_user, vha, 0x70bb, 2015 "Command completed with completion status=0x%x " 2016 "thread_id=%d\n", comp_status, thread_id); 2017 rval = EXT_STATUS_ERR; 2018 break; 2019 } 2020 bsg_job->reply->reply_payload_rcv_len = 0; 2021 2022 done: 2023 /* Return the vendor specific reply to API */ 2024 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; 2025 bsg_job->reply_len = sizeof(struct fc_bsg_reply); 2026 /* Always return DID_OK, bsg will send the vendor specific response 2027 * in this case only */ 2028 sp->done(vha, sp, (DID_OK << 6)); 2029 2030 } 2031 2032 /** 2033 * qla2x00_status_entry() - Process a Status IOCB entry. 2034 * @ha: SCSI driver HA context 2035 * @pkt: Entry pointer 2036 */ 2037 static void 2038 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) 2039 { 2040 srb_t *sp; 2041 fc_port_t *fcport; 2042 struct scsi_cmnd *cp; 2043 sts_entry_t *sts; 2044 struct sts_entry_24xx *sts24; 2045 uint16_t comp_status; 2046 uint16_t scsi_status; 2047 uint16_t ox_id; 2048 uint8_t lscsi_status; 2049 int32_t resid; 2050 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len, 2051 fw_resid_len; 2052 uint8_t *rsp_info, *sense_data; 2053 struct qla_hw_data *ha = vha->hw; 2054 uint32_t handle; 2055 uint16_t que; 2056 struct req_que *req; 2057 int logit = 1; 2058 int res = 0; 2059 uint16_t state_flags = 0; 2060 uint16_t retry_delay = 0; 2061 2062 sts = (sts_entry_t *) pkt; 2063 sts24 = (struct sts_entry_24xx *) pkt; 2064 if (IS_FWI2_CAPABLE(ha)) { 2065 comp_status = le16_to_cpu(sts24->comp_status); 2066 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; 2067 state_flags = le16_to_cpu(sts24->state_flags); 2068 } else { 2069 comp_status = le16_to_cpu(sts->comp_status); 2070 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK; 2071 } 2072 handle = (uint32_t) LSW(sts->handle); 2073 que = MSW(sts->handle); 2074 req = ha->req_q_map[que]; 2075 2076 /* Check for invalid queue pointer */ 2077 if (req == NULL || 2078 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { 2079 ql_dbg(ql_dbg_io, vha, 0x3059, 2080 "Invalid status handle (0x%x): Bad req pointer. req=%p, " 2081 "que=%u.\n", sts->handle, req, que); 2082 return; 2083 } 2084 2085 /* Validate handle. 
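* The IOCB handle packs the request-queue id in the upper word and the
* outstanding-command index in the lower word (split out above with
* MSW()/LSW()). An index at or beyond num_outstanding_cmds means the
* firmware returned a handle the driver never issued, which is treated
* as fatal and schedules an ISP abort (or an FCoE context reset on P3P
* parts) unless one is already active; a NULL slot just means the
* command was already completed, so the stale status entry is dropped.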
*/ 2086 if (handle < req->num_outstanding_cmds) { 2087 sp = req->outstanding_cmds[handle]; 2088 if (!sp) { 2089 ql_dbg(ql_dbg_io, vha, 0x3075, 2090 "%s(%ld): Already returned command for status handle (0x%x).\n", 2091 __func__, vha->host_no, sts->handle); 2092 return; 2093 } 2094 } else { 2095 ql_dbg(ql_dbg_io, vha, 0x3017, 2096 "Invalid status handle, out of range (0x%x).\n", 2097 sts->handle); 2098 2099 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { 2100 if (IS_P3P_TYPE(ha)) 2101 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); 2102 else 2103 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2104 qla2xxx_wake_dpc(vha); 2105 } 2106 return; 2107 } 2108 2109 if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) { 2110 qla25xx_process_bidir_status_iocb(vha, pkt, req, handle); 2111 return; 2112 } 2113 2114 /* Task Management completion. */ 2115 if (sp->type == SRB_TM_CMD) { 2116 qla24xx_tm_iocb_entry(vha, req, pkt); 2117 return; 2118 } 2119 2120 /* Fast path completion. */ 2121 if (comp_status == CS_COMPLETE && scsi_status == 0) { 2122 qla2x00_process_completed_request(vha, req, handle); 2123 2124 return; 2125 } 2126 2127 req->outstanding_cmds[handle] = NULL; 2128 cp = GET_CMD_SP(sp); 2129 if (cp == NULL) { 2130 ql_dbg(ql_dbg_io, vha, 0x3018, 2131 "Command already returned (0x%x/%p).\n", 2132 sts->handle, sp); 2133 2134 return; 2135 } 2136 2137 lscsi_status = scsi_status & STATUS_MASK; 2138 2139 fcport = sp->fcport; 2140 2141 ox_id = 0; 2142 sense_len = par_sense_len = rsp_info_len = resid_len = 2143 fw_resid_len = 0; 2144 if (IS_FWI2_CAPABLE(ha)) { 2145 if (scsi_status & SS_SENSE_LEN_VALID) 2146 sense_len = le32_to_cpu(sts24->sense_len); 2147 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2148 rsp_info_len = le32_to_cpu(sts24->rsp_data_len); 2149 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) 2150 resid_len = le32_to_cpu(sts24->rsp_residual_count); 2151 if (comp_status == CS_DATA_UNDERRUN) 2152 fw_resid_len = le32_to_cpu(sts24->residual_len); 2153 rsp_info = sts24->data; 2154 sense_data = sts24->data; 2155 host_to_fcp_swap(sts24->data, sizeof(sts24->data)); 2156 ox_id = le16_to_cpu(sts24->ox_id); 2157 par_sense_len = sizeof(sts24->data); 2158 /* Valid values of the retry delay timer are 0x1-0xffef */ 2159 if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) 2160 retry_delay = sts24->retry_delay; 2161 } else { 2162 if (scsi_status & SS_SENSE_LEN_VALID) 2163 sense_len = le16_to_cpu(sts->req_sense_length); 2164 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) 2165 rsp_info_len = le16_to_cpu(sts->rsp_info_len); 2166 resid_len = le32_to_cpu(sts->residual_length); 2167 rsp_info = sts->rsp_info; 2168 sense_data = sts->req_sense_data; 2169 par_sense_len = sizeof(sts->req_sense_data); 2170 } 2171 2172 /* Check for any FCP transport errors. */ 2173 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) { 2174 /* Sense data lies beyond any FCP RESPONSE data. */ 2175 if (IS_FWI2_CAPABLE(ha)) { 2176 sense_data += rsp_info_len; 2177 par_sense_len -= rsp_info_len; 2178 } 2179 if (rsp_info_len > 3 && rsp_info[3]) { 2180 ql_dbg(ql_dbg_io, fcport->vha, 0x3019, 2181 "FCP I/O protocol failure (0x%x/0x%x).\n", 2182 rsp_info_len, rsp_info[3]); 2183 2184 res = DID_BUS_BUSY << 16; 2185 goto out; 2186 } 2187 } 2188 2189 /* Check for overrun. */ 2190 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && 2191 scsi_status & SS_RESIDUAL_OVER) 2192 comp_status = CS_DATA_OVERRUN; 2193 2194 /* 2195 * Check retry_delay_timer value if we receive a busy or 2196 * queue full. 
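* A SAM BUSY or TASK SET FULL status may carry a retry delay hint from
* the target (picked out of the status IOCB above on FWI-2 capable
* adapters); it is recorded against the fcport via
* qla2x00_set_retry_delay_timestamp() so that, presumably, new commands
* to that port can be held off until the delay expires.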
2197 */ 2198 if (lscsi_status == SAM_STAT_TASK_SET_FULL || 2199 lscsi_status == SAM_STAT_BUSY) 2200 qla2x00_set_retry_delay_timestamp(fcport, retry_delay); 2201 2202 /* 2203 * Based on Host and scsi status generate status code for Linux 2204 */ 2205 switch (comp_status) { 2206 case CS_COMPLETE: 2207 case CS_QUEUE_FULL: 2208 if (scsi_status == 0) { 2209 res = DID_OK << 16; 2210 break; 2211 } 2212 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) { 2213 resid = resid_len; 2214 scsi_set_resid(cp, resid); 2215 2216 if (!lscsi_status && 2217 ((unsigned)(scsi_bufflen(cp) - resid) < 2218 cp->underflow)) { 2219 ql_dbg(ql_dbg_io, fcport->vha, 0x301a, 2220 "Mid-layer underflow " 2221 "detected (0x%x of 0x%x bytes).\n", 2222 resid, scsi_bufflen(cp)); 2223 2224 res = DID_ERROR << 16; 2225 break; 2226 } 2227 } 2228 res = DID_OK << 16 | lscsi_status; 2229 2230 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2231 ql_dbg(ql_dbg_io, fcport->vha, 0x301b, 2232 "QUEUE FULL detected.\n"); 2233 break; 2234 } 2235 logit = 0; 2236 if (lscsi_status != SS_CHECK_CONDITION) 2237 break; 2238 2239 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2240 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2241 break; 2242 2243 qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len, 2244 rsp, res); 2245 break; 2246 2247 case CS_DATA_UNDERRUN: 2248 /* Use F/W calculated residual length. */ 2249 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len; 2250 scsi_set_resid(cp, resid); 2251 if (scsi_status & SS_RESIDUAL_UNDER) { 2252 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { 2253 ql_dbg(ql_dbg_io, fcport->vha, 0x301d, 2254 "Dropped frame(s) detected " 2255 "(0x%x of 0x%x bytes).\n", 2256 resid, scsi_bufflen(cp)); 2257 2258 res = DID_ERROR << 16 | lscsi_status; 2259 goto check_scsi_status; 2260 } 2261 2262 if (!lscsi_status && 2263 ((unsigned)(scsi_bufflen(cp) - resid) < 2264 cp->underflow)) { 2265 ql_dbg(ql_dbg_io, fcport->vha, 0x301e, 2266 "Mid-layer underflow " 2267 "detected (0x%x of 0x%x bytes).\n", 2268 resid, scsi_bufflen(cp)); 2269 2270 res = DID_ERROR << 16; 2271 break; 2272 } 2273 } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && 2274 lscsi_status != SAM_STAT_BUSY) { 2275 /* 2276 * scsi status of task set and busy are considered to be 2277 * task not completed. 2278 */ 2279 2280 ql_dbg(ql_dbg_io, fcport->vha, 0x301f, 2281 "Dropped frame(s) detected (0x%x " 2282 "of 0x%x bytes).\n", resid, 2283 scsi_bufflen(cp)); 2284 2285 res = DID_ERROR << 16 | lscsi_status; 2286 goto check_scsi_status; 2287 } else { 2288 ql_dbg(ql_dbg_io, fcport->vha, 0x3030, 2289 "scsi_status: 0x%x, lscsi_status: 0x%x\n", 2290 scsi_status, lscsi_status); 2291 } 2292 2293 res = DID_OK << 16 | lscsi_status; 2294 logit = 0; 2295 2296 check_scsi_status: 2297 /* 2298 * Check to see if SCSI Status is non zero. If so report SCSI 2299 * Status. 
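* QUEUE FULL is only logged here, any status other than CHECK CONDITION
* is passed up unchanged, and CHECK CONDITION hands the sense bytes to
* the midlayer through qla2x00_handle_sense() when the firmware marked
* the sense length as valid.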
2300 */ 2301 if (lscsi_status != 0) { 2302 if (lscsi_status == SAM_STAT_TASK_SET_FULL) { 2303 ql_dbg(ql_dbg_io, fcport->vha, 0x3020, 2304 "QUEUE FULL detected.\n"); 2305 logit = 1; 2306 break; 2307 } 2308 if (lscsi_status != SS_CHECK_CONDITION) 2309 break; 2310 2311 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2312 if (!(scsi_status & SS_SENSE_LEN_VALID)) 2313 break; 2314 2315 qla2x00_handle_sense(sp, sense_data, par_sense_len, 2316 sense_len, rsp, res); 2317 } 2318 break; 2319 2320 case CS_PORT_LOGGED_OUT: 2321 case CS_PORT_CONFIG_CHG: 2322 case CS_PORT_BUSY: 2323 case CS_INCOMPLETE: 2324 case CS_PORT_UNAVAILABLE: 2325 case CS_TIMEOUT: 2326 case CS_RESET: 2327 2328 /* 2329 * We are going to have the fc class block the rport 2330 * while we try to recover so instruct the mid layer 2331 * to requeue until the class decides how to handle this. 2332 */ 2333 res = DID_TRANSPORT_DISRUPTED << 16; 2334 2335 if (comp_status == CS_TIMEOUT) { 2336 if (IS_FWI2_CAPABLE(ha)) 2337 break; 2338 else if ((le16_to_cpu(sts->status_flags) & 2339 SF_LOGOUT_SENT) == 0) 2340 break; 2341 } 2342 2343 ql_dbg(ql_dbg_io, fcport->vha, 0x3021, 2344 "Port to be marked lost on fcport=%02x%02x%02x, current " 2345 "port state= %s.\n", fcport->d_id.b.domain, 2346 fcport->d_id.b.area, fcport->d_id.b.al_pa, 2347 port_state_str[atomic_read(&fcport->state)]); 2348 2349 if (atomic_read(&fcport->state) == FCS_ONLINE) 2350 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); 2351 break; 2352 2353 case CS_ABORTED: 2354 res = DID_RESET << 16; 2355 break; 2356 2357 case CS_DIF_ERROR: 2358 logit = qla2x00_handle_dif_error(sp, sts24); 2359 res = cp->result; 2360 break; 2361 2362 case CS_TRANSPORT: 2363 res = DID_ERROR << 16; 2364 2365 if (!IS_PI_SPLIT_DET_CAPABLE(ha)) 2366 break; 2367 2368 if (state_flags & BIT_4) 2369 scmd_printk(KERN_WARNING, cp, 2370 "Unsupported device '%s' found.\n", 2371 cp->device->vendor); 2372 break; 2373 2374 default: 2375 res = DID_ERROR << 16; 2376 break; 2377 } 2378 2379 out: 2380 if (logit) 2381 ql_dbg(ql_dbg_io, fcport->vha, 0x3022, 2382 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " 2383 "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " 2384 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", 2385 comp_status, scsi_status, res, vha->host_no, 2386 cp->device->id, cp->device->lun, fcport->d_id.b.domain, 2387 fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, 2388 cp->cmnd, scsi_bufflen(cp), rsp_info_len, 2389 resid_len, fw_resid_len, sp, cp); 2390 2391 if (rsp->status_srb == NULL) 2392 sp->done(ha, sp, res); 2393 } 2394 2395 /** 2396 * qla2x00_status_cont_entry() - Process a Status Continuations entry. 2397 * @ha: SCSI driver HA context 2398 * @pkt: Entry pointer 2399 * 2400 * Extended sense data. 
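*
* Handles sense bytes that did not fit in the original status IOCB:
* each continuation entry carries up to sizeof(pkt->data) more bytes,
* which are copied into the command's sense buffer until the length
* remembered in the srb reaches zero and the command is completed.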
2401 */ 2402 static void 2403 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) 2404 { 2405 uint8_t sense_sz = 0; 2406 struct qla_hw_data *ha = rsp->hw; 2407 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev); 2408 srb_t *sp = rsp->status_srb; 2409 struct scsi_cmnd *cp; 2410 uint32_t sense_len; 2411 uint8_t *sense_ptr; 2412 2413 if (!sp || !GET_CMD_SENSE_LEN(sp)) 2414 return; 2415 2416 sense_len = GET_CMD_SENSE_LEN(sp); 2417 sense_ptr = GET_CMD_SENSE_PTR(sp); 2418 2419 cp = GET_CMD_SP(sp); 2420 if (cp == NULL) { 2421 ql_log(ql_log_warn, vha, 0x3025, 2422 "cmd is NULL: already returned to OS (sp=%p).\n", sp); 2423 2424 rsp->status_srb = NULL; 2425 return; 2426 } 2427 2428 if (sense_len > sizeof(pkt->data)) 2429 sense_sz = sizeof(pkt->data); 2430 else 2431 sense_sz = sense_len; 2432 2433 /* Move sense data. */ 2434 if (IS_FWI2_CAPABLE(ha)) 2435 host_to_fcp_swap(pkt->data, sizeof(pkt->data)); 2436 memcpy(sense_ptr, pkt->data, sense_sz); 2437 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c, 2438 sense_ptr, sense_sz); 2439 2440 sense_len -= sense_sz; 2441 sense_ptr += sense_sz; 2442 2443 SET_CMD_SENSE_PTR(sp, sense_ptr); 2444 SET_CMD_SENSE_LEN(sp, sense_len); 2445 2446 /* Place command on done queue. */ 2447 if (sense_len == 0) { 2448 rsp->status_srb = NULL; 2449 sp->done(ha, sp, cp->result); 2450 } 2451 } 2452 2453 /** 2454 * qla2x00_error_entry() - Process an error entry. 2455 * @ha: SCSI driver HA context 2456 * @pkt: Entry pointer 2457 */ 2458 static void 2459 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) 2460 { 2461 srb_t *sp; 2462 struct qla_hw_data *ha = vha->hw; 2463 const char func[] = "ERROR-IOCB"; 2464 uint16_t que = MSW(pkt->handle); 2465 struct req_que *req = NULL; 2466 int res = DID_ERROR << 16; 2467 2468 ql_dbg(ql_dbg_async, vha, 0x502a, 2469 "type of error status in response: 0x%x\n", pkt->entry_status); 2470 2471 if (que >= ha->max_req_queues || !ha->req_q_map[que]) 2472 goto fatal; 2473 2474 req = ha->req_q_map[que]; 2475 2476 if (pkt->entry_status & RF_BUSY) 2477 res = DID_BUS_BUSY << 16; 2478 2479 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2480 if (sp) { 2481 sp->done(ha, sp, res); 2482 return; 2483 } 2484 fatal: 2485 ql_log(ql_log_warn, vha, 0x5030, 2486 "Error entry - invalid handle/queue (%04x).\n", que); 2487 } 2488 2489 /** 2490 * qla24xx_mbx_completion() - Process mailbox command completions. 2491 * @ha: SCSI driver HA context 2492 * @mb0: Mailbox0 register 2493 */ 2494 static void 2495 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) 2496 { 2497 uint16_t cnt; 2498 uint32_t mboxes; 2499 uint16_t __iomem *wptr; 2500 struct qla_hw_data *ha = vha->hw; 2501 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2502 2503 /* Read all mbox registers? */ 2504 mboxes = (1 << ha->mbx_count) - 1; 2505 if (!ha->mcp) 2506 ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n"); 2507 else 2508 mboxes = ha->mcp->in_mb; 2509 2510 /* Load return mailbox registers. 
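* mailbox_out[0] comes from the host-status word already captured by
* the caller; the rest are read starting at mailbox1, but only the
* registers flagged in the active command's in_mb mask (or all of them
* when no mailbox command is outstanding).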
*/ 2511 ha->flags.mbox_int = 1; 2512 ha->mailbox_out[0] = mb0; 2513 mboxes >>= 1; 2514 wptr = (uint16_t __iomem *)®->mailbox1; 2515 2516 for (cnt = 1; cnt < ha->mbx_count; cnt++) { 2517 if (mboxes & BIT_0) 2518 ha->mailbox_out[cnt] = RD_REG_WORD(wptr); 2519 2520 mboxes >>= 1; 2521 wptr++; 2522 } 2523 } 2524 2525 static void 2526 qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, 2527 struct abort_entry_24xx *pkt) 2528 { 2529 const char func[] = "ABT_IOCB"; 2530 srb_t *sp; 2531 struct srb_iocb *abt; 2532 2533 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); 2534 if (!sp) 2535 return; 2536 2537 abt = &sp->u.iocb_cmd; 2538 abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); 2539 sp->done(vha, sp, 0); 2540 } 2541 2542 /** 2543 * qla24xx_process_response_queue() - Process response queue entries. 2544 * @ha: SCSI driver HA context 2545 */ 2546 void qla24xx_process_response_queue(struct scsi_qla_host *vha, 2547 struct rsp_que *rsp) 2548 { 2549 struct sts_entry_24xx *pkt; 2550 struct qla_hw_data *ha = vha->hw; 2551 2552 if (!vha->flags.online) 2553 return; 2554 2555 if (rsp->msix->cpuid != smp_processor_id()) { 2556 /* if kernel does not notify qla of IRQ's CPU change, 2557 * then set it here. 2558 */ 2559 rsp->msix->cpuid = smp_processor_id(); 2560 ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid; 2561 } 2562 2563 while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { 2564 pkt = (struct sts_entry_24xx *)rsp->ring_ptr; 2565 2566 rsp->ring_index++; 2567 if (rsp->ring_index == rsp->length) { 2568 rsp->ring_index = 0; 2569 rsp->ring_ptr = rsp->ring; 2570 } else { 2571 rsp->ring_ptr++; 2572 } 2573 2574 if (pkt->entry_status != 0) { 2575 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2576 2577 if (qlt_24xx_process_response_error(vha, pkt)) 2578 goto process_err; 2579 2580 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2581 wmb(); 2582 continue; 2583 } 2584 process_err: 2585 2586 switch (pkt->entry_type) { 2587 case STATUS_TYPE: 2588 qla2x00_status_entry(vha, rsp, pkt); 2589 break; 2590 case STATUS_CONT_TYPE: 2591 qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt); 2592 break; 2593 case VP_RPT_ID_IOCB_TYPE: 2594 qla24xx_report_id_acquisition(vha, 2595 (struct vp_rpt_id_entry_24xx *)pkt); 2596 break; 2597 case LOGINOUT_PORT_IOCB_TYPE: 2598 qla24xx_logio_entry(vha, rsp->req, 2599 (struct logio_entry_24xx *)pkt); 2600 break; 2601 case CT_IOCB_TYPE: 2602 qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE); 2603 break; 2604 case ELS_IOCB_TYPE: 2605 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2606 break; 2607 case ABTS_RECV_24XX: 2608 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 2609 /* ensure that the ATIO queue is empty */ 2610 qlt_handle_abts_recv(vha, (response_t *)pkt); 2611 break; 2612 } else { 2613 /* drop through */ 2614 qlt_24xx_process_atio_queue(vha, 1); 2615 } 2616 case ABTS_RESP_24XX: 2617 case CTIO_TYPE7: 2618 case NOTIFY_ACK_TYPE: 2619 case CTIO_CRC2: 2620 qlt_response_pkt_all_vps(vha, (response_t *)pkt); 2621 break; 2622 case MARKER_TYPE: 2623 /* Do nothing in this case, this check is to prevent it 2624 * from falling into default case 2625 */ 2626 break; 2627 case ABORT_IOCB_TYPE: 2628 qla24xx_abort_iocb_entry(vha, rsp->req, 2629 (struct abort_entry_24xx *)pkt); 2630 break; 2631 default: 2632 /* Type Not Supported. 
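* Unknown entry types are only reported at the async debug level; the
* entry is still stamped RESPONSE_PROCESSED below so the ring index
* keeps moving past it.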
*/ 2633 ql_dbg(ql_dbg_async, vha, 0x5042, 2634 "Received unknown response pkt type %x " 2635 "entry status=%x.\n", 2636 pkt->entry_type, pkt->entry_status); 2637 break; 2638 } 2639 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2640 wmb(); 2641 } 2642 2643 /* Adjust ring index */ 2644 if (IS_P3P_TYPE(ha)) { 2645 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; 2646 WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); 2647 } else 2648 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); 2649 } 2650 2651 static void 2652 qla2xxx_check_risc_status(scsi_qla_host_t *vha) 2653 { 2654 int rval; 2655 uint32_t cnt; 2656 struct qla_hw_data *ha = vha->hw; 2657 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; 2658 2659 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && 2660 !IS_QLA27XX(ha)) 2661 return; 2662 2663 rval = QLA_SUCCESS; 2664 WRT_REG_DWORD(®->iobase_addr, 0x7C00); 2665 RD_REG_DWORD(®->iobase_addr); 2666 WRT_REG_DWORD(®->iobase_window, 0x0001); 2667 for (cnt = 10000; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2668 rval == QLA_SUCCESS; cnt--) { 2669 if (cnt) { 2670 WRT_REG_DWORD(®->iobase_window, 0x0001); 2671 udelay(10); 2672 } else 2673 rval = QLA_FUNCTION_TIMEOUT; 2674 } 2675 if (rval == QLA_SUCCESS) 2676 goto next_test; 2677 2678 rval = QLA_SUCCESS; 2679 WRT_REG_DWORD(®->iobase_window, 0x0003); 2680 for (cnt = 100; (RD_REG_DWORD(®->iobase_window) & BIT_0) == 0 && 2681 rval == QLA_SUCCESS; cnt--) { 2682 if (cnt) { 2683 WRT_REG_DWORD(®->iobase_window, 0x0003); 2684 udelay(10); 2685 } else 2686 rval = QLA_FUNCTION_TIMEOUT; 2687 } 2688 if (rval != QLA_SUCCESS) 2689 goto done; 2690 2691 next_test: 2692 if (RD_REG_DWORD(®->iobase_c8) & BIT_3) 2693 ql_log(ql_log_info, vha, 0x504c, 2694 "Additional code -- 0x55AA.\n"); 2695 2696 done: 2697 WRT_REG_DWORD(®->iobase_window, 0x0000); 2698 RD_REG_DWORD(®->iobase_window); 2699 } 2700 2701 /** 2702 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx. 2703 * @irq: 2704 * @dev_id: SCSI driver HA context 2705 * 2706 * Called by system whenever the host adapter generates an interrupt. 2707 * 2708 * Returns handled flag. 
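*
* Runs under the hardware lock and drains up to 50 pending conditions
* per invocation: mailbox completions, asynchronous events,
* response-queue updates and, in target mode, ATIO-queue updates,
* clearing the RISC interrupt after each pass through the loop.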
2709 */ 2710 irqreturn_t 2711 qla24xx_intr_handler(int irq, void *dev_id) 2712 { 2713 scsi_qla_host_t *vha; 2714 struct qla_hw_data *ha; 2715 struct device_reg_24xx __iomem *reg; 2716 int status; 2717 unsigned long iter; 2718 uint32_t stat; 2719 uint32_t hccr; 2720 uint16_t mb[8]; 2721 struct rsp_que *rsp; 2722 unsigned long flags; 2723 2724 rsp = (struct rsp_que *) dev_id; 2725 if (!rsp) { 2726 ql_log(ql_log_info, NULL, 0x5059, 2727 "%s: NULL response queue pointer.\n", __func__); 2728 return IRQ_NONE; 2729 } 2730 2731 ha = rsp->hw; 2732 reg = &ha->iobase->isp24; 2733 status = 0; 2734 2735 if (unlikely(pci_channel_offline(ha->pdev))) 2736 return IRQ_HANDLED; 2737 2738 spin_lock_irqsave(&ha->hardware_lock, flags); 2739 vha = pci_get_drvdata(ha->pdev); 2740 for (iter = 50; iter--; ) { 2741 stat = RD_REG_DWORD(&reg->host_status); 2742 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 2743 break; 2744 if (stat & HSRX_RISC_PAUSED) { 2745 if (unlikely(pci_channel_offline(ha->pdev))) 2746 break; 2747 2748 hccr = RD_REG_DWORD(&reg->hccr); 2749 2750 ql_log(ql_log_warn, vha, 0x504b, 2751 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2752 hccr); 2753 2754 qla2xxx_check_risc_status(vha); 2755 2756 ha->isp_ops->fw_dump(vha, 1); 2757 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2758 break; 2759 } else if ((stat & HSRX_RISC_INT) == 0) 2760 break; 2761 2762 switch (stat & 0xff) { 2763 case INTR_ROM_MB_SUCCESS: 2764 case INTR_ROM_MB_FAILED: 2765 case INTR_MB_SUCCESS: 2766 case INTR_MB_FAILED: 2767 qla24xx_mbx_completion(vha, MSW(stat)); 2768 status |= MBX_INTERRUPT; 2769 2770 break; 2771 case INTR_ASYNC_EVENT: 2772 mb[0] = MSW(stat); 2773 mb[1] = RD_REG_WORD(&reg->mailbox1); 2774 mb[2] = RD_REG_WORD(&reg->mailbox2); 2775 mb[3] = RD_REG_WORD(&reg->mailbox3); 2776 qla2x00_async_event(vha, rsp, mb); 2777 break; 2778 case INTR_RSP_QUE_UPDATE: 2779 case INTR_RSP_QUE_UPDATE_83XX: 2780 qla24xx_process_response_queue(vha, rsp); 2781 break; 2782 case INTR_ATIO_QUE_UPDATE:{ 2783 unsigned long flags2; 2784 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 2785 qlt_24xx_process_atio_queue(vha, 1); 2786 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); 2787 break; 2788 } 2789 case INTR_ATIO_RSP_QUE_UPDATE: { 2790 unsigned long flags2; 2791 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 2792 qlt_24xx_process_atio_queue(vha, 1); 2793 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); 2794 2795 qla24xx_process_response_queue(vha, rsp); 2796 break; 2797 } 2798 default: 2799 ql_dbg(ql_dbg_async, vha, 0x504f, 2800 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2801 break; 2802 } 2803 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2804 RD_REG_DWORD_RELAXED(&reg->hccr); 2805 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2806 ndelay(3500); 2807 } 2808 qla2x00_handle_mbx_completion(ha, status); 2809 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2810 2811 return IRQ_HANDLED; 2812 } 2813 2814 static irqreturn_t 2815 qla24xx_msix_rsp_q(int irq, void *dev_id) 2816 { 2817 struct qla_hw_data *ha; 2818 struct rsp_que *rsp; 2819 struct device_reg_24xx __iomem *reg; 2820 struct scsi_qla_host *vha; 2821 unsigned long flags; 2822 uint32_t stat = 0; 2823 2824 rsp = (struct rsp_que *) dev_id; 2825 if (!rsp) { 2826 ql_log(ql_log_info, NULL, 0x505a, 2827 "%s: NULL response queue pointer.\n", __func__); 2828 return IRQ_NONE; 2829 } 2830 ha = rsp->hw; 2831 reg = &ha->iobase->isp24; 2832 2833 spin_lock_irqsave(&ha->hardware_lock, flags); 2834 2835 vha = pci_get_drvdata(ha->pdev); 2836 /* 2837 * Use host_status register to check for PCI 
disconnection before we 2838 * process the response queue. 2839 */ 2840 stat = RD_REG_DWORD(&reg->host_status); 2841 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 2842 goto out; 2843 qla24xx_process_response_queue(vha, rsp); 2844 if (!ha->flags.disable_msix_handshake) { 2845 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2846 RD_REG_DWORD_RELAXED(&reg->hccr); 2847 } 2848 out: 2849 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2850 2851 return IRQ_HANDLED; 2852 } 2853 2854 static irqreturn_t 2855 qla25xx_msix_rsp_q(int irq, void *dev_id) 2856 { 2857 struct qla_hw_data *ha; 2858 scsi_qla_host_t *vha; 2859 struct rsp_que *rsp; 2860 struct device_reg_24xx __iomem *reg; 2861 unsigned long flags; 2862 uint32_t hccr = 0; 2863 2864 rsp = (struct rsp_que *) dev_id; 2865 if (!rsp) { 2866 ql_log(ql_log_info, NULL, 0x505b, 2867 "%s: NULL response queue pointer.\n", __func__); 2868 return IRQ_NONE; 2869 } 2870 ha = rsp->hw; 2871 vha = pci_get_drvdata(ha->pdev); 2872 2873 /* Clear the interrupt, if enabled, for this response queue */ 2874 if (!ha->flags.disable_msix_handshake) { 2875 reg = &ha->iobase->isp24; 2876 spin_lock_irqsave(&ha->hardware_lock, flags); 2877 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT); 2878 hccr = RD_REG_DWORD_RELAXED(&reg->hccr); 2879 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2880 } 2881 if (qla2x00_check_reg32_for_disconnect(vha, hccr)) 2882 goto out; 2883 queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work); 2884 2885 out: 2886 return IRQ_HANDLED; 2887 } 2888 2889 static irqreturn_t 2890 qla24xx_msix_default(int irq, void *dev_id) 2891 { 2892 scsi_qla_host_t *vha; 2893 struct qla_hw_data *ha; 2894 struct rsp_que *rsp; 2895 struct device_reg_24xx __iomem *reg; 2896 int status; 2897 uint32_t stat; 2898 uint32_t hccr; 2899 uint16_t mb[8]; 2900 unsigned long flags; 2901 2902 rsp = (struct rsp_que *) dev_id; 2903 if (!rsp) { 2904 ql_log(ql_log_info, NULL, 0x505c, 2905 "%s: NULL response queue pointer.\n", __func__); 2906 return IRQ_NONE; 2907 } 2908 ha = rsp->hw; 2909 reg = &ha->iobase->isp24; 2910 status = 0; 2911 2912 spin_lock_irqsave(&ha->hardware_lock, flags); 2913 vha = pci_get_drvdata(ha->pdev); 2914 do { 2915 stat = RD_REG_DWORD(&reg->host_status); 2916 if (qla2x00_check_reg32_for_disconnect(vha, stat)) 2917 break; 2918 if (stat & HSRX_RISC_PAUSED) { 2919 if (unlikely(pci_channel_offline(ha->pdev))) 2920 break; 2921 2922 hccr = RD_REG_DWORD(&reg->hccr); 2923 2924 ql_log(ql_log_info, vha, 0x5050, 2925 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2926 hccr); 2927 2928 qla2xxx_check_risc_status(vha); 2929 2930 ha->isp_ops->fw_dump(vha, 1); 2931 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2932 break; 2933 } else if ((stat & HSRX_RISC_INT) == 0) 2934 break; 2935 2936 switch (stat & 0xff) { 2937 case INTR_ROM_MB_SUCCESS: 2938 case INTR_ROM_MB_FAILED: 2939 case INTR_MB_SUCCESS: 2940 case INTR_MB_FAILED: 2941 qla24xx_mbx_completion(vha, MSW(stat)); 2942 status |= MBX_INTERRUPT; 2943 2944 break; 2945 case INTR_ASYNC_EVENT: 2946 mb[0] = MSW(stat); 2947 mb[1] = RD_REG_WORD(&reg->mailbox1); 2948 mb[2] = RD_REG_WORD(&reg->mailbox2); 2949 mb[3] = RD_REG_WORD(&reg->mailbox3); 2950 qla2x00_async_event(vha, rsp, mb); 2951 break; 2952 case INTR_RSP_QUE_UPDATE: 2953 case INTR_RSP_QUE_UPDATE_83XX: 2954 qla24xx_process_response_queue(vha, rsp); 2955 break; 2956 case INTR_ATIO_QUE_UPDATE:{ 2957 unsigned long flags2; 2958 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 2959 qlt_24xx_process_atio_queue(vha, 1); 2960 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); 2961 break; 2962 } 2963 case 
INTR_ATIO_RSP_QUE_UPDATE: { 2964 unsigned long flags2; 2965 spin_lock_irqsave(&ha->tgt.atio_lock, flags2); 2966 qlt_24xx_process_atio_queue(vha, 1); 2967 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2); 2968 2969 qla24xx_process_response_queue(vha, rsp); 2970 break; 2971 } 2972 default: 2973 ql_dbg(ql_dbg_async, vha, 0x5051, 2974 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2975 break; 2976 } 2977 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2978 } while (0); 2979 qla2x00_handle_mbx_completion(ha, status); 2980 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2981 2982 return IRQ_HANDLED; 2983 } 2984 2985 /* Interrupt handling helpers. */ 2986 2987 struct qla_init_msix_entry { 2988 const char *name; 2989 irq_handler_t handler; 2990 }; 2991 2992 static struct qla_init_msix_entry msix_entries[3] = { 2993 { "qla2xxx (default)", qla24xx_msix_default }, 2994 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 2995 { "qla2xxx (multiq)", qla25xx_msix_rsp_q }, 2996 }; 2997 2998 static struct qla_init_msix_entry qla82xx_msix_entries[2] = { 2999 { "qla2xxx (default)", qla82xx_msix_default }, 3000 { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q }, 3001 }; 3002 3003 static struct qla_init_msix_entry qla83xx_msix_entries[3] = { 3004 { "qla2xxx (default)", qla24xx_msix_default }, 3005 { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q }, 3006 { "qla2xxx (atio_q)", qla83xx_msix_atio_q }, 3007 }; 3008 3009 static void 3010 qla24xx_disable_msix(struct qla_hw_data *ha) 3011 { 3012 int i; 3013 struct qla_msix_entry *qentry; 3014 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3015 3016 for (i = 0; i < ha->msix_count; i++) { 3017 qentry = &ha->msix_entries[i]; 3018 if (qentry->have_irq) { 3019 /* un-register irq cpu affinity notification */ 3020 irq_set_affinity_notifier(qentry->vector, NULL); 3021 free_irq(qentry->vector, qentry->rsp); 3022 } 3023 } 3024 pci_disable_msix(ha->pdev); 3025 kfree(ha->msix_entries); 3026 ha->msix_entries = NULL; 3027 ha->flags.msix_enabled = 0; 3028 ql_dbg(ql_dbg_init, vha, 0x0042, 3029 "Disabled the MSI.\n"); 3030 } 3031 3032 static int 3033 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) 3034 { 3035 #define MIN_MSIX_COUNT 2 3036 #define ATIO_VECTOR 2 3037 int i, ret; 3038 struct msix_entry *entries; 3039 struct qla_msix_entry *qentry; 3040 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3041 3042 entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count, 3043 GFP_KERNEL); 3044 if (!entries) { 3045 ql_log(ql_log_warn, vha, 0x00bc, 3046 "Failed to allocate memory for msix_entry.\n"); 3047 return -ENOMEM; 3048 } 3049 3050 for (i = 0; i < ha->msix_count; i++) 3051 entries[i].entry = i; 3052 3053 ret = pci_enable_msix_range(ha->pdev, 3054 entries, MIN_MSIX_COUNT, ha->msix_count); 3055 if (ret < 0) { 3056 ql_log(ql_log_fatal, vha, 0x00c7, 3057 "MSI-X: Failed to enable support, " 3058 "giving up -- %d/%d.\n", 3059 ha->msix_count, ret); 3060 goto msix_out; 3061 } else if (ret < ha->msix_count) { 3062 ql_log(ql_log_warn, vha, 0x00c6, 3063 "MSI-X: Failed to enable support " 3064 "-- %d/%d\n Retry with %d vectors.\n", 3065 ha->msix_count, ret, ret); 3066 } 3067 ha->msix_count = ret; 3068 ha->max_rsp_queues = ha->msix_count - 1; 3069 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 3070 ha->msix_count, GFP_KERNEL); 3071 if (!ha->msix_entries) { 3072 ql_log(ql_log_fatal, vha, 0x00c8, 3073 "Failed to allocate memory for ha->msix_entries.\n"); 3074 ret = -ENOMEM; 3075 goto msix_out; 3076 } 3077 ha->flags.msix_enabled = 1; 3078 3079 for (i = 0; i < ha->msix_count; i++) { 3080 
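/*
 * Seed the per-vector bookkeeping: the affinity-notify callbacks are
 * wired up here, but they only take effect once request_irq() succeeds
 * and irq_set_affinity_notifier() is called for the vector below.
 */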
qentry = &ha->msix_entries[i]; 3081 qentry->vector = entries[i].vector; 3082 qentry->entry = entries[i].entry; 3083 qentry->have_irq = 0; 3084 qentry->rsp = NULL; 3085 qentry->irq_notify.notify = qla_irq_affinity_notify; 3086 qentry->irq_notify.release = qla_irq_affinity_release; 3087 qentry->cpuid = -1; 3088 } 3089 3090 /* Enable MSI-X vectors for the base queue */ 3091 for (i = 0; i < 2; i++) { 3092 qentry = &ha->msix_entries[i]; 3093 if (IS_P3P_TYPE(ha)) 3094 ret = request_irq(qentry->vector, 3095 qla82xx_msix_entries[i].handler, 3096 0, qla82xx_msix_entries[i].name, rsp); 3097 else 3098 ret = request_irq(qentry->vector, 3099 msix_entries[i].handler, 3100 0, msix_entries[i].name, rsp); 3101 if (ret) 3102 goto msix_register_fail; 3103 qentry->have_irq = 1; 3104 qentry->rsp = rsp; 3105 rsp->msix = qentry; 3106 3107 /* Register for CPU affinity notification. */ 3108 irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify); 3109 3110 /* Schedule work (ie. trigger a notification) to read cpu 3111 * mask for this specific irq. 3112 * kref_get is required because 3113 * irq_affinity_notify() will do 3114 * kref_put(). 3115 */ 3116 kref_get(&qentry->irq_notify.kref); 3117 schedule_work(&qentry->irq_notify.work); 3118 } 3119 3120 /* 3121 * If target mode is enable, also request the vector for the ATIO 3122 * queue. 3123 */ 3124 if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) { 3125 qentry = &ha->msix_entries[ATIO_VECTOR]; 3126 ret = request_irq(qentry->vector, 3127 qla83xx_msix_entries[ATIO_VECTOR].handler, 3128 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp); 3129 qentry->have_irq = 1; 3130 qentry->rsp = rsp; 3131 rsp->msix = qentry; 3132 } 3133 3134 msix_register_fail: 3135 if (ret) { 3136 ql_log(ql_log_fatal, vha, 0x00cb, 3137 "MSI-X: unable to register handler -- %x/%d.\n", 3138 qentry->vector, ret); 3139 qla24xx_disable_msix(ha); 3140 ha->mqenable = 0; 3141 goto msix_out; 3142 } 3143 3144 /* Enable MSI-X vector for response queue update for queue 0 */ 3145 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { 3146 if (ha->msixbase && ha->mqiobase && 3147 (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3148 ha->mqenable = 1; 3149 } else 3150 if (ha->mqiobase 3151 && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1)) 3152 ha->mqenable = 1; 3153 ql_dbg(ql_dbg_multiq, vha, 0xc005, 3154 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3155 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3156 ql_dbg(ql_dbg_init, vha, 0x0055, 3157 "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", 3158 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); 3159 3160 msix_out: 3161 kfree(entries); 3162 return ret; 3163 } 3164 3165 int 3166 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) 3167 { 3168 int ret = QLA_FUNCTION_FAILED; 3169 device_reg_t *reg = ha->iobase; 3170 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3171 3172 /* If possible, enable MSI-X. 
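* Interrupt setup falls back in stages: MSI-X is tried first on chips
* that support it, plain MSI if MSI-X fails or is excluded below, and
* finally a (possibly shared) INTx line -- except on ISP82xx, where
* INTx is skipped entirely.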
*/ 3173 if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3174 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha) && 3175 !IS_QLA27XX(ha)) 3176 goto skip_msi; 3177 3178 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP && 3179 (ha->pdev->subsystem_device == 0x7040 || 3180 ha->pdev->subsystem_device == 0x7041 || 3181 ha->pdev->subsystem_device == 0x1705)) { 3182 ql_log(ql_log_warn, vha, 0x0034, 3183 "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n", 3184 ha->pdev->subsystem_vendor, 3185 ha->pdev->subsystem_device); 3186 goto skip_msi; 3187 } 3188 3189 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { 3190 ql_log(ql_log_warn, vha, 0x0035, 3191 "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n", 3192 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); 3193 goto skip_msix; 3194 } 3195 3196 ret = qla24xx_enable_msix(ha, rsp); 3197 if (!ret) { 3198 ql_dbg(ql_dbg_init, vha, 0x0036, 3199 "MSI-X: Enabled (0x%X, 0x%X).\n", 3200 ha->chip_revision, ha->fw_attributes); 3201 goto clear_risc_ints; 3202 } 3203 3204 skip_msix: 3205 3206 ql_log(ql_log_info, vha, 0x0037, 3207 "Falling back to MSI mode -- %d.\n", ret); 3208 3209 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && 3210 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && 3211 !IS_QLA27XX(ha)) 3212 goto skip_msi; 3213 3214 ret = pci_enable_msi(ha->pdev); 3215 if (!ret) { 3216 ql_dbg(ql_dbg_init, vha, 0x0038, 3217 "MSI: Enabled.\n"); 3218 ha->flags.msi_enabled = 1; 3219 } else 3220 ql_log(ql_log_warn, vha, 0x0039, 3221 "Falling back to INTa mode -- %d.\n", ret); 3222 skip_msi: 3223 3224 /* Skip INTx on ISP82xx. */ 3225 if (!ha->flags.msi_enabled && IS_QLA82XX(ha)) 3226 return QLA_FUNCTION_FAILED; 3227 3228 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, 3229 ha->flags.msi_enabled ? 0 : IRQF_SHARED, 3230 QLA2XXX_DRIVER_NAME, rsp); 3231 if (ret) { 3232 ql_log(ql_log_warn, vha, 0x003a, 3233 "Failed to reserve interrupt %d; already in use.\n", 3234 ha->pdev->irq); 3235 goto fail; 3236 } else if (!ha->flags.msi_enabled) { 3237 ql_dbg(ql_dbg_init, vha, 0x0125, 3238 "INTa mode: Enabled.\n"); 3239 ha->flags.mr_intr_valid = 1; 3240 } 3241 3242 clear_risc_ints: 3243 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha)) 3244 goto fail; 3245 3246 spin_lock_irq(&ha->hardware_lock); 3247 WRT_REG_WORD(&reg->isp.semaphore, 0); 3248 spin_unlock_irq(&ha->hardware_lock); 3249 3250 fail: 3251 return ret; 3252 } 3253 3254 void 3255 qla2x00_free_irqs(scsi_qla_host_t *vha) 3256 { 3257 struct qla_hw_data *ha = vha->hw; 3258 struct rsp_que *rsp; 3259 3260 /* 3261 * We need to check that ha->rsp_q_map is valid in case we are called 3262 * from a probe failure context. 
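* A failed probe can get here before the response-queue map (or its
* first entry) has been allocated, so bail out early rather than
* dereference it.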
3263 */ 3264 if (!ha->rsp_q_map || !ha->rsp_q_map[0]) 3265 return; 3266 rsp = ha->rsp_q_map[0]; 3267 3268 if (ha->flags.msix_enabled) 3269 qla24xx_disable_msix(ha); 3270 else if (ha->flags.msi_enabled) { 3271 free_irq(ha->pdev->irq, rsp); 3272 pci_disable_msi(ha->pdev); 3273 } else 3274 free_irq(ha->pdev->irq, rsp); 3275 } 3276 3277 3278 int qla25xx_request_irq(struct rsp_que *rsp) 3279 { 3280 struct qla_hw_data *ha = rsp->hw; 3281 struct qla_init_msix_entry *intr = &msix_entries[2]; 3282 struct qla_msix_entry *msix = rsp->msix; 3283 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 3284 int ret; 3285 3286 ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp); 3287 if (ret) { 3288 ql_log(ql_log_fatal, vha, 0x00e6, 3289 "MSI-X: Unable to register handler -- %x/%d.\n", 3290 msix->vector, ret); 3291 return ret; 3292 } 3293 msix->have_irq = 1; 3294 msix->rsp = rsp; 3295 return ret; 3296 } 3297 3298 3299 /* irq_set_affinity/irqbalance will trigger notification of cpu mask update */ 3300 static void qla_irq_affinity_notify(struct irq_affinity_notify *notify, 3301 const cpumask_t *mask) 3302 { 3303 struct qla_msix_entry *e = 3304 container_of(notify, struct qla_msix_entry, irq_notify); 3305 struct qla_hw_data *ha; 3306 struct scsi_qla_host *base_vha; 3307 3308 /* user is recommended to set mask to just 1 cpu */ 3309 e->cpuid = cpumask_first(mask); 3310 3311 ha = e->rsp->hw; 3312 base_vha = pci_get_drvdata(ha->pdev); 3313 3314 ql_dbg(ql_dbg_init, base_vha, 0xffff, 3315 "%s: host %ld : vector %d cpu %d \n", __func__, 3316 base_vha->host_no, e->vector, e->cpuid); 3317 3318 if (e->have_irq) { 3319 if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && 3320 (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) { 3321 ha->tgt.rspq_vector_cpuid = e->cpuid; 3322 ql_dbg(ql_dbg_init, base_vha, 0xffff, 3323 "%s: host%ld: rspq vector %d cpu %d runtime change\n", 3324 __func__, base_vha->host_no, e->vector, e->cpuid); 3325 } 3326 } 3327 } 3328 3329 static void qla_irq_affinity_release(struct kref *ref) 3330 { 3331 struct irq_affinity_notify *notify = 3332 container_of(ref, struct irq_affinity_notify, kref); 3333 struct qla_msix_entry *e = 3334 container_of(notify, struct qla_msix_entry, irq_notify); 3335 struct scsi_qla_host *base_vha = pci_get_drvdata(e->rsp->hw->pdev); 3336 3337 ql_dbg(ql_dbg_init, base_vha, 0xffff, 3338 "%s: host%ld: vector %d cpu %d \n", __func__, 3339 base_vha->host_no, e->vector, e->cpuid); 3340 } 3341