/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
        struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
        sts_entry_t *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
        scsi_qla_host_t *vha;
        struct qla_hw_data *ha;
        struct device_reg_2xxx __iomem *reg;
        int status;
        unsigned long iter;
        uint16_t hccr;
        uint16_t mb[4];
        struct rsp_que *rsp;
        unsigned long flags;

        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
                ql_log(ql_log_info, NULL, 0x505d,
                    "%s: NULL response queue pointer.\n", __func__);
                return (IRQ_NONE);
        }

        ha = rsp->hw;
        reg = &ha->iobase->isp;
        status = 0;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        for (iter = 50; iter--; ) {
                hccr = RD_REG_WORD(&reg->hccr);
                if (hccr & HCCR_RISC_PAUSE) {
                        if (pci_channel_offline(ha->pdev))
                                break;

                        /*
                         * Issue a "HARD" reset in order for the RISC
                         * interrupt bit to be cleared.  Schedule a big
                         * hammer to get out of the RISC PAUSED state.
                         */
                        WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
                        RD_REG_WORD(&reg->hccr);

                        ha->isp_ops->fw_dump(vha, 1);
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        break;
                } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
                        break;

                if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
                        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
                        RD_REG_WORD(&reg->hccr);

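                        /*
                         * mb[0] values 0x4000-0x7fff are mailbox-command
                         * completion statuses, while 0x8000-0xbfff are
                         * asynchronous event codes; the range checks below
                         * dispatch on exactly that split.
                         */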
                        /* Get mailbox data. */
                        mb[0] = RD_MAILBOX_REG(ha, reg, 0);
                        if (mb[0] > 0x3fff && mb[0] < 0x8000) {
                                qla2x00_mbx_completion(vha, mb[0]);
                                status |= MBX_INTERRUPT;
                        } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
                                mb[1] = RD_MAILBOX_REG(ha, reg, 1);
                                mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                                mb[3] = RD_MAILBOX_REG(ha, reg, 3);
                                qla2x00_async_event(vha, rsp, mb);
                        } else {
                                /*EMPTY*/
                                ql_dbg(ql_dbg_async, vha, 0x5025,
                                    "Unrecognized interrupt type (%d).\n",
                                    mb[0]);
                        }
                        /* Release mailbox registers. */
                        WRT_REG_WORD(&reg->semaphore, 0);
                        RD_REG_WORD(&reg->semaphore);
                } else {
                        qla2x00_process_response_queue(rsp);

                        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
                        RD_REG_WORD(&reg->hccr);
                }
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
            (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
                set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
                complete(&ha->mbx_intr_comp);
        }

        return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
        scsi_qla_host_t *vha;
        struct device_reg_2xxx __iomem *reg;
        int status;
        unsigned long iter;
        uint32_t stat;
        uint16_t hccr;
        uint16_t mb[4];
        struct rsp_que *rsp;
        struct qla_hw_data *ha;
        unsigned long flags;

        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
                ql_log(ql_log_info, NULL, 0x5058,
                    "%s: NULL response queue pointer.\n", __func__);
                return (IRQ_NONE);
        }

        ha = rsp->hw;
        reg = &ha->iobase->isp;
        status = 0;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        vha = pci_get_drvdata(ha->pdev);
        for (iter = 50; iter--; ) {
                stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
                if (stat & HSR_RISC_PAUSED) {
                        if (unlikely(pci_channel_offline(ha->pdev)))
                                break;

                        hccr = RD_REG_WORD(&reg->hccr);
                        if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
                                ql_log(ql_log_warn, vha, 0x5026,
                                    "Parity error -- HCCR=%x, Dumping "
                                    "firmware.\n", hccr);
                        else
                                ql_log(ql_log_warn, vha, 0x5027,
                                    "RISC paused -- HCCR=%x, Dumping "
                                    "firmware.\n", hccr);

                        /*
                         * Issue a "HARD" reset in order for the RISC
                         * interrupt bit to be cleared.  Schedule a big
                         * hammer to get out of the RISC PAUSED state.
                         */
                        WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
                        RD_REG_WORD(&reg->hccr);

                        ha->isp_ops->fw_dump(vha, 1);
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        break;
                } else if ((stat & HSR_RISC_INT) == 0)
                        break;

                switch (stat & 0xff) {
                case 0x1:
                case 0x2:
                case 0x10:
                case 0x11:
                        qla2x00_mbx_completion(vha, MSW(stat));
                        status |= MBX_INTERRUPT;

                        /* Release mailbox registers. */
                        WRT_REG_WORD(&reg->semaphore, 0);
                        break;
                case 0x12:
                        mb[0] = MSW(stat);
                        mb[1] = RD_MAILBOX_REG(ha, reg, 1);
                        mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                        mb[3] = RD_MAILBOX_REG(ha, reg, 3);
                        qla2x00_async_event(vha, rsp, mb);
                        break;
                case 0x13:
                        qla2x00_process_response_queue(rsp);
                        break;
                case 0x15:
                        mb[0] = MBA_CMPLT_1_16BIT;
                        mb[1] = MSW(stat);
                        qla2x00_async_event(vha, rsp, mb);
                        break;
                case 0x16:
                        mb[0] = MBA_SCSI_COMPLETION;
                        mb[1] = MSW(stat);
                        mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                        qla2x00_async_event(vha, rsp, mb);
                        break;
                default:
                        ql_dbg(ql_dbg_async, vha, 0x5028,
                            "Unrecognized interrupt type (%d).\n",
                            stat & 0xff);
                        break;
                }
                WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
                RD_REG_WORD_RELAXED(&reg->hccr);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
            (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
                set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
                complete(&ha->mbx_intr_comp);
        }

        return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
        uint16_t cnt;
        uint32_t mboxes;
        uint16_t __iomem *wptr;
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

        /* Read all mbox registers? */
        mboxes = (1 << ha->mbx_count) - 1;
        if (!ha->mcp)
                ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
        else
                mboxes = ha->mcp->in_mb;

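        /*
         * Note: in_mb is a bitmask of the mailbox registers the caller
         * expects back (bit k set means "capture mailbox k").  Mailbox 0
         * has already been read by the ISR and is stored first, so the
         * mask is shifted past bit 0 before walking mailboxes 1..n below.
         */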
        /* Load return mailbox registers. */
        ha->flags.mbox_int = 1;
        ha->mailbox_out[0] = mb0;
        mboxes >>= 1;
        wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

        for (cnt = 1; cnt < ha->mbx_count; cnt++) {
                if (IS_QLA2200(ha) && cnt == 8)
                        wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
                if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
                        ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
                else if (mboxes & BIT_0)
                        ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

                wptr++;
                mboxes >>= 1;
        }
}

static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
        static char *event[] =
            { "Complete", "Request Notification", "Time Extension" };
        int rval;
        struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
        uint16_t __iomem *wptr;
        uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

        /* Seed data -- mailbox1 -> mailbox7. */
        wptr = (uint16_t __iomem *)&reg24->mailbox1;
        for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
                mb[cnt] = RD_REG_WORD(wptr);

        ql_dbg(ql_dbg_async, vha, 0x5021,
            "Inter-Driver Communication %s -- "
            "%04x %04x %04x %04x %04x %04x %04x.\n",
            event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
            mb[4], mb[5], mb[6]);
        if (aen == MBA_IDC_COMPLETE && mb[1] >> 15) {
                vha->hw->flags.idc_compl_status = 1;
                if (vha->hw->notify_dcbx_comp)
                        complete(&vha->hw->dcbx_comp);
        }

        /* Acknowledgement needed? [Notify && non-zero timeout]. */
        timeout = (descr >> 8) & 0xf;
        if (aen != MBA_IDC_NOTIFY || !timeout)
                return;

        ql_dbg(ql_dbg_async, vha, 0x5022,
            "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
            vha->host_no, event[aen & 0xff], timeout);

        rval = qla2x00_post_idc_ack_work(vha, mb);
        if (rval != QLA_SUCCESS)
                ql_log(ql_log_warn, vha, 0x5023,
                    "IDC failed to post ACK.\n");
}

#define LS_UNKNOWN      2
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
        static const char * const link_speeds[] = {
                "1", "2", "?", "4", "8", "16", "10"
        };

        if (IS_QLA2100(ha) || IS_QLA2200(ha))
                return link_speeds[0];
        else if (speed == 0x13)
                return link_speeds[6];
        else if (speed < 6)
                return link_speeds[speed];
        else
                return link_speeds[LS_UNKNOWN];
}
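
/*
 * For reference, the firmware speed codes above map as: 0 -> 1 Gb/s,
 * 1 -> 2 Gb/s, 3 -> 4 Gb/s, 4 -> 8 Gb/s, 5 -> 16 Gb/s, and 0x13 -> 10 Gb/s
 * (the CNA/FCoE rate); anything unrecognized prints as "?", and
 * ISP21xx/22xx parts always report 1 Gb/s.
 */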

static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
        struct qla_hw_data *ha = vha->hw;

        /*
         * 8200 AEN Interpretation:
         * mb[0] = AEN code
         * mb[1] = AEN Reason code
         * mb[2] = LSW of Peg-Halt Status-1 Register
         * mb[6] = MSW of Peg-Halt Status-1 Register
         * mb[3] = LSW of Peg-Halt Status-2 register
         * mb[7] = MSW of Peg-Halt Status-2 register
         * mb[4] = IDC Device-State Register value
         * mb[5] = IDC Driver-Presence Register value
         */
        ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
            "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
            mb[0], mb[1], mb[2], mb[6]);
        ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
            "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
            "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

        if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
            IDC_HEARTBEAT_FAILURE)) {
                ha->flags.nic_core_hung = 1;
                ql_log(ql_log_warn, vha, 0x5060,
                    "83XX: F/W Error Reported: Check if reset required.\n");

                if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
                        uint32_t protocol_engine_id, fw_err_code, err_level;

                        /*
                         * IDC_PEG_HALT_STATUS_CHANGE interpretation:
                         * - PEG-Halt Status-1 Register:
                         *   (LSW = mb[2], MSW = mb[6])
                         *   Bits 0-7   = protocol-engine ID
                         *   Bits 8-28  = f/w error code
                         *   Bits 29-31 = Error-level
                         *     Error-level 0x1 = Non-Fatal error
                         *     Error-level 0x2 = Recoverable Fatal error
                         *     Error-level 0x4 = UnRecoverable Fatal error
                         * - PEG-Halt Status-2 Register:
                         *   (LSW = mb[3], MSW = mb[7])
                         */
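                        /*
                         * The 32-bit status is split across two 16-bit
                         * mailboxes, so bits 8-28 are reassembled from both
                         * halves below: bits 8-15 of mb[2] form the low byte
                         * of the error code and bits 0-12 of mb[6] supply
                         * the remaining 13 bits.
                         */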
                        protocol_engine_id = (mb[2] & 0xff);
                        fw_err_code = (((mb[2] & 0xff00) >> 8) |
                            ((mb[6] & 0x1fff) << 8));
                        err_level = ((mb[6] & 0xe000) >> 13);
                        ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
                            "Register: protocol_engine_id=0x%x "
                            "fw_err_code=0x%x err_level=0x%x.\n",
                            protocol_engine_id, fw_err_code, err_level);
                        ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
                            "Register: 0x%x%x.\n", mb[7], mb[3]);
                        if (err_level == ERR_LEVEL_NON_FATAL) {
                                ql_log(ql_log_warn, vha, 0x5063,
                                    "Not a fatal error, f/w has recovered "
                                    "itself.\n");
                        } else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
                                ql_log(ql_log_fatal, vha, 0x5064,
                                    "Recoverable Fatal error: Chip reset "
                                    "required.\n");
                                qla83xx_schedule_work(vha,
                                    QLA83XX_NIC_CORE_RESET);
                        } else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
                                ql_log(ql_log_fatal, vha, 0x5065,
                                    "Unrecoverable Fatal error: Set FAILED "
                                    "state, reboot required.\n");
                                qla83xx_schedule_work(vha,
                                    QLA83XX_NIC_CORE_UNRECOVERABLE);
                        }
                }

                if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
                        uint16_t peg_fw_state, nw_interface_link_up;
                        uint16_t nw_interface_signal_detect, sfp_status;
                        uint16_t htbt_counter, htbt_monitor_enable;
                        uint16_t sfp_additional_info, sfp_multirate;
                        uint16_t sfp_tx_fault, link_speed, dcbx_status;

                        /*
                         * IDC_NIC_FW_REPORTED_FAILURE interpretation:
                         * - PEG-to-FC Status Register:
                         *   (LSW = mb[2], MSW = mb[6])
                         *   Bits 0-7   = Peg-Firmware state
                         *   Bit  8     = N/W Interface Link-up
                         *   Bit  9     = N/W Interface signal detected
                         *   Bits 10-11 = SFP Status
                         *     SFP Status 0x0 = SFP+ transceiver not expected
                         *     SFP Status 0x1 = SFP+ transceiver not present
                         *     SFP Status 0x2 = SFP+ transceiver invalid
                         *     SFP Status 0x3 = SFP+ transceiver present and
                         *         valid
                         *   Bits 12-14 = Heartbeat Counter
                         *   Bit  15    = Heartbeat Monitor Enable
                         *   Bits 16-17 = SFP Additional Info
                         *     SFP info 0x0 = Unrecognized transceiver for
                         *         Ethernet
                         *     SFP info 0x1 = SFP+ brand validation failed
                         *     SFP info 0x2 = SFP+ speed validation failed
                         *     SFP info 0x3 = SFP+ access error
                         *   Bit  18    = SFP Multirate
                         *   Bit  19    = SFP Tx Fault
                         *   Bits 20-22 = Link Speed
                         *   Bits 23-27 = Reserved
                         *   Bits 28-30 = DCBX Status
                         *     DCBX Status 0x0 = DCBX Disabled
                         *     DCBX Status 0x1 = DCBX Enabled
                         *     DCBX Status 0x2 = DCBX Exchange error
                         *   Bit  31    = Reserved
                         */
                        peg_fw_state = (mb[2] & 0x00ff);
                        nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
                        nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
                        sfp_status = ((mb[2] & 0x0c00) >> 10);
                        htbt_counter = ((mb[2] & 0x7000) >> 12);
                        htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
                        sfp_additional_info = (mb[6] & 0x0003);
                        sfp_multirate = ((mb[6] & 0x0004) >> 2);
                        sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
                        link_speed = ((mb[6] & 0x0070) >> 4);
                        dcbx_status = ((mb[6] & 0x7000) >> 12);

                        ql_log(ql_log_warn, vha, 0x5066,
                            "Peg-to-FC Status Register: peg_fw_state=0x%x, "
                            "nw_interface_link_up=0x%x, "
                            "nw_interface_signal_detect=0x%x, "
                            "sfp_status=0x%x.\n", peg_fw_state,
                            nw_interface_link_up, nw_interface_signal_detect,
                            sfp_status);
                        ql_log(ql_log_warn, vha, 0x5067,
                            "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
                            "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
                            htbt_counter, htbt_monitor_enable,
                            sfp_additional_info, sfp_multirate);
                        ql_log(ql_log_warn, vha, 0x5068,
                            "sfp_tx_fault=0x%x, link_speed=0x%x, "
                            "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
                            dcbx_status);

                        qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
                }

                if (mb[1] & IDC_HEARTBEAT_FAILURE) {
                        ql_log(ql_log_warn, vha, 0x5069,
                            "Heartbeat Failure encountered, chip reset "
                            "required.\n");

                        qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
                }
        }

        if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
                ql_log(ql_log_info, vha, 0x506a,
                    "IDC Device-State changed = 0x%x.\n", mb[4]);
                qla83xx_schedule_work(vha, MBA_IDC_AEN);
        }
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
        uint16_t handle_cnt;
        uint16_t cnt, mbx;
        uint32_t handles[5];
        struct qla_hw_data *ha = vha->hw;
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
        struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
        struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
        uint32_t rscn_entry, host_pid;
        unsigned long flags;

        /* Setup to process RIO completion. */
        handle_cnt = 0;
        if (IS_CNA_CAPABLE(ha))
                goto skip_rio;
        switch (mb[0]) {
        case MBA_SCSI_COMPLETION:
                handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
                handle_cnt = 1;
                break;
        case MBA_CMPLT_1_16BIT:
                handles[0] = mb[1];
                handle_cnt = 1;
                mb[0] = MBA_SCSI_COMPLETION;
                break;
        case MBA_CMPLT_2_16BIT:
                handles[0] = mb[1];
                handles[1] = mb[2];
                handle_cnt = 2;
                mb[0] = MBA_SCSI_COMPLETION;
                break;
        case MBA_CMPLT_3_16BIT:
                handles[0] = mb[1];
                handles[1] = mb[2];
                handles[2] = mb[3];
                handle_cnt = 3;
                mb[0] = MBA_SCSI_COMPLETION;
                break;
        case MBA_CMPLT_4_16BIT:
                handles[0] = mb[1];
                handles[1] = mb[2];
                handles[2] = mb[3];
                handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
                handle_cnt = 4;
                mb[0] = MBA_SCSI_COMPLETION;
                break;
        case MBA_CMPLT_5_16BIT:
                handles[0] = mb[1];
                handles[1] = mb[2];
                handles[2] = mb[3];
                handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
                handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
                handle_cnt = 5;
                mb[0] = MBA_SCSI_COMPLETION;
                break;
        case MBA_CMPLT_2_32BIT:
                handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
                handles[1] = le32_to_cpu(
                    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
                    RD_MAILBOX_REG(ha, reg, 6));
                handle_cnt = 2;
                mb[0] = MBA_SCSI_COMPLETION;
                break;
        default:
                break;
        }
skip_rio:
        switch (mb[0]) {
        case MBA_SCSI_COMPLETION:       /* Fast Post */
                if (!vha->flags.online)
                        break;

                for (cnt = 0; cnt < handle_cnt; cnt++)
                        qla2x00_process_completed_request(vha, rsp->req,
                            handles[cnt]);
                break;

        case MBA_RESET:                 /* Reset */
                ql_dbg(ql_dbg_async, vha, 0x5002,
                    "Asynchronous RESET.\n");

                set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
                break;

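        /*
         * Note: for the system-error AEN below, ISP81xx/83xx parts also
         * latch mailbox 7; besides being logged, MBX_3 set there while
         * this runs on port 0 additionally schedules an MPI reset.
         */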
        case MBA_SYSTEM_ERR:            /* System Error */
                mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
                    RD_REG_WORD(&reg24->mailbox7) : 0;
                ql_log(ql_log_warn, vha, 0x5003,
                    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
                    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

                ha->isp_ops->fw_dump(vha, 1);

                if (IS_FWI2_CAPABLE(ha)) {
                        if (mb[1] == 0 && mb[2] == 0) {
                                ql_log(ql_log_fatal, vha, 0x5004,
                                    "Unrecoverable Hardware Error: adapter "
                                    "marked OFFLINE!\n");
                                vha->flags.online = 0;
                                vha->device_flags |= DFLG_DEV_FAILED;
                        } else {
                                /* Check to see if MPI timeout occurred */
                                if ((mbx & MBX_3) && (ha->flags.port0))
                                        set_bit(MPI_RESET_NEEDED,
                                            &vha->dpc_flags);

                                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                        }
                } else if (mb[1] == 0) {
                        ql_log(ql_log_fatal, vha, 0x5005,
                            "Unrecoverable Hardware Error: adapter marked "
                            "OFFLINE!\n");
                        vha->flags.online = 0;
                        vha->device_flags |= DFLG_DEV_FAILED;
                } else
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                break;

        case MBA_REQ_TRANSFER_ERR:      /* Request Transfer Error */
                ql_log(ql_log_warn, vha, 0x5006,
                    "ISP Request Transfer Error (%x).\n", mb[1]);

                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                break;

        case MBA_RSP_TRANSFER_ERR:      /* Response Transfer Error */
                ql_log(ql_log_warn, vha, 0x5007,
                    "ISP Response Transfer Error.\n");

                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                break;

        case MBA_WAKEUP_THRES:          /* Request Queue Wake-up */
                ql_dbg(ql_dbg_async, vha, 0x5008,
                    "Asynchronous WAKEUP_THRES.\n");
                break;

        case MBA_LIP_OCCURRED:          /* Loop Initialization Procedure */
                ql_dbg(ql_dbg_async, vha, 0x5009,
                    "LIP occurred (%x).\n", mb[1]);

                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                        atomic_set(&vha->loop_state, LOOP_DOWN);
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
                        qla2x00_mark_all_devices_lost(vha, 1);
                }

                if (vha->vp_idx) {
                        atomic_set(&vha->vp_state, VP_FAILED);
                        fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                }

                set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
                set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

                vha->flags.management_server_logged_in = 0;
                qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
                break;

        case MBA_LOOP_UP:               /* Loop Up Event */
                if (IS_QLA2100(ha) || IS_QLA2200(ha))
                        ha->link_data_rate = PORT_SPEED_1GB;
                else
                        ha->link_data_rate = mb[1];

                ql_dbg(ql_dbg_async, vha, 0x500a,
                    "LOOP UP detected (%s Gbps).\n",
                    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

                vha->flags.management_server_logged_in = 0;
                qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
                break;

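        /*
         * Note: mailbox 4 (mailbox_out[4] on ISP82xx) appears to carry
         * extra detail about the loop-down event on CNA parts; it is
         * sampled below only so it can be logged alongside mb[1-3].
         */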
        case MBA_LOOP_DOWN:             /* Loop Down Event */
                mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
                    ? RD_REG_WORD(&reg24->mailbox4) : 0;
                mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4])
                    : mbx;
                ql_dbg(ql_dbg_async, vha, 0x500b,
                    "LOOP DOWN detected (%x %x %x %x).\n",
                    mb[1], mb[2], mb[3], mbx);

                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                        atomic_set(&vha->loop_state, LOOP_DOWN);
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
                        vha->device_flags |= DFLG_NO_CABLE;
                        qla2x00_mark_all_devices_lost(vha, 1);
                }

                if (vha->vp_idx) {
                        atomic_set(&vha->vp_state, VP_FAILED);
                        fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                }

                vha->flags.management_server_logged_in = 0;
                ha->link_data_rate = PORT_SPEED_UNKNOWN;
                qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
                break;

        case MBA_LIP_RESET:             /* LIP reset occurred */
                ql_dbg(ql_dbg_async, vha, 0x500c,
                    "LIP reset occurred (%x).\n", mb[1]);

                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                        atomic_set(&vha->loop_state, LOOP_DOWN);
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
                        qla2x00_mark_all_devices_lost(vha, 1);
                }

                if (vha->vp_idx) {
                        atomic_set(&vha->vp_state, VP_FAILED);
                        fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                }

                set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

                ha->operating_mode = LOOP;
                vha->flags.management_server_logged_in = 0;
                qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
                break;

        /* case MBA_DCBX_COMPLETE: */
        case MBA_POINT_TO_POINT:        /* Point-to-Point */
                if (IS_QLA2100(ha))
                        break;

                if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
                        ql_dbg(ql_dbg_async, vha, 0x500d,
                            "DCBX Completed -- %04x %04x %04x.\n",
                            mb[1], mb[2], mb[3]);
                        if (ha->notify_dcbx_comp)
                                complete(&ha->dcbx_comp);
                } else
                        ql_dbg(ql_dbg_async, vha, 0x500e,
                            "Asynchronous P2P MODE received.\n");

                /*
                 * Until there's a transition from loop down to loop up, treat
                 * this as loop down only.
                 */
                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                        atomic_set(&vha->loop_state, LOOP_DOWN);
                        if (!atomic_read(&vha->loop_down_timer))
                                atomic_set(&vha->loop_down_timer,
                                    LOOP_DOWN_TIME);
                        qla2x00_mark_all_devices_lost(vha, 1);
                }

                if (vha->vp_idx) {
                        atomic_set(&vha->vp_state, VP_FAILED);
                        fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                }

                if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
                        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

                set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
                set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

                ha->flags.gpsc_supported = 1;
                vha->flags.management_server_logged_in = 0;
                break;

        case MBA_CHG_IN_CONNECTION:     /* Change in connection mode */
                if (IS_QLA2100(ha))
                        break;

                ql_dbg(ql_dbg_async, vha, 0x500f,
                    "Configuration change detected: value=%x.\n", mb[1]);

                if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                        atomic_set(&vha->loop_state, LOOP_DOWN);
                        if (!atomic_read(&vha->loop_down_timer))
                                atomic_set(&vha->loop_down_timer,
                                    LOOP_DOWN_TIME);
                        qla2x00_mark_all_devices_lost(vha, 1);
                }

                if (vha->vp_idx) {
                        atomic_set(&vha->vp_state, VP_FAILED);
                        fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
                }

                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
                break;

        case MBA_PORT_UPDATE:           /* Port database update */
                /*
                 * Handle only global and vn-port update events
                 *
                 * Relevant inputs:
                 * mb[1] = N_Port handle of changed port
                 *         OR 0xffff for global event
                 * mb[2] = New login state
                 *         7 = Port logged out
                 * mb[3] = LSB is vp_idx, 0xff = all vps
                 *
                 * Skip processing if:
                 *   Event is global, vp_idx is NOT all vps,
                 *   vp_idx does not match
                 *   Event is not global, vp_idx does not match
                 */
                if (IS_QLA2XXX_MIDTYPE(ha) &&
                    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
                    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
                        break;

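                /*
                 * Worked example of the check above: a global update
                 * (mb[1] == 0xffff) with mb[3] == 0x00ff is processed by
                 * every vport; a global update addressed to one vp index
                 * is skipped by all other vports, as is a per-port update
                 * whose vp index does not match ours.
                 */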
                /* Global event -- port logout or port unavailable. */
                if (mb[1] == 0xffff && mb[2] == 0x7) {
                        ql_dbg(ql_dbg_async, vha, 0x5010,
                            "Port unavailable %04x %04x %04x.\n",
                            mb[1], mb[2], mb[3]);
                        ql_log(ql_log_warn, vha, 0x505e,
                            "Link is offline.\n");

                        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                                atomic_set(&vha->loop_state, LOOP_DOWN);
                                atomic_set(&vha->loop_down_timer,
                                    LOOP_DOWN_TIME);
                                vha->device_flags |= DFLG_NO_CABLE;
                                qla2x00_mark_all_devices_lost(vha, 1);
                        }

                        if (vha->vp_idx) {
                                atomic_set(&vha->vp_state, VP_FAILED);
                                fc_vport_set_state(vha->fc_vport,
                                    FC_VPORT_FAILED);
                                qla2x00_mark_all_devices_lost(vha, 1);
                        }

                        vha->flags.management_server_logged_in = 0;
                        ha->link_data_rate = PORT_SPEED_UNKNOWN;
                        break;
                }

                /*
                 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
                 * event etc. earlier indicating loop is down) then process
                 * it.  Otherwise ignore it and wait for RSCN to come in.
                 */
                atomic_set(&vha->loop_down_timer, 0);
                if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
                        ql_dbg(ql_dbg_async, vha, 0x5011,
                            "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
                            mb[1], mb[2], mb[3]);

                        qlt_async_event(mb[0], vha, mb);
                        break;
                }

                ql_dbg(ql_dbg_async, vha, 0x5012,
                    "Port database changed %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);
                ql_log(ql_log_warn, vha, 0x505f,
                    "Link is operational (%s Gbps).\n",
                    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

                /*
                 * Mark all devices as missing so we will login again.
                 */
                atomic_set(&vha->loop_state, LOOP_UP);

                qla2x00_mark_all_devices_lost(vha, 1);

                if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
                        set_bit(SCR_PENDING, &vha->dpc_flags);

                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

                qlt_async_event(mb[0], vha, mb);
                break;

        case MBA_RSCN_UPDATE:           /* State Change Registration */
                /* Check if the Vport has issued a SCR */
                if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
                        break;
                /* Only handle SCNs for our Vport index. */
                if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
                        break;

                ql_dbg(ql_dbg_async, vha, 0x5013,
                    "RSCN database changed -- %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);

                rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
                host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
                    | vha->d_id.b.al_pa;
                if (rscn_entry == host_pid) {
                        ql_dbg(ql_dbg_async, vha, 0x5014,
                            "Ignoring RSCN update to local host "
                            "port ID (%06x).\n", host_pid);
                        break;
                }

                /* Ignore reserved bits from RSCN-payload. */
                rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

                atomic_set(&vha->loop_down_timer, 0);
                vha->flags.management_server_logged_in = 0;

                set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                set_bit(RSCN_UPDATE, &vha->dpc_flags);
                qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
                break;

        /* case MBA_RIO_RESPONSE: */
        case MBA_ZIO_RESPONSE:
                ql_dbg(ql_dbg_async, vha, 0x5015,
                    "[R|Z]IO update completion.\n");

                if (IS_FWI2_CAPABLE(ha))
                        qla24xx_process_response_queue(vha, rsp);
                else
                        qla2x00_process_response_queue(rsp);
                break;

        case MBA_DISCARD_RND_FRAME:
                ql_dbg(ql_dbg_async, vha, 0x5016,
                    "Discard RND Frame -- %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);
                break;

        case MBA_TRACE_NOTIFICATION:
                ql_dbg(ql_dbg_async, vha, 0x5017,
                    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
                break;

        case MBA_ISP84XX_ALERT:
                ql_dbg(ql_dbg_async, vha, 0x5018,
                    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);

                spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
                switch (mb[1]) {
                case A84_PANIC_RECOVERY:
                        ql_log(ql_log_info, vha, 0x5019,
                            "Alert 84XX: panic recovery %04x %04x.\n",
                            mb[2], mb[3]);
                        break;
                case A84_OP_LOGIN_COMPLETE:
                        ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
                        ql_log(ql_log_info, vha, 0x501a,
                            "Alert 84XX: firmware version %x.\n",
                            ha->cs84xx->op_fw_version);
                        break;
                case A84_DIAG_LOGIN_COMPLETE:
                        ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
                        ql_log(ql_log_info, vha, 0x501b,
                            "Alert 84XX: diagnostic firmware version %x.\n",
                            ha->cs84xx->diag_fw_version);
                        break;
                case A84_GOLD_LOGIN_COMPLETE:
                        ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
                        ha->cs84xx->fw_update = 1;
                        ql_log(ql_log_info, vha, 0x501c,
                            "Alert 84XX: gold firmware version %x.\n",
                            ha->cs84xx->gold_fw_version);
                        break;
                default:
                        ql_log(ql_log_warn, vha, 0x501d,
                            "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
                            mb[1], mb[2], mb[3]);
                }
                spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
                break;
        case MBA_DCBX_START:
                ql_dbg(ql_dbg_async, vha, 0x501e,
                    "DCBX Started -- %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);
                break;
        case MBA_DCBX_PARAM_UPDATE:
                ql_dbg(ql_dbg_async, vha, 0x501f,
                    "DCBX Parameters Updated -- %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);
                break;
        case MBA_FCF_CONF_ERR:
                ql_dbg(ql_dbg_async, vha, 0x5020,
                    "FCF Configuration Error -- %04x %04x %04x.\n",
                    mb[1], mb[2], mb[3]);
                break;
        case MBA_IDC_NOTIFY:
                /* See if we need to quiesce any I/O */
                if (IS_QLA8031(vha->hw))
                        if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
                            (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
                                set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
                                qla2xxx_wake_dpc(vha);
                        }
                /* Fall through -- notifications also feed the IDC handler. */
        case MBA_IDC_COMPLETE:
        case MBA_IDC_TIME_EXT:
                if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
                        qla81xx_idc_event(vha, mb[0], mb[1]);
                break;

        case MBA_IDC_AEN:
                mb[4] = RD_REG_WORD(&reg24->mailbox4);
                mb[5] = RD_REG_WORD(&reg24->mailbox5);
                mb[6] = RD_REG_WORD(&reg24->mailbox6);
                mb[7] = RD_REG_WORD(&reg24->mailbox7);
                qla83xx_handle_8200_aen(vha, mb);
                break;

        default:
                ql_dbg(ql_dbg_async, vha, 0x5057,
                    "Unknown AEN:%04x %04x %04x %04x\n",
                    mb[0], mb[1], mb[2], mb[3]);
        }

        qlt_async_event(mb[0], vha, mb);

        if (!vha->vp_idx && ha->num_vhosts)
                qla2x00_alert_all_vps(rsp, mb);
}
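
/*
 * Fast-post (RIO) completions carry only IOCB handles in the mailboxes,
 * with no status payload, so the helper below completes the command with
 * DID_OK; any failure would have arrived as a full status IOCB instead.
 */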

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
        srb_t *sp;
        struct qla_hw_data *ha = vha->hw;

        /* Validate handle. */
        if (index >= MAX_OUTSTANDING_COMMANDS) {
                ql_log(ql_log_warn, vha, 0x3014,
                    "Invalid SCSI command index (%x).\n", index);

                if (IS_QLA82XX(ha))
                        set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
                else
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                return;
        }

        sp = req->outstanding_cmds[index];
        if (sp) {
                /* Free outstanding command slot. */
                req->outstanding_cmds[index] = NULL;

                /* Save ISP completion status */
                sp->done(ha, sp, DID_OK << 16);
        } else {
                ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

                if (IS_QLA82XX(ha))
                        set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
                else
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        }
}

static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
        struct qla_hw_data *ha = vha->hw;
        sts_entry_t *pkt = iocb;
        srb_t *sp = NULL;
        uint16_t index;

        index = LSW(pkt->handle);
        if (index >= MAX_OUTSTANDING_COMMANDS) {
                ql_log(ql_log_warn, vha, 0x5031,
                    "Invalid command index (%x).\n", index);
                if (IS_QLA82XX(ha))
                        set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
                else
                        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                goto done;
        }
        sp = req->outstanding_cmds[index];
        if (!sp) {
                ql_log(ql_log_warn, vha, 0x5032,
                    "Invalid completion handle (%x) -- timed-out.\n", index);
                return sp;
        }
        if (sp->handle != index) {
                ql_log(ql_log_warn, vha, 0x5033,
                    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
                return NULL;
        }

        req->outstanding_cmds[index] = NULL;

done:
        return sp;
}

static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
        const char func[] = "MBX-IOCB";
        const char *type;
        fc_port_t *fcport;
        srb_t *sp;
        struct srb_iocb *lio;
        uint16_t *data;
        uint16_t status;

        sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
        if (!sp)
                return;

        lio = &sp->u.iocb_cmd;
        type = sp->name;
        fcport = sp->fcport;
        data = lio->u.logio.data;

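        /*
         * Seed the logio return data with a generic failure; the values
         * are overwritten below once the IOCB is known to have completed
         * successfully, so every early exit reports MBS_COMMAND_ERROR.
         */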
        data[0] = MBS_COMMAND_ERROR;
        data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
            QLA_LOGIO_LOGIN_RETRIED : 0;
        if (mbx->entry_status) {
                ql_dbg(ql_dbg_async, vha, 0x5043,
                    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
                    "entry-status=%x status=%x state-flag=%x "
                    "status-flags=%x.\n", type, sp->handle,
                    fcport->d_id.b.domain, fcport->d_id.b.area,
                    fcport->d_id.b.al_pa, mbx->entry_status,
                    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
                    le16_to_cpu(mbx->status_flags));

                ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
                    (uint8_t *)mbx, sizeof(*mbx));

                goto logio_done;
        }

        status = le16_to_cpu(mbx->status);
        if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
            le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
                status = 0;
        if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
                ql_dbg(ql_dbg_async, vha, 0x5045,
                    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
                    type, sp->handle, fcport->d_id.b.domain,
                    fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    le16_to_cpu(mbx->mb1));

                data[0] = MBS_COMMAND_COMPLETE;
                if (sp->type == SRB_LOGIN_CMD) {
                        fcport->port_type = FCT_TARGET;
                        if (le16_to_cpu(mbx->mb1) & BIT_0)
                                fcport->port_type = FCT_INITIATOR;
                        else if (le16_to_cpu(mbx->mb1) & BIT_1)
                                fcport->flags |= FCF_FCP2_DEVICE;
                }
                goto logio_done;
        }

        data[0] = le16_to_cpu(mbx->mb0);
        switch (data[0]) {
        case MBS_PORT_ID_USED:
                data[1] = le16_to_cpu(mbx->mb1);
                break;
        case MBS_LOOP_ID_USED:
                break;
        default:
                data[0] = MBS_COMMAND_ERROR;
                break;
        }

        ql_log(ql_log_warn, vha, 0x5046,
            "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
            "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
            fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
            status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
            le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
            le16_to_cpu(mbx->mb7));

logio_done:
        sp->done(vha, sp, 0);
}

static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
        const char func[] = "CT_IOCB";
        const char *type;
        srb_t *sp;
        struct fc_bsg_job *bsg_job;
        uint16_t comp_status;
        int res;

        sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
        if (!sp)
                return;

        bsg_job = sp->u.bsg_job;

        type = "ct pass-through";

        comp_status = le16_to_cpu(pkt->comp_status);

        /*
         * Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
         * fc payload to the caller.
         */
        bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);

        if (comp_status != CS_COMPLETE) {
                if (comp_status == CS_DATA_UNDERRUN) {
                        res = DID_OK << 16;
                        bsg_job->reply->reply_payload_rcv_len =
                            le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

                        ql_log(ql_log_warn, vha, 0x5048,
                            "CT pass-through-%s error "
                            "comp_status-status=0x%x total_byte=0x%x.\n",
                            type, comp_status,
                            bsg_job->reply->reply_payload_rcv_len);
                } else {
                        ql_log(ql_log_warn, vha, 0x5049,
                            "CT pass-through-%s error "
                            "comp_status-status=0x%x.\n", type, comp_status);
                        res = DID_ERROR << 16;
                        bsg_job->reply->reply_payload_rcv_len = 0;
                }
                ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
                    (uint8_t *)pkt, sizeof(*pkt));
        } else {
                res = DID_OK << 16;
                bsg_job->reply->reply_payload_rcv_len =
                    bsg_job->reply_payload.payload_len;
                bsg_job->reply_len = 0;
        }

        sp->done(vha, sp, res);
}

static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
        const char func[] = "ELS_CT_IOCB";
        const char *type;
        srb_t *sp;
        struct fc_bsg_job *bsg_job;
        uint16_t comp_status;
        uint32_t fw_status[3];
        uint8_t *fw_sts_ptr;
        int res;

        sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
        if (!sp)
                return;
        bsg_job = sp->u.bsg_job;

        type = NULL;
        switch (sp->type) {
        case SRB_ELS_CMD_RPT:
        case SRB_ELS_CMD_HST:
                type = "els";
                break;
        case SRB_CT_CMD:
                type = "ct pass-through";
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x503e,
                    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
                return;
        }

        comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
        fw_status[1] = le16_to_cpu(
            ((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
        fw_status[2] = le16_to_cpu(
            ((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

        /*
         * Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
         * fc payload to the caller.
         */
        bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

        if (comp_status != CS_COMPLETE) {
                if (comp_status == CS_DATA_UNDERRUN) {
                        res = DID_OK << 16;
                        bsg_job->reply->reply_payload_rcv_len =
                            le16_to_cpu(((struct els_sts_entry_24xx *)
                                pkt)->total_byte_count);

                        ql_dbg(ql_dbg_user, vha, 0x503f,
                            "ELS-CT pass-through-%s error hdl=%x "
                            "comp_status-status=0x%x error subcode 1=0x%x "
                            "error subcode 2=0x%x total_byte=0x%x.\n",
                            type, sp->handle, comp_status, fw_status[1],
                            fw_status[2],
                            le16_to_cpu(((struct els_sts_entry_24xx *)
                                pkt)->total_byte_count));
                        fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
                            sizeof(struct fc_bsg_reply);
                        memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
                } else {
                        ql_dbg(ql_dbg_user, vha, 0x5040,
                            "ELS-CT pass-through-%s error hdl=%x "
                            "comp_status-status=0x%x error subcode 1=0x%x "
                            "error subcode 2=0x%x.\n",
                            type, sp->handle, comp_status,
                            le16_to_cpu(((struct els_sts_entry_24xx *)
                                pkt)->error_subcode_1),
                            le16_to_cpu(((struct els_sts_entry_24xx *)
                                pkt)->error_subcode_2));
                        res = DID_ERROR << 16;
                        bsg_job->reply->reply_payload_rcv_len = 0;
                        fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
                            sizeof(struct fc_bsg_reply);
                        memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
                }
                ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
                    (uint8_t *)pkt, sizeof(*pkt));
        } else {
                res = DID_OK << 16;
                bsg_job->reply->reply_payload_rcv_len =
                    bsg_job->reply_payload.payload_len;
                bsg_job->reply_len = 0;
        }

        sp->done(vha, sp, res);
}
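
/*
 * For the login IOCB handled below, io_parameter[0] of the completion
 * carries the decoded PLOGI service parameters: BIT_4 marks the remote
 * port as a target, BIT_5 as an initiator, BIT_7 advertises
 * confirmed-completion support, and BIT_8 flags an FCP-2 (tape) device;
 * io_parameter[7]-[10] report class 2/3 service-parameter validity.
 */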

static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
        const char func[] = "LOGIO-IOCB";
        const char *type;
        fc_port_t *fcport;
        srb_t *sp;
        struct srb_iocb *lio;
        uint16_t *data;
        uint32_t iop[2];

        sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
        if (!sp)
                return;

        lio = &sp->u.iocb_cmd;
        type = sp->name;
        fcport = sp->fcport;
        data = lio->u.logio.data;

        data[0] = MBS_COMMAND_ERROR;
        data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
            QLA_LOGIO_LOGIN_RETRIED : 0;
        if (logio->entry_status) {
                ql_log(ql_log_warn, fcport->vha, 0x5034,
                    "Async-%s error entry - hdl=%x "
                    "portid=%02x%02x%02x entry-status=%x.\n",
                    type, sp->handle, fcport->d_id.b.domain,
                    fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    logio->entry_status);
                ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
                    (uint8_t *)logio, sizeof(*logio));

                goto logio_done;
        }

        if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
                ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
                    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
                    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
                    fcport->d_id.b.area, fcport->d_id.b.al_pa,
                    le32_to_cpu(logio->io_parameter[0]));

                data[0] = MBS_COMMAND_COMPLETE;
                if (sp->type != SRB_LOGIN_CMD)
                        goto logio_done;

                iop[0] = le32_to_cpu(logio->io_parameter[0]);
                if (iop[0] & BIT_4) {
                        fcport->port_type = FCT_TARGET;
                        if (iop[0] & BIT_8)
                                fcport->flags |= FCF_FCP2_DEVICE;
                } else if (iop[0] & BIT_5)
                        fcport->port_type = FCT_INITIATOR;

                if (iop[0] & BIT_7)
                        fcport->flags |= FCF_CONF_COMP_SUPPORTED;

                if (logio->io_parameter[7] || logio->io_parameter[8])
                        fcport->supported_classes |= FC_COS_CLASS2;
                if (logio->io_parameter[9] || logio->io_parameter[10])
                        fcport->supported_classes |= FC_COS_CLASS3;

                goto logio_done;
        }

        iop[0] = le32_to_cpu(logio->io_parameter[0]);
        iop[1] = le32_to_cpu(logio->io_parameter[1]);
        switch (iop[0]) {
        case LSC_SCODE_PORTID_USED:
                data[0] = MBS_PORT_ID_USED;
                data[1] = LSW(iop[1]);
                break;
        case LSC_SCODE_NPORT_USED:
                data[0] = MBS_LOOP_ID_USED;
                break;
        default:
                data[0] = MBS_COMMAND_ERROR;
                break;
        }

        ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
            "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
            "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
            fcport->d_id.b.area, fcport->d_id.b.al_pa,
            le16_to_cpu(logio->comp_status),
            le32_to_cpu(logio->io_parameter[0]),
            le32_to_cpu(logio->io_parameter[1]));

logio_done:
        sp->done(vha, sp, 0);
}

static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
        const char func[] = "TMF-IOCB";
        const char *type;
        fc_port_t *fcport;
        srb_t *sp;
        struct srb_iocb *iocb;
        struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
        int error = 1;

        sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
        if (!sp)
                return;

        iocb = &sp->u.iocb_cmd;
        type = sp->name;
        fcport = sp->fcport;

        if (sts->entry_status) {
                ql_log(ql_log_warn, fcport->vha, 0x5038,
                    "Async-%s error - hdl=%x entry-status(%x).\n",
                    type, sp->handle, sts->entry_status);
        } else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
                ql_log(ql_log_warn, fcport->vha, 0x5039,
                    "Async-%s error - hdl=%x completion status(%x).\n",
                    type, sp->handle, sts->comp_status);
        } else if (!(le16_to_cpu(sts->scsi_status) &
            SS_RESPONSE_INFO_LEN_VALID)) {
                ql_log(ql_log_warn, fcport->vha, 0x503a,
                    "Async-%s error - hdl=%x no response info(%x).\n",
                    type, sp->handle, sts->scsi_status);
        } else if (le32_to_cpu(sts->rsp_data_len) < 4) {
                ql_log(ql_log_warn, fcport->vha, 0x503b,
                    "Async-%s error - hdl=%x not enough response(%d).\n",
                    type, sp->handle, sts->rsp_data_len);
        } else if (sts->data[3]) {
                ql_log(ql_log_warn, fcport->vha, 0x503c,
                    "Async-%s error - hdl=%x response(%x).\n",
                    type, sp->handle, sts->data[3]);
        } else {
                error = 0;
        }

        if (error) {
                iocb->u.tmf.data = error;
                ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
                    (uint8_t *)sts, sizeof(*sts));
        }

        sp->done(vha, sp, 0);
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
        struct scsi_qla_host *vha;
        struct qla_hw_data *ha = rsp->hw;
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
        sts_entry_t *pkt;
        uint16_t handle_cnt;
        uint16_t cnt;

        vha = pci_get_drvdata(ha->pdev);

        if (!vha->flags.online)
                return;

        while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
                pkt = (sts_entry_t *)rsp->ring_ptr;

                rsp->ring_index++;
                if (rsp->ring_index == rsp->length) {
                        rsp->ring_index = 0;
                        rsp->ring_ptr = rsp->ring;
                } else {
                        rsp->ring_ptr++;
                }

                if (pkt->entry_status != 0) {
                        qla2x00_error_entry(vha, rsp, pkt);
                        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
                        wmb();
                        continue;
                }

                switch (pkt->entry_type) {
                case STATUS_TYPE:
                        qla2x00_status_entry(vha, rsp, pkt);
                        break;
                case STATUS_TYPE_21:
                        handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
                        for (cnt = 0; cnt < handle_cnt; cnt++) {
                                qla2x00_process_completed_request(vha, rsp->req,
                                    ((sts21_entry_t *)pkt)->handle[cnt]);
                        }
                        break;
                case STATUS_TYPE_22:
                        handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
                        for (cnt = 0; cnt < handle_cnt; cnt++) {
                                qla2x00_process_completed_request(vha, rsp->req,
                                    ((sts22_entry_t *)pkt)->handle[cnt]);
                        }
                        break;
                case STATUS_CONT_TYPE:
                        qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
                        break;
                case MBX_IOCB_TYPE:
                        qla2x00_mbx_iocb_entry(vha, rsp->req,
                            (struct mbx_entry *)pkt);
                        break;
                case CT_IOCB_TYPE:
                        qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
                        break;
                default:
                        /* Type Not Supported. */
                        ql_log(ql_log_warn, vha, 0x504a,
                            "Received unknown response pkt type %x "
                            "entry status=%x.\n",
                            pkt->entry_type, pkt->entry_status);
                        break;
                }
                ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
                wmb();
        }

        /* Adjust ring index */
        WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
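
/*
 * Sense data that does not fit in the status IOCB is delivered in one or
 * more status-continuation entries (STATUS_CONT_TYPE above); rsp->status_srb,
 * set in the helper below, tells qla2x00_status_cont_entry() which command
 * the remaining bytes belong to.
 */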
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp, int res)
{
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct scsi_cmnd *cp = GET_CMD_SP(sp);
        uint32_t track_sense_len;

        if (sense_len >= SCSI_SENSE_BUFFERSIZE)
                sense_len = SCSI_SENSE_BUFFERSIZE;

        SET_CMD_SENSE_LEN(sp, sense_len);
        SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
        track_sense_len = sense_len;

        if (sense_len > par_sense_len)
                sense_len = par_sense_len;

        memcpy(cp->sense_buffer, sense_data, sense_len);

        SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
        track_sense_len -= sense_len;
        SET_CMD_SENSE_LEN(sp, track_sense_len);

        if (track_sense_len != 0) {
                rsp->status_srb = sp;
                cp->result = res;
        }

        if (sense_len) {
                ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
                    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
                    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
                    cp);
                ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
                    cp->sense_buffer, sense_len);
        }
}

struct scsi_dif_tuple {
        __be16 guard;   /* Checksum */
        __be16 app_tag; /* APPL identifier */
        __be32 ref_tag; /* Target LBA or indirect LBA */
};
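
/*
 * Each protection interval carries this 8-byte tuple on the wire in
 * big-endian order: a 2-byte guard CRC over the data block, a 2-byte
 * application tag, and a 4-byte reference tag (normally the low 32 bits
 * of the LBA for type 1/2 protection).
 */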

/*
 * Checks the guard or meta-data for the type of error detected by the HBA.
 * In case of errors, the ASC/ASCQ fields in the sense buffer are set with
 * ILLEGAL_REQUEST to indicate to the kernel that the HBA detected the error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct scsi_cmnd *cmd = GET_CMD_SP(sp);
        uint8_t *ap = &sts24->data[12];
        uint8_t *ep = &sts24->data[20];
        uint32_t e_ref_tag, a_ref_tag;
        uint16_t e_app_tag, a_app_tag;
        uint16_t e_guard, a_guard;

        /*
         * The swab32 of the "data" field at the beginning of
         * qla2x00_status_entry() makes the guard field appear at offset 2.
         */
        a_guard = le16_to_cpu(*(uint16_t *)(ap + 2));
        a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
        a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
        e_guard = le16_to_cpu(*(uint16_t *)(ep + 2));
        e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
        e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

        ql_dbg(ql_dbg_io, vha, 0x3023,
            "iocb(s) %p Returned STATUS.\n", sts24);

        ql_dbg(ql_dbg_io, vha, 0x3024,
            "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
            " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
            " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
            cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
            a_app_tag, e_app_tag, a_guard, e_guard);

        /*
         * Ignore sector if:
         * For type 3: ref & app tag is all 'f's
         * For type 0, 1, 2: app tag is all 'f's
         */
        if ((a_app_tag == 0xffff) &&
            ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
            (a_ref_tag == 0xffffffff))) {
                uint32_t blocks_done, resid;
                sector_t lba_s = scsi_get_lba(cmd);

                /* 2TB boundary case covered automatically with this */
                blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

                resid = scsi_bufflen(cmd) - (blocks_done *
                    cmd->device->sector_size);

                scsi_set_resid(cmd, resid);
                cmd->result = DID_OK << 16;

                /* Update protection tag */
                if (scsi_prot_sg_count(cmd)) {
                        uint32_t i, j = 0, k = 0, num_ent;
                        struct scatterlist *sg;
                        struct sd_dif_tuple *spt;

                        /* Patch the corresponding protection tags */
                        scsi_for_each_prot_sg(cmd, sg,
                            scsi_prot_sg_count(cmd), i) {
                                num_ent = sg_dma_len(sg) / 8;
                                if (k + num_ent < blocks_done) {
                                        k += num_ent;
                                        continue;
                                }
                                j = blocks_done - k - 1;
                                k = blocks_done;
                                break;
                        }

                        if (k != blocks_done) {
                                ql_log(ql_log_warn, vha, 0x302f,
                                    "unexpected tag values tag:lba=%x:%llx)\n",
                                    e_ref_tag, (unsigned long long)lba_s);
                                return 1;
                        }

                        spt = page_address(sg_page(sg)) + sg->offset;
                        spt += j;

                        spt->app_tag = 0xffff;
                        if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
                                spt->ref_tag = 0xffffffff;
                }

                return 0;
        }

        /* check guard */
        if (e_guard != a_guard) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
                    0x10, 0x1);
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
                return 1;
        }
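
        /*
         * The checks run guard first, then reference tag, then application
         * tag; the additional sense codes written above and below follow
         * T10: 0x10/0x01 guard check failed, 0x10/0x03 reference tag check
         * failed, 0x10/0x02 application tag check failed.
         */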
        /* check ref tag */
        if (e_ref_tag != a_ref_tag) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
                    0x10, 0x3);
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
                return 1;
        }

        /* check appl tag */
        if (e_app_tag != a_app_tag) {
                scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
                    0x10, 0x2);
                set_driver_byte(cmd, DRIVER_SENSE);
                set_host_byte(cmd, DID_ABORT);
                cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
                return 1;
        }

        return 1;
}

static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
        struct qla_hw_data *ha = vha->hw;
        srb_t *sp;
        uint16_t comp_status;
        uint16_t scsi_status;
        uint16_t thread_id;
        uint32_t rval = EXT_STATUS_OK;
        struct fc_bsg_job *bsg_job = NULL;
        sts_entry_t *sts;
        struct sts_entry_24xx *sts24;

        sts = (sts_entry_t *) pkt;
        sts24 = (struct sts_entry_24xx *) pkt;

        /* Validate handle. */
        if (index >= MAX_OUTSTANDING_COMMANDS) {
                ql_log(ql_log_warn, vha, 0x70af,
                    "Invalid SCSI completion handle 0x%x.\n", index);
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                return;
        }

        sp = req->outstanding_cmds[index];
        if (sp) {
                /* Free outstanding command slot. */
                req->outstanding_cmds[index] = NULL;
                bsg_job = sp->u.bsg_job;
        } else {
                ql_log(ql_log_warn, vha, 0x70b0,
                    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
                    req->id, index);

                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                return;
        }

        if (IS_FWI2_CAPABLE(ha)) {
                comp_status = le16_to_cpu(sts24->comp_status);
                scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
        } else {
                comp_status = le16_to_cpu(sts->comp_status);
                scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
        }

        thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
        switch (comp_status) {
        case CS_COMPLETE:
                if (scsi_status == 0) {
                        bsg_job->reply->reply_payload_rcv_len =
                            bsg_job->reply_payload.payload_len;
                        rval = EXT_STATUS_OK;
                }
                goto done;

        case CS_DATA_OVERRUN:
                ql_dbg(ql_dbg_user, vha, 0x70b1,
                    "Command completed with data overrun thread_id=%d\n",
                    thread_id);
                rval = EXT_STATUS_DATA_OVERRUN;
                break;

        case CS_DATA_UNDERRUN:
                ql_dbg(ql_dbg_user, vha, 0x70b2,
                    "Command completed with data underrun thread_id=%d\n",
                    thread_id);
                rval = EXT_STATUS_DATA_UNDERRUN;
                break;
        case CS_BIDIR_RD_OVERRUN:
                ql_dbg(ql_dbg_user, vha, 0x70b3,
                    "Command completed with read data overrun thread_id=%d\n",
                    thread_id);
                rval = EXT_STATUS_DATA_OVERRUN;
                break;

        case CS_BIDIR_RD_WR_OVERRUN:
                ql_dbg(ql_dbg_user, vha, 0x70b4,
                    "Command completed with read and write data overrun "
                    "thread_id=%d\n", thread_id);
                rval = EXT_STATUS_DATA_OVERRUN;
                break;

        case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
                ql_dbg(ql_dbg_user, vha, 0x70b5,
                    "Command completed with read data overrun and write data "
                    "underrun thread_id=%d\n", thread_id);
                rval = EXT_STATUS_DATA_OVERRUN;
                break;

        case CS_BIDIR_RD_UNDERRUN:
                ql_dbg(ql_dbg_user, vha, 0x70b6,
                    "Command completed with read data underrun "
                    "thread_id=%d\n", thread_id);
                rval = EXT_STATUS_DATA_UNDERRUN;
                break;

        case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
                ql_dbg(ql_dbg_user, vha, 0x70b7,
                    "Command completed with read data underrun and write data "
                    "overrun thread_id=%d\n", thread_id);
                rval = EXT_STATUS_DATA_UNDERRUN;
                break;

        case CS_BIDIR_RD_WR_UNDERRUN:
                ql_dbg(ql_dbg_user, vha, 0x70b8,
                    "Command completed with read and write data underrun "
                    "thread_id=%d\n", thread_id);
                rval = EXT_STATUS_DATA_UNDERRUN;
                break;

        case CS_BIDIR_DMA:
                ql_dbg(ql_dbg_user, vha, 0x70b9,
                    "Command completed with data DMA error thread_id=%d\n",
                    thread_id);
                rval = EXT_STATUS_DMA_ERR;
                break;

        case CS_TIMEOUT:
                ql_dbg(ql_dbg_user, vha, 0x70ba,
                    "Command completed with timeout thread_id=%d\n",
                    thread_id);
                rval = EXT_STATUS_TIMEOUT;
                break;
        default:
                ql_dbg(ql_dbg_user, vha, 0x70bb,
                    "Command completed with completion status=0x%x "
                    "thread_id=%d\n", comp_status, thread_id);
                rval = EXT_STATUS_ERR;
                break;
        }
        bsg_job->reply->reply_payload_rcv_len = 0;

done:
        /* Return the vendor specific reply to API */
        bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
        /*
         * Always return DID_OK; bsg will send the vendor specific response
         * in this case only.
         */
        sp->done(vha, sp, DID_OK << 16);
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
        srb_t *sp;
        fc_port_t *fcport;
        struct scsi_cmnd *cp;
        sts_entry_t *sts;
        struct sts_entry_24xx *sts24;
        uint16_t comp_status;
        uint16_t scsi_status;
        uint16_t ox_id;
        uint8_t lscsi_status;
        int32_t resid;
        uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
            fw_resid_len;
        uint8_t *rsp_info, *sense_data;
        struct qla_hw_data *ha = vha->hw;
        uint32_t handle;
        uint16_t que;
        struct req_que *req;
        int logit = 1;
        int res = 0;
        uint16_t state_flags = 0;

        sts = (sts_entry_t *) pkt;
        sts24 = (struct sts_entry_24xx *) pkt;
        if (IS_FWI2_CAPABLE(ha)) {
                comp_status = le16_to_cpu(sts24->comp_status);
                scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
                state_flags = le16_to_cpu(sts24->state_flags);
        } else {
                comp_status = le16_to_cpu(sts->comp_status);
                scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
        }
        handle = (uint32_t) LSW(sts->handle);
        que = MSW(sts->handle);
        req = ha->req_q_map[que];

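        /*
         * The 32-bit status handle packs the request-queue number in its
         * MSW and the outstanding-command index in its LSW, which is why
         * both halves are unpacked above before the lookup below.
         */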
	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	req->outstanding_cmds[handle] = NULL;
	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on the host and SCSI status, generate a status code for
	 * the Linux mid-layer.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
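		/*
		 * The underrun is only trusted when the IOCB residual and
		 * the firmware residual agree; a mismatch is treated as
		 * dropped frames below.
		 */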
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
		    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * A SCSI status of TASK SET FULL or BUSY means the
			 * task was not completed; any other status here
			 * indicates dropped frames.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover, so instruct the mid layer
		 * to requeue until the class decides how to handle this.
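		 * DID_TRANSPORT_DISRUPTED keeps the command queued while
		 * the rport is blocked.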
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue.\n");

	if (IS_QLA82XX(ha))
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
	else
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			(void)qlt_24xx_process_response_error(vha, pkt);

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			/* ensure that the ATIO queue is empty */
			qlt_24xx_process_atio_queue(vha);
			/* fall through */
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/*
			 * Do nothing in this case; this check prevents it
			 * from falling into the default case.
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
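 *
 * The response work is done while holding ha->hardware_lock, and the
 * loop is bounded at 50 iterations so a misbehaving interrupt source
 * cannot monopolize the CPU.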
2484 */ 2485 irqreturn_t 2486 qla24xx_intr_handler(int irq, void *dev_id) 2487 { 2488 scsi_qla_host_t *vha; 2489 struct qla_hw_data *ha; 2490 struct device_reg_24xx __iomem *reg; 2491 int status; 2492 unsigned long iter; 2493 uint32_t stat; 2494 uint32_t hccr; 2495 uint16_t mb[8]; 2496 struct rsp_que *rsp; 2497 unsigned long flags; 2498 2499 rsp = (struct rsp_que *) dev_id; 2500 if (!rsp) { 2501 ql_log(ql_log_info, NULL, 0x5059, 2502 "%s: NULL response queue pointer.\n", __func__); 2503 return IRQ_NONE; 2504 } 2505 2506 ha = rsp->hw; 2507 reg = &ha->iobase->isp24; 2508 status = 0; 2509 2510 if (unlikely(pci_channel_offline(ha->pdev))) 2511 return IRQ_HANDLED; 2512 2513 spin_lock_irqsave(&ha->hardware_lock, flags); 2514 vha = pci_get_drvdata(ha->pdev); 2515 for (iter = 50; iter--; ) { 2516 stat = RD_REG_DWORD(®->host_status); 2517 if (stat & HSRX_RISC_PAUSED) { 2518 if (unlikely(pci_channel_offline(ha->pdev))) 2519 break; 2520 2521 hccr = RD_REG_DWORD(®->hccr); 2522 2523 ql_log(ql_log_warn, vha, 0x504b, 2524 "RISC paused -- HCCR=%x, Dumping firmware.\n", 2525 hccr); 2526 2527 qla2xxx_check_risc_status(vha); 2528 2529 ha->isp_ops->fw_dump(vha, 1); 2530 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 2531 break; 2532 } else if ((stat & HSRX_RISC_INT) == 0) 2533 break; 2534 2535 switch (stat & 0xff) { 2536 case INTR_ROM_MB_SUCCESS: 2537 case INTR_ROM_MB_FAILED: 2538 case INTR_MB_SUCCESS: 2539 case INTR_MB_FAILED: 2540 qla24xx_mbx_completion(vha, MSW(stat)); 2541 status |= MBX_INTERRUPT; 2542 2543 break; 2544 case INTR_ASYNC_EVENT: 2545 mb[0] = MSW(stat); 2546 mb[1] = RD_REG_WORD(®->mailbox1); 2547 mb[2] = RD_REG_WORD(®->mailbox2); 2548 mb[3] = RD_REG_WORD(®->mailbox3); 2549 qla2x00_async_event(vha, rsp, mb); 2550 break; 2551 case INTR_RSP_QUE_UPDATE: 2552 case INTR_RSP_QUE_UPDATE_83XX: 2553 qla24xx_process_response_queue(vha, rsp); 2554 break; 2555 case INTR_ATIO_QUE_UPDATE: 2556 qlt_24xx_process_atio_queue(vha); 2557 break; 2558 case INTR_ATIO_RSP_QUE_UPDATE: 2559 qlt_24xx_process_atio_queue(vha); 2560 qla24xx_process_response_queue(vha, rsp); 2561 break; 2562 default: 2563 ql_dbg(ql_dbg_async, vha, 0x504f, 2564 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2565 break; 2566 } 2567 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2568 RD_REG_DWORD_RELAXED(®->hccr); 2569 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) 2570 ndelay(3500); 2571 } 2572 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2573 2574 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) && 2575 (status & MBX_INTERRUPT) && ha->flags.mbox_int) { 2576 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 2577 complete(&ha->mbx_intr_comp); 2578 } 2579 2580 return IRQ_HANDLED; 2581 } 2582 2583 static irqreturn_t 2584 qla24xx_msix_rsp_q(int irq, void *dev_id) 2585 { 2586 struct qla_hw_data *ha; 2587 struct rsp_que *rsp; 2588 struct device_reg_24xx __iomem *reg; 2589 struct scsi_qla_host *vha; 2590 unsigned long flags; 2591 2592 rsp = (struct rsp_que *) dev_id; 2593 if (!rsp) { 2594 ql_log(ql_log_info, NULL, 0x505a, 2595 "%s: NULL response queue pointer.\n", __func__); 2596 return IRQ_NONE; 2597 } 2598 ha = rsp->hw; 2599 reg = &ha->iobase->isp24; 2600 2601 spin_lock_irqsave(&ha->hardware_lock, flags); 2602 2603 vha = pci_get_drvdata(ha->pdev); 2604 qla24xx_process_response_queue(vha, rsp); 2605 if (!ha->flags.disable_msix_handshake) { 2606 WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); 2607 RD_REG_DWORD_RELAXED(®->hccr); 2608 } 2609 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2610 2611 return IRQ_HANDLED; 2612 } 2613 2614 static 
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}

static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
	    GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support -- %d/%d. "
		    "Retrying with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		if (IS_QLA82XX(ha)) {
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		} else {
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, msix_entries[i].name, rsp);
		}
		if (ret) {
			ql_log(ql_log_fatal, vha, 0x00cb,
			    "MSI-X: unable to register handler -- %x/%d.\n",
			    qentry->vector, ret);
			qla24xx_disable_msix(ha);
			ha->mqenable = 0;
			goto msix_out;
		}
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

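	/*
	 * With the two base vectors registered, see whether the hardware
	 * exposes the multiqueue registers; ha->mqenable records whether
	 * additional queues may be created later.
	 */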
	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase
		    && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}

int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}
	ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_QLA82XX(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled)
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");

clear_risc_ints:

	spin_lock_irq(&ha->hardware_lock);
	if (!IS_FWI2_CAPABLE(ha))
		WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
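	 * In that case the response queue map may not have been allocated
	 * yet, so bail out before dereferencing it.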
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}


int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;
	return ret;
}