/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2005 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <scsi/scsi_tcq.h>

static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);

static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *);

/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint16_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(ha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(ha, mb);
			} else {
				/*EMPTY*/
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    ha->host_no, mb[0]));
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(ha);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_2xxx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint16_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha, "Parity error -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);
			else
				qla_printk(KERN_INFO, ha, "RISC paused -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(ha);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(ha, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(ha, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return (IRQ_HANDLED);
}

/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}

/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
	char *link_speed;
	uint16_t handle_cnt;
	uint16_t cnt;
	uint32_t handles[5];
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t rscn_entry, host_pid;
	uint8_t rscn_queue_index;

	/*
	 * Setup to process a RIO completion: RIO (reduced interrupt
	 * operation) mailbox events carry one or more I/O completion handles
	 * directly in the mailbox registers.  Gather them into handles[] and
	 * let the common MBA_SCSI_COMPLETION handling below complete each
	 * command.
	 */
	handle_cnt = 0;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}

	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!ha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(ha, handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);
		mb[3] = RD_MAILBOX_REG(ha, reg, 3);

		qla_printk(KERN_INFO, ha,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
		    mb[1], mb[2], mb[3]);

		ha->isp_ops->fw_dump(ha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				qla_printk(KERN_ERR, ha,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				ha->flags.online = 0;
			} else
				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		} else if (mb[1] == 0) {
			qla_printk(KERN_INFO, ha,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			ha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
		    ha->host_no));
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
		    mb[1]));
		qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			ha->link_data_rate = mb[1];
		}

		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
		    ha->host_no, link_speed));
		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
		    link_speed);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			ha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		ha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		if (ql2xfdmienable)
			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
		    ha->host_no));

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		}
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
		    "received.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (ha->parent) {
			atomic_set(&ha->vp_state, VP_FAILED);
			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for an RSCN to come in.
		 */
		atomic_set(&ha->loop_down_timer, 0);
		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
			    mb[2], mb[3]));
			break;
		}

		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
		    ha->host_no, mb[1], mb[2], mb[3]));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&ha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(ha, 1);

		ha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): RSCN database changed -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));

		rscn_entry = (mb[1] << 16) | mb[2];
		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
		    ha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			DEBUG(printk(KERN_INFO
			    "scsi(%ld): Ignoring RSCN update to local host "
			    "port ID (%06x)\n",
			    ha->host_no, host_pid));
			break;
		}

		/*
		 * Queue the affected port ID in the driver's circular RSCN
		 * queue for the DPC thread; if the queue is full, record an
		 * overflow instead.
		 */
		rscn_queue_index = ha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != ha->rscn_out_ptr) {
			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
			ha->rscn_in_ptr = rscn_queue_index;
		} else {
			ha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&ha->loop_state, LOOP_UPDATE);
		atomic_set(&ha->loop_down_timer, 0);
		ha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(ha);
		else
			qla2x00_process_response_queue(ha);
		break;

	case MBA_DISCARD_RND_FRAME:
		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
		break;

	case MBA_TRACE_NOTIFICATION:
		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));
		break;
	}

	if (!ha->parent && ha->num_vhosts)
		qla2x00_alert_all_vps(ha, mb);
}

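/*
 * Queue-depth management helpers: when a command completes on a port that
 * earlier reported QUEUE FULL, and at least ql2xqfullrampup seconds have
 * passed since both the last QUEUE FULL and the last ramp-up, the queue
 * depth of every LUN on that target is nudged back up by one (capped at
 * ha->max_q_depth).  The QUEUE FULL paths in the status routines call
 * qla2x00_adjust_sdev_qdepth_down() to back the depth off again.
 */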
static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (fcport->ha->max_q_depth <= sdev->queue_depth)
		return;

	if (sdev->ordered_tags)
		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
		    sdev->queue_depth + 1);
	else
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
		    sdev->queue_depth + 1);

	fcport->last_ramp_up = jiffies;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
	fc_port_t *fcport = data;

	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
		return;

	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
	    sdev->queue_depth));
}

static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
{
	fc_port_t *fcport;
	struct scsi_device *sdev;

	sdev = sp->cmd->device;
	if (sdev->queue_depth >= ha->max_q_depth)
		return;

	fcport = sp->fcport;
	if (time_before(jiffies,
	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
		return;
	if (time_before(jiffies,
	    fcport->last_queue_full + ql2xqfullrampup * HZ))
		return;

	starget_for_each_device(sdev->sdev_target, fcport,
	    qla2x00_adjust_sdev_qdepth_up);
}

/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
{
	srb_t *sp;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
		    ha->host_no, index));
		qla_printk(KERN_WARNING, ha,
		    "Invalid SCSI completion handle %d.\n", index);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	sp = ha->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[index] = NULL;

		CMD_COMPL_STATUS(sp->cmd) = 0L;
		CMD_SCSI_STATUS(sp->cmd) = 0L;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;

		qla2x00_ramp_up_queue_depth(ha, sp);
		qla2x00_sp_compl(ha, sp);
	} else {
		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Invalid ISP SCSI completion handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
	}
}

/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t *pkt;
	uint16_t handle_cnt;
	uint16_t cnt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
}

/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
{
	srb_t *sp;
	fc_port_t *fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t comp_status;
	uint16_t scsi_status;
	uint8_t lscsi_status;
	int32_t resid;
	uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
	uint8_t *rsp_info, *sense_data;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(ha, sts->handle);

		return;
	}

	/* Validate handle. */
	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
		sp = ha->outstanding_cmds[sts->handle];
		ha->outstanding_cmds[sts->handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		DEBUG2(printk("scsi(%ld): Command already returned to OS "
		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
		qla_printk(KERN_WARNING, ha,
		    "Command is NULL: already returned to OS (sp=%p)\n", sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;
	CMD_ENTRY_STATUS(cp) = sts->entry_status;
	CMD_COMPL_STATUS(cp) = comp_status;
	CMD_SCSI_STATUS(cp) = scsi_status;

	fcport = sp->fcport;

	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		sense_len = le32_to_cpu(sts24->sense_len);
		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		resid_len = le32_to_cpu(sts24->rsp_residual_count);
		fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
	} else {
		sense_len = le16_to_cpu(sts->req_sense_length);
		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha))
			sense_data += rsp_info_len;
		if (rsp_info_len > 3 && rsp_info[3]) {
			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
			    "retrying command\n", ha->host_no,
			    cp->device->channel, cp->device->id,
			    cp->device->lun, rsp_info_len, rsp_info[0],
			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
			    rsp_info[5], rsp_info[6], rsp_info[7]));

			cp->result = DID_BUS_BUSY << 16;
			qla2x00_sp_compl(ha, sp);
			return;
		}
	}

	/*
	 * Based on the host and SCSI status, generate the Linux status code.
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);
			CMD_RESID_LEN(cp) = resid;

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld): QUEUE FULL status detected "
			    "0x%x-0x%x.\n", ha->host_no, comp_status,
			    scsi_status));

			/* Adjust queue depth for all luns on the port. */
			fcport->last_queue_full = jiffies;
			starget_for_each_device(cp->device->sdev_target,
			    fcport, qla2x00_adjust_sdev_qdepth_down);
			break;
		}
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		if (sense_len >= sizeof(cp->sense_buffer))
			sense_len = sizeof(cp->sense_buffer);

		CMD_ACTUAL_SNSLEN(cp) = sense_len;
		sp->request_sense_length = sense_len;
		sp->request_sense_ptr = cp->sense_buffer;

		if (sp->request_sense_length > 32)
			sense_len = 32;

		memcpy(cp->sense_buffer, sense_data, sense_len);

		sp->request_sense_ptr += sense_len;
		sp->request_sense_length -= sense_len;
		if (sp->request_sense_length != 0)
			ha->status_srb = sp;

		DEBUG5(printk("%s(): Check condition Sense data, "
		    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n", __func__,
		    ha->host_no, cp->device->channel, cp->device->id,
		    cp->device->lun, cp, cp->serial_number));
		if (sense_len)
			DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
			    CMD_ACTUAL_SNSLEN(cp)));
		break;

	case CS_DATA_UNDERRUN:
		resid = resid_len;
		/* Use F/W calculated residual length. */
		if (IS_FWI2_CAPABLE(ha))
			resid = fw_resid_len;

		if (scsi_status & SS_RESIDUAL_UNDER) {
			scsi_set_resid(cp, resid);
			CMD_RESID_LEN(cp) = resid;
		} else {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d) UNDERRUN status detected "
			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
			    "os_underflow=0x%x\n", ha->host_no,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status, resid_len, resid, cp->cmnd[0],
			    cp->underflow));

		}

		/*
		 * Check to see if the SCSI status is non-zero; if so, report
		 * the SCSI status.
		 */
		if (lscsi_status != 0) {
			cp->result = DID_OK << 16 | lscsi_status;

			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				DEBUG2(printk(KERN_INFO
				    "scsi(%ld): QUEUE FULL status detected "
				    "0x%x-0x%x.\n", ha->host_no, comp_status,
				    scsi_status));

				/*
				 * Adjust queue depth for all luns on the
				 * port.
				 */
				fcport->last_queue_full = jiffies;
				starget_for_each_device(
				    cp->device->sdev_target, fcport,
				    qla2x00_adjust_sdev_qdepth_down);
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer */
			memset(cp->sense_buffer, 0, sizeof(cp->sense_buffer));

			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			if (sense_len >= sizeof(cp->sense_buffer))
				sense_len = sizeof(cp->sense_buffer);

			CMD_ACTUAL_SNSLEN(cp) = sense_len;
			sp->request_sense_length = sense_len;
			sp->request_sense_ptr = cp->sense_buffer;

			if (sp->request_sense_length > 32)
				sense_len = 32;

			memcpy(cp->sense_buffer, sense_data, sense_len);

			sp->request_sense_ptr += sense_len;
			sp->request_sense_length -= sense_len;
			if (sp->request_sense_length != 0)
				ha->status_srb = sp;

			DEBUG5(printk("%s(): Check condition Sense data, "
			    "scsi(%ld:%d:%d:%d) cmd=%p pid=%ld\n",
			    __func__, ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, cp,
			    cp->serial_number));

			/*
			 * In case of an underrun condition, set both the lscsi
			 * status and the completion status to appropriate
			 * values.
			 */
			if (resid &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				DEBUG2(qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp)));

				cp->result = DID_ERROR << 16 | lscsi_status;
			}

			if (sense_len)
				DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
				    CMD_ACTUAL_SNSLEN(cp)));
		} else {
			/*
			 * If the RISC reports an underrun and the target does
			 * not report it, then we must have a lost frame, so
			 * tell the upper layer to retry it by reporting a
			 * bus busy.
			 */
			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
				    "frame(s) detected (%x of %x bytes)..."
				    "retrying command.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp)));

				cp->result = DID_BUS_BUSY << 16;
				break;
			}

			/* Handle mid-layer underflow */
			if ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow) {
				qla_printk(KERN_INFO, ha,
				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
				    "detected (%x of %x bytes)...returning "
				    "error status.\n", ha->host_no,
				    cp->device->channel, cp->device->id,
				    cp->device->lun, resid,
				    scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}

			/* Everybody online, looking good... */
			cp->result = DID_OK << 16;
		}
		break;

	case CS_DATA_OVERRUN:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
		    scsi_status));
		DEBUG2(printk(KERN_INFO
		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5]));
		DEBUG2(printk(KERN_INFO
		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
		    "status!\n",
		    cp->serial_number, scsi_bufflen(cp), resid_len));

		cp->result = DID_ERROR << 16;
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
		/*
		 * If the port is in the Target Down state, return all I/Os
		 * for this target with DID_NO_CONNECT; otherwise queue the
		 * I/Os in the retry_queue.
		 */
		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
		    ha->host_no, cp->device->id, cp->device->lun,
		    cp->serial_number, comp_status,
		    atomic_read(&fcport->state)));

		cp->result = DID_BUS_BUSY << 16;
		if (atomic_read(&fcport->state) == FCS_ONLINE) {
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		}
		break;

	case CS_RESET:
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_ABORTED:
		/*
		 * hv2.19.12 - DID_ABORT does not retry the request; if we
		 * aborted this request then abort, otherwise it must be a
		 * reset.
		 */
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
		    ha->host_no, comp_status, scsi_status));

		cp->result = DID_RESET << 16;
		break;

	case CS_TIMEOUT:
		cp->result = DID_BUS_BUSY << 16;

		if (IS_FWI2_CAPABLE(ha)) {
			DEBUG2(printk(KERN_INFO
			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
			    cp->device->id, cp->device->lun, comp_status,
			    scsi_status));
			break;
		}
		DEBUG2(printk(KERN_INFO
		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
		    "sflags=%x.\n", ha->host_no, cp->device->channel,
		    cp->device->id, cp->device->lun, comp_status, scsi_status,
		    le16_to_cpu(sts->status_flags)));

		/* Check to see if logout occurred. */
		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
			qla2x00_mark_device_lost(ha, fcport, 1, 1);
		break;

	default:
		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
		qla_printk(KERN_INFO, ha,
		    "Unknown status detected 0x%x-0x%x.\n",
		    comp_status, scsi_status);

		cp->result = DID_ERROR << 16;
		break;
	}

	/* Place command on done queue. */
	if (ha->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}

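/*
 * Sense data that does not fit in a single Status IOCB is delivered in
 * trailing Status Continuation IOCBs.  qla2x00_status_entry() leaves the
 * partially completed command in ha->status_srb; each continuation entry
 * appends its chunk via request_sense_ptr/request_sense_length, and the
 * command is only completed once all of the expected sense bytes have
 * arrived.
 */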
/**
 * qla2x00_status_cont_entry() - Process a Status Continuation entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
{
	uint8_t sense_sz = 0;
	srb_t *sp = ha->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			DEBUG2(printk("%s(): Cmd already returned to OS "
			    "sp=%p.\n", __func__, sp));
			qla_printk(KERN_INFO, ha,
			    "cmd is NULL: already returned to OS (sp=%p)\n",
			    sp);

			ha->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_FWI2_CAPABLE(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			ha->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
{
	srb_t *sp;

#if defined(QL_DEBUG_LEVEL_2)
	if (pkt->entry_status & RF_INV_E_ORDER)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
	else if (pkt->entry_status & RF_INV_E_COUNT)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
	else if (pkt->entry_status & RF_INV_E_PARAM)
		qla_printk(KERN_ERR, ha,
		    "%s: Invalid Entry Parameter\n", __func__);
	else if (pkt->entry_status & RF_INV_E_TYPE)
		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
	else if (pkt->entry_status & RF_BUSY)
		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
	else
		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp) {
		/* Free outstanding command slot. */
		ha->outstanding_cmds[pkt->handle] = NULL;

		/* Bad payload or header */
		if (pkt->entry_status &
		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
			sp->cmd->result = DID_ERROR << 16;
		} else if (pkt->entry_status & RF_BUSY) {
			sp->cmd->result = DID_BUS_BUSY << 16;
		} else {
			sp->cmd->result = DID_ERROR << 16;
		}
		qla2x00_sp_compl(ha, sp);

	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha,
		    "Error entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		qla2xxx_wake_dpc(ha);
	}
}

/**
 * qla2x00_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle1));

	/* Validate handle. */
	if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle1];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle1] = NULL;

	qla2x00_sp_compl(ha, sp);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t cnt;
	uint16_t __iomem *wptr;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla24xx_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	struct sts_entry_24xx *pkt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;

		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		case MS_IOCB_TYPE:
			qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(ha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
}

static void
qla2xxx_check_risc_status(scsi_qla_host_t *ha)
{
	int rval;
	uint32_t cnt;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long flags;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return IRQ_NONE;
	}

	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);

			qla2xxx_check_risc_status(ha);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return IRQ_HANDLED;
}

/**
 * qla24xx_ms_entry() - Process a Management Server entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
{
	srb_t *sp;

	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
	    __func__, ha->host_no, pkt, pkt->handle));

	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));

	/* Validate handle. */
	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
		sp = ha->outstanding_cmds[pkt->handle];
	else
		sp = NULL;

	if (sp == NULL) {
		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
		    pkt->handle);

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		return;
	}

	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;

	/* Free outstanding command slot. */
	ha->outstanding_cmds[pkt->handle] = NULL;

	qla2x00_sp_compl(ha, sp);
}

static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	ha = dev_id;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	qla24xx_process_response_queue(ha);

	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long flags;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = dev_id;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);

			qla2xxx_check_risc_status(ha);

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);
	}

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */
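/*
 * Two MSI-X vectors are used: the "default" vector behaves like the legacy
 * interrupt handler (mailbox completions, asynchronous events and response
 * queue processing), while the "rsp_q" vector only drains the response
 * queue.  The table below ties each vector to its handler and name.
 */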
struct qla_init_msix_entry {
	uint16_t entry;
	uint16_t index;
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
		"qla2xxx (default)", qla24xx_msix_default },

	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
		"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
};

static void
qla24xx_disable_msix(scsi_qla_host_t *ha)
{
	int i;
	struct qla_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		if (qentry->have_irq)
			free_irq(qentry->msix_vector, ha);
	}
	pci_disable_msix(ha->pdev);
}

static int
qla24xx_enable_msix(scsi_qla_host_t *ha)
{
	int i, ret;
	struct msix_entry entries[QLA_MSIX_ENTRIES];
	struct qla_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
		entries[i].entry = imsix_entries[i].entry;

	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "MSI-X: Failed to enable support -- %d/%d\n",
		    QLA_MSIX_ENTRIES, ret);
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		qentry->msix_vector = entries[i].vector;
		qentry->msix_entry = entries[i].entry;
		qentry->have_irq = 0;
		ret = request_irq(qentry->msix_vector,
		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
		if (ret) {
			qla_printk(KERN_WARNING, ha,
			    "MSI-X: Unable to register handler -- %x/%d.\n",
			    imsix_entries[i].index, ret);
			qla24xx_disable_msix(ha);
			goto msix_out;
		}
		qentry->have_irq = 1;
	}

msix_out:
	return ret;
}

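/*
 * Interrupt setup tries the richest mechanism first and falls back in
 * stages: MSI-X (only on ISP2432 parts with a new enough chip revision and
 * MSI-X-capable firmware, or on ISP2532); failing that, MSI is enabled
 * where supported and the remaining single vector (MSI or legacy INTx) is
 * claimed with request_irq().
 */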
int
qla2x00_request_irqs(scsi_qla_host_t *ha)
{
	int ret;

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
		goto skip_msix;

	if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes));

		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
		    ha->fw_attributes));
		return ret;
	}
	qla_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back to INTa mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
		ha->flags.msi_enabled = 1;
	}
skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
	if (!ret) {
		ha->flags.inta_enabled = 1;
		ha->host->irq = ha->pdev->irq;
	} else {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve interrupt %d -- already in use.\n",
		    ha->pdev->irq);
	}

	return ret;
}

void
qla2x00_free_irqs(scsi_qla_host_t *ha)
{

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.inta_enabled) {
		free_irq(ha->host->irq, ha);
		pci_disable_msi(ha->pdev);
	}
}