/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;
	uint16_t sensebytecnt;

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* FIXMEdg: Don't we need to reset ISP in this case??? */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		dev_warn(&ha->pdev->dev, "%s invalid status entry:"
			 " handle=0x%0x\n", __func__, sts_entry->handle);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		dev_warn(&ha->pdev->dev, "Command is NULL:"
			 " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
					     cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun0, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
		if (sensebytecnt == 0)
			break;

		memcpy(cmd->sense_buffer, sts_entry->senseData,
		       min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));

		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->senseData[2] & 0x0f,
			      sts_entry->senseData[12],
			      sts_entry->senseData[13]));

		srb->flags |= SRB_GOT_SENSE;
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_BUS_BUSY << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun, "
				      "residual = 0x%x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__, residual));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

			sensebytecnt =
				le16_to_cpu(sts_entry->senseDataByteCnt);
			if (sensebytecnt == 0)
				break;

			memcpy(cmd->sense_buffer, sts_entry->senseData,
			       min_t(uint16_t, sensebytecnt,
				     SCSI_SENSE_BUFFERSIZE));

			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__,
				      sts_entry->senseData[2] & 0x0f,
				      sts_entry->senseData[12],
				      sts_entry->senseData[13]));
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun1, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_BUS_BUSY << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request */
	srb->cc_stat = sts_entry->completionStatus;
	qla4xxx_srb_compl(ha, srb);
}

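/*
 * Response-ring flow implemented by qla4xxx_process_response_queue() below,
 * summarized (a restatement of the code, not additional logic):
 *
 *	in = le32_to_cpu(ha->shadow_regs->rsp_q_in);	producer index, updated
 *							by the firmware in host
 *							memory
 *	while (ha->response_out != in)
 *		consume *ha->response_ptr, then advance response_out and
 *		response_ptr, wrapping at RESPONSE_QUEUE_DEPTH;
 *	writel(ha->response_out, &ha->reg->rsp_q_out);	publish consumer index
 *
 * Each writel() of rsp_q_out is followed by a readl() of the same register
 * so the posted PCI write is flushed to the ISP before the driver continues.
 */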
/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_in =
		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
	       ha->response_out) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/*
			 * Common status - Single completion posted in single
			 * IOSB.
			 */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			break;

		case ET_STATUS_CONTINUATION:
			/* Just throw away the status continuation entries */
			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
				      "- ignoring\n", ha->host_no, __func__));
			break;

		case ET_COMMAND:
			/* ISP device queue is full.  Command not
			 * accepted by ISP.  Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
					le32_to_cpu(sts_entry->handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			qla4xxx_srb_compl(ha, srb);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
	}

	/*
	 * Done with responses, update the ISP.  For QLA4010, this also
	 * clears the interrupt.
	 */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

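/*
 * Mailbox handling overview for qla4xxx_isr_decode_mailbox() below:
 *
 *  - Command completions (MBOX_STS_BUSY, MBOX_STS_INTERMEDIATE_COMPLETION,
 *    or a status whose top nibble is MBOX_COMPLETION_STATUS) are copied into
 *    ha->mbox_status[] and AF_MBOX_COMMAND_DONE is set for the waiting
 *    mailbox-command issuer.
 *  - Asynchronous events (top nibble MBOX_ASYNC_EVENT_STATUS) are recorded
 *    in ha->aen_log and either handled immediately or, for
 *    MBOX_ASTS_DATABASE_CHANGED, queued to ha->aen_q for the DPC routine.
 *  - Anything else is stored in ha->mbox_status[0] and only logged.
 */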
/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. Runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_stat2, mbox_stat3;

	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 1; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] =
					readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
					readl(&ha->reg->mailbox[i]);
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			if (ql4xdontresethba) {
				DEBUG2(printk("%s: Don't Reset HBA\n",
					      __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
				      ha->host_no, mbox_status));
			set_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_LINK_DOWN:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
				      ha->host_no, mbox_status));
			clear_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			mbox_stat2 = readl(&ha->reg->mailbox[2]);
			mbox_stat3 = readl(&ha->reg->mailbox[3]);

			if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2])));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2]),
				      readl(&ha->reg->mailbox[3])));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						readl(&ha->reg->mailbox[i]);

				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_status,
					      ha->aen_q[ha->aen_in].mbox_sts[1],
					      ha->aen_q[ha->aen_in].mbox_sts[2],
					      ha->aen_q[ha->aen_in].mbox_sts[3],
					      ha->aen_q[ha->aen_in].mbox_sts[4]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed! AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_status));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_status));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Interrupt status.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

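/*
 * qla4xxx_intr_handler() below is the function registered for the adapter's
 * PCI interrupt; the registration itself lives elsewhere in the driver.  A
 * minimal sketch of such a hookup (illustrative only, not this driver's
 * exact probe code):
 *
 *	ret = request_irq(ha->pdev->irq, qla4xxx_intr_handler,
 *			  IRQF_SHARED, "qla4xxx", ha);
 *
 * The last argument becomes dev_id below, and IRQ_NONE is returned when the
 * interrupt did not come from this adapter (shared interrupt line case).
 */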
/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
		    0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!ql4_mod_unload)
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			qla4xxx_interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;

			intr_status = 0;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

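/*
 * AEN queue bookkeeping: qla4xxx_isr_decode_mailbox() above stores
 * MBOX_ASTS_DATABASE_CHANGED events at ha->aen_q[ha->aen_in] and decrements
 * ha->aen_q_count (the free-slot count); qla4xxx_process_aen() below drains
 * the ring from ha->aen_out under hardware_lock, incrementing aen_q_count as
 * each slot is released.  Both indices wrap at MAX_AEN_ENTRIES.
 */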
/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			      " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			      (ha->aen_out ? (ha->aen_out - 1) : (MAX_AEN_ENTRIES - 1)),
			      mbox_sts[0], mbox_sts[1], mbox_sts[2],
			      mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* for use during init time, we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;

				ddb_entry =
					/* FIXME: name length? */
					qla4xxx_lookup_ddb_by_fw_index(ha,
								       mbox_sts[2]);
				if (!ddb_entry)
					break;

				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait +
					  4) * HZ);

				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait +
					      4));
				break;
			}

			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
							    mbox_sts[3]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}