/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"

/**
 * qla4xxx_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 **/
static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
					      uint32_t index)
{
	struct srb *srb;

	srb = qla4xxx_del_from_active_array(ha, index);
	if (srb) {
		/* Save ISP completion status */
		srb->cmd->result = DID_OK << 16;
		qla4xxx_srb_compl(ha, srb);
	} else {
		DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
			      "%d\n", ha->host_no, index));
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}
}

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;
	uint16_t sensebytecnt;

	if (sts_entry->completionStatus == SCS_COMPLETE &&
	    sts_entry->scsiStatus == 0) {
		qla4xxx_process_completed_request(ha,
						  le32_to_cpu(sts_entry->
							      handle));
		return;
	}

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* FIXMEdg: Don't we need to reset ISP in this case??? */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		dev_warn(&ha->pdev->dev, "Command is NULL:"
			 " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:
		if (scsi_status == 0) {
			cmd->result = DID_OK << 16;
			break;
		}

		if (sts_entry->iscsiFlags &
		    (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
			cmd->resid = residual;

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
		if (sensebytecnt == 0)
			break;

		memcpy(cmd->sense_buffer, sts_entry->senseData,
		       min(sensebytecnt,
			   (uint16_t) sizeof(cmd->sense_buffer)));

		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->senseData[2] & 0x0f,
			      sts_entry->senseData[12],
			      sts_entry->senseData[13]));

		srb->flags |= SRB_GOT_SENSE;
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_BUS_BUSY << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun, "
				      "residual = 0x%x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__, residual));

			cmd->result = DID_ERROR << 16;
			break;
		}

		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
			/*
			 * Firmware detected a SCSI transport underrun
			 * condition
			 */
			cmd->resid = residual;
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
				      "detected, xferlen = 0x%x, residual = "
				      "0x%x\n",
				      ha->host_no, cmd->device->channel,
				      cmd->device->id,
				      cmd->device->lun, __func__,
				      cmd->request_bufflen,
				      residual));
		}

		/*
		 * If there is scsi_status, it takes precedence over
		 * the underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			memset(cmd->sense_buffer, 0,
			       sizeof(cmd->sense_buffer));

			sensebytecnt =
				le16_to_cpu(sts_entry->senseDataByteCnt);
			if (sensebytecnt == 0)
				break;

			memcpy(cmd->sense_buffer, sts_entry->senseData,
			       min(sensebytecnt,
				   (uint16_t) sizeof(cmd->sense_buffer)));

			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__,
				      sts_entry->senseData[2] & 0x0f,
				      sts_entry->senseData[12],
				      sts_entry->senseData[13]));
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((cmd->request_bufflen - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      cmd->request_bufflen, residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_BUS_BUSY << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* Complete the request. */
	srb->cc_stat = sts_entry->completionStatus;
	qla4xxx_srb_compl(ha, srb);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_in =
		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
	       ha->response_out) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/*
			 * Common status - Single completion posted in single
			 * IOSB.
			 */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			break;

		case ET_STATUS_CONTINUATION:
			/* Just throw away the status continuation entries */
			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
				      "- ignoring\n", ha->host_no, __func__));
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
						    le32_to_cpu(sts_entry->
								handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			qla4xxx_srb_compl(ha, srb);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
	}

	/*
	 * Done with responses, update the ISP.  For QLA4010, this also clears
	 * the interrupt.
	 */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
				       uint32_t mbox_status)
{
	int i;

	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 1; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] =
					readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			if (ql4xdontresethba) {
				DEBUG2(printk("%s: Don't Reset HBA\n",
					      __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
				      ha->host_no, mbox_status));
			set_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_LINK_DOWN:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
				      ha->host_no, mbox_status));
			clear_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
							    * mode
							    * only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2])));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2]),
				      readl(&ha->reg->mailbox[3])));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {
				/* advance pointer */
				if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
					ha->aen_in = 0;
				else
					ha->aen_in++;

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						readl(&ha->reg->mailbox[i]);

				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_status,
					      ha->aen_q[ha->aen_in].mbox_sts[1],
					      ha->aen_q[ha->aen_in].mbox_sts[2],
					      ha->aen_q[ha->aen_in].mbox_sts[3],
					      ha->aen_q[ha->aen_in].mbox_sts[4]));

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_status));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_status));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Interrupt status to process.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt. */
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
		    0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!ql4_mod_unload)
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			qla4xxx_interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;

			intr_status = 0;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		/* Advance pointers for next entry */
		if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
			ha->aen_out = 0;
		else
			ha->aen_out++;

		ha->aen_q_count++;
		aen = &ha->aen_q[ha->aen_out];

		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
			     "mod=%x conerr=%08x\n", ha->host_no, ha->aen_out,
			     mbox_sts[0], mbox_sts[2], mbox_sts[3],
			     mbox_sts[1], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* for use during init time, we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;

				/* FIXME: name length? */
				ddb_entry =
					qla4xxx_lookup_ddb_by_fw_index(ha,
								       mbox_sts[2]);
				if (!ddb_entry)
					break;

				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait +
					  4) * HZ);

				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait +
					      4));
				break;
			}

			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
							    mbox_sts[3]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
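
/*
 * For reference, a minimal sketch of how qla4xxx_intr_handler() is
 * typically hooked up from the adapter's PCI probe path.  The flag
 * combination, device name string, and error handling below are
 * illustrative assumptions and are not taken from this file:
 *
 *	ret = request_irq(ha->pdev->irq, qla4xxx_intr_handler,
 *			  IRQF_DISABLED | IRQF_SHARED, "qla4xxx", ha);
 *	if (ret)
 *		dev_warn(&ha->pdev->dev, "request_irq failed (%d)\n", ret);
 */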