/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/**
 * qla4xxx_copy_sense - copy sense data into cmd sense buffer
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 * @srb: Pointer to srb structure.
 **/
static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
			       struct status_entry *sts_entry,
			       struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	uint16_t sense_len;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
	if (sense_len == 0)
		return;

	/* Save total available sense length,
	 * not to exceed cmd's sense buffer size */
	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
	srb->req_sense_ptr = cmd->sense_buffer;
	srb->req_sense_len = sense_len;

	/* Copy sense from sts_entry pkt */
	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);

	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
		      "ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
		      cmd->device->channel, cmd->device->id,
		      cmd->device->lun, __func__,
		      sts_entry->senseData[2] & 0x0f,
		      sts_entry->senseData[7],
		      sts_entry->senseData[12],
		      sts_entry->senseData[13]));

	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
	srb->flags |= SRB_GOT_SENSE;

	/* Update srb, in case a sts_cont pkt follows */
	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;
	if (srb->req_sense_len != 0)
		ha->status_srb = srb;
	else
		ha->status_srb = NULL;
}

/**
 * qla4xxx_status_cont_entry - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @sts_cont: Entry pointer
 *
 * Extended sense data.
 */
static void
qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
			  struct status_cont_entry *sts_cont)
{
	struct srb *srb = ha->status_srb;
	struct scsi_cmnd *cmd;
	uint8_t sense_len;

	if (srb == NULL)
		return;

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
			      "back to OS srb=%p srb->state:%d\n", ha->host_no,
			      __func__, srb, srb->state));
		ha->status_srb = NULL;
		return;
	}

	/* Copy sense data. */
	sense_len = min_t(uint16_t, srb->req_sense_len,
			  IOCB_MAX_EXT_SENSEDATA_LEN);
	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));

	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* Place command on done queue. */
	if (srb->req_sense_len == 0) {
		qla4xxx_srb_compl(ha, srb);
		ha->status_srb = NULL;
	}
}
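
/*
 * Sense data handling: a status IOCB carries at most IOCB_MAX_SENSEDATA_LEN
 * bytes of sense data.  When the target returns more than that,
 * qla4xxx_copy_sense() records the remaining length in srb->req_sense_len and
 * parks the srb in ha->status_srb; the firmware then delivers the remainder
 * in one or more status-continuation IOCBs, which qla4xxx_status_cont_entry()
 * appends until req_sense_len reaches zero and the command is completed.
 */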

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* FIXMEdg: Don't we need to reset ISP in this case??? */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		dev_warn(&ha->pdev->dev, "%s invalid status entry:"
			 " handle=0x%0x\n", __func__, sts_entry->handle);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		dev_warn(&ha->pdev->dev, "Command is NULL:"
			 " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
					     cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun0, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		qla4xxx_copy_sense(ha, sts_entry, srb);
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun\n",
				      ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			qla4xxx_copy_sense(ha, sts_entry, srb);
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun1, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request, if not waiting for status_continuation pkt */
	srb->cc_stat = sts_entry->completionStatus;
	if (ha->status_srb == NULL)
		qla4xxx_srb_compl(ha, srb);
}
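
/*
 * The response ring is a fixed-size circular buffer of RESPONSE_QUEUE_DEPTH
 * entries.  The firmware's producer index is read from the shadow registers
 * (ha->shadow_regs->rsp_q_in); the driver consumes entries at
 * ha->response_out/ha->response_ptr and acknowledges the work by writing the
 * new consumer index to the rsp_q_out register.
 */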

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_in =
		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
	       ha->response_out) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/* Common status */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			break;

		case ET_STATUS_CONTINUATION:
			qla4xxx_status_cont_entry(ha,
				(struct status_cont_entry *) sts_entry);
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
					le32_to_cpu(sts_entry->handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			qla4xxx_srb_compl(ha, srb);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
	}

	/*
	 * Done with responses, update the ISP.  For QLA4010, this also clears
	 * the interrupt.
	 */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
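
/*
 * Incoming mailbox status words fall into two classes: command completion
 * statuses (including MBOX_STS_BUSY and MBOX_STS_INTERMEDIATE_COMPLETION),
 * which are copied into ha->mbox_status[] for a waiting mailbox command, and
 * asynchronous event statuses (AENs), which are logged and either handled
 * immediately or queued for the DPC routine.
 */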

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. Runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_stat2, mbox_stat3;

	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 1; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] =
					readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
					readl(&ha->reg->mailbox[i]);
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			if (ql4xdontresethba) {
				DEBUG2(printk("%s: Don't Reset HBA\n",
					      __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
				      ha->host_no, mbox_status));
			set_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_LINK_DOWN:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
				      ha->host_no, mbox_status));
			clear_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			mbox_stat2 = readl(&ha->reg->mailbox[2]);
			mbox_stat3 = readl(&ha->reg->mailbox[3]);

			if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2])));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
mbox_sts[3]=%04x\n", 559 ha->host_no, mbox_status, 560 readl(&ha->reg->mailbox[1]), 561 readl(&ha->reg->mailbox[2]), 562 readl(&ha->reg->mailbox[3]))); 563 break; 564 565 case MBOX_ASTS_DATABASE_CHANGED: 566 /* Queue AEN information and process it in the DPC 567 * routine */ 568 if (ha->aen_q_count > 0) { 569 570 /* decrement available counter */ 571 ha->aen_q_count--; 572 573 for (i = 1; i < MBOX_AEN_REG_COUNT; i++) 574 ha->aen_q[ha->aen_in].mbox_sts[i] = 575 readl(&ha->reg->mailbox[i]); 576 577 ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status; 578 579 /* print debug message */ 580 DEBUG2(printk("scsi%ld: AEN[%d] %04x queued" 581 " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n", 582 ha->host_no, ha->aen_in, 583 mbox_status, 584 ha->aen_q[ha->aen_in].mbox_sts[1], 585 ha->aen_q[ha->aen_in].mbox_sts[2], 586 ha->aen_q[ha->aen_in].mbox_sts[3], 587 ha->aen_q[ha->aen_in]. mbox_sts[4])); 588 /* advance pointer */ 589 ha->aen_in++; 590 if (ha->aen_in == MAX_AEN_ENTRIES) 591 ha->aen_in = 0; 592 593 /* The DPC routine will process the aen */ 594 set_bit(DPC_AEN, &ha->dpc_flags); 595 } else { 596 DEBUG2(printk("scsi%ld: %s: aen %04x, queue " 597 "overflowed! AEN LOST!!\n", 598 ha->host_no, __func__, 599 mbox_status)); 600 601 DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n", 602 ha->host_no)); 603 604 for (i = 0; i < MAX_AEN_ENTRIES; i++) { 605 DEBUG2(printk("AEN[%d] %04x %04x %04x " 606 "%04x\n", i, 607 ha->aen_q[i].mbox_sts[0], 608 ha->aen_q[i].mbox_sts[1], 609 ha->aen_q[i].mbox_sts[2], 610 ha->aen_q[i].mbox_sts[3])); 611 } 612 } 613 break; 614 615 default: 616 DEBUG2(printk(KERN_WARNING 617 "scsi%ld: AEN %04x UNKNOWN\n", 618 ha->host_no, mbox_status)); 619 break; 620 } 621 } else { 622 DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n", 623 ha->host_no, mbox_status)); 624 625 ha->mbox_status[0] = mbox_status; 626 } 627 } 628 629 /** 630 * qla4xxx_interrupt_service_routine - isr 631 * @ha: pointer to host adapter structure. 632 * 633 * This is the main interrupt service routine. 634 * hardware_lock locked upon entry. runs in interrupt context. 635 **/ 636 void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha, 637 uint32_t intr_status) 638 { 639 /* Process response queue interrupt. */ 640 if (intr_status & CSR_SCSI_COMPLETION_INTR) 641 qla4xxx_process_response_queue(ha); 642 643 /* Process mailbox/asynch event interrupt.*/ 644 if (intr_status & CSR_SCSI_PROCESSOR_INTR) { 645 qla4xxx_isr_decode_mailbox(ha, 646 readl(&ha->reg->mailbox[0])); 647 648 /* Clear Mailbox Interrupt */ 649 writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), 650 &ha->reg->ctrl_status); 651 readl(&ha->reg->ctrl_status); 652 } 653 } 654 655 /** 656 * qla4xxx_intr_handler - hardware interrupt handler. 

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!ql4_mod_unload)
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			qla4xxx_interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;

			intr_status = 0;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
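
/*
 * AENs queued by qla4xxx_isr_decode_mailbox() sit in the circular ha->aen_q
 * array between the aen_out (consumer) and aen_in (producer) indices, with
 * aen_q_count tracking the remaining free slots.  The DPC routine drains the
 * queue via qla4xxx_process_aen(), which drops hardware_lock around the
 * per-AEN processing.
 */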

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			      " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			      (ha->aen_out ? (ha->aen_out-1) : (MAX_AEN_ENTRIES-1)),
			      mbox_sts[0], mbox_sts[1], mbox_sts[2],
			      mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* for use during init time, we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;

				/* FIXME: name length? */
				ddb_entry =
					qla4xxx_lookup_ddb_by_fw_index(ha,
								       mbox_sts[2]);
				if (!ddb_entry)
					break;

				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait +
					  4) * HZ);

				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait +
					      4));
				break;
			}

			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
							    mbox_sts[3]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}