xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_isr.c (revision 643d1f7f)
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/**
 * qla4xxx_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 **/
static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
					      uint32_t index)
{
	struct srb *srb;

	srb = qla4xxx_del_from_active_array(ha, index);
	if (srb) {
		/* Save ISP completion status */
		srb->cmd->result = DID_OK << 16;
		qla4xxx_srb_compl(ha, srb);
	} else {
		DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
			      "%d\n", ha->host_no, index));
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}
}

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;
	uint16_t sensebytecnt;

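	/* Fast path: the firmware reports a clean completion with no SCSI
	 * status to translate, so hand the command straight back. */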
	if (sts_entry->completionStatus == SCS_COMPLETE &&
	    sts_entry->scsiStatus == 0) {
		qla4xxx_process_completed_request(ha,
						  le32_to_cpu(sts_entry->
							      handle));
		return;
	}

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* FIXMEdg: Don't we need to reset ISP in this case??? */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		dev_warn(&ha->pdev->dev, "Command is NULL:"
			" already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:
		if (scsi_status == 0) {
			cmd->result = DID_OK << 16;
			break;
		}

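		/* The firmware flagged a residual overrun; treat the command
		 * as failed. */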
		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

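		/* Underrun reported by the target: record the residual so
		 * the midlayer sees the short transfer. */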
		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
				cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun0, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
		if (sensebytecnt == 0)
			break;

		memcpy(cmd->sense_buffer, sts_entry->senseData,
		       min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));

		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->senseData[2] & 0x0f,
			      sts_entry->senseData[12],
			      sts_entry->senseData[13]));

		srb->flags |= SRB_GOT_SENSE;
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_BUS_BUSY << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun, "
				      "residual = 0x%x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__, residual));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

			sensebytecnt =
				le16_to_cpu(sts_entry->senseDataByteCnt);
			if (sensebytecnt == 0)
				break;

			memcpy(cmd->sense_buffer, sts_entry->senseData,
			       min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));

			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__,
				      sts_entry->senseData[2] & 0x0f,
				      sts_entry->senseData[12],
				      sts_entry->senseData[13]));
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun1, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_BUS_BUSY << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request */
	srb->cc_stat = sts_entry->completionStatus;
	qla4xxx_srb_compl(ha, srb);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry.
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_in =
		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
	       ha->response_out) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/*
			 * Common status - Single completion posted in single
			 * IOSB.
			 */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			break;

		case ET_STATUS_CONTINUATION:
			/* Just throw away the status continuation entries */
			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
				      "- ignoring\n", ha->host_no, __func__));
			break;

		case ET_COMMAND:
			/* ISP device queue is full.  Command not
			 * accepted by ISP.  Return it so the midlayer
			 * can retry it later. */

			srb = qla4xxx_del_from_active_array(ha,
						    le32_to_cpu(sts_entry->
								handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			qla4xxx_srb_compl(ha, srb);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
	}

	/*
	 * Done with responses, update the ISP.  For QLA4010, this also
	 * clears the interrupt.
	 */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

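	/* Fall through: an invalid handle also acknowledges the response
	 * queue and forces an adapter reset. */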
exit_prq_error:
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry.  Runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_stat2, mbox_stat3;

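	/* Bits [15:12] of the mailbox status distinguish command completion
	 * codes from asynchronous event codes (MBOX_COMPLETION_STATUS vs.
	 * MBOX_ASYNC_EVENT_STATUS below). */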
	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 1; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] =
					readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
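		/* Capture the raw mailbox registers in the AEN log first;
		 * the log does not wrap, so recording simply stops once it
		 * is full. */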
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
					readl(&ha->reg->mailbox[i]);
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			if (ql4xdontresethba) {
				DEBUG2(printk("%s: Don't Reset HBA\n",
					      __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
				      ha->host_no, mbox_status));
			set_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_LINK_DOWN:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
				      ha->host_no, mbox_status));
			clear_bit(AF_LINK_UP, &ha->flags);
			break;

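		/* Firmware heartbeat AEN: the firmware is alive, so zero the
		 * heartbeat age counter. */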
		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			mbox_stat2 = readl(&ha->reg->mailbox[2]);
			mbox_stat3 = readl(&ha->reg->mailbox[3]);

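			/* The state values tested below are firmware-defined;
			 * this driver revision carries no symbolic names for
			 * them. */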
			if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2])));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2]),
				      readl(&ha->reg->mailbox[3])));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						readl(&ha->reg->mailbox[i]);

				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_status,
					      ha->aen_q[ha->aen_in].mbox_sts[1],
					      ha->aen_q[ha->aen_in].mbox_sts[2],
					      ha->aen_q[ha->aen_in].mbox_sts[3],
					      ha->aen_q[ha->aen_in].mbox_sts[4]));
				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the AEN */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_status));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_status));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: interrupt status to service.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry.  Runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt. */
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status.  If the shadow registers show that
		 * the firmware has posted new response entries, treat this
		 * as a completion interrupt without reading the control/
		 * status register; otherwise read the interrupt status from
		 * the register.
		 */
		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

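		/*
		 * Nothing of interest is pending; if nothing has been
		 * serviced on this pass either, count the interrupt as
		 * spurious.
		 */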
		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
		    0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
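			/*
			 * A chip soft reset was detected: take the adapter
			 * offline, acknowledge the reset interrupt and let
			 * the DPC routine re-initialize the adapter unless
			 * the module is being unloaded.
			 */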
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!ql4_mod_unload)
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			qla4xxx_interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;

			intr_status = 0;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

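		/* Drop the hardware lock while this AEN is handled; it is
		 * reacquired at the bottom of the loop before the queue
		 * indices are examined again. */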
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			" mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			(ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
			mbox_sts[0], mbox_sts[1], mbox_sts[2],
			mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* for use during init time, we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;

				ddb_entry =
					/* FIXME: name length? */
					qla4xxx_lookup_ddb_by_fw_index(ha,
								       mbox_sts[2]);
				if (!ddb_entry)
					break;

				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait +
					  4) * HZ);

				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait +
					      4));
				break;
			}

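			/* PROCESS_ALL_AENS: act on the database change
			 * immediately. */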
			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
							    mbox_sts[3]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
819