xref: /openbmc/linux/drivers/scsi/qla4xxx/ql4_isr.c (revision c21b37f6)
1 /*
2  * QLogic iSCSI HBA Driver
3  * Copyright (c)  2003-2006 QLogic Corporation
4  *
5  * See LICENSE.qla4xxx for copyright and licensing details.
6  */
7 
8 #include "ql4_def.h"
9 #include "ql4_glbl.h"
10 #include "ql4_dbg.h"
11 #include "ql4_inline.h"
12 
13 /**
14  * qla4xxx_process_completed_request() - Process a Fast Post response.
15  * @ha: SCSI driver HA context
16  * @index: SRB index
17  **/
18 static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
19 					      uint32_t index)
20 {
21 	struct srb *srb;
22 
23 	srb = qla4xxx_del_from_active_array(ha, index);
24 	if (srb) {
25 		/* Save ISP completion status */
26 		srb->cmd->result = DID_OK << 16;
27 		qla4xxx_srb_compl(ha, srb);
28 	} else {
29 		DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
30 			      "%d\n", ha->host_no, index));
31 		set_bit(DPC_RESET_HA, &ha->dpc_flags);
32 	}
33 }
34 
35 /**
36  * qla4xxx_status_entry - processes status IOCBs
37  * @ha: Pointer to host adapter structure.
38  * @sts_entry: Pointer to status entry structure.
39  **/
40 static void qla4xxx_status_entry(struct scsi_qla_host *ha,
41 				 struct status_entry *sts_entry)
42 {
43 	uint8_t scsi_status;
44 	struct scsi_cmnd *cmd;
45 	struct srb *srb;
46 	struct ddb_entry *ddb_entry;
47 	uint32_t residual;
48 	uint16_t sensebytecnt;
49 
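	/*
	 * Fast path: a command that completed cleanly (SCS_COMPLETE with a
	 * zero SCSI status) needs no further translation, so finish it
	 * immediately.
	 */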
50 	if (sts_entry->completionStatus == SCS_COMPLETE &&
51 	    sts_entry->scsiStatus == 0) {
52 		qla4xxx_process_completed_request(ha,
53 						  le32_to_cpu(sts_entry->
54 							      handle));
55 		return;
56 	}
57 
58 	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
59 	if (!srb) {
60 		/* FIXMEdg: Don't we need to reset ISP in this case??? */
61 		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
62 			      "handle 0x%x, sp=%p. This cmd may have already "
63 			      "been completed.\n", ha->host_no, __func__,
64 			      le32_to_cpu(sts_entry->handle), srb));
65 		return;
66 	}
67 
68 	cmd = srb->cmd;
69 	if (cmd == NULL) {
70 		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
71 			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
72 			      ha->host_no, __func__, le32_to_cpu(sts_entry->handle),
73 			      srb, srb->state));
74 		dev_warn(&ha->pdev->dev, "Command is NULL:"
75 			" already returned to OS (srb=%p)\n", srb);
76 		return;
77 	}
78 
79 	ddb_entry = srb->ddb;
80 	if (ddb_entry == NULL) {
81 		cmd->result = DID_NO_CONNECT << 16;
82 		goto status_entry_exit;
83 	}
84 
85 	residual = le32_to_cpu(sts_entry->residualByteCnt);
86 
87 	/* Translate ISP error to a Linux SCSI error. */
88 	scsi_status = sts_entry->scsiStatus;
89 	switch (sts_entry->completionStatus) {
90 	case SCS_COMPLETE:
91 		if (scsi_status == 0) {
92 			cmd->result = DID_OK << 16;
93 			break;
94 		}
95 
96 		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
97 			cmd->result = DID_ERROR << 16;
98 			break;
99 		}
100 
101 		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
102 			scsi_set_resid(cmd, residual);
103 			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
104 				cmd->underflow)) {
105 
106 				cmd->result = DID_ERROR << 16;
107 
108 				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
109 					"Mid-layer Data underrun0, "
110 					"xferlen = 0x%x, "
111 					"residual = 0x%x\n", ha->host_no,
112 					cmd->device->channel,
113 					cmd->device->id,
114 					cmd->device->lun, __func__,
115 					scsi_bufflen(cmd), residual));
116 				break;
117 			}
118 		}
119 
120 		cmd->result = DID_OK << 16 | scsi_status;
121 
122 		if (scsi_status != SCSI_CHECK_CONDITION)
123 			break;
124 
125 		/* Copy Sense Data into sense buffer. */
126 		memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
127 
128 		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
129 		if (sensebytecnt == 0)
130 			break;
131 
132 		memcpy(cmd->sense_buffer, sts_entry->senseData,
133 		       min(sensebytecnt,
134 			   (uint16_t) sizeof(cmd->sense_buffer)));
135 
136 		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
137 			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
138 			      cmd->device->channel, cmd->device->id,
139 			      cmd->device->lun, __func__,
140 			      sts_entry->senseData[2] & 0x0f,
141 			      sts_entry->senseData[12],
142 			      sts_entry->senseData[13]));
143 
144 		srb->flags |= SRB_GOT_SENSE;
145 		break;
146 
147 	case SCS_INCOMPLETE:
148 		/* Always set the status to DID_ERROR, since
149 		 * all conditions result in that status anyway */
150 		cmd->result = DID_ERROR << 16;
151 		break;
152 
153 	case SCS_RESET_OCCURRED:
154 		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
155 			      ha->host_no, cmd->device->channel,
156 			      cmd->device->id, cmd->device->lun, __func__));
157 
158 		cmd->result = DID_RESET << 16;
159 		break;
160 
161 	case SCS_ABORTED:
162 		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
163 			      ha->host_no, cmd->device->channel,
164 			      cmd->device->id, cmd->device->lun, __func__));
165 
166 		cmd->result = DID_RESET << 16;
167 		break;
168 
169 	case SCS_TIMEOUT:
170 		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
171 			      ha->host_no, cmd->device->channel,
172 			      cmd->device->id, cmd->device->lun));
173 
174 		cmd->result = DID_BUS_BUSY << 16;
175 
176 		/*
177 		 * Mark device missing so that we won't continue to send
178 		 * I/O to this device.	We should get a ddb state change
179 		 * AEN soon.
180 		 */
181 		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
182 			qla4xxx_mark_device_missing(ha, ddb_entry);
183 		break;
184 
185 	case SCS_DATA_UNDERRUN:
186 	case SCS_DATA_OVERRUN:
187 		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
188 			(sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
189 			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
190 				      "residual = 0x%x\n", ha->host_no,
191 				      cmd->device->channel, cmd->device->id,
192 				      cmd->device->lun, __func__, residual));
193 
194 			cmd->result = DID_ERROR << 16;
195 			break;
196 		}
197 
198 		scsi_set_resid(cmd, residual);
199 
200 		/*
201 		 * If there is a scsi_status, it takes precedence over the
202 		 * underflow condition.
203 		 */
204 		if (scsi_status != 0) {
205 			cmd->result = DID_OK << 16 | scsi_status;
206 
207 			if (scsi_status != SCSI_CHECK_CONDITION)
208 				break;
209 
210 			/* Copy Sense Data into sense buffer. */
211 			memset(cmd->sense_buffer, 0,
212 			       sizeof(cmd->sense_buffer));
213 
214 			sensebytecnt =
215 				le16_to_cpu(sts_entry->senseDataByteCnt);
216 			if (sensebytecnt == 0)
217 				break;
218 
219 			memcpy(cmd->sense_buffer, sts_entry->senseData,
220 			       min(sensebytecnt,
221 				   (uint16_t) sizeof(cmd->sense_buffer)));
222 
223 			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
224 				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
225 				      cmd->device->channel, cmd->device->id,
226 				      cmd->device->lun, __func__,
227 				      sts_entry->senseData[2] & 0x0f,
228 				      sts_entry->senseData[12],
229 				      sts_entry->senseData[13]));
230 		} else {
231 			/*
232 			 * If RISC reports underrun and target does not
233 			 * report it then we must have a lost frame, so
234 			 * tell upper layer to retry it by reporting a
235 			 * bus busy.
236 			 */
237 			if ((sts_entry->iscsiFlags &
238 			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
239 				cmd->result = DID_BUS_BUSY << 16;
240 			} else if ((scsi_bufflen(cmd) - residual) <
241 				   cmd->underflow) {
242 				/*
243 				 * Handle mid-layer underflow???
244 				 *
245 				 * For kernels less than 2.4, the driver must
246 				 * return an error if an underflow is detected.
247 				 * For kernels 2.4 and above, the mid-layer
248 				 * will apparently handle the underflow by
249 				 * detecting the residual count --
250 				 * unfortunately, we do not see where this is
251 				 * actually being done.  In the interim, we
252 				 * will return DID_ERROR.
253 				 */
254 				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
255 					"Mid-layer Data underrun1, "
256 					"xferlen = 0x%x, "
257 					"residual = 0x%x\n", ha->host_no,
258 					cmd->device->channel,
259 					cmd->device->id,
260 					cmd->device->lun, __func__,
261 					scsi_bufflen(cmd), residual));
262 
263 				cmd->result = DID_ERROR << 16;
264 			} else {
265 				cmd->result = DID_OK << 16;
266 			}
267 		}
268 		break;
269 
270 	case SCS_DEVICE_LOGGED_OUT:
271 	case SCS_DEVICE_UNAVAILABLE:
272 		/*
273 		 * Mark device missing so that we won't continue to
274 		 * send I/O to this device.  We should get a ddb
275 		 * state change AEN soon.
276 		 */
277 		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
278 			qla4xxx_mark_device_missing(ha, ddb_entry);
279 
280 		cmd->result = DID_BUS_BUSY << 16;
281 		break;
282 
283 	case SCS_QUEUE_FULL:
284 		/*
285 		 * SCSI Mid-Layer handles device queue full
286 		 */
287 		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
288 		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
289 			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
290 			      " iResp=%02x\n", ha->host_no, cmd->device->id,
291 			      cmd->device->lun, __func__,
292 			      sts_entry->completionStatus,
293 			      sts_entry->scsiStatus, sts_entry->state_flags,
294 			      sts_entry->iscsiFlags,
295 			      sts_entry->iscsiResponse));
296 		break;
297 
298 	default:
299 		cmd->result = DID_ERROR << 16;
300 		break;
301 	}
302 
303 status_entry_exit:
304 
305 	/* complete the request */
306 	srb->cc_stat = sts_entry->completionStatus;
307 	qla4xxx_srb_compl(ha, srb);
308 }
309 
310 /**
311  * qla4xxx_process_response_queue - process response queue completions
312  * @ha: Pointer to host adapter structure.
313  *
314  * This routine processes response queue completions in interrupt context.
315  * Hardware_lock locked upon entry
316  **/
317 static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
318 {
319 	uint32_t count = 0;
320 	struct srb *srb = NULL;
321 	struct status_entry *sts_entry;
322 
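	/*
	 * The firmware updates rsp_q_in in the shadow register area in host
	 * memory, so the loop below can detect new responses without an MMIO
	 * read per iteration; rsp_q_out is written back to the chip once all
	 * entries have been consumed.
	 */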
323 	/* Process all responses from response queue */
324 	while ((ha->response_in =
325 		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
326 	       ha->response_out) {
327 		sts_entry = (struct status_entry *) ha->response_ptr;
328 		count++;
329 
330 		/* Advance pointers for next entry */
331 		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
332 			ha->response_out = 0;
333 			ha->response_ptr = ha->response_ring;
334 		} else {
335 			ha->response_out++;
336 			ha->response_ptr++;
337 		}
338 
339 		/* process entry */
340 		switch (sts_entry->hdr.entryType) {
341 		case ET_STATUS:
342 			/*
343 			 * Common status - Single completion posted in single
344 			 * IOSB.
345 			 */
346 			qla4xxx_status_entry(ha, sts_entry);
347 			break;
348 
349 		case ET_PASSTHRU_STATUS:
350 			break;
351 
352 		case ET_STATUS_CONTINUATION:
353 			/* Just throw away the status continuation entries */
354 			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
355 				      "- ignoring\n", ha->host_no, __func__));
356 			break;
357 
358 		case ET_COMMAND:
359 			/* ISP device queue is full. Command not
360 			 * accepted by ISP.  Queue command for
361 			 * later */
362 
363 			srb = qla4xxx_del_from_active_array(ha,
364 						    le32_to_cpu(sts_entry->
365 								handle));
366 			if (srb == NULL)
367 				goto exit_prq_invalid_handle;
368 
369 			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
370 				      "srb %p\n", ha->host_no, __func__, srb));
371 
372 			/* Retry normally by sending it back with
373 			 * DID_BUS_BUSY */
374 			srb->cmd->result = DID_BUS_BUSY << 16;
375 			qla4xxx_srb_compl(ha, srb);
376 			break;
377 
378 		case ET_CONTINUE:
379 			/* Just throw away the continuation entries */
380 			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
381 				      "ignoring\n", ha->host_no, __func__));
382 			break;
383 
384 		default:
385 			/*
386 			 * Invalid entry in response queue, reset RISC
387 			 * firmware.
388 			 */
389 			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
390 				      "response queue\n", ha->host_no,
391 				      __func__,
392 				      sts_entry->hdr.entryType));
393 			goto exit_prq_error;
394 		}
395 	}
396 
397 	/*
398 	 * Done with responses, update the ISP.  For QLA4010, this also
399 	 * clears the interrupt.
400 	 */
401 	writel(ha->response_out, &ha->reg->rsp_q_out);
402 	readl(&ha->reg->rsp_q_out);
403 
404 	return;
405 
406 exit_prq_invalid_handle:
407 	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
408 		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
409 		      sts_entry->completionStatus));
410 
411 exit_prq_error:
412 	writel(ha->response_out, &ha->reg->rsp_q_out);
413 	readl(&ha->reg->rsp_q_out);
414 
415 	set_bit(DPC_RESET_HA, &ha->dpc_flags);
416 }
417 
418 /**
419  * qla4xxx_isr_decode_mailbox - decodes mailbox status
420  * @ha: Pointer to host adapter structure.
421  * @mbox_status: Mailbox status.
422  *
423  * This routine decodes the mailbox status during the ISR.
424  * Hardware_lock locked upon entry. Runs in interrupt context.
425  **/
426 static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
427 				       uint32_t mbox_status)
428 {
429 	int i;
430 	uint32_t mbox_stat2, mbox_stat3;
431 
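	/*
	 * Mailbox status values fall into three groups: command completion
	 * statuses are copied back for a waiting mailbox command,
	 * asynchronous event statuses are handled here or queued for the
	 * DPC routine, and anything else is only logged.
	 */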
432 	if ((mbox_status == MBOX_STS_BUSY) ||
433 	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
434 	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
435 		ha->mbox_status[0] = mbox_status;
436 
437 		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
438 			/*
439 			 * Copy all mailbox registers to a temporary
440 			 * location and set mailbox command done flag
441 			 */
442 			for (i = 1; i < ha->mbox_status_count; i++)
443 				ha->mbox_status[i] =
444 					readl(&ha->reg->mailbox[i]);
445 
446 			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
447 		}
448 	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
449 		/* Immediately process the AENs that don't require much work.
450 		 * Only queue the database_changed AENs */
451 		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
452 			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
453 				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
454 					readl(&ha->reg->mailbox[i]);
455 			ha->aen_log.count++;
456 		}
457 		switch (mbox_status) {
458 		case MBOX_ASTS_SYSTEM_ERROR:
459 			/* Log Mailbox registers */
460 			if (ql4xdontresethba) {
461 				DEBUG2(printk("%s: Don't Reset HBA\n",
462 					      __func__));
463 			} else {
464 				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
465 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
466 			}
467 			break;
468 
469 		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
470 		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
471 		case MBOX_ASTS_NVRAM_INVALID:
472 		case MBOX_ASTS_IP_ADDRESS_CHANGED:
473 		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
474 			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
475 				      "Reset HA\n", ha->host_no, mbox_status));
476 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
477 			break;
478 
479 		case MBOX_ASTS_LINK_UP:
480 			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
481 				      ha->host_no, mbox_status));
482 			set_bit(AF_LINK_UP, &ha->flags);
483 			break;
484 
485 		case MBOX_ASTS_LINK_DOWN:
486 			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
487 				      ha->host_no, mbox_status));
488 			clear_bit(AF_LINK_UP, &ha->flags);
489 			break;
490 
491 		case MBOX_ASTS_HEARTBEAT:
492 			ha->seconds_since_last_heartbeat = 0;
493 			break;
494 
495 		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
496 			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
497 				      "ACQUIRED\n", ha->host_no, mbox_status));
498 			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
499 			break;
500 
501 		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
502 		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
503 							   * mode
504 							   * only */
505 		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
506 		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
507 		case MBOX_ASTS_SUBNET_STATE_CHANGE:
508 			/* No action */
509 			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
510 				      mbox_status));
511 			break;
512 
513 		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
514 			mbox_stat2 = readl(&ha->reg->mailbox[2]);
515 			mbox_stat3 = readl(&ha->reg->mailbox[3]);
516 
517 			if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
518 				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
519 			else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
520 				set_bit(DPC_RESET_HA, &ha->dpc_flags);
521 			break;
522 
523 		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
524 		case MBOX_ASTS_DNS:
525 			/* No action */
526 			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
527 				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
528 				      ha->host_no, mbox_status,
529 				      readl(&ha->reg->mailbox[1]),
530 				      readl(&ha->reg->mailbox[2])));
531 			break;
532 
533 		case MBOX_ASTS_SELF_TEST_FAILED:
534 		case MBOX_ASTS_LOGIN_FAILED:
535 			/* No action */
536 			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
537 				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
538 				      ha->host_no, mbox_status,
539 				      readl(&ha->reg->mailbox[1]),
540 				      readl(&ha->reg->mailbox[2]),
541 				      readl(&ha->reg->mailbox[3])));
542 			break;
543 
544 		case MBOX_ASTS_DATABASE_CHANGED:
545 			/* Queue AEN information and process it in the DPC
546 			 * routine */
547 			if (ha->aen_q_count > 0) {
548 
549 				/* decrement available counter */
550 				ha->aen_q_count--;
551 
552 				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
553 					ha->aen_q[ha->aen_in].mbox_sts[i] =
554 						readl(&ha->reg->mailbox[i]);
555 
556 				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;
557 
558 				/* print debug message */
559 				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
560 					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
561 					      ha->host_no, ha->aen_in,
562 					      mbox_status,
563 					      ha->aen_q[ha->aen_in].mbox_sts[1],
564 					      ha->aen_q[ha->aen_in].mbox_sts[2],
565 					      ha->aen_q[ha->aen_in].mbox_sts[3],
566 					      ha->aen_q[ha->aen_in].mbox_sts[4]));
567 				/* advance pointer */
568 				ha->aen_in++;
569 				if (ha->aen_in == MAX_AEN_ENTRIES)
570 					ha->aen_in = 0;
571 
572 				/* The DPC routine will process the aen */
573 				set_bit(DPC_AEN, &ha->dpc_flags);
574 			} else {
575 				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
576 					      "overflowed!  AEN LOST!!\n",
577 					      ha->host_no, __func__,
578 					      mbox_status));
579 
580 				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
581 					      ha->host_no));
582 
583 				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
584 					DEBUG2(printk("AEN[%d] %04x %04x %04x "
585 						      "%04x\n", i,
586 						      ha->aen_q[i].mbox_sts[0],
587 						      ha->aen_q[i].mbox_sts[1],
588 						      ha->aen_q[i].mbox_sts[2],
589 						      ha->aen_q[i].mbox_sts[3]));
590 				}
591 			}
592 			break;
593 
594 		default:
595 			DEBUG2(printk(KERN_WARNING
596 				      "scsi%ld: AEN %04x UNKNOWN\n",
597 				      ha->host_no, mbox_status));
598 			break;
599 		}
600 	} else {
601 		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
602 			      ha->host_no, mbox_status));
603 
604 		ha->mbox_status[0] = mbox_status;
605 	}
606 }
607 
608 /**
609  * qla4xxx_interrupt_service_routine - interrupt service routine
610  * @ha: pointer to host adapter structure.
611  * @intr_status: Interrupt status to be serviced.
612  * This is the main interrupt service routine.
613  * hardware_lock locked upon entry. Runs in interrupt context.
614  **/
615 void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
616 				       uint32_t intr_status)
617 {
618 	/* Process response queue interrupt. */
619 	if (intr_status & CSR_SCSI_COMPLETION_INTR)
620 		qla4xxx_process_response_queue(ha);
621 
622 	/* Process mailbox/asynch event interrupt. */
623 	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
624 		qla4xxx_isr_decode_mailbox(ha,
625 					   readl(&ha->reg->mailbox[0]));
626 
627 		/* Clear Mailbox Interrupt */
628 		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
629 		       &ha->reg->ctrl_status);
630 		readl(&ha->reg->ctrl_status);
631 	}
632 }
633 
634 /**
635  * qla4xxx_intr_handler - hardware interrupt handler.
636  * @irq: Unused
637  * @dev_id: Pointer to host adapter structure
638  **/
639 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
640 {
641 	struct scsi_qla_host *ha;
642 	uint32_t intr_status;
643 	unsigned long flags = 0;
644 	uint8_t reqs_count = 0;
645 
646 	ha = (struct scsi_qla_host *) dev_id;
647 	if (!ha) {
648 		DEBUG2(printk(KERN_INFO
649 			      "qla4xxx: Interrupt with NULL host ptr\n"));
650 		return IRQ_NONE;
651 	}
652 
653 	spin_lock_irqsave(&ha->hardware_lock, flags);
654 
655 	ha->isr_count++;
656 	/*
657 	 * Repeatedly service interrupts up to a maximum of
658 	 * MAX_REQS_SERVICED_PER_INTR
659 	 */
660 	while (1) {
661 		/* Read interrupt status.  Check the shadow response-queue
662 		 * in-pointer first so completions posted by the firmware
663 		 * are serviced without an MMIO read of ctrl_status. */
664 		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
665 		    ha->response_out)
666 			intr_status = CSR_SCSI_COMPLETION_INTR;
667 		else
668 			intr_status = readl(&ha->reg->ctrl_status);
669 
670 		if ((intr_status &
671 		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
672 		    0) {
673 			if (reqs_count == 0)
674 				ha->spurious_int_count++;
675 			break;
676 		}
677 
678 		if (intr_status & CSR_FATAL_ERROR) {
679 			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
680 				      "Status 0x%04x\n", ha->host_no,
681 				      readl(isp_port_error_status(ha))));
682 
683 			/* Issue Soft Reset to clear this error condition.
684 			 * This will prevent the RISC from repeatedly
685 			 * interrupting the driver, thus allowing the DPC to
686 			 * get scheduled to continue error recovery.
687 			 * NOTE: Disabling RISC interrupts does not work in
688 			 * this case, as CSR_FATAL_ERROR overrides
689 			 * CSR_SCSI_INTR_ENABLE */
690 			if ((readl(&ha->reg->ctrl_status) &
691 			     CSR_SCSI_RESET_INTR) == 0) {
692 				writel(set_rmask(CSR_SOFT_RESET),
693 				       &ha->reg->ctrl_status);
694 				readl(&ha->reg->ctrl_status);
695 			}
696 
697 			writel(set_rmask(CSR_FATAL_ERROR),
698 			       &ha->reg->ctrl_status);
699 			readl(&ha->reg->ctrl_status);
700 
701 			__qla4xxx_disable_intrs(ha);
702 
703 			set_bit(DPC_RESET_HA, &ha->dpc_flags);
704 
705 			break;
706 		} else if (intr_status & CSR_SCSI_RESET_INTR) {
707 			clear_bit(AF_ONLINE, &ha->flags);
708 			__qla4xxx_disable_intrs(ha);
709 
710 			writel(set_rmask(CSR_SCSI_RESET_INTR),
711 			       &ha->reg->ctrl_status);
712 			readl(&ha->reg->ctrl_status);
713 
714 			if (!ql4_mod_unload)
715 				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
716 
717 			break;
718 		} else if (intr_status & INTR_PENDING) {
719 			qla4xxx_interrupt_service_routine(ha, intr_status);
720 			ha->total_io_count++;
721 			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
722 				break;
723 
724 			intr_status = 0;
725 		}
726 	}
727 
728 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
729 
730 	return IRQ_HANDLED;
731 }
732 
733 /**
734  * qla4xxx_process_aen - processes AENs generated by firmware
735  * @ha: pointer to host adapter structure.
736  * @process_aen: type of AENs to process
737  *
738  * Processes specific types of Asynchronous Events generated by firmware.
739  * The type of AENs to process is specified by process_aen and can be
740  *	PROCESS_ALL_AENS	 0
741  *	FLUSH_DDB_CHANGED_AENS	 1
742  *	RELOGIN_DDB_CHANGED_AENS 2
743  **/
744 void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
745 {
746 	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
747 	struct aen *aen;
748 	int i;
749 	unsigned long flags;
750 
751 	spin_lock_irqsave(&ha->hardware_lock, flags);
752 	while (ha->aen_out != ha->aen_in) {
753 		aen = &ha->aen_q[ha->aen_out];
754 		/* copy aen information to local structure */
755 		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
756 			mbox_sts[i] = aen->mbox_sts[i];
757 
758 		ha->aen_q_count++;
759 		ha->aen_out++;
760 
761 		if (ha->aen_out == MAX_AEN_ENTRIES)
762 			ha->aen_out = 0;
763 
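		/*
		 * The AEN contents were copied to mbox_sts above, so
		 * hardware_lock can be dropped while the DDB handlers below
		 * run.
		 */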
764 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
765 
766 		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
767 			" mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
768 			(ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
769 			mbox_sts[0], mbox_sts[1], mbox_sts[2],
770 			mbox_sts[3], mbox_sts[4]));
771 
772 		switch (mbox_sts[0]) {
773 		case MBOX_ASTS_DATABASE_CHANGED:
774 			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
775 				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
776 					      "[%d] state=%04x FLUSHED!\n",
777 					      ha->host_no, ha->aen_out,
778 					      mbox_sts[0], mbox_sts[2],
779 					      mbox_sts[3]));
780 				break;
781 			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
782 				/* for use during init time, we only want to
783 				 * relogin non-active ddbs */
784 				struct ddb_entry *ddb_entry;
785 
786 				ddb_entry =
787 					/* FIXME: name length? */
788 					qla4xxx_lookup_ddb_by_fw_index(ha,
789 								       mbox_sts[2]);
790 				if (!ddb_entry)
791 					break;
792 
793 				ddb_entry->dev_scan_wait_to_complete_relogin =
794 					0;
795 				ddb_entry->dev_scan_wait_to_start_relogin =
796 					jiffies +
797 					((ddb_entry->default_time2wait +
798 					  4) * HZ);
799 
800 				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
801 					      " RELOGIN after %d seconds\n",
802 					      ha->host_no,
803 					      ddb_entry->fw_ddb_index,
804 					      ddb_entry->default_time2wait +
805 					      4));
806 				break;
807 			}
808 
809 			if (mbox_sts[1] == 0) {	/* Global DB change. */
810 				qla4xxx_reinitialize_ddb_list(ha);
811 			} else if (mbox_sts[1] == 1) {	/* Specific device. */
812 				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
813 							    mbox_sts[3]);
814 			}
815 			break;
816 		}
817 		spin_lock_irqsave(&ha->hardware_lock, flags);
818 	}
819 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
820 }
821 
822