xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_isr.c (revision a1e58bbd)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2005 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11 
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
14 static void qla2x00_status_entry(scsi_qla_host_t *, void *);
15 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
16 static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
17 static void qla2x00_ms_entry(scsi_qla_host_t *, ms_iocb_entry_t *);
18 
19 static void qla24xx_ms_entry(scsi_qla_host_t *, struct ct_entry_24xx *);
20 
21 /**
22  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
23  * @irq: Interrupt number
24  * @dev_id: SCSI driver HA context
25  *
26  * Called by system whenever the host adapter generates an interrupt.
27  *
28  * Returns handled flag.
29  */
30 irqreturn_t
31 qla2100_intr_handler(int irq, void *dev_id)
32 {
33 	scsi_qla_host_t	*ha;
34 	struct device_reg_2xxx __iomem *reg;
35 	int		status;
36 	unsigned long	flags;
37 	unsigned long	iter;
38 	uint16_t	hccr;
39 	uint16_t	mb[4];
40 
41 	ha = (scsi_qla_host_t *) dev_id;
42 	if (!ha) {
43 		printk(KERN_INFO
44 		    "%s(): NULL host pointer\n", __func__);
45 		return (IRQ_NONE);
46 	}
47 
48 	reg = &ha->iobase->isp;
49 	status = 0;
50 
51 	spin_lock_irqsave(&ha->hardware_lock, flags);
52 	for (iter = 50; iter--; ) {
53 		hccr = RD_REG_WORD(&reg->hccr);
54 		if (hccr & HCCR_RISC_PAUSE) {
55 			if (pci_channel_offline(ha->pdev))
56 				break;
57 
58 			/*
59 			 * Issue a "HARD" reset in order for the RISC interrupt
60 			 * bit to be cleared.  Schedule a big hammer to get
61 			 * out of the RISC PAUSED state.
62 			 */
63 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
64 			RD_REG_WORD(&reg->hccr);
65 
66 			ha->isp_ops->fw_dump(ha, 1);
67 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
68 			break;
69 		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
70 			break;
71 
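		/*
		 * Semaphore bit 0 set means the RISC has posted mailbox data
		 * (a command completion or asynchronous event); otherwise the
		 * interrupt signals new response-queue entries.
		 */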
72 		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
73 			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
74 			RD_REG_WORD(&reg->hccr);
75 
76 			/* Get mailbox data. */
77 			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
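			/*
			 * Mailbox 0 encodes the interrupt type:
			 * 0x4000-0x7fff are mailbox command completion codes,
			 * 0x8000-0xbfff are asynchronous event codes.
			 */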
78 			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
79 				qla2x00_mbx_completion(ha, mb[0]);
80 				status |= MBX_INTERRUPT;
81 			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
82 				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
83 				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
84 				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
85 				qla2x00_async_event(ha, mb);
86 			} else {
87 				/*EMPTY*/
88 				DEBUG2(printk("scsi(%ld): Unrecognized "
89 				    "interrupt type (%d).\n",
90 				    ha->host_no, mb[0]));
91 			}
92 			/* Release mailbox registers. */
93 			WRT_REG_WORD(&reg->semaphore, 0);
94 			RD_REG_WORD(&reg->semaphore);
95 		} else {
96 			qla2x00_process_response_queue(ha);
97 
98 			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
99 			RD_REG_WORD(&reg->hccr);
100 		}
101 	}
102 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
103 
104 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
105 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
106 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
107 		complete(&ha->mbx_intr_comp);
108 	}
109 
110 	return (IRQ_HANDLED);
111 }
112 
113 /**
114  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
115  * @irq: Interrupt number
116  * @dev_id: SCSI driver HA context
117  *
118  * Called by system whenever the host adapter generates an interrupt.
119  *
120  * Returns handled flag.
121  */
122 irqreturn_t
123 qla2300_intr_handler(int irq, void *dev_id)
124 {
125 	scsi_qla_host_t	*ha;
126 	struct device_reg_2xxx __iomem *reg;
127 	int		status;
128 	unsigned long	flags;
129 	unsigned long	iter;
130 	uint32_t	stat;
131 	uint16_t	hccr;
132 	uint16_t	mb[4];
133 
134 	ha = (scsi_qla_host_t *) dev_id;
135 	if (!ha) {
136 		printk(KERN_INFO
137 		    "%s(): NULL host pointer\n", __func__);
138 		return (IRQ_NONE);
139 	}
140 
141 	reg = &ha->iobase->isp;
142 	status = 0;
143 
144 	spin_lock_irqsave(&ha->hardware_lock, flags);
145 	for (iter = 50; iter--; ) {
146 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
147 		if (stat & HSR_RISC_PAUSED) {
148 			if (pci_channel_offline(ha->pdev))
149 				break;
150 
151 			hccr = RD_REG_WORD(&reg->hccr);
152 			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
153 				qla_printk(KERN_INFO, ha, "Parity error -- "
154 				    "HCCR=%x, Dumping firmware!\n", hccr);
155 			else
156 				qla_printk(KERN_INFO, ha, "RISC paused -- "
157 				    "HCCR=%x, Dumping firmware!\n", hccr);
158 
159 			/*
160 			 * Issue a "HARD" reset in order for the RISC
161 			 * interrupt bit to be cleared.  Schedule a big
162 			 * hammer to get out of the RISC PAUSED state.
163 			 */
164 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
165 			RD_REG_WORD(&reg->hccr);
166 
167 			ha->isp_ops->fw_dump(ha, 1);
168 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
169 			break;
170 		} else if ((stat & HSR_RISC_INT) == 0)
171 			break;
172 
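		/*
		 * The low byte of host_status identifies the interrupt source:
		 * 0x01/0x02/0x10/0x11 mailbox command completion, 0x12
		 * asynchronous event, 0x13 response queue update, 0x15/0x16
		 * fast-post SCSI completions.  Mailbox 0 is carried in the
		 * upper word of host_status.
		 */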
173 		switch (stat & 0xff) {
174 		case 0x1:
175 		case 0x2:
176 		case 0x10:
177 		case 0x11:
178 			qla2x00_mbx_completion(ha, MSW(stat));
179 			status |= MBX_INTERRUPT;
180 
181 			/* Release mailbox registers. */
182 			WRT_REG_WORD(&reg->semaphore, 0);
183 			break;
184 		case 0x12:
185 			mb[0] = MSW(stat);
186 			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
187 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
188 			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
189 			qla2x00_async_event(ha, mb);
190 			break;
191 		case 0x13:
192 			qla2x00_process_response_queue(ha);
193 			break;
194 		case 0x15:
195 			mb[0] = MBA_CMPLT_1_16BIT;
196 			mb[1] = MSW(stat);
197 			qla2x00_async_event(ha, mb);
198 			break;
199 		case 0x16:
200 			mb[0] = MBA_SCSI_COMPLETION;
201 			mb[1] = MSW(stat);
202 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
203 			qla2x00_async_event(ha, mb);
204 			break;
205 		default:
206 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
207 			    "(%d).\n",
208 			    ha->host_no, stat & 0xff));
209 			break;
210 		}
211 		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
212 		RD_REG_WORD_RELAXED(&reg->hccr);
213 	}
214 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
215 
216 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
217 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
218 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
219 		complete(&ha->mbx_intr_comp);
220 	}
221 
222 	return (IRQ_HANDLED);
223 }
224 
225 /**
226  * qla2x00_mbx_completion() - Process mailbox command completions.
227  * @ha: SCSI driver HA context
228  * @mb0: Mailbox0 register
229  */
230 static void
231 qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
232 {
233 	uint16_t	cnt;
234 	uint16_t __iomem *wptr;
235 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
236 
237 	/* Load return mailbox registers. */
238 	ha->flags.mbox_int = 1;
239 	ha->mailbox_out[0] = mb0;
240 	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
241 
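	/*
	 * Mailboxes 4 and 5 are sampled via qla2x00_debounce_register()
	 * (read until two consecutive reads agree); on the ISP2200 the upper
	 * mailbox registers sit at a different offset, so the pointer is
	 * rebased at register 8.
	 */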
242 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
243 		if (IS_QLA2200(ha) && cnt == 8)
244 			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
245 		if (cnt == 4 || cnt == 5)
246 			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
247 		else
248 			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
249 
250 		wptr++;
251 	}
252 
253 	if (ha->mcp) {
254 		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
255 		    __func__, ha->host_no, ha->mcp->mb[0]));
256 	} else {
257 		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
258 		    __func__, ha->host_no));
259 	}
260 }
261 
262 /**
263  * qla2x00_async_event() - Process asynchronous events.
264  * @ha: SCSI driver HA context
265  * @mb: Mailbox registers (0 - 3)
266  */
267 void
268 qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
269 {
270 #define LS_UNKNOWN	2
271 	static char	*link_speeds[5] = { "1", "2", "?", "4", "8" };
272 	char		*link_speed;
273 	uint16_t	handle_cnt;
274 	uint16_t	cnt;
275 	uint32_t	handles[5];
276 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
277 	uint32_t	rscn_entry, host_pid;
278 	uint8_t		rscn_queue_index;
279 
280 	/* Setup to process RIO completion. */
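	/*
	 * Reduced Interrupt Operation: a single event can carry up to five
	 * 16-bit or two 32-bit command handles in the mailbox registers.
	 * Collect the handles here and normalize mb[0] to
	 * MBA_SCSI_COMPLETION so the switch below processes them uniformly.
	 */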
281 	handle_cnt = 0;
282 	switch (mb[0]) {
283 	case MBA_SCSI_COMPLETION:
284 		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
285 		handle_cnt = 1;
286 		break;
287 	case MBA_CMPLT_1_16BIT:
288 		handles[0] = mb[1];
289 		handle_cnt = 1;
290 		mb[0] = MBA_SCSI_COMPLETION;
291 		break;
292 	case MBA_CMPLT_2_16BIT:
293 		handles[0] = mb[1];
294 		handles[1] = mb[2];
295 		handle_cnt = 2;
296 		mb[0] = MBA_SCSI_COMPLETION;
297 		break;
298 	case MBA_CMPLT_3_16BIT:
299 		handles[0] = mb[1];
300 		handles[1] = mb[2];
301 		handles[2] = mb[3];
302 		handle_cnt = 3;
303 		mb[0] = MBA_SCSI_COMPLETION;
304 		break;
305 	case MBA_CMPLT_4_16BIT:
306 		handles[0] = mb[1];
307 		handles[1] = mb[2];
308 		handles[2] = mb[3];
309 		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
310 		handle_cnt = 4;
311 		mb[0] = MBA_SCSI_COMPLETION;
312 		break;
313 	case MBA_CMPLT_5_16BIT:
314 		handles[0] = mb[1];
315 		handles[1] = mb[2];
316 		handles[2] = mb[3];
317 		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
318 		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
319 		handle_cnt = 5;
320 		mb[0] = MBA_SCSI_COMPLETION;
321 		break;
322 	case MBA_CMPLT_2_32BIT:
323 		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
324 		handles[1] = le32_to_cpu(
325 		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
326 		    RD_MAILBOX_REG(ha, reg, 6));
327 		handle_cnt = 2;
328 		mb[0] = MBA_SCSI_COMPLETION;
329 		break;
330 	default:
331 		break;
332 	}
333 
334 	switch (mb[0]) {
335 	case MBA_SCSI_COMPLETION:	/* Fast Post */
336 		if (!ha->flags.online)
337 			break;
338 
339 		for (cnt = 0; cnt < handle_cnt; cnt++)
340 			qla2x00_process_completed_request(ha, handles[cnt]);
341 		break;
342 
343 	case MBA_RESET:			/* Reset */
344 		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));
345 
346 		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
347 		break;
348 
349 	case MBA_SYSTEM_ERR:		/* System Error */
350 		qla_printk(KERN_INFO, ha,
351 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
352 		    mb[1], mb[2], mb[3]);
353 
354 		ha->isp_ops->fw_dump(ha, 1);
355 
356 		if (IS_FWI2_CAPABLE(ha)) {
357 			if (mb[1] == 0 && mb[2] == 0) {
358 				qla_printk(KERN_ERR, ha,
359 				    "Unrecoverable Hardware Error: adapter "
360 				    "marked OFFLINE!\n");
361 				ha->flags.online = 0;
362 			} else
363 				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
364 		} else if (mb[1] == 0) {
365 			qla_printk(KERN_INFO, ha,
366 			    "Unrecoverable Hardware Error: adapter marked "
367 			    "OFFLINE!\n");
368 			ha->flags.online = 0;
369 		} else
370 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
371 		break;
372 
373 	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
374 		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
375 		    ha->host_no));
376 		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
377 
378 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
379 		break;
380 
381 	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
382 		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
383 		    ha->host_no));
384 		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
385 
386 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
387 		break;
388 
389 	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
390 		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
391 		    ha->host_no));
392 		break;
393 
394 	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
395 		DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no,
396 		    mb[1]));
397 		qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]);
398 
399 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
400 			atomic_set(&ha->loop_state, LOOP_DOWN);
401 			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
402 			qla2x00_mark_all_devices_lost(ha, 1);
403 		}
404 
405 		if (ha->parent) {
406 			atomic_set(&ha->vp_state, VP_FAILED);
407 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
408 		}
409 
410 		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
411 
412 		ha->flags.management_server_logged_in = 0;
413 		break;
414 
415 	case MBA_LOOP_UP:		/* Loop Up Event */
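		/*
		 * mb[1] carries the negotiated link rate index into
		 * link_speeds[]; ISP2100/ISP2200 parts only run at 1 Gbps.
		 * Out-of-range values are reported as unknown ("?").
		 */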
416 		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
417 			link_speed = link_speeds[0];
418 			ha->link_data_rate = PORT_SPEED_1GB;
419 		} else {
420 			link_speed = link_speeds[LS_UNKNOWN];
421 			if (mb[1] < 5)
422 				link_speed = link_speeds[mb[1]];
423 			ha->link_data_rate = mb[1];
424 		}
425 
426 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
427 		    ha->host_no, link_speed));
428 		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
429 		    link_speed);
430 
431 		ha->flags.management_server_logged_in = 0;
432 		break;
433 
434 	case MBA_LOOP_DOWN:		/* Loop Down Event */
435 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
436 		    ha->host_no, mb[1]));
437 		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);
438 
439 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
440 			atomic_set(&ha->loop_state, LOOP_DOWN);
441 			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
442 			ha->device_flags |= DFLG_NO_CABLE;
443 			qla2x00_mark_all_devices_lost(ha, 1);
444 		}
445 
446 		if (ha->parent) {
447 			atomic_set(&ha->vp_state, VP_FAILED);
448 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
449 		}
450 
451 		ha->flags.management_server_logged_in = 0;
452 		ha->link_data_rate = PORT_SPEED_UNKNOWN;
453 		if (ql2xfdmienable)
454 			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
455 		break;
456 
457 	case MBA_LIP_RESET:		/* LIP reset occurred */
458 		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
459 		    ha->host_no, mb[1]));
460 		qla_printk(KERN_INFO, ha,
461 		    "LIP reset occured (%x).\n", mb[1]);
462 
463 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
464 			atomic_set(&ha->loop_state, LOOP_DOWN);
465 			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
466 			qla2x00_mark_all_devices_lost(ha, 1);
467 		}
468 
469 		if (ha->parent) {
470 			atomic_set(&ha->vp_state, VP_FAILED);
471 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
472 		}
473 
474 		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
475 
476 		ha->operating_mode = LOOP;
477 		ha->flags.management_server_logged_in = 0;
478 		break;
479 
480 	case MBA_POINT_TO_POINT:	/* Point-to-Point */
481 		if (IS_QLA2100(ha))
482 			break;
483 
484 		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
485 		    ha->host_no));
486 
487 		/*
488 		 * Until there's a transition from loop down to loop up, treat
489 		 * this as loop down only.
490 		 */
491 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
492 			atomic_set(&ha->loop_state, LOOP_DOWN);
493 			if (!atomic_read(&ha->loop_down_timer))
494 				atomic_set(&ha->loop_down_timer,
495 				    LOOP_DOWN_TIME);
496 			qla2x00_mark_all_devices_lost(ha, 1);
497 		}
498 
499 		if (ha->parent) {
500 			atomic_set(&ha->vp_state, VP_FAILED);
501 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
502 		}
503 
504 		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
505 			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
506 		}
507 		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
508 
509 		ha->flags.gpsc_supported = 1;
510 		ha->flags.management_server_logged_in = 0;
511 		break;
512 
513 	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
514 		if (IS_QLA2100(ha))
515 			break;
516 
517 		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
518 		    "received.\n",
519 		    ha->host_no));
520 		qla_printk(KERN_INFO, ha,
521 		    "Configuration change detected: value=%x.\n", mb[1]);
522 
523 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
524 			atomic_set(&ha->loop_state, LOOP_DOWN);
525 			if (!atomic_read(&ha->loop_down_timer))
526 				atomic_set(&ha->loop_down_timer,
527 				    LOOP_DOWN_TIME);
528 			qla2x00_mark_all_devices_lost(ha, 1);
529 		}
530 
531 		if (ha->parent) {
532 			atomic_set(&ha->vp_state, VP_FAILED);
533 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
534 		}
535 
536 		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
537 		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
538 		break;
539 
540 	case MBA_PORT_UPDATE:		/* Port database update */
541 		/*
542 		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
543 		 * event etc. earlier indicating loop is down) then process
544 		 * it.  Otherwise ignore it and wait for an RSCN to come in.
545 		 */
546 		atomic_set(&ha->loop_down_timer, 0);
547 		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
548 		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
549 			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
550 			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
551 			    mb[2], mb[3]));
552 			break;
553 		}
554 
555 		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
556 		    ha->host_no));
557 		DEBUG(printk(KERN_INFO
558 		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
559 		    ha->host_no, mb[1], mb[2], mb[3]));
560 
561 		/*
562 		 * Mark all devices as missing so we will login again.
563 		 */
564 		atomic_set(&ha->loop_state, LOOP_UP);
565 
566 		qla2x00_mark_all_devices_lost(ha, 1);
567 
568 		ha->flags.rscn_queue_overflow = 1;
569 
570 		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
571 		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
572 		break;
573 
574 	case MBA_RSCN_UPDATE:		/* State Change Registration */
575 		/* Check if the Vport has issued a SCR */
576 		if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
577 			break;
578 		/* Only handle SCNs for our Vport index. */
579 		if (ha->flags.npiv_supported && ha->vp_idx != mb[3])
580 			break;
581 
582 		DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
583 		    ha->host_no));
584 		DEBUG(printk(KERN_INFO
585 		    "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
586 		    ha->host_no, mb[1], mb[2], mb[3]));
587 
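		/*
		 * Rebuild the 24-bit affected port ID from mailboxes 1 and 2
		 * and drop RSCNs that refer to this adapter's own port ID.
		 */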
588 		rscn_entry = (mb[1] << 16) | mb[2];
589 		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
590 		    ha->d_id.b.al_pa;
591 		if (rscn_entry == host_pid) {
592 			DEBUG(printk(KERN_INFO
593 			    "scsi(%ld): Ignoring RSCN update to local host "
594 			    "port ID (%06x)\n",
595 			    ha->host_no, host_pid));
596 			break;
597 		}
598 
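		/*
		 * Stash the entry in the circular rscn_queue; if the queue is
		 * full, flag an overflow so later RSCN processing knows
		 * entries were dropped.
		 */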
599 		rscn_queue_index = ha->rscn_in_ptr + 1;
600 		if (rscn_queue_index == MAX_RSCN_COUNT)
601 			rscn_queue_index = 0;
602 		if (rscn_queue_index != ha->rscn_out_ptr) {
603 			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
604 			ha->rscn_in_ptr = rscn_queue_index;
605 		} else {
606 			ha->flags.rscn_queue_overflow = 1;
607 		}
608 
609 		atomic_set(&ha->loop_state, LOOP_UPDATE);
610 		atomic_set(&ha->loop_down_timer, 0);
611 		ha->flags.management_server_logged_in = 0;
612 
613 		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
614 		set_bit(RSCN_UPDATE, &ha->dpc_flags);
615 		break;
616 
617 	/* case MBA_RIO_RESPONSE: */
618 	case MBA_ZIO_RESPONSE:
619 		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
620 		    ha->host_no));
621 		DEBUG(printk(KERN_INFO
622 		    "scsi(%ld): [R|Z]IO update completion.\n",
623 		    ha->host_no));
624 
625 		if (IS_FWI2_CAPABLE(ha))
626 			qla24xx_process_response_queue(ha);
627 		else
628 			qla2x00_process_response_queue(ha);
629 		break;
630 
631 	case MBA_DISCARD_RND_FRAME:
632 		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
633 		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
634 		break;
635 
636 	case MBA_TRACE_NOTIFICATION:
637 		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
638 		    ha->host_no, mb[1], mb[2]));
639 		break;
640 	}
641 
642 	if (!ha->parent && ha->num_vhosts)
643 		qla2x00_alert_all_vps(ha, mb);
644 }
645 
646 static void
647 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
648 {
649 	fc_port_t *fcport = data;
650 
651 	if (fcport->ha->max_q_depth <= sdev->queue_depth)
652 		return;
653 
654 	if (sdev->ordered_tags)
655 		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
656 		    sdev->queue_depth + 1);
657 	else
658 		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
659 		    sdev->queue_depth + 1);
660 
661 	fcport->last_ramp_up = jiffies;
662 
663 	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
664 	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
665 	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
666 	    sdev->queue_depth));
667 }
668 
669 static void
670 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
671 {
672 	fc_port_t *fcport = data;
673 
674 	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
675 		return;
676 
677 	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
678 	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
679 	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
680 	    sdev->queue_depth));
681 }
682 
683 static inline void
684 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
685 {
686 	fc_port_t *fcport;
687 	struct scsi_device *sdev;
688 
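	/*
	 * Ramp the queue depth back up only if the device is below the
	 * adapter maximum and at least ql2xqfullrampup seconds have passed
	 * since both the last ramp-up and the last QUEUE FULL on this port.
	 */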
689 	sdev = sp->cmd->device;
690 	if (sdev->queue_depth >= ha->max_q_depth)
691 		return;
692 
693 	fcport = sp->fcport;
694 	if (time_before(jiffies,
695 	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
696 		return;
697 	if (time_before(jiffies,
698 	    fcport->last_queue_full + ql2xqfullrampup * HZ))
699 		return;
700 
701 	starget_for_each_device(sdev->sdev_target, fcport,
702 	    qla2x00_adjust_sdev_qdepth_up);
703 }
704 
705 /**
706  * qla2x00_process_completed_request() - Process a Fast Post response.
707  * @ha: SCSI driver HA context
708  * @index: SRB index
709  */
710 static void
711 qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
712 {
713 	srb_t *sp;
714 
715 	/* Validate handle. */
716 	if (index >= MAX_OUTSTANDING_COMMANDS) {
717 		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
718 		    ha->host_no, index));
719 		qla_printk(KERN_WARNING, ha,
720 		    "Invalid SCSI completion handle %d.\n", index);
721 
722 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
723 		return;
724 	}
725 
726 	sp = ha->outstanding_cmds[index];
727 	if (sp) {
728 		/* Free outstanding command slot. */
729 		ha->outstanding_cmds[index] = NULL;
730 
731 		CMD_COMPL_STATUS(sp->cmd) = 0L;
732 		CMD_SCSI_STATUS(sp->cmd) = 0L;
733 
734 		/* Save ISP completion status */
735 		sp->cmd->result = DID_OK << 16;
736 
737 		qla2x00_ramp_up_queue_depth(ha, sp);
738 		qla2x00_sp_compl(ha, sp);
739 	} else {
740 		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
741 		    ha->host_no));
742 		qla_printk(KERN_WARNING, ha,
743 		    "Invalid ISP SCSI completion handle\n");
744 
745 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
746 	}
747 }
748 
749 /**
750  * qla2x00_process_response_queue() - Process response queue entries.
751  * @ha: SCSI driver HA context
752  */
753 void
754 qla2x00_process_response_queue(struct scsi_qla_host *ha)
755 {
756 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
757 	sts_entry_t	*pkt;
758 	uint16_t        handle_cnt;
759 	uint16_t        cnt;
760 
761 	if (!ha->flags.online)
762 		return;
763 
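	/*
	 * Walk the response ring until an entry still stamped
	 * RESPONSE_PROCESSED is found.  Each consumed entry is re-stamped so
	 * it is not processed twice, and the out-pointer register is updated
	 * once after the loop.
	 */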
764 	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
765 		pkt = (sts_entry_t *)ha->response_ring_ptr;
766 
767 		ha->rsp_ring_index++;
768 		if (ha->rsp_ring_index == ha->response_q_length) {
769 			ha->rsp_ring_index = 0;
770 			ha->response_ring_ptr = ha->response_ring;
771 		} else {
772 			ha->response_ring_ptr++;
773 		}
774 
775 		if (pkt->entry_status != 0) {
776 			DEBUG3(printk(KERN_INFO
777 			    "scsi(%ld): Process error entry.\n", ha->host_no));
778 
779 			qla2x00_error_entry(ha, pkt);
780 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
781 			wmb();
782 			continue;
783 		}
784 
785 		switch (pkt->entry_type) {
786 		case STATUS_TYPE:
787 			qla2x00_status_entry(ha, pkt);
788 			break;
789 		case STATUS_TYPE_21:
790 			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
791 			for (cnt = 0; cnt < handle_cnt; cnt++) {
792 				qla2x00_process_completed_request(ha,
793 				    ((sts21_entry_t *)pkt)->handle[cnt]);
794 			}
795 			break;
796 		case STATUS_TYPE_22:
797 			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
798 			for (cnt = 0; cnt < handle_cnt; cnt++) {
799 				qla2x00_process_completed_request(ha,
800 				    ((sts22_entry_t *)pkt)->handle[cnt]);
801 			}
802 			break;
803 		case STATUS_CONT_TYPE:
804 			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
805 			break;
806 		case MS_IOCB_TYPE:
807 			qla2x00_ms_entry(ha, (ms_iocb_entry_t *)pkt);
808 			break;
809 		default:
810 			/* Type Not Supported. */
811 			DEBUG4(printk(KERN_WARNING
812 			    "scsi(%ld): Received unknown response pkt type %x "
813 			    "entry status=%x.\n",
814 			    ha->host_no, pkt->entry_type, pkt->entry_status));
815 			break;
816 		}
817 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
818 		wmb();
819 	}
820 
821 	/* Adjust ring index */
822 	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
823 }
824 
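/**
 * qla2x00_handle_sense() - Copy sense data for a CHECK CONDITION.
 * @sp: SRB for the completed command
 * @sense_data: sense bytes supplied by the status IOCB
 * @sense_len: total sense length reported by the firmware
 *
 * Only the first 32 bytes are copied here; any remainder is staged through
 * ha->status_srb and completed by qla2x00_status_cont_entry() as
 * status-continuation IOCBs arrive.
 */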
825 static inline void
826 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
827 {
828 	struct scsi_cmnd *cp = sp->cmd;
829 
830 	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
831 		sense_len = SCSI_SENSE_BUFFERSIZE;
832 
833 	CMD_ACTUAL_SNSLEN(cp) = sense_len;
834 	sp->request_sense_length = sense_len;
835 	sp->request_sense_ptr = cp->sense_buffer;
836 	if (sp->request_sense_length > 32)
837 		sense_len = 32;
838 
839 	memcpy(cp->sense_buffer, sense_data, sense_len);
840 
841 	sp->request_sense_ptr += sense_len;
842 	sp->request_sense_length -= sense_len;
843 	if (sp->request_sense_length != 0)
844 		sp->ha->status_srb = sp;
845 
846 	DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
847 	    "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel,
848 	    cp->device->id, cp->device->lun, cp, cp->serial_number));
849 	if (sense_len)
850 		DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
851 		    CMD_ACTUAL_SNSLEN(cp)));
852 }
853 
854 /**
855  * qla2x00_status_entry() - Process a Status IOCB entry.
856  * @ha: SCSI driver HA context
857  * @pkt: Entry pointer
858  */
859 static void
860 qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
861 {
862 	srb_t		*sp;
863 	fc_port_t	*fcport;
864 	struct scsi_cmnd *cp;
865 	sts_entry_t *sts;
866 	struct sts_entry_24xx *sts24;
867 	uint16_t	comp_status;
868 	uint16_t	scsi_status;
869 	uint8_t		lscsi_status;
870 	int32_t		resid;
871 	uint32_t	sense_len, rsp_info_len, resid_len, fw_resid_len;
872 	uint8_t		*rsp_info, *sense_data;
873 
874 	sts = (sts_entry_t *) pkt;
875 	sts24 = (struct sts_entry_24xx *) pkt;
876 	if (IS_FWI2_CAPABLE(ha)) {
877 		comp_status = le16_to_cpu(sts24->comp_status);
878 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
879 	} else {
880 		comp_status = le16_to_cpu(sts->comp_status);
881 		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
882 	}
883 
884 	/* Fast path completion. */
885 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
886 		qla2x00_process_completed_request(ha, sts->handle);
887 
888 		return;
889 	}
890 
891 	/* Validate handle. */
892 	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
893 		sp = ha->outstanding_cmds[sts->handle];
894 		ha->outstanding_cmds[sts->handle] = NULL;
895 	} else
896 		sp = NULL;
897 
898 	if (sp == NULL) {
899 		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
900 		    ha->host_no));
901 		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
902 
903 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
904 		qla2xxx_wake_dpc(ha);
905 		return;
906 	}
907 	cp = sp->cmd;
908 	if (cp == NULL) {
909 		DEBUG2(printk("scsi(%ld): Command already returned back to OS "
910 		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
911 		qla_printk(KERN_WARNING, ha,
912 		    "Command is NULL: already returned to OS (sp=%p)\n", sp);
913 
914 		return;
915 	}
916 
917 	lscsi_status = scsi_status & STATUS_MASK;
918 	CMD_ENTRY_STATUS(cp) = sts->entry_status;
919 	CMD_COMPL_STATUS(cp) = comp_status;
920 	CMD_SCSI_STATUS(cp) = scsi_status;
921 
922 	fcport = sp->fcport;
923 
924 	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
925 	if (IS_FWI2_CAPABLE(ha)) {
926 		sense_len = le32_to_cpu(sts24->sense_len);
927 		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
928 		resid_len = le32_to_cpu(sts24->rsp_residual_count);
929 		fw_resid_len = le32_to_cpu(sts24->residual_len);
930 		rsp_info = sts24->data;
931 		sense_data = sts24->data;
932 		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
933 	} else {
934 		sense_len = le16_to_cpu(sts->req_sense_length);
935 		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
936 		resid_len = le32_to_cpu(sts->residual_length);
937 		rsp_info = sts->rsp_info;
938 		sense_data = sts->req_sense_data;
939 	}
940 
941 	/* Check for any FCP transport errors. */
942 	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
943 		/* Sense data lies beyond any FCP RESPONSE data. */
944 		if (IS_FWI2_CAPABLE(ha))
945 			sense_data += rsp_info_len;
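		/*
		 * A non-zero RSP_CODE (byte 3 of the FCP response info)
		 * indicates a transport-level failure; fail the command as
		 * bus-busy so the midlayer retries it.
		 */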
946 		if (rsp_info_len > 3 && rsp_info[3]) {
947 			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
948 			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
949 			    "retrying command\n", ha->host_no,
950 			    cp->device->channel, cp->device->id,
951 			    cp->device->lun, rsp_info_len, rsp_info[0],
952 			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
953 			    rsp_info[5], rsp_info[6], rsp_info[7]));
954 
955 			cp->result = DID_BUS_BUSY << 16;
956 			qla2x00_sp_compl(ha, sp);
957 			return;
958 		}
959 	}
960 
961 	/* Check for overrun. */
962 	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
963 	    scsi_status & SS_RESIDUAL_OVER)
964 		comp_status = CS_DATA_OVERRUN;
965 
966 	/*
967 	 * Based on Host and scsi status generate status code for Linux
968 	 */
969 	switch (comp_status) {
970 	case CS_COMPLETE:
971 	case CS_QUEUE_FULL:
972 		if (scsi_status == 0) {
973 			cp->result = DID_OK << 16;
974 			break;
975 		}
976 		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
977 			resid = resid_len;
978 			scsi_set_resid(cp, resid);
979 			CMD_RESID_LEN(cp) = resid;
980 
981 			if (!lscsi_status &&
982 			    ((unsigned)(scsi_bufflen(cp) - resid) <
983 			     cp->underflow)) {
984 				qla_printk(KERN_INFO, ha,
985 					   "scsi(%ld:%d:%d:%d): Mid-layer underflow "
986 					   "detected (%x of %x bytes)...returning "
987 					   "error status.\n", ha->host_no,
988 					   cp->device->channel, cp->device->id,
989 					   cp->device->lun, resid,
990 					   scsi_bufflen(cp));
991 
992 				cp->result = DID_ERROR << 16;
993 				break;
994 			}
995 		}
996 		cp->result = DID_OK << 16 | lscsi_status;
997 
998 		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
999 			DEBUG2(printk(KERN_INFO
1000 			    "scsi(%ld): QUEUE FULL status detected "
1001 			    "0x%x-0x%x.\n", ha->host_no, comp_status,
1002 			    scsi_status));
1003 
1004 			/* Adjust queue depth for all luns on the port. */
1005 			fcport->last_queue_full = jiffies;
1006 			starget_for_each_device(cp->device->sdev_target,
1007 			    fcport, qla2x00_adjust_sdev_qdepth_down);
1008 			break;
1009 		}
1010 		if (lscsi_status != SS_CHECK_CONDITION)
1011 			break;
1012 
1013 		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1014 		if (!(scsi_status & SS_SENSE_LEN_VALID))
1015 			break;
1016 
1017 		qla2x00_handle_sense(sp, sense_data, sense_len);
1018 		break;
1019 
1020 	case CS_DATA_UNDERRUN:
1021 		resid = resid_len;
1022 		/* Use F/W calculated residual length. */
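		/*
		 * On FWI-2 adapters, if the IOCB residual disagrees with the
		 * firmware-computed residual while RESIDUAL_UNDER is set, the
		 * reported SCSI status is not trusted: clear the under flag
		 * and lscsi_status and use the firmware value instead.
		 */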
1023 		if (IS_FWI2_CAPABLE(ha)) {
1024 			if (scsi_status & SS_RESIDUAL_UNDER &&
1025 			    resid != fw_resid_len) {
1026 				scsi_status &= ~SS_RESIDUAL_UNDER;
1027 				lscsi_status = 0;
1028 			}
1029 			resid = fw_resid_len;
1030 		}
1031 
1032 		if (scsi_status & SS_RESIDUAL_UNDER) {
1033 			scsi_set_resid(cp, resid);
1034 			CMD_RESID_LEN(cp) = resid;
1035 		} else {
1036 			DEBUG2(printk(KERN_INFO
1037 			    "scsi(%ld:%d:%d) UNDERRUN status detected "
1038 			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1039 			    "os_underflow=0x%x\n", ha->host_no,
1040 			    cp->device->id, cp->device->lun, comp_status,
1041 			    scsi_status, resid_len, resid, cp->cmnd[0],
1042 			    cp->underflow));
1043 
1044 		}
1045 
1046 		/*
1047 		 * Check to see if SCSI Status is non zero. If so report SCSI
1048 		 * Status.
1049 		 */
1050 		if (lscsi_status != 0) {
1051 			cp->result = DID_OK << 16 | lscsi_status;
1052 
1053 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1054 				DEBUG2(printk(KERN_INFO
1055 				    "scsi(%ld): QUEUE FULL status detected "
1056 				    "0x%x-0x%x.\n", ha->host_no, comp_status,
1057 				    scsi_status));
1058 
1059 				/*
1060 				 * Adjust queue depth for all luns on the
1061 				 * port.
1062 				 */
1063 				fcport->last_queue_full = jiffies;
1064 				starget_for_each_device(
1065 				    cp->device->sdev_target, fcport,
1066 				    qla2x00_adjust_sdev_qdepth_down);
1067 				break;
1068 			}
1069 			if (lscsi_status != SS_CHECK_CONDITION)
1070 				break;
1071 
1072 			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1073 			if (!(scsi_status & SS_SENSE_LEN_VALID))
1074 				break;
1075 
1076 			qla2x00_handle_sense(sp, sense_data, sense_len);
1077 
1078 			/*
1079 			 * In case of a Underrun condition, set both the lscsi
1080 			 * status and the completion status to appropriate
1081 			 * values.
1082 			 */
1083 			if (resid &&
1084 			    ((unsigned)(scsi_bufflen(cp) - resid) <
1085 			     cp->underflow)) {
1086 				DEBUG2(qla_printk(KERN_INFO, ha,
1087 				    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1088 				    "detected (%x of %x bytes)...returning "
1089 				    "error status.\n", ha->host_no,
1090 				    cp->device->channel, cp->device->id,
1091 				    cp->device->lun, resid,
1092 				    scsi_bufflen(cp)));
1093 
1094 				cp->result = DID_ERROR << 16 | lscsi_status;
1095 			}
1096 		} else {
1097 			/*
1098 			 * If RISC reports underrun and target does not report
1099 			 * it then we must have a lost frame, so tell upper
1100 			 * layer to retry it by reporting a bus busy.
1101 			 */
1102 			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1103 				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1104 					      "frame(s) detected (%x of %x bytes)..."
1105 					      "retrying command.\n", ha->host_no,
1106 					      cp->device->channel, cp->device->id,
1107 					      cp->device->lun, resid,
1108 					      scsi_bufflen(cp)));
1109 
1110 				cp->result = DID_BUS_BUSY << 16;
1111 				break;
1112 			}
1113 
1114 			/* Handle mid-layer underflow */
1115 			if ((unsigned)(scsi_bufflen(cp) - resid) <
1116 			    cp->underflow) {
1117 				qla_printk(KERN_INFO, ha,
1118 					   "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1119 					   "detected (%x of %x bytes)...returning "
1120 					   "error status.\n", ha->host_no,
1121 					   cp->device->channel, cp->device->id,
1122 					   cp->device->lun, resid,
1123 					   scsi_bufflen(cp));
1124 
1125 				cp->result = DID_ERROR << 16;
1126 				break;
1127 			}
1128 
1129 			/* Everybody online, looking good... */
1130 			cp->result = DID_OK << 16;
1131 		}
1132 		break;
1133 
1134 	case CS_DATA_OVERRUN:
1135 		DEBUG2(printk(KERN_INFO
1136 		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1137 		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
1138 		    scsi_status));
1139 		DEBUG2(printk(KERN_INFO
1140 		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1141 		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1142 		    cp->cmnd[4], cp->cmnd[5]));
1143 		DEBUG2(printk(KERN_INFO
1144 		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1145 		    "status!\n",
1146 		    cp->serial_number, scsi_bufflen(cp), resid_len));
1147 
1148 		cp->result = DID_ERROR << 16;
1149 		break;
1150 
1151 	case CS_PORT_LOGGED_OUT:
1152 	case CS_PORT_CONFIG_CHG:
1153 	case CS_PORT_BUSY:
1154 	case CS_INCOMPLETE:
1155 	case CS_PORT_UNAVAILABLE:
1156 		/*
1157 		 * If the port is in Target Down state, return all IOs for this
1158 		 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1159 		 * retry_queue.
1160 		 */
1161 		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1162 		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
1163 		    ha->host_no, cp->device->id, cp->device->lun,
1164 		    cp->serial_number, comp_status,
1165 		    atomic_read(&fcport->state)));
1166 
1167 		cp->result = DID_BUS_BUSY << 16;
1168 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
1169 			qla2x00_mark_device_lost(ha, fcport, 1, 1);
1170 		}
1171 		break;
1172 
1173 	case CS_RESET:
1174 		DEBUG2(printk(KERN_INFO
1175 		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1176 		    ha->host_no, comp_status, scsi_status));
1177 
1178 		cp->result = DID_RESET << 16;
1179 		break;
1180 
1181 	case CS_ABORTED:
1182 		/*
1183 		 * hv2.19.12 - DID_ABORT does not retry the request; if we
1184 		 * aborted this request then it is an abort, otherwise it must
1185 		 * be a reset.
1186 		 */
1187 		DEBUG2(printk(KERN_INFO
1188 		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1189 		    ha->host_no, comp_status, scsi_status));
1190 
1191 		cp->result = DID_RESET << 16;
1192 		break;
1193 
1194 	case CS_TIMEOUT:
1195 		cp->result = DID_BUS_BUSY << 16;
1196 
1197 		if (IS_FWI2_CAPABLE(ha)) {
1198 			DEBUG2(printk(KERN_INFO
1199 			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1200 			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
1201 			    cp->device->id, cp->device->lun, comp_status,
1202 			    scsi_status));
1203 			break;
1204 		}
1205 		DEBUG2(printk(KERN_INFO
1206 		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1207 		    "sflags=%x.\n", ha->host_no, cp->device->channel,
1208 		    cp->device->id, cp->device->lun, comp_status, scsi_status,
1209 		    le16_to_cpu(sts->status_flags)));
1210 
1211 		/* Check to see if logout occurred. */
1212 		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1213 			qla2x00_mark_device_lost(ha, fcport, 1, 1);
1214 		break;
1215 
1216 	default:
1217 		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1218 		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
1219 		qla_printk(KERN_INFO, ha,
1220 		    "Unknown status detected 0x%x-0x%x.\n",
1221 		    comp_status, scsi_status);
1222 
1223 		cp->result = DID_ERROR << 16;
1224 		break;
1225 	}
1226 
1227 	/* Place command on done queue. */
1228 	if (ha->status_srb == NULL)
1229 		qla2x00_sp_compl(ha, sp);
1230 }
1231 
1232 /**
1233  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
1234  * @ha: SCSI driver HA context
1235  * @pkt: Entry pointer
1236  *
1237  * Extended sense data.
1238  */
1239 static void
1240 qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1241 {
1242 	uint8_t		sense_sz = 0;
1243 	srb_t		*sp = ha->status_srb;
1244 	struct scsi_cmnd *cp;
1245 
1246 	if (sp != NULL && sp->request_sense_length != 0) {
1247 		cp = sp->cmd;
1248 		if (cp == NULL) {
1249 			DEBUG2(printk("%s(): Cmd already returned back to OS "
1250 			    "sp=%p.\n", __func__, sp));
1251 			qla_printk(KERN_INFO, ha,
1252 			    "cmd is NULL: already returned to OS (sp=%p)\n",
1253 			    sp);
1254 
1255 			ha->status_srb = NULL;
1256 			return;
1257 		}
1258 
1259 		if (sp->request_sense_length > sizeof(pkt->data)) {
1260 			sense_sz = sizeof(pkt->data);
1261 		} else {
1262 			sense_sz = sp->request_sense_length;
1263 		}
1264 
1265 		/* Move sense data. */
1266 		if (IS_FWI2_CAPABLE(ha))
1267 			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1268 		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1269 		DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1270 
1271 		sp->request_sense_ptr += sense_sz;
1272 		sp->request_sense_length -= sense_sz;
1273 
1274 		/* Place command on done queue. */
1275 		if (sp->request_sense_length == 0) {
1276 			ha->status_srb = NULL;
1277 			qla2x00_sp_compl(ha, sp);
1278 		}
1279 	}
1280 }
1281 
1282 /**
1283  * qla2x00_error_entry() - Process an error entry.
1284  * @ha: SCSI driver HA context
1285  * @pkt: Entry pointer
1286  */
1287 static void
1288 qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1289 {
1290 	srb_t *sp;
1291 
1292 #if defined(QL_DEBUG_LEVEL_2)
1293 	if (pkt->entry_status & RF_INV_E_ORDER)
1294 		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1295 	else if (pkt->entry_status & RF_INV_E_COUNT)
1296 		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1297 	else if (pkt->entry_status & RF_INV_E_PARAM)
1298 		qla_printk(KERN_ERR, ha,
1299 		    "%s: Invalid Entry Parameter\n", __func__);
1300 	else if (pkt->entry_status & RF_INV_E_TYPE)
1301 		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1302 	else if (pkt->entry_status & RF_BUSY)
1303 		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1304 	else
1305 		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1306 #endif
1307 
1308 	/* Validate handle. */
1309 	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1310 		sp = ha->outstanding_cmds[pkt->handle];
1311 	else
1312 		sp = NULL;
1313 
1314 	if (sp) {
1315 		/* Free outstanding command slot. */
1316 		ha->outstanding_cmds[pkt->handle] = NULL;
1317 
1318 		/* Bad payload or header */
1319 		if (pkt->entry_status &
1320 		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
1321 		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1322 			sp->cmd->result = DID_ERROR << 16;
1323 		} else if (pkt->entry_status & RF_BUSY) {
1324 			sp->cmd->result = DID_BUS_BUSY << 16;
1325 		} else {
1326 			sp->cmd->result = DID_ERROR << 16;
1327 		}
1328 		qla2x00_sp_compl(ha, sp);
1329 
1330 	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1331 	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1332 		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1333 		    ha->host_no));
1334 		qla_printk(KERN_WARNING, ha,
1335 		    "Error entry - invalid handle\n");
1336 
1337 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1338 		qla2xxx_wake_dpc(ha);
1339 	}
1340 }
1341 
1342 /**
1343  * qla2x00_ms_entry() - Process a Management Server entry.
1344  * @ha: SCSI driver HA context
1345  * @pkt: Entry pointer
1346  */
1347 static void
1348 qla2x00_ms_entry(scsi_qla_host_t *ha, ms_iocb_entry_t *pkt)
1349 {
1350 	srb_t          *sp;
1351 
1352 	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
1353 	    __func__, ha->host_no, pkt, pkt->handle1));
1354 
1355 	/* Validate handle. */
1356 	if (pkt->handle1 < MAX_OUTSTANDING_COMMANDS)
1357 		sp = ha->outstanding_cmds[pkt->handle1];
1358 	else
1359 		sp = NULL;
1360 
1361 	if (sp == NULL) {
1362 		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
1363 		    ha->host_no));
1364 		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle\n");
1365 
1366 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1367 		return;
1368 	}
1369 
1370 	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->status);
1371 	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;
1372 
1373 	/* Free outstanding command slot. */
1374 	ha->outstanding_cmds[pkt->handle1] = NULL;
1375 
1376 	qla2x00_sp_compl(ha, sp);
1377 }
1378 
1379 
1380 /**
1381  * qla24xx_mbx_completion() - Process mailbox command completions.
1382  * @ha: SCSI driver HA context
1383  * @mb0: Mailbox0 register
1384  */
1385 static void
1386 qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1387 {
1388 	uint16_t	cnt;
1389 	uint16_t __iomem *wptr;
1390 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1391 
1392 	/* Load return mailbox registers. */
1393 	ha->flags.mbox_int = 1;
1394 	ha->mailbox_out[0] = mb0;
1395 	wptr = (uint16_t __iomem *)&reg->mailbox1;
1396 
1397 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1398 		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1399 		wptr++;
1400 	}
1401 
1402 	if (ha->mcp) {
1403 		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1404 		    __func__, ha->host_no, ha->mcp->mb[0]));
1405 	} else {
1406 		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1407 		    __func__, ha->host_no));
1408 	}
1409 }
1410 
1411 /**
1412  * qla24xx_process_response_queue() - Process response queue entries.
1413  * @ha: SCSI driver HA context
1414  */
1415 void
1416 qla24xx_process_response_queue(struct scsi_qla_host *ha)
1417 {
1418 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1419 	struct sts_entry_24xx *pkt;
1420 
1421 	if (!ha->flags.online)
1422 		return;
1423 
1424 	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
1425 		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;
1426 
1427 		ha->rsp_ring_index++;
1428 		if (ha->rsp_ring_index == ha->response_q_length) {
1429 			ha->rsp_ring_index = 0;
1430 			ha->response_ring_ptr = ha->response_ring;
1431 		} else {
1432 			ha->response_ring_ptr++;
1433 		}
1434 
1435 		if (pkt->entry_status != 0) {
1436 			DEBUG3(printk(KERN_INFO
1437 			    "scsi(%ld): Process error entry.\n", ha->host_no));
1438 
1439 			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
1440 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1441 			wmb();
1442 			continue;
1443 		}
1444 
1445 		switch (pkt->entry_type) {
1446 		case STATUS_TYPE:
1447 			qla2x00_status_entry(ha, pkt);
1448 			break;
1449 		case STATUS_CONT_TYPE:
1450 			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
1451 			break;
1452 		case MS_IOCB_TYPE:
1453 			qla24xx_ms_entry(ha, (struct ct_entry_24xx *)pkt);
1454 			break;
1455 		case VP_RPT_ID_IOCB_TYPE:
1456 			qla24xx_report_id_acquisition(ha,
1457 			    (struct vp_rpt_id_entry_24xx *)pkt);
1458 			break;
1459 		default:
1460 			/* Type Not Supported. */
1461 			DEBUG4(printk(KERN_WARNING
1462 			    "scsi(%ld): Received unknown response pkt type %x "
1463 			    "entry status=%x.\n",
1464 			    ha->host_no, pkt->entry_type, pkt->entry_status));
1465 			break;
1466 		}
1467 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1468 		wmb();
1469 	}
1470 
1471 	/* Adjust ring index */
1472 	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
1473 }
1474 
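/*
 * ISP25xx only: select the 0x7C00 register window and poll for the RISC to
 * acknowledge window 0x0001 (falling back to window 0x0003).  If bit 3 of
 * iobase_c8 is set, log the additional 0x55AA code.  The window is restored
 * to 0 before returning.
 */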
1475 static void
1476 qla2xxx_check_risc_status(scsi_qla_host_t *ha)
1477 {
1478 	int rval;
1479 	uint32_t cnt;
1480 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1481 
1482 	if (!IS_QLA25XX(ha))
1483 		return;
1484 
1485 	rval = QLA_SUCCESS;
1486 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1487 	RD_REG_DWORD(&reg->iobase_addr);
1488 	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1489 	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1490 	    rval == QLA_SUCCESS; cnt--) {
1491 		if (cnt) {
1492 			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1493 			udelay(10);
1494 		} else
1495 			rval = QLA_FUNCTION_TIMEOUT;
1496 	}
1497 	if (rval == QLA_SUCCESS)
1498 		goto next_test;
1499 
1500 	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1501 	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1502 	    rval == QLA_SUCCESS; cnt--) {
1503 		if (cnt) {
1504 			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1505 			udelay(10);
1506 		} else
1507 			rval = QLA_FUNCTION_TIMEOUT;
1508 	}
1509 	if (rval != QLA_SUCCESS)
1510 		goto done;
1511 
1512 next_test:
1513 	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1514 		qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1515 
1516 done:
1517 	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1518 	RD_REG_DWORD(&reg->iobase_window);
1519 }
1520 
1521 /**
1522  * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
1523  * @irq: Interrupt number
1524  * @dev_id: SCSI driver HA context
1525  *
1526  * Called by system whenever the host adapter generates an interrupt.
1527  *
1528  * Returns handled flag.
1529  */
1530 irqreturn_t
1531 qla24xx_intr_handler(int irq, void *dev_id)
1532 {
1533 	scsi_qla_host_t	*ha;
1534 	struct device_reg_24xx __iomem *reg;
1535 	int		status;
1536 	unsigned long	flags;
1537 	unsigned long	iter;
1538 	uint32_t	stat;
1539 	uint32_t	hccr;
1540 	uint16_t	mb[4];
1541 
1542 	ha = (scsi_qla_host_t *) dev_id;
1543 	if (!ha) {
1544 		printk(KERN_INFO
1545 		    "%s(): NULL host pointer\n", __func__);
1546 		return IRQ_NONE;
1547 	}
1548 
1549 	reg = &ha->iobase->isp24;
1550 	status = 0;
1551 
1552 	spin_lock_irqsave(&ha->hardware_lock, flags);
1553 	for (iter = 50; iter--; ) {
1554 		stat = RD_REG_DWORD(&reg->host_status);
1555 		if (stat & HSRX_RISC_PAUSED) {
1556 			if (pci_channel_offline(ha->pdev))
1557 				break;
1558 
1559 			hccr = RD_REG_DWORD(&reg->hccr);
1560 
1561 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1562 			    "Dumping firmware!\n", hccr);
1563 
1564 			qla2xxx_check_risc_status(ha);
1565 
1566 			ha->isp_ops->fw_dump(ha, 1);
1567 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1568 			break;
1569 		} else if ((stat & HSRX_RISC_INT) == 0)
1570 			break;
1571 
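		/*
		 * Low byte of host_status: 0x01/0x02/0x10/0x11 mailbox
		 * command completion, 0x12 asynchronous event, 0x13 response
		 * queue update.  Mailbox 0 is carried in the upper word of
		 * host_status; the remaining mailboxes are read directly.
		 */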
1572 		switch (stat & 0xff) {
1573 		case 0x1:
1574 		case 0x2:
1575 		case 0x10:
1576 		case 0x11:
1577 			qla24xx_mbx_completion(ha, MSW(stat));
1578 			status |= MBX_INTERRUPT;
1579 
1580 			break;
1581 		case 0x12:
1582 			mb[0] = MSW(stat);
1583 			mb[1] = RD_REG_WORD(&reg->mailbox1);
1584 			mb[2] = RD_REG_WORD(&reg->mailbox2);
1585 			mb[3] = RD_REG_WORD(&reg->mailbox3);
1586 			qla2x00_async_event(ha, mb);
1587 			break;
1588 		case 0x13:
1589 			qla24xx_process_response_queue(ha);
1590 			break;
1591 		default:
1592 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1593 			    "(%d).\n",
1594 			    ha->host_no, stat & 0xff));
1595 			break;
1596 		}
1597 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1598 		RD_REG_DWORD_RELAXED(&reg->hccr);
1599 	}
1600 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1601 
1602 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1603 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1604 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1605 		complete(&ha->mbx_intr_comp);
1606 	}
1607 
1608 	return IRQ_HANDLED;
1609 }
1610 
1611 /**
1612  * qla24xx_ms_entry() - Process a Management Server entry.
1613  * @ha: SCSI driver HA context
1614  * @pkt: Entry pointer
1615  */
1616 static void
1617 qla24xx_ms_entry(scsi_qla_host_t *ha, struct ct_entry_24xx *pkt)
1618 {
1619 	srb_t          *sp;
1620 
1621 	DEBUG3(printk("%s(%ld): pkt=%p pkthandle=%d.\n",
1622 	    __func__, ha->host_no, pkt, pkt->handle));
1623 
1624 	DEBUG9(printk("%s: ct pkt dump:\n", __func__));
1625 	DEBUG9(qla2x00_dump_buffer((void *)pkt, sizeof(struct ct_entry_24xx)));
1626 
1627 	/* Validate handle. */
1628 	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1629 		sp = ha->outstanding_cmds[pkt->handle];
1630 	else
1631 		sp = NULL;
1632 
1633 	if (sp == NULL) {
1634 		DEBUG2(printk("scsi(%ld): MS entry - invalid handle\n",
1635 		    ha->host_no));
1636 		DEBUG10(printk("scsi(%ld): MS entry - invalid handle\n",
1637 		    ha->host_no));
1638 		qla_printk(KERN_WARNING, ha, "MS entry - invalid handle %d\n",
1639 		    pkt->handle);
1640 
1641 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1642 		return;
1643 	}
1644 
1645 	CMD_COMPL_STATUS(sp->cmd) = le16_to_cpu(pkt->comp_status);
1646 	CMD_ENTRY_STATUS(sp->cmd) = pkt->entry_status;
1647 
1648 	/* Free outstanding command slot. */
1649 	ha->outstanding_cmds[pkt->handle] = NULL;
1650 
1651 	qla2x00_sp_compl(ha, sp);
1652 }
1653 
1654 static irqreturn_t
1655 qla24xx_msix_rsp_q(int irq, void *dev_id)
1656 {
1657 	scsi_qla_host_t	*ha;
1658 	struct device_reg_24xx __iomem *reg;
1659 	unsigned long flags;
1660 
1661 	ha = dev_id;
1662 	reg = &ha->iobase->isp24;
1663 
1664 	spin_lock_irqsave(&ha->hardware_lock, flags);
1665 
1666 	qla24xx_process_response_queue(ha);
1667 
1668 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1669 
1670 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1671 
1672 	return IRQ_HANDLED;
1673 }
1674 
1675 static irqreturn_t
1676 qla24xx_msix_default(int irq, void *dev_id)
1677 {
1678 	scsi_qla_host_t	*ha;
1679 	struct device_reg_24xx __iomem *reg;
1680 	int		status;
1681 	unsigned long	flags;
1682 	uint32_t	stat;
1683 	uint32_t	hccr;
1684 	uint16_t	mb[4];
1685 
1686 	ha = dev_id;
1687 	reg = &ha->iobase->isp24;
1688 	status = 0;
1689 
1690 	spin_lock_irqsave(&ha->hardware_lock, flags);
1691 	do {
1692 		stat = RD_REG_DWORD(&reg->host_status);
1693 		if (stat & HSRX_RISC_PAUSED) {
1694 			if (pci_channel_offline(ha->pdev))
1695 				break;
1696 
1697 			hccr = RD_REG_DWORD(&reg->hccr);
1698 
1699 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1700 			    "Dumping firmware!\n", hccr);
1701 
1702 			qla2xxx_check_risc_status(ha);
1703 
1704 			ha->isp_ops->fw_dump(ha, 1);
1705 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1706 			break;
1707 		} else if ((stat & HSRX_RISC_INT) == 0)
1708 			break;
1709 
1710 		switch (stat & 0xff) {
1711 		case 0x1:
1712 		case 0x2:
1713 		case 0x10:
1714 		case 0x11:
1715 			qla24xx_mbx_completion(ha, MSW(stat));
1716 			status |= MBX_INTERRUPT;
1717 
1718 			break;
1719 		case 0x12:
1720 			mb[0] = MSW(stat);
1721 			mb[1] = RD_REG_WORD(&reg->mailbox1);
1722 			mb[2] = RD_REG_WORD(&reg->mailbox2);
1723 			mb[3] = RD_REG_WORD(&reg->mailbox3);
1724 			qla2x00_async_event(ha, mb);
1725 			break;
1726 		case 0x13:
1727 			qla24xx_process_response_queue(ha);
1728 			break;
1729 		default:
1730 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1731 			    "(%d).\n",
1732 			    ha->host_no, stat & 0xff));
1733 			break;
1734 		}
1735 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1736 	} while (0);
1737 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1738 
1739 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1740 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1741 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1742 		complete(&ha->mbx_intr_comp);
1743 	}
1744 
1745 	return IRQ_HANDLED;
1746 }
1747 
1748 /* Interrupt handling helpers. */
1749 
1750 struct qla_init_msix_entry {
1751 	uint16_t entry;
1752 	uint16_t index;
1753 	const char *name;
1754 	irq_handler_t handler;
1755 };
1756 
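/*
 * Two MSI-X vectors are used: the default vector services mailbox
 * completions and asynchronous events (and can also drain the response
 * queue), while the rsp_q vector only processes response-queue entries.
 */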
1757 static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
1758 	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
1759 		"qla2xxx (default)", qla24xx_msix_default },
1760 
1761 	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
1762 		"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1763 };
1764 
1765 static void
1766 qla24xx_disable_msix(scsi_qla_host_t *ha)
1767 {
1768 	int i;
1769 	struct qla_msix_entry *qentry;
1770 
1771 	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1772 		qentry = &ha->msix_entries[imsix_entries[i].index];
1773 		if (qentry->have_irq)
1774 			free_irq(qentry->msix_vector, ha);
1775 	}
1776 	pci_disable_msix(ha->pdev);
1777 }
1778 
1779 static int
1780 qla24xx_enable_msix(scsi_qla_host_t *ha)
1781 {
1782 	int i, ret;
1783 	struct msix_entry entries[QLA_MSIX_ENTRIES];
1784 	struct qla_msix_entry *qentry;
1785 
1786 	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
1787 		entries[i].entry = imsix_entries[i].entry;
1788 
1789 	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
1790 	if (ret) {
1791 		qla_printk(KERN_WARNING, ha,
1792 		    "MSI-X: Failed to enable support -- %d/%d\n",
1793 		    QLA_MSIX_ENTRIES, ret);
1794 		goto msix_out;
1795 	}
1796 	ha->flags.msix_enabled = 1;
1797 
1798 	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1799 		qentry = &ha->msix_entries[imsix_entries[i].index];
1800 		qentry->msix_vector = entries[i].vector;
1801 		qentry->msix_entry = entries[i].entry;
1802 		qentry->have_irq = 0;
1803 		ret = request_irq(qentry->msix_vector,
1804 		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
1805 		if (ret) {
1806 			qla_printk(KERN_WARNING, ha,
1807 			    "MSI-X: Unable to register handler -- %x/%d.\n",
1808 			    imsix_entries[i].index, ret);
1809 			qla24xx_disable_msix(ha);
1810 			goto msix_out;
1811 		}
1812 		qentry->have_irq = 1;
1813 	}
1814 
1815 msix_out:
1816 	return ret;
1817 }
1818 
1819 int
1820 qla2x00_request_irqs(scsi_qla_host_t *ha)
1821 {
1822 	int ret;
1823 	device_reg_t __iomem *reg = ha->iobase;
1824 	unsigned long flags;
1825 
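	/*
	 * Interrupt setup order: try MSI-X on MSI-X-capable ISPs, fall back
	 * to MSI where supported, and finally to a shared INTx line.  Once a
	 * mechanism is in place, clear any latched RISC/host interrupts
	 * before enabling interrupts.
	 */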
1826 	/* If possible, enable MSI-X. */
1827 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha))
1828 		goto skip_msix;
1829 
1830 	if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
1831 	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1832 		DEBUG2(qla_printk(KERN_WARNING, ha,
1833 		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1834 		    ha->chip_revision, ha->fw_attributes));
1835 
1836 		goto skip_msix;
1837 	}
1838 
1839 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1840 	    (ha->pdev->subsystem_device == 0x7040 ||
1841 		ha->pdev->subsystem_device == 0x7041 ||
1842 		ha->pdev->subsystem_device == 0x1705)) {
1843 		DEBUG2(qla_printk(KERN_WARNING, ha,
1844 		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1845 		    ha->pdev->subsystem_vendor,
1846 		    ha->pdev->subsystem_device));
1847 
1848 		goto skip_msi;
1849 	}
1850 
1851 	ret = qla24xx_enable_msix(ha);
1852 	if (!ret) {
1853 		DEBUG2(qla_printk(KERN_INFO, ha,
1854 		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1855 		    ha->fw_attributes));
1856 		goto clear_risc_ints;
1857 	}
1858 	qla_printk(KERN_WARNING, ha,
1859 	    "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1860 skip_msix:
1861 
1862 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha))
1863 		goto skip_msi;
1864 
1865 	ret = pci_enable_msi(ha->pdev);
1866 	if (!ret) {
1867 		DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1868 		ha->flags.msi_enabled = 1;
1869 	}
1870 skip_msi:
1871 
1872 	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1873 	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1874 	if (ret) {
1875 		qla_printk(KERN_WARNING, ha,
1876 		    "Failed to reserve interrupt %d already in use.\n",
1877 		    ha->pdev->irq);
1878 		goto fail;
1879 	}
1880 	ha->flags.inta_enabled = 1;
1881 	ha->host->irq = ha->pdev->irq;
1882 clear_risc_ints:
1883 
1884 	ha->isp_ops->disable_intrs(ha);
1885 	spin_lock_irqsave(&ha->hardware_lock, flags);
1886 	if (IS_FWI2_CAPABLE(ha)) {
1887 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1888 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1889 	} else {
1890 		WRT_REG_WORD(&reg->isp.semaphore, 0);
1891 		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1892 		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1893 	}
1894 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1895 	ha->isp_ops->enable_intrs(ha);
1896 
1897 fail:
1898 	return ret;
1899 }
1900 
1901 void
1902 qla2x00_free_irqs(scsi_qla_host_t *ha)
1903 {
1904 
1905 	if (ha->flags.msix_enabled)
1906 		qla24xx_disable_msix(ha);
1907 	else if (ha->flags.inta_enabled) {
1908 		free_irq(ha->host->irq, ha);
1909 		pci_disable_msi(ha->pdev);
1910 	}
1911 }
1912