xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_isr.c (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11 
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
14 static void qla2x00_status_entry(scsi_qla_host_t *, void *);
15 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
16 static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
17 
18 /**
19  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
20  * @irq: interrupt number
21  * @dev_id: SCSI driver HA context
22  *
23  * Called by system whenever the host adapter generates an interrupt.
24  *
25  * Returns handled flag.
26  */
27 irqreturn_t
28 qla2100_intr_handler(int irq, void *dev_id)
29 {
30 	scsi_qla_host_t	*ha;
31 	struct device_reg_2xxx __iomem *reg;
32 	int		status;
33 	unsigned long	iter;
34 	uint16_t	hccr;
35 	uint16_t	mb[4];
36 
37 	ha = (scsi_qla_host_t *) dev_id;
38 	if (!ha) {
39 		printk(KERN_INFO
40 		    "%s(): NULL host pointer\n", __func__);
41 		return (IRQ_NONE);
42 	}
43 
44 	reg = &ha->iobase->isp;
45 	status = 0;
46 
47 	spin_lock(&ha->hardware_lock);
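	/*
	 * Drain pending RISC events while holding the hardware lock.  The
	 * fixed 50-iteration cap bounds how much work a single invocation
	 * may do (presumably so a misbehaving adapter cannot keep the
	 * handler spinning forever).
	 */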
48 	for (iter = 50; iter--; ) {
49 		hccr = RD_REG_WORD(&reg->hccr);
50 		if (hccr & HCCR_RISC_PAUSE) {
51 			if (pci_channel_offline(ha->pdev))
52 				break;
53 
54 			/*
55 			 * Issue a "HARD" reset in order for the RISC interrupt
56 			 * bit to be cleared.  Schedule a big hammer to get
57 			 * out of the RISC PAUSED state.
58 			 */
59 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
60 			RD_REG_WORD(&reg->hccr);
61 
62 			ha->isp_ops->fw_dump(ha, 1);
63 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
64 			break;
65 		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
66 			break;
67 
68 		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
69 			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
70 			RD_REG_WORD(&reg->hccr);
71 
72 			/* Get mailbox data. */
73 			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
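			/*
			 * Mailbox 0 indicates what the RISC is reporting:
			 * values in 0x4000-0x7fff are mailbox-command status
			 * codes (MBS_*), values in 0x8000-0xbfff are
			 * asynchronous event codes (MBA_*).
			 */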
74 			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
75 				qla2x00_mbx_completion(ha, mb[0]);
76 				status |= MBX_INTERRUPT;
77 			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
78 				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
79 				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
80 				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
81 				qla2x00_async_event(ha, mb);
82 			} else {
83 				/*EMPTY*/
84 				DEBUG2(printk("scsi(%ld): Unrecognized "
85 				    "interrupt type (%d).\n",
86 				    ha->host_no, mb[0]));
87 			}
88 			/* Release mailbox registers. */
89 			WRT_REG_WORD(&reg->semaphore, 0);
90 			RD_REG_WORD(&reg->semaphore);
91 		} else {
92 			qla2x00_process_response_queue(ha);
93 
94 			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
95 			RD_REG_WORD(&reg->hccr);
96 		}
97 	}
98 	spin_unlock(&ha->hardware_lock);
99 
100 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
101 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
102 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
103 		complete(&ha->mbx_intr_comp);
104 	}
105 
106 	return (IRQ_HANDLED);
107 }
108 
109 /**
110  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
111  * @irq: interrupt number
112  * @dev_id: SCSI driver HA context
113  *
114  * Called by system whenever the host adapter generates an interrupt.
115  *
116  * Returns handled flag.
117  */
118 irqreturn_t
119 qla2300_intr_handler(int irq, void *dev_id)
120 {
121 	scsi_qla_host_t	*ha;
122 	struct device_reg_2xxx __iomem *reg;
123 	int		status;
124 	unsigned long	iter;
125 	uint32_t	stat;
126 	uint16_t	hccr;
127 	uint16_t	mb[4];
128 
129 	ha = (scsi_qla_host_t *) dev_id;
130 	if (!ha) {
131 		printk(KERN_INFO
132 		    "%s(): NULL host pointer\n", __func__);
133 		return (IRQ_NONE);
134 	}
135 
136 	reg = &ha->iobase->isp;
137 	status = 0;
138 
139 	spin_lock(&ha->hardware_lock);
140 	for (iter = 50; iter--; ) {
141 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
142 		if (stat & HSR_RISC_PAUSED) {
143 			if (pci_channel_offline(ha->pdev))
144 				break;
145 
146 			hccr = RD_REG_WORD(&reg->hccr);
147 			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
148 				qla_printk(KERN_INFO, ha, "Parity error -- "
149 				    "HCCR=%x, Dumping firmware!\n", hccr);
150 			else
151 				qla_printk(KERN_INFO, ha, "RISC paused -- "
152 				    "HCCR=%x, Dumping firmware!\n", hccr);
153 
154 			/*
155 			 * Issue a "HARD" reset in order for the RISC
156 			 * interrupt bit to be cleared.  Schedule a big
157 			 * hammer to get out of the RISC PAUSED state.
158 			 */
159 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
160 			RD_REG_WORD(&reg->hccr);
161 
162 			ha->isp_ops->fw_dump(ha, 1);
163 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
164 			break;
165 		} else if ((stat & HSR_RISC_INT) == 0)
166 			break;
167 
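		/*
		 * The low byte of the host status encodes why the RISC
		 * interrupted: 0x1/0x2/0x10/0x11 are mailbox command
		 * completions, 0x12 is an asynchronous event, 0x13 means the
		 * response queue needs servicing, and 0x15/0x16 are fast-post
		 * completions whose first mailbox value rides in the upper
		 * 16 bits of the status word.
		 */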
168 		switch (stat & 0xff) {
169 		case 0x1:
170 		case 0x2:
171 		case 0x10:
172 		case 0x11:
173 			qla2x00_mbx_completion(ha, MSW(stat));
174 			status |= MBX_INTERRUPT;
175 
176 			/* Release mailbox registers. */
177 			WRT_REG_WORD(&reg->semaphore, 0);
178 			break;
179 		case 0x12:
180 			mb[0] = MSW(stat);
181 			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
182 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
183 			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
184 			qla2x00_async_event(ha, mb);
185 			break;
186 		case 0x13:
187 			qla2x00_process_response_queue(ha);
188 			break;
189 		case 0x15:
190 			mb[0] = MBA_CMPLT_1_16BIT;
191 			mb[1] = MSW(stat);
192 			qla2x00_async_event(ha, mb);
193 			break;
194 		case 0x16:
195 			mb[0] = MBA_SCSI_COMPLETION;
196 			mb[1] = MSW(stat);
197 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
198 			qla2x00_async_event(ha, mb);
199 			break;
200 		default:
201 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
202 			    "(%d).\n",
203 			    ha->host_no, stat & 0xff));
204 			break;
205 		}
206 		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
207 		RD_REG_WORD_RELAXED(&reg->hccr);
208 	}
209 	spin_unlock(&ha->hardware_lock);
210 
211 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
212 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
213 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
214 		complete(&ha->mbx_intr_comp);
215 	}
216 
217 	return (IRQ_HANDLED);
218 }
219 
220 /**
221  * qla2x00_mbx_completion() - Process mailbox command completions.
222  * @ha: SCSI driver HA context
223  * @mb0: Mailbox0 register
224  */
225 static void
226 qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
227 {
228 	uint16_t	cnt;
229 	uint16_t __iomem *wptr;
230 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
231 
232 	/* Load return mailbox registers. */
233 	ha->flags.mbox_int = 1;
234 	ha->mailbox_out[0] = mb0;
235 	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
236 
237 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
238 		if (IS_QLA2200(ha) && cnt == 8)
239 			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
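		/*
		 * Mailboxes 4 and 5 are read through a debounce helper that
		 * re-reads the register until two consecutive reads agree,
		 * apparently to guard against transient values on the older
		 * ISPs.
		 */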
240 		if (cnt == 4 || cnt == 5)
241 			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
242 		else
243 			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
244 
245 		wptr++;
246 	}
247 
248 	if (ha->mcp) {
249 		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
250 		    __func__, ha->host_no, ha->mcp->mb[0]));
251 	} else {
252 		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
253 		    __func__, ha->host_no));
254 	}
255 }
256 
257 /**
258  * qla2x00_async_event() - Process asynchronous events.
259  * @ha: SCSI driver HA context
260  * @mb: Mailbox registers (0 - 3)
261  */
262 void
263 qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
264 {
265 #define LS_UNKNOWN	2
266 	static char	*link_speeds[5] = { "1", "2", "?", "4", "8" };
267 	char		*link_speed;
268 	uint16_t	handle_cnt;
269 	uint16_t	cnt;
270 	uint32_t	handles[5];
271 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
272 	uint32_t	rscn_entry, host_pid;
273 	uint8_t		rscn_queue_index;
274 	unsigned long	flags;
275 
276 	/* Setup to process RIO completion. */
277 	handle_cnt = 0;
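	/*
	 * Reduced Interrupt Operation (RIO) lets the firmware report several
	 * fast-post completions in one event: the MBA_CMPLT_n_16BIT forms
	 * carry up to five 16-bit handles in the mailboxes, the 32-bit form
	 * carries two full handles.  Each variant is normalized below to
	 * MBA_SCSI_COMPLETION with handles[] and handle_cnt filled in.
	 */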
278 	switch (mb[0]) {
279 	case MBA_SCSI_COMPLETION:
280 		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
281 		handle_cnt = 1;
282 		break;
283 	case MBA_CMPLT_1_16BIT:
284 		handles[0] = mb[1];
285 		handle_cnt = 1;
286 		mb[0] = MBA_SCSI_COMPLETION;
287 		break;
288 	case MBA_CMPLT_2_16BIT:
289 		handles[0] = mb[1];
290 		handles[1] = mb[2];
291 		handle_cnt = 2;
292 		mb[0] = MBA_SCSI_COMPLETION;
293 		break;
294 	case MBA_CMPLT_3_16BIT:
295 		handles[0] = mb[1];
296 		handles[1] = mb[2];
297 		handles[2] = mb[3];
298 		handle_cnt = 3;
299 		mb[0] = MBA_SCSI_COMPLETION;
300 		break;
301 	case MBA_CMPLT_4_16BIT:
302 		handles[0] = mb[1];
303 		handles[1] = mb[2];
304 		handles[2] = mb[3];
305 		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
306 		handle_cnt = 4;
307 		mb[0] = MBA_SCSI_COMPLETION;
308 		break;
309 	case MBA_CMPLT_5_16BIT:
310 		handles[0] = mb[1];
311 		handles[1] = mb[2];
312 		handles[2] = mb[3];
313 		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
314 		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
315 		handle_cnt = 5;
316 		mb[0] = MBA_SCSI_COMPLETION;
317 		break;
318 	case MBA_CMPLT_2_32BIT:
319 		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
320 		handles[1] = le32_to_cpu(
321 		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
322 		    RD_MAILBOX_REG(ha, reg, 6));
323 		handle_cnt = 2;
324 		mb[0] = MBA_SCSI_COMPLETION;
325 		break;
326 	default:
327 		break;
328 	}
329 
330 	switch (mb[0]) {
331 	case MBA_SCSI_COMPLETION:	/* Fast Post */
332 		if (!ha->flags.online)
333 			break;
334 
335 		for (cnt = 0; cnt < handle_cnt; cnt++)
336 			qla2x00_process_completed_request(ha, handles[cnt]);
337 		break;
338 
339 	case MBA_RESET:			/* Reset */
340 		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));
341 
342 		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
343 		break;
344 
345 	case MBA_SYSTEM_ERR:		/* System Error */
346 		qla_printk(KERN_INFO, ha,
347 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
348 		    mb[1], mb[2], mb[3]);
349 
350 		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
351 		ha->isp_ops->fw_dump(ha, 1);
352 
353 		if (IS_FWI2_CAPABLE(ha)) {
354 			if (mb[1] == 0 && mb[2] == 0) {
355 				qla_printk(KERN_ERR, ha,
356 				    "Unrecoverable Hardware Error: adapter "
357 				    "marked OFFLINE!\n");
358 				ha->flags.online = 0;
359 			} else
360 				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
361 		} else if (mb[1] == 0) {
362 			qla_printk(KERN_INFO, ha,
363 			    "Unrecoverable Hardware Error: adapter marked "
364 			    "OFFLINE!\n");
365 			ha->flags.online = 0;
366 		} else
367 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
368 		break;
369 
370 	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
371 		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
372 		    ha->host_no));
373 		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
374 
375 		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
376 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
377 		break;
378 
379 	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
380 		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
381 		    ha->host_no));
382 		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
383 
384 		qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
385 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
386 		break;
387 
388 	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
389 		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
390 		    ha->host_no));
391 		break;
392 
393 	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
394 		DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", ha->host_no,
395 		    mb[1]));
396 		qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);
397 
398 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
399 			atomic_set(&ha->loop_state, LOOP_DOWN);
400 			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
401 			qla2x00_mark_all_devices_lost(ha, 1);
402 		}
403 
404 		if (ha->parent) {
405 			atomic_set(&ha->vp_state, VP_FAILED);
406 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
407 		}
408 
409 		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
410 		set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
411 
412 		ha->flags.management_server_logged_in = 0;
413 		qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
414 		break;
415 
416 	case MBA_LOOP_UP:		/* Loop Up Event */
417 		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
418 			link_speed = link_speeds[0];
419 			ha->link_data_rate = PORT_SPEED_1GB;
420 		} else {
421 			link_speed = link_speeds[LS_UNKNOWN];
422 			if (mb[1] < 5)
423 				link_speed = link_speeds[mb[1]];
424 			ha->link_data_rate = mb[1];
425 		}
426 
427 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
428 		    ha->host_no, link_speed));
429 		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
430 		    link_speed);
431 
432 		ha->flags.management_server_logged_in = 0;
433 		qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate);
434 		break;
435 
436 	case MBA_LOOP_DOWN:		/* Loop Down Event */
437 		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
438 		    "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3]));
439 		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
440 		    mb[1], mb[2], mb[3]);
441 
442 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
443 			atomic_set(&ha->loop_state, LOOP_DOWN);
444 			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
445 			ha->device_flags |= DFLG_NO_CABLE;
446 			qla2x00_mark_all_devices_lost(ha, 1);
447 		}
448 
449 		if (ha->parent) {
450 			atomic_set(&ha->vp_state, VP_FAILED);
451 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
452 		}
453 
454 		ha->flags.management_server_logged_in = 0;
455 		ha->link_data_rate = PORT_SPEED_UNKNOWN;
456 		qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
457 		break;
458 
459 	case MBA_LIP_RESET:		/* LIP reset occurred */
460 		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
461 		    ha->host_no, mb[1]));
462 		qla_printk(KERN_INFO, ha,
463 		    "LIP reset occurred (%x).\n", mb[1]);
464 
465 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
466 			atomic_set(&ha->loop_state, LOOP_DOWN);
467 			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
468 			qla2x00_mark_all_devices_lost(ha, 1);
469 		}
470 
471 		if (ha->parent) {
472 			atomic_set(&ha->vp_state, VP_FAILED);
473 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
474 		}
475 
476 		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
477 
478 		ha->operating_mode = LOOP;
479 		ha->flags.management_server_logged_in = 0;
480 		qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]);
481 		break;
482 
483 	case MBA_POINT_TO_POINT:	/* Point-to-Point */
484 		if (IS_QLA2100(ha))
485 			break;
486 
487 		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
488 		    ha->host_no));
489 
490 		/*
491 		 * Until there's a transition from loop down to loop up, treat
492 		 * this as loop down only.
493 		 */
494 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
495 			atomic_set(&ha->loop_state, LOOP_DOWN);
496 			if (!atomic_read(&ha->loop_down_timer))
497 				atomic_set(&ha->loop_down_timer,
498 				    LOOP_DOWN_TIME);
499 			qla2x00_mark_all_devices_lost(ha, 1);
500 		}
501 
502 		if (ha->parent) {
503 			atomic_set(&ha->vp_state, VP_FAILED);
504 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
505 		}
506 
507 		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
508 			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
509 		}
510 		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
511 		set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
512 
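		/*
		 * In point-to-point/fabric mode a name server is presumably
		 * reachable, so assume GPSC (Get Port Speed Capabilities)
		 * queries are supported and force a fresh management-server
		 * login.
		 */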
513 		ha->flags.gpsc_supported = 1;
514 		ha->flags.management_server_logged_in = 0;
515 		break;
516 
517 	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
518 		if (IS_QLA2100(ha))
519 			break;
520 
521 		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
522 		    "received.\n",
523 		    ha->host_no));
524 		qla_printk(KERN_INFO, ha,
525 		    "Configuration change detected: value=%x.\n", mb[1]);
526 
527 		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
528 			atomic_set(&ha->loop_state, LOOP_DOWN);
529 			if (!atomic_read(&ha->loop_down_timer))
530 				atomic_set(&ha->loop_down_timer,
531 				    LOOP_DOWN_TIME);
532 			qla2x00_mark_all_devices_lost(ha, 1);
533 		}
534 
535 		if (ha->parent) {
536 			atomic_set(&ha->vp_state, VP_FAILED);
537 			fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
538 		}
539 
540 		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
541 		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
542 		break;
543 
544 	case MBA_PORT_UPDATE:		/* Port database update */
545 		/*
546 		 * If the PORT UPDATE is global (a LIP_OCCURRED/LIP_RESET event
547 		 * was received earlier indicating the loop is down), process
548 		 * it.  Otherwise ignore it and wait for an RSCN to come in.
549 		 */
550 		atomic_set(&ha->loop_down_timer, 0);
551 		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
552 		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
553 			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
554 			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
555 			    mb[2], mb[3]));
556 			break;
557 		}
558 
559 		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
560 		    ha->host_no));
561 		DEBUG(printk(KERN_INFO
562 		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
563 		    ha->host_no, mb[1], mb[2], mb[3]));
564 
565 		/*
566 		 * Mark all devices as missing so we will login again.
567 		 */
568 		atomic_set(&ha->loop_state, LOOP_UP);
569 
570 		qla2x00_mark_all_devices_lost(ha, 1);
571 
572 		ha->flags.rscn_queue_overflow = 1;
573 
574 		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
575 		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
576 		break;
577 
578 	case MBA_RSCN_UPDATE:		/* State Change Registration */
579 		/* Check if the Vport has issued a SCR */
580 		if (ha->parent && test_bit(VP_SCR_NEEDED, &ha->vp_flags))
581 			break;
582 		/* Only handle SCNs for our Vport index. */
583 		if (ha->parent && ha->vp_idx != (mb[3] & 0xff))
584 			break;
585 
586 		DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
587 		    ha->host_no));
588 		DEBUG(printk(KERN_INFO
589 		    "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
590 		    ha->host_no, mb[1], mb[2], mb[3]));
591 
592 		rscn_entry = (mb[1] << 16) | mb[2];
593 		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
594 		    ha->d_id.b.al_pa;
595 		if (rscn_entry == host_pid) {
596 			DEBUG(printk(KERN_INFO
597 			    "scsi(%ld): Ignoring RSCN update to local host "
598 			    "port ID (%06x)\n",
599 			    ha->host_no, host_pid));
600 			break;
601 		}
602 
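		/*
		 * Queue the RSCN in a simple ring buffer keyed by
		 * rscn_in_ptr/rscn_out_ptr.  If the ring is full, the
		 * overflow flag makes the rediscovery code treat this as a
		 * full fabric change rather than replaying individual
		 * entries.
		 */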
603 		rscn_queue_index = ha->rscn_in_ptr + 1;
604 		if (rscn_queue_index == MAX_RSCN_COUNT)
605 			rscn_queue_index = 0;
606 		if (rscn_queue_index != ha->rscn_out_ptr) {
607 			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
608 			ha->rscn_in_ptr = rscn_queue_index;
609 		} else {
610 			ha->flags.rscn_queue_overflow = 1;
611 		}
612 
613 		atomic_set(&ha->loop_state, LOOP_UPDATE);
614 		atomic_set(&ha->loop_down_timer, 0);
615 		ha->flags.management_server_logged_in = 0;
616 
617 		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
618 		set_bit(RSCN_UPDATE, &ha->dpc_flags);
619 		qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry);
620 		break;
621 
622 	/* case MBA_RIO_RESPONSE: */
623 	case MBA_ZIO_RESPONSE:
624 		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
625 		    ha->host_no));
626 		DEBUG(printk(KERN_INFO
627 		    "scsi(%ld): [R|Z]IO update completion.\n",
628 		    ha->host_no));
629 
630 		if (IS_FWI2_CAPABLE(ha))
631 			qla24xx_process_response_queue(ha);
632 		else
633 			qla2x00_process_response_queue(ha);
634 		break;
635 
636 	case MBA_DISCARD_RND_FRAME:
637 		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
638 		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
639 		break;
640 
641 	case MBA_TRACE_NOTIFICATION:
642 		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
643 		    ha->host_no, mb[1], mb[2]));
644 		break;
645 
646 	case MBA_ISP84XX_ALERT:
647 		DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
648 		    "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3]));
649 
650 		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
651 		switch (mb[1]) {
652 		case A84_PANIC_RECOVERY:
653 			qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
654 			    "%04x %04x\n", mb[2], mb[3]);
655 			break;
656 		case A84_OP_LOGIN_COMPLETE:
657 			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
658 			DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
659 			    "firmware version %x\n", ha->cs84xx->op_fw_version));
660 			break;
661 		case A84_DIAG_LOGIN_COMPLETE:
662 			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
663 			DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: "
664 			    "diagnostic firmware version %x\n",
665 			    ha->cs84xx->diag_fw_version));
666 			break;
667 		case A84_GOLD_LOGIN_COMPLETE:
668 			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
669 			ha->cs84xx->fw_update = 1;
670 			DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
671 			    "firmware version %x\n",
672 			    ha->cs84xx->gold_fw_version));
673 			break;
674 		default:
675 			qla_printk(KERN_ERR, ha,
676 			    "Alert 84xx: Invalid Alert %04x %04x %04x\n",
677 			    mb[1], mb[2], mb[3]);
678 		}
679 		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
680 		break;
681 	}
682 
683 	if (!ha->parent && ha->num_vhosts)
684 		qla2x00_alert_all_vps(ha, mb);
685 }
686 
687 static void
688 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
689 {
690 	fc_port_t *fcport = data;
691 
692 	if (fcport->ha->max_q_depth <= sdev->queue_depth)
693 		return;
694 
695 	if (sdev->ordered_tags)
696 		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
697 		    sdev->queue_depth + 1);
698 	else
699 		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
700 		    sdev->queue_depth + 1);
701 
702 	fcport->last_ramp_up = jiffies;
703 
704 	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
705 	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
706 	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
707 	    sdev->queue_depth));
708 }
709 
710 static void
711 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
712 {
713 	fc_port_t *fcport = data;
714 
715 	if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
716 		return;
717 
718 	DEBUG2(qla_printk(KERN_INFO, fcport->ha,
719 	    "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
720 	    fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
721 	    sdev->queue_depth));
722 }
723 
724 static inline void
725 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
726 {
727 	fc_port_t *fcport;
728 	struct scsi_device *sdev;
729 
730 	sdev = sp->cmd->device;
731 	if (sdev->queue_depth >= ha->max_q_depth)
732 		return;
733 
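	/*
	 * Throttle ramp-up: only increase queue depth if at least
	 * ql2xqfullrampup seconds have elapsed since both the last ramp-up
	 * and the last QUEUE FULL condition seen on this port.
	 */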
734 	fcport = sp->fcport;
735 	if (time_before(jiffies,
736 	    fcport->last_ramp_up + ql2xqfullrampup * HZ))
737 		return;
738 	if (time_before(jiffies,
739 	    fcport->last_queue_full + ql2xqfullrampup * HZ))
740 		return;
741 
742 	starget_for_each_device(sdev->sdev_target, fcport,
743 	    qla2x00_adjust_sdev_qdepth_up);
744 }
745 
746 /**
747  * qla2x00_process_completed_request() - Process a Fast Post response.
748  * @ha: SCSI driver HA context
749  * @index: SRB index
750  */
751 static void
752 qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
753 {
754 	srb_t *sp;
755 
756 	/* Validate handle. */
757 	if (index >= MAX_OUTSTANDING_COMMANDS) {
758 		DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
759 		    ha->host_no, index));
760 		qla_printk(KERN_WARNING, ha,
761 		    "Invalid SCSI completion handle %d.\n", index);
762 
763 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
764 		return;
765 	}
766 
767 	sp = ha->outstanding_cmds[index];
768 	if (sp) {
769 		/* Free outstanding command slot. */
770 		ha->outstanding_cmds[index] = NULL;
771 
772 		CMD_COMPL_STATUS(sp->cmd) = 0L;
773 		CMD_SCSI_STATUS(sp->cmd) = 0L;
774 
775 		/* Save ISP completion status */
776 		sp->cmd->result = DID_OK << 16;
777 
778 		qla2x00_ramp_up_queue_depth(ha, sp);
779 		qla2x00_sp_compl(ha, sp);
780 	} else {
781 		DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
782 		    ha->host_no));
783 		qla_printk(KERN_WARNING, ha,
784 		    "Invalid ISP SCSI completion handle\n");
785 
786 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
787 	}
788 }
789 
790 /**
791  * qla2x00_process_response_queue() - Process response queue entries.
792  * @ha: SCSI driver HA context
793  */
794 void
795 qla2x00_process_response_queue(struct scsi_qla_host *ha)
796 {
797 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
798 	sts_entry_t	*pkt;
799 	uint16_t        handle_cnt;
800 	uint16_t        cnt;
801 
802 	if (!ha->flags.online)
803 		return;
804 
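	/*
	 * Walk the response ring until an entry still stamped
	 * RESPONSE_PROCESSED is reached.  Each consumed entry is re-stamped
	 * (with a write barrier), and the ring-out pointer register is only
	 * updated once the ring has been drained.
	 */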
805 	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
806 		pkt = (sts_entry_t *)ha->response_ring_ptr;
807 
808 		ha->rsp_ring_index++;
809 		if (ha->rsp_ring_index == ha->response_q_length) {
810 			ha->rsp_ring_index = 0;
811 			ha->response_ring_ptr = ha->response_ring;
812 		} else {
813 			ha->response_ring_ptr++;
814 		}
815 
816 		if (pkt->entry_status != 0) {
817 			DEBUG3(printk(KERN_INFO
818 			    "scsi(%ld): Process error entry.\n", ha->host_no));
819 
820 			qla2x00_error_entry(ha, pkt);
821 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
822 			wmb();
823 			continue;
824 		}
825 
826 		switch (pkt->entry_type) {
827 		case STATUS_TYPE:
828 			qla2x00_status_entry(ha, pkt);
829 			break;
830 		case STATUS_TYPE_21:
831 			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
832 			for (cnt = 0; cnt < handle_cnt; cnt++) {
833 				qla2x00_process_completed_request(ha,
834 				    ((sts21_entry_t *)pkt)->handle[cnt]);
835 			}
836 			break;
837 		case STATUS_TYPE_22:
838 			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
839 			for (cnt = 0; cnt < handle_cnt; cnt++) {
840 				qla2x00_process_completed_request(ha,
841 				    ((sts22_entry_t *)pkt)->handle[cnt]);
842 			}
843 			break;
844 		case STATUS_CONT_TYPE:
845 			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
846 			break;
847 		default:
848 			/* Type Not Supported. */
849 			DEBUG4(printk(KERN_WARNING
850 			    "scsi(%ld): Received unknown response pkt type %x "
851 			    "entry status=%x.\n",
852 			    ha->host_no, pkt->entry_type, pkt->entry_status));
853 			break;
854 		}
855 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
856 		wmb();
857 	}
858 
859 	/* Adjust ring index */
860 	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
861 }
862 
863 static inline void
864 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
865 {
866 	struct scsi_cmnd *cp = sp->cmd;
867 
868 	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
869 		sense_len = SCSI_SENSE_BUFFERSIZE;
870 
871 	CMD_ACTUAL_SNSLEN(cp) = sense_len;
872 	sp->request_sense_length = sense_len;
873 	sp->request_sense_ptr = cp->sense_buffer;
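	/*
	 * The status IOCB carries only a small first chunk of sense data
	 * inline (the copy here is capped at 32 bytes); if more was
	 * reported, status_srb is left pointing at this srb so the remainder
	 * can be gathered from the Status Continuation entries that follow.
	 */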
874 	if (sp->request_sense_length > 32)
875 		sense_len = 32;
876 
877 	memcpy(cp->sense_buffer, sense_data, sense_len);
878 
879 	sp->request_sense_ptr += sense_len;
880 	sp->request_sense_length -= sense_len;
881 	if (sp->request_sense_length != 0)
882 		sp->ha->status_srb = sp;
883 
884 	DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
885 	    "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel,
886 	    cp->device->id, cp->device->lun, cp, cp->serial_number));
887 	if (sense_len)
888 		DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
889 		    CMD_ACTUAL_SNSLEN(cp)));
890 }
891 
892 /**
893  * qla2x00_status_entry() - Process a Status IOCB entry.
894  * @ha: SCSI driver HA context
895  * @pkt: Entry pointer
896  */
897 static void
898 qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
899 {
900 	srb_t		*sp;
901 	fc_port_t	*fcport;
902 	struct scsi_cmnd *cp;
903 	sts_entry_t *sts;
904 	struct sts_entry_24xx *sts24;
905 	uint16_t	comp_status;
906 	uint16_t	scsi_status;
907 	uint8_t		lscsi_status;
908 	int32_t		resid;
909 	uint32_t	sense_len, rsp_info_len, resid_len, fw_resid_len;
910 	uint8_t		*rsp_info, *sense_data;
911 
912 	sts = (sts_entry_t *) pkt;
913 	sts24 = (struct sts_entry_24xx *) pkt;
914 	if (IS_FWI2_CAPABLE(ha)) {
915 		comp_status = le16_to_cpu(sts24->comp_status);
916 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
917 	} else {
918 		comp_status = le16_to_cpu(sts->comp_status);
919 		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
920 	}
921 
922 	/* Fast path completion. */
923 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
924 		qla2x00_process_completed_request(ha, sts->handle);
925 
926 		return;
927 	}
928 
929 	/* Validate handle. */
930 	if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
931 		sp = ha->outstanding_cmds[sts->handle];
932 		ha->outstanding_cmds[sts->handle] = NULL;
933 	} else
934 		sp = NULL;
935 
936 	if (sp == NULL) {
937 		DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
938 		    ha->host_no));
939 		qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
940 
941 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
942 		qla2xxx_wake_dpc(ha);
943 		return;
944 	}
945 	cp = sp->cmd;
946 	if (cp == NULL) {
947 		DEBUG2(printk("scsi(%ld): Command already returned back to OS "
948 		    "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
949 		qla_printk(KERN_WARNING, ha,
950 		    "Command is NULL: already returned to OS (sp=%p)\n", sp);
951 
952 		return;
953 	}
954 
955 	lscsi_status = scsi_status & STATUS_MASK;
956 	CMD_ENTRY_STATUS(cp) = sts->entry_status;
957 	CMD_COMPL_STATUS(cp) = comp_status;
958 	CMD_SCSI_STATUS(cp) = scsi_status;
959 
960 	fcport = sp->fcport;
961 
962 	sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
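	/*
	 * ISP24xx status IOCBs report FCP response info and sense data in a
	 * single data[] buffer that the firmware fills in FCP (big-endian)
	 * byte order, hence the swap here and the sense-pointer adjustment
	 * past any response info further below.  Older ISPs report them in
	 * separate, directly usable fields.
	 */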
963 	if (IS_FWI2_CAPABLE(ha)) {
964 		sense_len = le32_to_cpu(sts24->sense_len);
965 		rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
966 		resid_len = le32_to_cpu(sts24->rsp_residual_count);
967 		fw_resid_len = le32_to_cpu(sts24->residual_len);
968 		rsp_info = sts24->data;
969 		sense_data = sts24->data;
970 		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
971 	} else {
972 		sense_len = le16_to_cpu(sts->req_sense_length);
973 		rsp_info_len = le16_to_cpu(sts->rsp_info_len);
974 		resid_len = le32_to_cpu(sts->residual_length);
975 		rsp_info = sts->rsp_info;
976 		sense_data = sts->req_sense_data;
977 	}
978 
979 	/* Check for any FCP transport errors. */
980 	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
981 		/* Sense data lies beyond any FCP RESPONSE data. */
982 		if (IS_FWI2_CAPABLE(ha))
983 			sense_data += rsp_info_len;
984 		if (rsp_info_len > 3 && rsp_info[3]) {
985 			DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
986 			    "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
987 			    "retrying command\n", ha->host_no,
988 			    cp->device->channel, cp->device->id,
989 			    cp->device->lun, rsp_info_len, rsp_info[0],
990 			    rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
991 			    rsp_info[5], rsp_info[6], rsp_info[7]));
992 
993 			cp->result = DID_BUS_BUSY << 16;
994 			qla2x00_sp_compl(ha, sp);
995 			return;
996 		}
997 	}
998 
999 	/* Check for overrun. */
1000 	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1001 	    scsi_status & SS_RESIDUAL_OVER)
1002 		comp_status = CS_DATA_OVERRUN;
1003 
1004 	/*
1005 	 * Based on the host and SCSI status, generate the Linux status code.
1006 	 */
1007 	switch (comp_status) {
1008 	case CS_COMPLETE:
1009 	case CS_QUEUE_FULL:
1010 		if (scsi_status == 0) {
1011 			cp->result = DID_OK << 16;
1012 			break;
1013 		}
1014 		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1015 			resid = resid_len;
1016 			scsi_set_resid(cp, resid);
1017 			CMD_RESID_LEN(cp) = resid;
1018 
1019 			if (!lscsi_status &&
1020 			    ((unsigned)(scsi_bufflen(cp) - resid) <
1021 			     cp->underflow)) {
1022 				qla_printk(KERN_INFO, ha,
1023 					   "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1024 					   "detected (%x of %x bytes)...returning "
1025 					   "error status.\n", ha->host_no,
1026 					   cp->device->channel, cp->device->id,
1027 					   cp->device->lun, resid,
1028 					   scsi_bufflen(cp));
1029 
1030 				cp->result = DID_ERROR << 16;
1031 				break;
1032 			}
1033 		}
1034 		cp->result = DID_OK << 16 | lscsi_status;
1035 
1036 		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1037 			DEBUG2(printk(KERN_INFO
1038 			    "scsi(%ld): QUEUE FULL status detected "
1039 			    "0x%x-0x%x.\n", ha->host_no, comp_status,
1040 			    scsi_status));
1041 
1042 			/* Adjust queue depth for all luns on the port. */
1043 			fcport->last_queue_full = jiffies;
1044 			starget_for_each_device(cp->device->sdev_target,
1045 			    fcport, qla2x00_adjust_sdev_qdepth_down);
1046 			break;
1047 		}
1048 		if (lscsi_status != SS_CHECK_CONDITION)
1049 			break;
1050 
1051 		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1052 		if (!(scsi_status & SS_SENSE_LEN_VALID))
1053 			break;
1054 
1055 		qla2x00_handle_sense(sp, sense_data, sense_len);
1056 		break;
1057 
1058 	case CS_DATA_UNDERRUN:
1059 		resid = resid_len;
1060 		/* Use F/W calculated residual length. */
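		/*
		 * If the firmware-computed residual disagrees with the one in
		 * the status IOCB, the RESIDUAL_UNDER indication is dropped,
		 * so the command falls into the dropped-frame handling below
		 * (DID_BUS_BUSY) instead of being treated as a clean
		 * underrun.
		 */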
1061 		if (IS_FWI2_CAPABLE(ha)) {
1062 			if (scsi_status & SS_RESIDUAL_UNDER &&
1063 			    resid != fw_resid_len) {
1064 				scsi_status &= ~SS_RESIDUAL_UNDER;
1065 				lscsi_status = 0;
1066 			}
1067 			resid = fw_resid_len;
1068 		}
1069 
1070 		if (scsi_status & SS_RESIDUAL_UNDER) {
1071 			scsi_set_resid(cp, resid);
1072 			CMD_RESID_LEN(cp) = resid;
1073 		} else {
1074 			DEBUG2(printk(KERN_INFO
1075 			    "scsi(%ld:%d:%d) UNDERRUN status detected "
1076 			    "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1077 			    "os_underflow=0x%x\n", ha->host_no,
1078 			    cp->device->id, cp->device->lun, comp_status,
1079 			    scsi_status, resid_len, resid, cp->cmnd[0],
1080 			    cp->underflow));
1081 
1082 		}
1083 
1084 		/*
1085 		 * Check to see if SCSI Status is non zero. If so report SCSI
1086 		 * Status.
1087 		 */
1088 		if (lscsi_status != 0) {
1089 			cp->result = DID_OK << 16 | lscsi_status;
1090 
1091 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1092 				DEBUG2(printk(KERN_INFO
1093 				    "scsi(%ld): QUEUE FULL status detected "
1094 				    "0x%x-0x%x.\n", ha->host_no, comp_status,
1095 				    scsi_status));
1096 
1097 				/*
1098 				 * Adjust queue depth for all luns on the
1099 				 * port.
1100 				 */
1101 				fcport->last_queue_full = jiffies;
1102 				starget_for_each_device(
1103 				    cp->device->sdev_target, fcport,
1104 				    qla2x00_adjust_sdev_qdepth_down);
1105 				break;
1106 			}
1107 			if (lscsi_status != SS_CHECK_CONDITION)
1108 				break;
1109 
1110 			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1111 			if (!(scsi_status & SS_SENSE_LEN_VALID))
1112 				break;
1113 
1114 			qla2x00_handle_sense(sp, sense_data, sense_len);
1115 		} else {
1116 			/*
1117 			 * If RISC reports underrun and target does not report
1118 			 * it then we must have a lost frame, so tell upper
1119 			 * layer to retry it by reporting a bus busy.
1120 			 */
1121 			if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1122 				DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1123 					      "frame(s) detected (%x of %x bytes)..."
1124 					      "retrying command.\n", ha->host_no,
1125 					      cp->device->channel, cp->device->id,
1126 					      cp->device->lun, resid,
1127 					      scsi_bufflen(cp)));
1128 
1129 				cp->result = DID_BUS_BUSY << 16;
1130 				break;
1131 			}
1132 
1133 			/* Handle mid-layer underflow */
1134 			if ((unsigned)(scsi_bufflen(cp) - resid) <
1135 			    cp->underflow) {
1136 				qla_printk(KERN_INFO, ha,
1137 					   "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1138 					   "detected (%x of %x bytes)...returning "
1139 					   "error status.\n", ha->host_no,
1140 					   cp->device->channel, cp->device->id,
1141 					   cp->device->lun, resid,
1142 					   scsi_bufflen(cp));
1143 
1144 				cp->result = DID_ERROR << 16;
1145 				break;
1146 			}
1147 
1148 			/* Everybody online, looking good... */
1149 			cp->result = DID_OK << 16;
1150 		}
1151 		break;
1152 
1153 	case CS_DATA_OVERRUN:
1154 		DEBUG2(printk(KERN_INFO
1155 		    "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1156 		    ha->host_no, cp->device->id, cp->device->lun, comp_status,
1157 		    scsi_status));
1158 		DEBUG2(printk(KERN_INFO
1159 		    "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1160 		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1161 		    cp->cmnd[4], cp->cmnd[5]));
1162 		DEBUG2(printk(KERN_INFO
1163 		    "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1164 		    "status!\n",
1165 		    cp->serial_number, scsi_bufflen(cp), resid_len));
1166 
1167 		cp->result = DID_ERROR << 16;
1168 		break;
1169 
1170 	case CS_PORT_LOGGED_OUT:
1171 	case CS_PORT_CONFIG_CHG:
1172 	case CS_PORT_BUSY:
1173 	case CS_INCOMPLETE:
1174 	case CS_PORT_UNAVAILABLE:
1175 		/*
1176 		 * The remote port is unavailable: fail the command with
1177 		 * DID_BUS_BUSY so the mid-layer retries it, and if the port
1178 		 * was still marked online, flag the device as lost.
1179 		 */
1180 		DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1181 		    "pid=%ld, compl status=0x%x, port state=0x%x\n",
1182 		    ha->host_no, cp->device->id, cp->device->lun,
1183 		    cp->serial_number, comp_status,
1184 		    atomic_read(&fcport->state)));
1185 
1186 		cp->result = DID_BUS_BUSY << 16;
1187 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
1188 			qla2x00_mark_device_lost(ha, fcport, 1, 1);
1189 		}
1190 		break;
1191 
1192 	case CS_RESET:
1193 		DEBUG2(printk(KERN_INFO
1194 		    "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1195 		    ha->host_no, comp_status, scsi_status));
1196 
1197 		cp->result = DID_RESET << 16;
1198 		break;
1199 
1200 	case CS_ABORTED:
1201 		/*
1202 		 * hv2.19.12 - DID_ABORT is not retried by the mid-layer.  If
1203 		 * we had aborted this request ourselves DID_ABORT would apply;
1204 		 * since we did not, assume a reset and report DID_RESET.
1205 		 */
1206 		DEBUG2(printk(KERN_INFO
1207 		    "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1208 		    ha->host_no, comp_status, scsi_status));
1209 
1210 		cp->result = DID_RESET << 16;
1211 		break;
1212 
1213 	case CS_TIMEOUT:
1214 		cp->result = DID_BUS_BUSY << 16;
1215 
1216 		if (IS_FWI2_CAPABLE(ha)) {
1217 			DEBUG2(printk(KERN_INFO
1218 			    "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1219 			    "0x%x-0x%x\n", ha->host_no, cp->device->channel,
1220 			    cp->device->id, cp->device->lun, comp_status,
1221 			    scsi_status));
1222 			break;
1223 		}
1224 		DEBUG2(printk(KERN_INFO
1225 		    "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1226 		    "sflags=%x.\n", ha->host_no, cp->device->channel,
1227 		    cp->device->id, cp->device->lun, comp_status, scsi_status,
1228 		    le16_to_cpu(sts->status_flags)));
1229 
1230 		/* Check to see if logout occurred. */
1231 		if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1232 			qla2x00_mark_device_lost(ha, fcport, 1, 1);
1233 		break;
1234 
1235 	default:
1236 		DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1237 		    "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
1238 		qla_printk(KERN_INFO, ha,
1239 		    "Unknown status detected 0x%x-0x%x.\n",
1240 		    comp_status, scsi_status);
1241 
1242 		cp->result = DID_ERROR << 16;
1243 		break;
1244 	}
1245 
1246 	/* Place command on done queue. */
1247 	if (ha->status_srb == NULL)
1248 		qla2x00_sp_compl(ha, sp);
1249 }
1250 
1251 /**
1252  * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1253  * @ha: SCSI driver HA context
1254  * @pkt: Entry pointer
1255  *
1256  * Extended sense data.
1257  */
1258 static void
1259 qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1260 {
1261 	uint8_t		sense_sz = 0;
1262 	srb_t		*sp = ha->status_srb;
1263 	struct scsi_cmnd *cp;
1264 
1265 	if (sp != NULL && sp->request_sense_length != 0) {
1266 		cp = sp->cmd;
1267 		if (cp == NULL) {
1268 			DEBUG2(printk("%s(): Cmd already returned back to OS "
1269 			    "sp=%p.\n", __func__, sp));
1270 			qla_printk(KERN_INFO, ha,
1271 			    "cmd is NULL: already returned to OS (sp=%p)\n",
1272 			    sp);
1273 
1274 			ha->status_srb = NULL;
1275 			return;
1276 		}
1277 
1278 		if (sp->request_sense_length > sizeof(pkt->data)) {
1279 			sense_sz = sizeof(pkt->data);
1280 		} else {
1281 			sense_sz = sp->request_sense_length;
1282 		}
1283 
1284 		/* Move sense data. */
1285 		if (IS_FWI2_CAPABLE(ha))
1286 			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1287 		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1288 		DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1289 
1290 		sp->request_sense_ptr += sense_sz;
1291 		sp->request_sense_length -= sense_sz;
1292 
1293 		/* Place command on done queue. */
1294 		if (sp->request_sense_length == 0) {
1295 			ha->status_srb = NULL;
1296 			qla2x00_sp_compl(ha, sp);
1297 		}
1298 	}
1299 }
1300 
1301 /**
1302  * qla2x00_error_entry() - Process an error entry.
1303  * @ha: SCSI driver HA context
1304  * @pkt: Entry pointer
1305  */
1306 static void
1307 qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1308 {
1309 	srb_t *sp;
1310 
1311 #if defined(QL_DEBUG_LEVEL_2)
1312 	if (pkt->entry_status & RF_INV_E_ORDER)
1313 		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1314 	else if (pkt->entry_status & RF_INV_E_COUNT)
1315 		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1316 	else if (pkt->entry_status & RF_INV_E_PARAM)
1317 		qla_printk(KERN_ERR, ha,
1318 		    "%s: Invalid Entry Parameter\n", __func__);
1319 	else if (pkt->entry_status & RF_INV_E_TYPE)
1320 		qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1321 	else if (pkt->entry_status & RF_BUSY)
1322 		qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1323 	else
1324 		qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1325 #endif
1326 
1327 	/* Validate handle. */
1328 	if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1329 		sp = ha->outstanding_cmds[pkt->handle];
1330 	else
1331 		sp = NULL;
1332 
1333 	if (sp) {
1334 		/* Free outstanding command slot. */
1335 		ha->outstanding_cmds[pkt->handle] = NULL;
1336 
1337 		/* Bad payload or header */
1338 		if (pkt->entry_status &
1339 		    (RF_INV_E_ORDER | RF_INV_E_COUNT |
1340 		     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1341 			sp->cmd->result = DID_ERROR << 16;
1342 		} else if (pkt->entry_status & RF_BUSY) {
1343 			sp->cmd->result = DID_BUS_BUSY << 16;
1344 		} else {
1345 			sp->cmd->result = DID_ERROR << 16;
1346 		}
1347 		qla2x00_sp_compl(ha, sp);
1348 
1349 	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1350 	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1351 		DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1352 		    ha->host_no));
1353 		qla_printk(KERN_WARNING, ha,
1354 		    "Error entry - invalid handle\n");
1355 
1356 		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1357 		qla2xxx_wake_dpc(ha);
1358 	}
1359 }
1360 
1361 /**
1362  * qla24xx_mbx_completion() - Process mailbox command completions.
1363  * @ha: SCSI driver HA context
1364  * @mb0: Mailbox0 register
1365  */
1366 static void
1367 qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1368 {
1369 	uint16_t	cnt;
1370 	uint16_t __iomem *wptr;
1371 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1372 
1373 	/* Load return mailbox registers. */
1374 	ha->flags.mbox_int = 1;
1375 	ha->mailbox_out[0] = mb0;
1376 	wptr = (uint16_t __iomem *)&reg->mailbox1;
1377 
1378 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1379 		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1380 		wptr++;
1381 	}
1382 
1383 	if (ha->mcp) {
1384 		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1385 		    __func__, ha->host_no, ha->mcp->mb[0]));
1386 	} else {
1387 		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1388 		    __func__, ha->host_no));
1389 	}
1390 }
1391 
1392 /**
1393  * qla24xx_process_response_queue() - Process response queue entries.
1394  * @ha: SCSI driver HA context
1395  */
1396 void
1397 qla24xx_process_response_queue(struct scsi_qla_host *ha)
1398 {
1399 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1400 	struct sts_entry_24xx *pkt;
1401 
1402 	if (!ha->flags.online)
1403 		return;
1404 
1405 	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
1406 		pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;
1407 
1408 		ha->rsp_ring_index++;
1409 		if (ha->rsp_ring_index == ha->response_q_length) {
1410 			ha->rsp_ring_index = 0;
1411 			ha->response_ring_ptr = ha->response_ring;
1412 		} else {
1413 			ha->response_ring_ptr++;
1414 		}
1415 
1416 		if (pkt->entry_status != 0) {
1417 			DEBUG3(printk(KERN_INFO
1418 			    "scsi(%ld): Process error entry.\n", ha->host_no));
1419 
1420 			qla2x00_error_entry(ha, (sts_entry_t *) pkt);
1421 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1422 			wmb();
1423 			continue;
1424 		}
1425 
1426 		switch (pkt->entry_type) {
1427 		case STATUS_TYPE:
1428 			qla2x00_status_entry(ha, pkt);
1429 			break;
1430 		case STATUS_CONT_TYPE:
1431 			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
1432 			break;
1433 		case VP_RPT_ID_IOCB_TYPE:
1434 			qla24xx_report_id_acquisition(ha,
1435 			    (struct vp_rpt_id_entry_24xx *)pkt);
1436 			break;
1437 		default:
1438 			/* Type Not Supported. */
1439 			DEBUG4(printk(KERN_WARNING
1440 			    "scsi(%ld): Received unknown response pkt type %x "
1441 			    "entry status=%x.\n",
1442 			    ha->host_no, pkt->entry_type, pkt->entry_status));
1443 			break;
1444 		}
1445 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1446 		wmb();
1447 	}
1448 
1449 	/* Adjust ring index */
1450 	WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
1451 }
1452 
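/*
 * ISP25xx-only diagnostic peek: select the 0x7C00 register window through
 * iobase_addr, poll iobase_window until the selection takes (BIT_0 set),
 * then check a status bit in iobase_c8.  The result is informational only;
 * a hint message is logged before the caller dumps firmware.
 */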
1453 static void
1454 qla2xxx_check_risc_status(scsi_qla_host_t *ha)
1455 {
1456 	int rval;
1457 	uint32_t cnt;
1458 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1459 
1460 	if (!IS_QLA25XX(ha))
1461 		return;
1462 
1463 	rval = QLA_SUCCESS;
1464 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1465 	RD_REG_DWORD(&reg->iobase_addr);
1466 	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1467 	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1468 	    rval == QLA_SUCCESS; cnt--) {
1469 		if (cnt) {
1470 			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
1471 			udelay(10);
1472 		} else
1473 			rval = QLA_FUNCTION_TIMEOUT;
1474 	}
1475 	if (rval == QLA_SUCCESS)
1476 		goto next_test;
1477 
1478 	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1479 	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
1480 	    rval == QLA_SUCCESS; cnt--) {
1481 		if (cnt) {
1482 			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
1483 			udelay(10);
1484 		} else
1485 			rval = QLA_FUNCTION_TIMEOUT;
1486 	}
1487 	if (rval != QLA_SUCCESS)
1488 		goto done;
1489 
1490 next_test:
1491 	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
1492 		qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");
1493 
1494 done:
1495 	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
1496 	RD_REG_DWORD(&reg->iobase_window);
1497 }
1498 
1499 /**
1500  * qla24xx_intr_handler() - Process interrupts for ISP24xx and later adapters.
1501  * @irq: interrupt number
1502  * @dev_id: SCSI driver HA context
1503  *
1504  * Called by system whenever the host adapter generates an interrupt.
1505  *
1506  * Returns handled flag.
1507  */
1508 irqreturn_t
1509 qla24xx_intr_handler(int irq, void *dev_id)
1510 {
1511 	scsi_qla_host_t	*ha;
1512 	struct device_reg_24xx __iomem *reg;
1513 	int		status;
1514 	unsigned long	iter;
1515 	uint32_t	stat;
1516 	uint32_t	hccr;
1517 	uint16_t	mb[4];
1518 
1519 	ha = (scsi_qla_host_t *) dev_id;
1520 	if (!ha) {
1521 		printk(KERN_INFO
1522 		    "%s(): NULL host pointer\n", __func__);
1523 		return IRQ_NONE;
1524 	}
1525 
1526 	reg = &ha->iobase->isp24;
1527 	status = 0;
1528 
1529 	spin_lock(&ha->hardware_lock);
1530 	for (iter = 50; iter--; ) {
1531 		stat = RD_REG_DWORD(&reg->host_status);
1532 		if (stat & HSRX_RISC_PAUSED) {
1533 			if (pci_channel_offline(ha->pdev))
1534 				break;
1535 
1536 			if (ha->hw_event_pause_errors == 0)
1537 				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
1538 				    0, MSW(stat), LSW(stat));
1539 			else if (ha->hw_event_pause_errors < 0xffffffff)
1540 				ha->hw_event_pause_errors++;
1541 
1542 			hccr = RD_REG_DWORD(&reg->hccr);
1543 
1544 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1545 			    "Dumping firmware!\n", hccr);
1546 
1547 			qla2xxx_check_risc_status(ha);
1548 
1549 			ha->isp_ops->fw_dump(ha, 1);
1550 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1551 			break;
1552 		} else if ((stat & HSRX_RISC_INT) == 0)
1553 			break;
1554 
1555 		switch (stat & 0xff) {
1556 		case 0x1:
1557 		case 0x2:
1558 		case 0x10:
1559 		case 0x11:
1560 			qla24xx_mbx_completion(ha, MSW(stat));
1561 			status |= MBX_INTERRUPT;
1562 
1563 			break;
1564 		case 0x12:
1565 			mb[0] = MSW(stat);
1566 			mb[1] = RD_REG_WORD(&reg->mailbox1);
1567 			mb[2] = RD_REG_WORD(&reg->mailbox2);
1568 			mb[3] = RD_REG_WORD(&reg->mailbox3);
1569 			qla2x00_async_event(ha, mb);
1570 			break;
1571 		case 0x13:
1572 			qla24xx_process_response_queue(ha);
1573 			break;
1574 		default:
1575 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1576 			    "(%d).\n",
1577 			    ha->host_no, stat & 0xff));
1578 			break;
1579 		}
1580 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1581 		RD_REG_DWORD_RELAXED(&reg->hccr);
1582 	}
1583 	spin_unlock(&ha->hardware_lock);
1584 
1585 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1586 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1587 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1588 		complete(&ha->mbx_intr_comp);
1589 	}
1590 
1591 	return IRQ_HANDLED;
1592 }
1593 
1594 static irqreturn_t
1595 qla24xx_msix_rsp_q(int irq, void *dev_id)
1596 {
1597 	scsi_qla_host_t	*ha;
1598 	struct device_reg_24xx __iomem *reg;
1599 
1600 	ha = dev_id;
1601 	reg = &ha->iobase->isp24;
1602 
1603 	spin_lock_irq(&ha->hardware_lock);
1604 
1605 	qla24xx_process_response_queue(ha);
1606 	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1607 
1608 	spin_unlock_irq(&ha->hardware_lock);
1609 
1610 	return IRQ_HANDLED;
1611 }
1612 
1613 static irqreturn_t
1614 qla24xx_msix_default(int irq, void *dev_id)
1615 {
1616 	scsi_qla_host_t	*ha;
1617 	struct device_reg_24xx __iomem *reg;
1618 	int		status;
1619 	uint32_t	stat;
1620 	uint32_t	hccr;
1621 	uint16_t	mb[4];
1622 
1623 	ha = dev_id;
1624 	reg = &ha->iobase->isp24;
1625 	status = 0;
1626 
1627 	spin_lock_irq(&ha->hardware_lock);
1628 	do {
1629 		stat = RD_REG_DWORD(&reg->host_status);
1630 		if (stat & HSRX_RISC_PAUSED) {
1631 			if (pci_channel_offline(ha->pdev))
1632 				break;
1633 
1634 			if (ha->hw_event_pause_errors == 0)
1635 				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
1636 				    0, MSW(stat), LSW(stat));
1637 			else if (ha->hw_event_pause_errors < 0xffffffff)
1638 				ha->hw_event_pause_errors++;
1639 
1640 			hccr = RD_REG_DWORD(&reg->hccr);
1641 
1642 			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1643 			    "Dumping firmware!\n", hccr);
1644 
1645 			qla2xxx_check_risc_status(ha);
1646 
1647 			ha->isp_ops->fw_dump(ha, 1);
1648 			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1649 			break;
1650 		} else if ((stat & HSRX_RISC_INT) == 0)
1651 			break;
1652 
1653 		switch (stat & 0xff) {
1654 		case 0x1:
1655 		case 0x2:
1656 		case 0x10:
1657 		case 0x11:
1658 			qla24xx_mbx_completion(ha, MSW(stat));
1659 			status |= MBX_INTERRUPT;
1660 
1661 			break;
1662 		case 0x12:
1663 			mb[0] = MSW(stat);
1664 			mb[1] = RD_REG_WORD(&reg->mailbox1);
1665 			mb[2] = RD_REG_WORD(&reg->mailbox2);
1666 			mb[3] = RD_REG_WORD(&reg->mailbox3);
1667 			qla2x00_async_event(ha, mb);
1668 			break;
1669 		case 0x13:
1670 			qla24xx_process_response_queue(ha);
1671 			break;
1672 		default:
1673 			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1674 			    "(%d).\n",
1675 			    ha->host_no, stat & 0xff));
1676 			break;
1677 		}
1678 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1679 	} while (0);
1680 	spin_unlock_irq(&ha->hardware_lock);
1681 
1682 	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1683 	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1684 		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1685 		complete(&ha->mbx_intr_comp);
1686 	}
1687 
1688 	return IRQ_HANDLED;
1689 }
1690 
1691 /* Interrupt handling helpers. */
1692 
1693 struct qla_init_msix_entry {
1694 	uint16_t entry;
1695 	uint16_t index;
1696 	const char *name;
1697 	irq_handler_t handler;
1698 };
1699 
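/*
 * Two MSI-X vectors are used when available: the "default" vector takes
 * mailbox completions, asynchronous events and general status, while the
 * "rsp_q" vector only drains the response queue.
 */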
1700 static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
1701 	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
1702 		"qla2xxx (default)", qla24xx_msix_default },
1703 
1704 	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
1705 		"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
1706 };
1707 
1708 static void
1709 qla24xx_disable_msix(scsi_qla_host_t *ha)
1710 {
1711 	int i;
1712 	struct qla_msix_entry *qentry;
1713 
1714 	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1715 		qentry = &ha->msix_entries[imsix_entries[i].index];
1716 		if (qentry->have_irq)
1717 			free_irq(qentry->msix_vector, ha);
1718 	}
1719 	pci_disable_msix(ha->pdev);
1720 }
1721 
1722 static int
1723 qla24xx_enable_msix(scsi_qla_host_t *ha)
1724 {
1725 	int i, ret;
1726 	struct msix_entry entries[QLA_MSIX_ENTRIES];
1727 	struct qla_msix_entry *qentry;
1728 
1729 	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
1730 		entries[i].entry = imsix_entries[i].entry;
1731 
1732 	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
1733 	if (ret) {
1734 		qla_printk(KERN_WARNING, ha,
1735 		    "MSI-X: Failed to enable support -- %d/%d\n",
1736 		    QLA_MSIX_ENTRIES, ret);
1737 		goto msix_out;
1738 	}
1739 	ha->flags.msix_enabled = 1;
1740 
1741 	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1742 		qentry = &ha->msix_entries[imsix_entries[i].index];
1743 		qentry->msix_vector = entries[i].vector;
1744 		qentry->msix_entry = entries[i].entry;
1745 		qentry->have_irq = 0;
1746 		ret = request_irq(qentry->msix_vector,
1747 		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
1748 		if (ret) {
1749 			qla_printk(KERN_WARNING, ha,
1750 			    "MSI-X: Unable to register handler -- %x/%d.\n",
1751 			    imsix_entries[i].index, ret);
1752 			qla24xx_disable_msix(ha);
1753 			goto msix_out;
1754 		}
1755 		qentry->have_irq = 1;
1756 	}
1757 
1758 msix_out:
1759 	return ret;
1760 }
1761 
1762 int
1763 qla2x00_request_irqs(scsi_qla_host_t *ha)
1764 {
1765 	int ret;
1766 	device_reg_t __iomem *reg = ha->iobase;
1767 
1768 	/* If possible, enable MSI-X. */
1769 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1770 		goto skip_msix;
1771 
1772 	if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
1773 	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
1774 		DEBUG2(qla_printk(KERN_WARNING, ha,
1775 		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
1776 		    ha->chip_revision, ha->fw_attributes));
1777 
1778 		goto skip_msix;
1779 	}
1780 
1781 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
1782 	    (ha->pdev->subsystem_device == 0x7040 ||
1783 		ha->pdev->subsystem_device == 0x7041 ||
1784 		ha->pdev->subsystem_device == 0x1705)) {
1785 		DEBUG2(qla_printk(KERN_WARNING, ha,
1786 		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
1787 		    ha->pdev->subsystem_vendor,
1788 		    ha->pdev->subsystem_device));
1789 
1790 		goto skip_msi;
1791 	}
1792 
1793 	ret = qla24xx_enable_msix(ha);
1794 	if (!ret) {
1795 		DEBUG2(qla_printk(KERN_INFO, ha,
1796 		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
1797 		    ha->fw_attributes));
1798 		goto clear_risc_ints;
1799 	}
1800 	qla_printk(KERN_WARNING, ha,
1801 	    "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
1802 skip_msix:
1803 
1804 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
1805 		goto skip_msi;
1806 
1807 	ret = pci_enable_msi(ha->pdev);
1808 	if (!ret) {
1809 		DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
1810 		ha->flags.msi_enabled = 1;
1811 	}
1812 skip_msi:
1813 
1814 	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
1815 	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
1816 	if (ret) {
1817 		qla_printk(KERN_WARNING, ha,
1818 		    "Failed to reserve interrupt %d; already in use.\n",
1819 		    ha->pdev->irq);
1820 		goto fail;
1821 	}
1822 	ha->flags.inta_enabled = 1;
1823 	ha->host->irq = ha->pdev->irq;
1824 clear_risc_ints:
1825 
1826 	ha->isp_ops->disable_intrs(ha);
1827 	spin_lock_irq(&ha->hardware_lock);
1828 	if (IS_FWI2_CAPABLE(ha)) {
1829 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
1830 		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
1831 	} else {
1832 		WRT_REG_WORD(&reg->isp.semaphore, 0);
1833 		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
1834 		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
1835 	}
1836 	spin_unlock_irq(&ha->hardware_lock);
1837 	ha->isp_ops->enable_intrs(ha);
1838 
1839 fail:
1840 	return ret;
1841 }
1842 
1843 void
1844 qla2x00_free_irqs(scsi_qla_host_t *ha)
1845 {
1846 
1847 	if (ha->flags.msix_enabled)
1848 		qla24xx_disable_msix(ha);
1849 	else if (ha->flags.inta_enabled) {
1850 		free_irq(ha->host->irq, ha);
1851 		pci_disable_msi(ha->pdev);
1852 	}
1853 }
1854