xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_isr.c (revision ee8a99bd)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <scsi/scsi_tcq.h>
13 #include <scsi/scsi_bsg_fc.h>
14 #include <scsi/scsi_eh.h>
15 
16 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
17 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
18 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
19 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
20 	sts_entry_t *);
21 
22 /**
23  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
24  * @irq: interrupt number
25  * @dev_id: SCSI driver HA context
26  *
27  * Called by system whenever the host adapter generates an interrupt.
28  *
29  * Returns handled flag.
30  */
31 irqreturn_t
32 qla2100_intr_handler(int irq, void *dev_id)
33 {
34 	scsi_qla_host_t	*vha;
35 	struct qla_hw_data *ha;
36 	struct device_reg_2xxx __iomem *reg;
37 	int		status;
38 	unsigned long	iter;
39 	uint16_t	hccr;
40 	uint16_t	mb[4];
41 	struct rsp_que *rsp;
42 	unsigned long	flags;
43 
44 	rsp = (struct rsp_que *) dev_id;
45 	if (!rsp) {
46 		ql_log(ql_log_info, NULL, 0x505d,
47 		    "%s: NULL response queue pointer.\n", __func__);
48 		return (IRQ_NONE);
49 	}
50 
51 	ha = rsp->hw;
52 	reg = &ha->iobase->isp;
53 	status = 0;
54 
55 	spin_lock_irqsave(&ha->hardware_lock, flags);
56 	vha = pci_get_drvdata(ha->pdev);
57 	for (iter = 50; iter--; ) {
58 		hccr = RD_REG_WORD(&reg->hccr);
59 		if (hccr & HCCR_RISC_PAUSE) {
60 			if (pci_channel_offline(ha->pdev))
61 				break;
62 
63 			/*
64 			 * Issue a "HARD" reset in order for the RISC interrupt
65 			 * bit to be cleared.  Schedule a big hammer to get
66 			 * out of the RISC PAUSED state.
67 			 */
68 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
69 			RD_REG_WORD(&reg->hccr);
70 
71 			ha->isp_ops->fw_dump(vha, 1);
72 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
73 			break;
74 		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
75 			break;
76 
77 		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
78 			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
79 			RD_REG_WORD(&reg->hccr);
80 
81 			/* Get mailbox data. */
82 			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
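			/*
			 * Mailbox 0 classifies the interrupt: 0x4000-0x7fff is
			 * a mailbox command completion status, 0x8000-0xbfff
			 * is an asynchronous event code.
			 */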
83 			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
84 				qla2x00_mbx_completion(vha, mb[0]);
85 				status |= MBX_INTERRUPT;
86 			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
87 				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
88 				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
89 				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
90 				qla2x00_async_event(vha, rsp, mb);
91 			} else {
92 				/*EMPTY*/
93 				ql_dbg(ql_dbg_async, vha, 0x5025,
94 				    "Unrecognized interrupt type (%d).\n",
95 				    mb[0]);
96 			}
97 			/* Release mailbox registers. */
98 			WRT_REG_WORD(&reg->semaphore, 0);
99 			RD_REG_WORD(&reg->semaphore);
100 		} else {
101 			qla2x00_process_response_queue(rsp);
102 
103 			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
104 			RD_REG_WORD(&reg->hccr);
105 		}
106 	}
107 	qla2x00_handle_mbx_completion(ha, status);
108 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
109 
110 	return (IRQ_HANDLED);
111 }
112 
113 /**
114  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
115  * @irq: interrupt number
116  * @dev_id: SCSI driver HA context
117  *
118  * Called by system whenever the host adapter generates an interrupt.
119  *
120  * Returns handled flag.
121  */
122 irqreturn_t
123 qla2300_intr_handler(int irq, void *dev_id)
124 {
125 	scsi_qla_host_t	*vha;
126 	struct device_reg_2xxx __iomem *reg;
127 	int		status;
128 	unsigned long	iter;
129 	uint32_t	stat;
130 	uint16_t	hccr;
131 	uint16_t	mb[4];
132 	struct rsp_que *rsp;
133 	struct qla_hw_data *ha;
134 	unsigned long	flags;
135 
136 	rsp = (struct rsp_que *) dev_id;
137 	if (!rsp) {
138 		ql_log(ql_log_info, NULL, 0x5058,
139 		    "%s: NULL response queue pointer.\n", __func__);
140 		return (IRQ_NONE);
141 	}
142 
143 	ha = rsp->hw;
144 	reg = &ha->iobase->isp;
145 	status = 0;
146 
147 	spin_lock_irqsave(&ha->hardware_lock, flags);
148 	vha = pci_get_drvdata(ha->pdev);
149 	for (iter = 50; iter--; ) {
150 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
151 		if (stat & HSR_RISC_PAUSED) {
152 			if (unlikely(pci_channel_offline(ha->pdev)))
153 				break;
154 
155 			hccr = RD_REG_WORD(&reg->hccr);
156 			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
157 				ql_log(ql_log_warn, vha, 0x5026,
158 				    "Parity error -- HCCR=%x, Dumping "
159 				    "firmware.\n", hccr);
160 			else
161 				ql_log(ql_log_warn, vha, 0x5027,
162 				    "RISC paused -- HCCR=%x, Dumping "
163 				    "firmware.\n", hccr);
164 
165 			/*
166 			 * Issue a "HARD" reset in order for the RISC
167 			 * interrupt bit to be cleared.  Schedule a big
168 			 * hammer to get out of the RISC PAUSED state.
169 			 */
170 			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
171 			RD_REG_WORD(&reg->hccr);
172 
173 			ha->isp_ops->fw_dump(vha, 1);
174 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
175 			break;
176 		} else if ((stat & HSR_RISC_INT) == 0)
177 			break;
178 
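		/*
		 * The low byte of host_status encodes the interrupt reason:
		 * 0x1/0x2/0x10/0x11 = mailbox completion, 0x12 = async event,
		 * 0x13 = response queue update, 0x15/0x16 = fast-post
		 * completions carried in the upper word of the status.
		 */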
179 		switch (stat & 0xff) {
180 		case 0x1:
181 		case 0x2:
182 		case 0x10:
183 		case 0x11:
184 			qla2x00_mbx_completion(vha, MSW(stat));
185 			status |= MBX_INTERRUPT;
186 
187 			/* Release mailbox registers. */
188 			WRT_REG_WORD(&reg->semaphore, 0);
189 			break;
190 		case 0x12:
191 			mb[0] = MSW(stat);
192 			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
193 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
194 			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
195 			qla2x00_async_event(vha, rsp, mb);
196 			break;
197 		case 0x13:
198 			qla2x00_process_response_queue(rsp);
199 			break;
200 		case 0x15:
201 			mb[0] = MBA_CMPLT_1_16BIT;
202 			mb[1] = MSW(stat);
203 			qla2x00_async_event(vha, rsp, mb);
204 			break;
205 		case 0x16:
206 			mb[0] = MBA_SCSI_COMPLETION;
207 			mb[1] = MSW(stat);
208 			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
209 			qla2x00_async_event(vha, rsp, mb);
210 			break;
211 		default:
212 			ql_dbg(ql_dbg_async, vha, 0x5028,
213 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
214 			break;
215 		}
216 		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
217 		RD_REG_WORD_RELAXED(&reg->hccr);
218 	}
219 	qla2x00_handle_mbx_completion(ha, status);
220 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
221 
222 	return (IRQ_HANDLED);
223 }
224 
225 /**
226  * qla2x00_mbx_completion() - Process mailbox command completions.
227  * @vha: SCSI driver HA context
228  * @mb0: Mailbox0 register
229  */
230 static void
231 qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
232 {
233 	uint16_t	cnt;
234 	uint32_t	mboxes;
235 	uint16_t __iomem *wptr;
236 	struct qla_hw_data *ha = vha->hw;
237 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
238 
239 	/* Read all mbox registers? */
240 	mboxes = (1 << ha->mbx_count) - 1;
241 	if (!ha->mcp)
242 		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
243 	else
244 		mboxes = ha->mcp->in_mb;
245 
246 	/* Load return mailbox registers. */
247 	ha->flags.mbox_int = 1;
248 	ha->mailbox_out[0] = mb0;
249 	mboxes >>= 1;
250 	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
251 
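	/*
	 * Mailboxes 4 and 5 are read with qla2x00_debounce_register(),
	 * which re-reads the register until two consecutive reads match.
	 */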
252 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
253 		if (IS_QLA2200(ha) && cnt == 8)
254 			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
255 		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
256 			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
257 		else if (mboxes & BIT_0)
258 			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
259 
260 		wptr++;
261 		mboxes >>= 1;
262 	}
263 }
264 
265 static void
266 qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
267 {
268 	static char *event[] =
269 		{ "Complete", "Request Notification", "Time Extension" };
270 	int rval;
271 	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
272 	uint16_t __iomem *wptr;
273 	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];
274 
275 	/* Seed data -- mailbox1 -> mailbox7. */
276 	wptr = (uint16_t __iomem *)&reg24->mailbox1;
277 	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
278 		mb[cnt] = RD_REG_WORD(wptr);
279 
280 	ql_dbg(ql_dbg_async, vha, 0x5021,
281 	    "Inter-Driver Communication %s -- "
282 	    "%04x %04x %04x %04x %04x %04x %04x.\n",
283 	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
284 	    mb[4], mb[5], mb[6]);
285 	if (aen == MBA_IDC_COMPLETE && mb[1] >> 15) {
286 		vha->hw->flags.idc_compl_status = 1;
287 		if (vha->hw->notify_dcbx_comp)
288 			complete(&vha->hw->dcbx_comp);
289 	}
290 
291 	/* Acknowledgement needed? [Notify && non-zero timeout]. */
292 	timeout = (descr >> 8) & 0xf;
293 	if (aen != MBA_IDC_NOTIFY || !timeout)
294 		return;
295 
296 	ql_dbg(ql_dbg_async, vha, 0x5022,
297 	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
298 	    vha->host_no, event[aen & 0xff], timeout);
299 
300 	rval = qla2x00_post_idc_ack_work(vha, mb);
301 	if (rval != QLA_SUCCESS)
302 		ql_log(ql_log_warn, vha, 0x5023,
303 		    "IDC failed to post ACK.\n");
304 }
305 
306 #define LS_UNKNOWN	2
307 const char *
308 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
309 {
310 	static const char * const link_speeds[] = {
311 		"1", "2", "?", "4", "8", "16", "10"
312 	};
313 
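	/* 0x13 is the firmware's PORT_SPEED_10GB encoding (CNA parts). */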
314 	if (IS_QLA2100(ha) || IS_QLA2200(ha))
315 		return link_speeds[0];
316 	else if (speed == 0x13)
317 		return link_speeds[6];
318 	else if (speed < 6)
319 		return link_speeds[speed];
320 	else
321 		return link_speeds[LS_UNKNOWN];
322 }
323 
324 static void
325 qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
326 {
327 	struct qla_hw_data *ha = vha->hw;
328 
329 	/*
330 	 * 8200 AEN Interpretation:
331 	 * mb[0] = AEN code
332 	 * mb[1] = AEN Reason code
333 	 * mb[2] = LSW of Peg-Halt Status-1 Register
334 	 * mb[6] = MSW of Peg-Halt Status-1 Register
335 	 * mb[3] = LSW of Peg-Halt Status-2 register
336 	 * mb[7] = MSW of Peg-Halt Status-2 register
337 	 * mb[4] = IDC Device-State Register value
338 	 * mb[5] = IDC Driver-Presence Register value
339 	 */
340 	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
341 	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
342 	    mb[0], mb[1], mb[2], mb[6]);
343 	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
344 	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
345 	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);
346 
347 	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
348 				IDC_HEARTBEAT_FAILURE)) {
349 		ha->flags.nic_core_hung = 1;
350 		ql_log(ql_log_warn, vha, 0x5060,
351 		    "83XX: F/W Error Reported: Check if reset required.\n");
352 
353 		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
354 			uint32_t protocol_engine_id, fw_err_code, err_level;
355 
356 			/*
357 			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
358 			 *  - PEG-Halt Status-1 Register:
359 			 *	(LSW = mb[2], MSW = mb[6])
360 			 *	Bits 0-7   = protocol-engine ID
361 			 *	Bits 8-28  = f/w error code
362 			 *	Bits 29-31 = Error-level
363 			 *	    Error-level 0x1 = Non-Fatal error
364 			 *	    Error-level 0x2 = Recoverable Fatal error
365 			 *	    Error-level 0x4 = UnRecoverable Fatal error
366 			 *  - PEG-Halt Status-2 Register:
367 			 *	(LSW = mb[3], MSW = mb[7])
368 			 */
369 			protocol_engine_id = (mb[2] & 0xff);
370 			fw_err_code = (((mb[2] & 0xff00) >> 8) |
371 			    ((mb[6] & 0x1fff) << 8));
372 			err_level = ((mb[6] & 0xe000) >> 13);
373 			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
374 			    "Register: protocol_engine_id=0x%x "
375 			    "fw_err_code=0x%x err_level=0x%x.\n",
376 			    protocol_engine_id, fw_err_code, err_level);
377 			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
378 			    "Register: 0x%x%x.\n", mb[7], mb[3]);
379 			if (err_level == ERR_LEVEL_NON_FATAL) {
380 				ql_log(ql_log_warn, vha, 0x5063,
381 				    "Not a fatal error, f/w has recovered "
382 				    "itself.\n");
383 			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
384 				ql_log(ql_log_fatal, vha, 0x5064,
385 				    "Recoverable Fatal error: Chip reset "
386 				    "required.\n");
387 				qla83xx_schedule_work(vha,
388 				    QLA83XX_NIC_CORE_RESET);
389 			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
390 				ql_log(ql_log_fatal, vha, 0x5065,
391 				    "Unrecoverable Fatal error: Set FAILED "
392 				    "state, reboot required.\n");
393 				qla83xx_schedule_work(vha,
394 				    QLA83XX_NIC_CORE_UNRECOVERABLE);
395 			}
396 		}
397 
398 		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
399 			uint16_t peg_fw_state, nw_interface_link_up;
400 			uint16_t nw_interface_signal_detect, sfp_status;
401 			uint16_t htbt_counter, htbt_monitor_enable;
402 			uint16_t sfp_additional_info, sfp_multirate;
403 			uint16_t sfp_tx_fault, link_speed, dcbx_status;
404 
405 			/*
406 			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
407 			 *  - PEG-to-FC Status Register:
408 			 *	(LSW = mb[2], MSW = mb[6])
409 			 *	Bits 0-7   = Peg-Firmware state
410 			 *	Bit 8      = N/W Interface Link-up
411 			 *	Bit 9      = N/W Interface signal detected
412 			 *	Bits 10-11 = SFP Status
413 			 *	  SFP Status 0x0 = SFP+ transceiver not expected
414 			 *	  SFP Status 0x1 = SFP+ transceiver not present
415 			 *	  SFP Status 0x2 = SFP+ transceiver invalid
416 			 *	  SFP Status 0x3 = SFP+ transceiver present and
417 			 *	  valid
418 			 *	Bits 12-14 = Heartbeat Counter
419 			 *	Bit 15     = Heartbeat Monitor Enable
420 			 *	Bits 16-17 = SFP Additional Info
421 			 *	  SFP info 0x0 = Unrecognized transceiver for
422 			 *	  Ethernet
423 			 *	  SFP info 0x1 = SFP+ brand validation failed
424 			 *	  SFP info 0x2 = SFP+ speed validation failed
425 			 *	  SFP info 0x3 = SFP+ access error
426 			 *	Bit 18     = SFP Multirate
427 			 *	Bit 19     = SFP Tx Fault
428 			 *	Bits 20-22 = Link Speed
429 			 *	Bits 23-27 = Reserved
430 			 *	Bits 28-30 = DCBX Status
431 			 *	  DCBX Status 0x0 = DCBX Disabled
432 			 *	  DCBX Status 0x1 = DCBX Enabled
433 			 *	  DCBX Status 0x2 = DCBX Exchange error
434 			 *	Bit 31     = Reserved
435 			 */
436 			peg_fw_state = (mb[2] & 0x00ff);
437 			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
438 			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
439 			sfp_status = ((mb[2] & 0x0c00) >> 10);
440 			htbt_counter = ((mb[2] & 0x7000) >> 12);
441 			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
442 			sfp_additional_info = (mb[6] & 0x0003);
443 			sfp_multirate = ((mb[6] & 0x0004) >> 2);
444 			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
445 			link_speed = ((mb[6] & 0x0070) >> 4);
446 			dcbx_status = ((mb[6] & 0x7000) >> 12);
447 
448 			ql_log(ql_log_warn, vha, 0x5066,
449 			    "Peg-to-FC Status Register:\n"
450 			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
451 			    "nw_interface_signal_detect=0x%x, "
452 			    "sfp_status=0x%x.\n", peg_fw_state,
453 			    nw_interface_link_up, nw_interface_signal_detect,
454 			    sfp_status);
455 			ql_log(ql_log_warn, vha, 0x5067,
456 			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
457 			    "sfp_additional_info=0x%x, sfp_multirate=0x%x.\n",
458 			    htbt_counter, htbt_monitor_enable,
459 			    sfp_additional_info, sfp_multirate);
460 			ql_log(ql_log_warn, vha, 0x5068,
461 			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
462 			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
463 			    dcbx_status);
464 
465 			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
466 		}
467 
468 		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
469 			ql_log(ql_log_warn, vha, 0x5069,
470 			    "Heartbeat Failure encountered, chip reset "
471 			    "required.\n");
472 
473 			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
474 		}
475 	}
476 
477 	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
478 		ql_log(ql_log_info, vha, 0x506a,
479 		    "IDC Device-State changed = 0x%x.\n", mb[4]);
480 		if (ha->flags.nic_core_reset_owner)
481 			return;
482 		qla83xx_schedule_work(vha, MBA_IDC_AEN);
483 	}
484 }
485 
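/**
 * qla2x00_is_a_vp_did() - Check if an RSCN D_ID belongs to one of our vports.
 * @vha: SCSI driver HA context
 * @rscn_entry: 24-bit port ID from the RSCN payload
 *
 * Returns 1 if the D_ID matches a virtual port on this physical adapter.
 */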
486 int
487 qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
488 {
489 	struct qla_hw_data *ha = vha->hw;
490 	scsi_qla_host_t *vp;
491 	uint32_t vp_did;
492 	unsigned long flags;
493 	int ret = 0;
494 
495 	if (!ha->num_vhosts)
496 		return ret;
497 
498 	spin_lock_irqsave(&ha->vport_slock, flags);
499 	list_for_each_entry(vp, &ha->vp_list, list) {
500 		vp_did = vp->d_id.b24;
501 		if (vp_did == rscn_entry) {
502 			ret = 1;
503 			break;
504 		}
505 	}
506 	spin_unlock_irqrestore(&ha->vport_slock, flags);
507 
508 	return ret;
509 }
510 
511 /**
512  * qla2x00_async_event() - Process asynchronous events.
513  * @vha: SCSI driver HA context
514  * @mb: Mailbox registers (0 - 3)
515  */
516 void
517 qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
518 {
519 	uint16_t	handle_cnt;
520 	uint16_t	cnt, mbx;
521 	uint32_t	handles[5];
522 	struct qla_hw_data *ha = vha->hw;
523 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
524 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
525 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
526 	uint32_t	rscn_entry, host_pid;
527 	unsigned long	flags;
528 
529 	/* Setup to process RIO completion. */
530 	handle_cnt = 0;
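	/*
	 * With Reduced Interrupt Operation (RIO) a single interrupt can
	 * report up to five 16-bit (or two 32-bit) completion handles packed
	 * into the mailbox registers; gather them before the event switch.
	 */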
531 	if (IS_CNA_CAPABLE(ha))
532 		goto skip_rio;
533 	switch (mb[0]) {
534 	case MBA_SCSI_COMPLETION:
535 		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
536 		handle_cnt = 1;
537 		break;
538 	case MBA_CMPLT_1_16BIT:
539 		handles[0] = mb[1];
540 		handle_cnt = 1;
541 		mb[0] = MBA_SCSI_COMPLETION;
542 		break;
543 	case MBA_CMPLT_2_16BIT:
544 		handles[0] = mb[1];
545 		handles[1] = mb[2];
546 		handle_cnt = 2;
547 		mb[0] = MBA_SCSI_COMPLETION;
548 		break;
549 	case MBA_CMPLT_3_16BIT:
550 		handles[0] = mb[1];
551 		handles[1] = mb[2];
552 		handles[2] = mb[3];
553 		handle_cnt = 3;
554 		mb[0] = MBA_SCSI_COMPLETION;
555 		break;
556 	case MBA_CMPLT_4_16BIT:
557 		handles[0] = mb[1];
558 		handles[1] = mb[2];
559 		handles[2] = mb[3];
560 		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
561 		handle_cnt = 4;
562 		mb[0] = MBA_SCSI_COMPLETION;
563 		break;
564 	case MBA_CMPLT_5_16BIT:
565 		handles[0] = mb[1];
566 		handles[1] = mb[2];
567 		handles[2] = mb[3];
568 		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
569 		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
570 		handle_cnt = 5;
571 		mb[0] = MBA_SCSI_COMPLETION;
572 		break;
573 	case MBA_CMPLT_2_32BIT:
574 		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
575 		handles[1] = le32_to_cpu(
576 		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
577 		    RD_MAILBOX_REG(ha, reg, 6));
578 		handle_cnt = 2;
579 		mb[0] = MBA_SCSI_COMPLETION;
580 		break;
581 	default:
582 		break;
583 	}
584 skip_rio:
585 	switch (mb[0]) {
586 	case MBA_SCSI_COMPLETION:	/* Fast Post */
587 		if (!vha->flags.online)
588 			break;
589 
590 		for (cnt = 0; cnt < handle_cnt; cnt++)
591 			qla2x00_process_completed_request(vha, rsp->req,
592 				handles[cnt]);
593 		break;
594 
595 	case MBA_RESET:			/* Reset */
596 		ql_dbg(ql_dbg_async, vha, 0x5002,
597 		    "Asynchronous RESET.\n");
598 
599 		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
600 		break;
601 
602 	case MBA_SYSTEM_ERR:		/* System Error */
603 		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
604 			RD_REG_WORD(&reg24->mailbox7) : 0;
605 		ql_log(ql_log_warn, vha, 0x5003,
606 		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
607 		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
608 
609 		ha->isp_ops->fw_dump(vha, 1);
610 
611 		if (IS_FWI2_CAPABLE(ha)) {
612 			if (mb[1] == 0 && mb[2] == 0) {
613 				ql_log(ql_log_fatal, vha, 0x5004,
614 				    "Unrecoverable Hardware Error: adapter "
615 				    "marked OFFLINE!\n");
616 				vha->flags.online = 0;
617 				vha->device_flags |= DFLG_DEV_FAILED;
618 			} else {
619 				/* Check to see if MPI timeout occurred */
620 				if ((mbx & MBX_3) && (ha->flags.port0))
621 					set_bit(MPI_RESET_NEEDED,
622 					    &vha->dpc_flags);
623 
624 				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
625 			}
626 		} else if (mb[1] == 0) {
627 			ql_log(ql_log_fatal, vha, 0x5005,
628 			    "Unrecoverable Hardware Error: adapter marked "
629 			    "OFFLINE!\n");
630 			vha->flags.online = 0;
631 			vha->device_flags |= DFLG_DEV_FAILED;
632 		} else
633 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
634 		break;
635 
636 	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
637 		ql_log(ql_log_warn, vha, 0x5006,
638 		    "ISP Request Transfer Error (%x).\n",  mb[1]);
639 
640 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
641 		break;
642 
643 	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
644 		ql_log(ql_log_warn, vha, 0x5007,
645 		    "ISP Response Transfer Error.\n");
646 
647 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
648 		break;
649 
650 	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
651 		ql_dbg(ql_dbg_async, vha, 0x5008,
652 		    "Asynchronous WAKEUP_THRES.\n");
653 
654 		break;
655 	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
656 		ql_dbg(ql_dbg_async, vha, 0x5009,
657 		    "LIP occurred (%x).\n", mb[1]);
658 
659 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
660 			atomic_set(&vha->loop_state, LOOP_DOWN);
661 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
662 			qla2x00_mark_all_devices_lost(vha, 1);
663 		}
664 
665 		if (vha->vp_idx) {
666 			atomic_set(&vha->vp_state, VP_FAILED);
667 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
668 		}
669 
670 		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
671 		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
672 
673 		vha->flags.management_server_logged_in = 0;
674 		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
675 		break;
676 
677 	case MBA_LOOP_UP:		/* Loop Up Event */
678 		if (IS_QLA2100(ha) || IS_QLA2200(ha))
679 			ha->link_data_rate = PORT_SPEED_1GB;
680 		else
681 			ha->link_data_rate = mb[1];
682 
683 		ql_dbg(ql_dbg_async, vha, 0x500a,
684 		    "LOOP UP detected (%s Gbps).\n",
685 		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
686 
687 		vha->flags.management_server_logged_in = 0;
688 		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
689 		break;
690 
691 	case MBA_LOOP_DOWN:		/* Loop Down Event */
692 		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
693 			? RD_REG_WORD(&reg24->mailbox4) : 0;
694 		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
695 		ql_dbg(ql_dbg_async, vha, 0x500b,
696 		    "LOOP DOWN detected (%x %x %x %x).\n",
697 		    mb[1], mb[2], mb[3], mbx);
698 
699 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
700 			atomic_set(&vha->loop_state, LOOP_DOWN);
701 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
702 			vha->device_flags |= DFLG_NO_CABLE;
703 			qla2x00_mark_all_devices_lost(vha, 1);
704 		}
705 
706 		if (vha->vp_idx) {
707 			atomic_set(&vha->vp_state, VP_FAILED);
708 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
709 		}
710 
711 		vha->flags.management_server_logged_in = 0;
712 		ha->link_data_rate = PORT_SPEED_UNKNOWN;
713 		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
714 		break;
715 
716 	case MBA_LIP_RESET:		/* LIP reset occurred */
717 		ql_dbg(ql_dbg_async, vha, 0x500c,
718 		    "LIP reset occurred (%x).\n", mb[1]);
719 
720 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
721 			atomic_set(&vha->loop_state, LOOP_DOWN);
722 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
723 			qla2x00_mark_all_devices_lost(vha, 1);
724 		}
725 
726 		if (vha->vp_idx) {
727 			atomic_set(&vha->vp_state, VP_FAILED);
728 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
729 		}
730 
731 		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
732 
733 		ha->operating_mode = LOOP;
734 		vha->flags.management_server_logged_in = 0;
735 		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
736 		break;
737 
738 	/* case MBA_DCBX_COMPLETE: */
739 	case MBA_POINT_TO_POINT:	/* Point-to-Point */
740 		if (IS_QLA2100(ha))
741 			break;
742 
743 		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
744 			ql_dbg(ql_dbg_async, vha, 0x500d,
745 			    "DCBX Completed -- %04x %04x %04x.\n",
746 			    mb[1], mb[2], mb[3]);
747 			if (ha->notify_dcbx_comp)
748 				complete(&ha->dcbx_comp);
749 
750 		} else
751 			ql_dbg(ql_dbg_async, vha, 0x500e,
752 			    "Asynchronous P2P MODE received.\n");
753 
754 		/*
755 		 * Until there's a transition from loop down to loop up, treat
756 		 * this as loop down only.
757 		 */
758 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
759 			atomic_set(&vha->loop_state, LOOP_DOWN);
760 			if (!atomic_read(&vha->loop_down_timer))
761 				atomic_set(&vha->loop_down_timer,
762 				    LOOP_DOWN_TIME);
763 			qla2x00_mark_all_devices_lost(vha, 1);
764 		}
765 
766 		if (vha->vp_idx) {
767 			atomic_set(&vha->vp_state, VP_FAILED);
768 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
769 		}
770 
771 		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
772 			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
773 
774 		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
775 		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
776 
777 		ha->flags.gpsc_supported = 1;
778 		vha->flags.management_server_logged_in = 0;
779 		break;
780 
781 	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
782 		if (IS_QLA2100(ha))
783 			break;
784 
785 		ql_dbg(ql_dbg_async, vha, 0x500f,
786 		    "Configuration change detected: value=%x.\n", mb[1]);
787 
788 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
789 			atomic_set(&vha->loop_state, LOOP_DOWN);
790 			if (!atomic_read(&vha->loop_down_timer))
791 				atomic_set(&vha->loop_down_timer,
792 				    LOOP_DOWN_TIME);
793 			qla2x00_mark_all_devices_lost(vha, 1);
794 		}
795 
796 		if (vha->vp_idx) {
797 			atomic_set(&vha->vp_state, VP_FAILED);
798 			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
799 		}
800 
801 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
802 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
803 		break;
804 
805 	case MBA_PORT_UPDATE:		/* Port database update */
806 		/*
807 		 * Handle only global and vn-port update events
808 		 *
809 		 * Relevant inputs:
810 		 * mb[1] = N_Port handle of changed port
811 		 * OR 0xffff for global event
812 		 * mb[2] = New login state
813 		 * 7 = Port logged out
814 		 * mb[3] = LSB is vp_idx, 0xff = all vps
815 		 *
816 		 * Skip processing if:
817 		 *       Event is global, vp_idx is NOT all vps,
818 		 *           vp_idx does not match
819 		 *       Event is not global, vp_idx does not match
820 		 */
821 		if (IS_QLA2XXX_MIDTYPE(ha) &&
822 		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
823 			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
824 			break;
825 
826 		/* Global event -- port logout or port unavailable. */
827 		if (mb[1] == 0xffff && mb[2] == 0x7) {
828 			ql_dbg(ql_dbg_async, vha, 0x5010,
829 			    "Port unavailable %04x %04x %04x.\n",
830 			    mb[1], mb[2], mb[3]);
831 			ql_log(ql_log_warn, vha, 0x505e,
832 			    "Link is offline.\n");
833 
834 			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
835 				atomic_set(&vha->loop_state, LOOP_DOWN);
836 				atomic_set(&vha->loop_down_timer,
837 				    LOOP_DOWN_TIME);
838 				vha->device_flags |= DFLG_NO_CABLE;
839 				qla2x00_mark_all_devices_lost(vha, 1);
840 			}
841 
842 			if (vha->vp_idx) {
843 				atomic_set(&vha->vp_state, VP_FAILED);
844 				fc_vport_set_state(vha->fc_vport,
845 				    FC_VPORT_FAILED);
846 				qla2x00_mark_all_devices_lost(vha, 1);
847 			}
848 
849 			vha->flags.management_server_logged_in = 0;
850 			ha->link_data_rate = PORT_SPEED_UNKNOWN;
851 			break;
852 		}
853 
854 		/*
855 		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
856 		 * event etc. earlier indicating loop is down) then process
857 		 * it.  Otherwise ignore it and wait for RSCN to come in.
858 		 */
859 		atomic_set(&vha->loop_down_timer, 0);
860 		if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
861 			ql_dbg(ql_dbg_async, vha, 0x5011,
862 			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
863 			    mb[1], mb[2], mb[3]);
864 
865 			qlt_async_event(mb[0], vha, mb);
866 			break;
867 		}
868 
869 		ql_dbg(ql_dbg_async, vha, 0x5012,
870 		    "Port database changed %04x %04x %04x.\n",
871 		    mb[1], mb[2], mb[3]);
872 		ql_log(ql_log_warn, vha, 0x505f,
873 		    "Link is operational (%s Gbps).\n",
874 		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
875 
876 		/*
877 		 * Mark all devices as missing so we will login again.
878 		 */
879 		atomic_set(&vha->loop_state, LOOP_UP);
880 
881 		qla2x00_mark_all_devices_lost(vha, 1);
882 
883 		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
884 			set_bit(SCR_PENDING, &vha->dpc_flags);
885 
886 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
887 		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
888 
889 		qlt_async_event(mb[0], vha, mb);
890 		break;
891 
892 	case MBA_RSCN_UPDATE:		/* State Change Registration */
893 		/* Check if the Vport has issued a SCR */
894 		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
895 			break;
896 		/* Only handle SCNs for our Vport index. */
897 		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
898 			break;
899 
900 		ql_dbg(ql_dbg_async, vha, 0x5013,
901 		    "RSCN database changed -- %04x %04x %04x.\n",
902 		    mb[1], mb[2], mb[3]);
903 
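		/* Assemble the 24-bit FC port IDs (domain:area:AL_PA). */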
904 		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
905 		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
906 				| vha->d_id.b.al_pa;
907 		if (rscn_entry == host_pid) {
908 			ql_dbg(ql_dbg_async, vha, 0x5014,
909 			    "Ignoring RSCN update to local host "
910 			    "port ID (%06x).\n", host_pid);
911 			break;
912 		}
913 
914 		/* Ignore reserved bits from RSCN-payload. */
915 		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
916 
917 		/* Skip RSCNs for virtual ports on the same physical port */
918 		if (qla2x00_is_a_vp_did(vha, rscn_entry))
919 			break;
920 
921 		atomic_set(&vha->loop_down_timer, 0);
922 		vha->flags.management_server_logged_in = 0;
923 
924 		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
925 		set_bit(RSCN_UPDATE, &vha->dpc_flags);
926 		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
927 		break;
928 
929 	/* case MBA_RIO_RESPONSE: */
930 	case MBA_ZIO_RESPONSE:
931 		ql_dbg(ql_dbg_async, vha, 0x5015,
932 		    "[R|Z]IO update completion.\n");
933 
934 		if (IS_FWI2_CAPABLE(ha))
935 			qla24xx_process_response_queue(vha, rsp);
936 		else
937 			qla2x00_process_response_queue(rsp);
938 		break;
939 
940 	case MBA_DISCARD_RND_FRAME:
941 		ql_dbg(ql_dbg_async, vha, 0x5016,
942 		    "Discard RND Frame -- %04x %04x %04x.\n",
943 		    mb[1], mb[2], mb[3]);
944 		break;
945 
946 	case MBA_TRACE_NOTIFICATION:
947 		ql_dbg(ql_dbg_async, vha, 0x5017,
948 		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
949 		break;
950 
951 	case MBA_ISP84XX_ALERT:
952 		ql_dbg(ql_dbg_async, vha, 0x5018,
953 		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
954 		    mb[1], mb[2], mb[3]);
955 
956 		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
957 		switch (mb[1]) {
958 		case A84_PANIC_RECOVERY:
959 			ql_log(ql_log_info, vha, 0x5019,
960 			    "Alert 84XX: panic recovery %04x %04x.\n",
961 			    mb[2], mb[3]);
962 			break;
963 		case A84_OP_LOGIN_COMPLETE:
964 			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
965 			ql_log(ql_log_info, vha, 0x501a,
966 			    "Alert 84XX: firmware version %x.\n",
967 			    ha->cs84xx->op_fw_version);
968 			break;
969 		case A84_DIAG_LOGIN_COMPLETE:
970 			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
971 			ql_log(ql_log_info, vha, 0x501b,
972 			    "Alert 84XX: diagnostic firmware version %x.\n",
973 			    ha->cs84xx->diag_fw_version);
974 			break;
975 		case A84_GOLD_LOGIN_COMPLETE:
976 			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
977 			ha->cs84xx->fw_update = 1;
978 			ql_log(ql_log_info, vha, 0x501c,
979 			    "Alert 84XX: gold firmware version %x.\n",
980 			    ha->cs84xx->gold_fw_version);
981 			break;
982 		default:
983 			ql_log(ql_log_warn, vha, 0x501d,
984 			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
985 			    mb[1], mb[2], mb[3]);
986 		}
987 		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
988 		break;
989 	case MBA_DCBX_START:
990 		ql_dbg(ql_dbg_async, vha, 0x501e,
991 		    "DCBX Started -- %04x %04x %04x.\n",
992 		    mb[1], mb[2], mb[3]);
993 		break;
994 	case MBA_DCBX_PARAM_UPDATE:
995 		ql_dbg(ql_dbg_async, vha, 0x501f,
996 		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
997 		    mb[1], mb[2], mb[3]);
998 		break;
999 	case MBA_FCF_CONF_ERR:
1000 		ql_dbg(ql_dbg_async, vha, 0x5020,
1001 		    "FCF Configuration Error -- %04x %04x %04x.\n",
1002 		    mb[1], mb[2], mb[3]);
1003 		break;
1004 	case MBA_IDC_NOTIFY:
1005 		if (IS_QLA8031(vha->hw)) {
1006 			mb[4] = RD_REG_WORD(&reg24->mailbox4);
1007 			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
1008 			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
1009 			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
1010 				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
1011 				/*
1012 				 * Extend loop down timer since port is active.
1013 				 */
1014 				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
1015 					atomic_set(&vha->loop_down_timer,
1016 					    LOOP_DOWN_TIME);
1017 				qla2xxx_wake_dpc(vha);
1018 			}
1019 		}
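		/* Fallthru */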
1020 	case MBA_IDC_COMPLETE:
1021 		if (ha->notify_lb_portup_comp)
1022 			complete(&ha->lb_portup_comp);
1023 		/* Fallthru */
1024 	case MBA_IDC_TIME_EXT:
1025 		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
1026 			qla81xx_idc_event(vha, mb[0], mb[1]);
1027 		break;
1028 
1029 	case MBA_IDC_AEN:
1030 		mb[4] = RD_REG_WORD(&reg24->mailbox4);
1031 		mb[5] = RD_REG_WORD(&reg24->mailbox5);
1032 		mb[6] = RD_REG_WORD(&reg24->mailbox6);
1033 		mb[7] = RD_REG_WORD(&reg24->mailbox7);
1034 		qla83xx_handle_8200_aen(vha, mb);
1035 		break;
1036 
1037 	default:
1038 		ql_dbg(ql_dbg_async, vha, 0x5057,
1039 		    "Unknown AEN:%04x %04x %04x %04x\n",
1040 		    mb[0], mb[1], mb[2], mb[3]);
1041 	}
1042 
1043 	qlt_async_event(mb[0], vha, mb);
1044 
1045 	if (!vha->vp_idx && ha->num_vhosts)
1046 		qla2x00_alert_all_vps(rsp, mb);
1047 }
1048 
1049 /**
1050  * qla2x00_process_completed_request() - Process a Fast Post response.
1051  * @vha: SCSI driver HA context
1052  * @index: SRB index
1053  */
1054 void
1055 qla2x00_process_completed_request(struct scsi_qla_host *vha,
1056 				  struct req_que *req, uint32_t index)
1057 {
1058 	srb_t *sp;
1059 	struct qla_hw_data *ha = vha->hw;
1060 
1061 	/* Validate handle. */
1062 	if (index >= req->num_outstanding_cmds) {
1063 		ql_log(ql_log_warn, vha, 0x3014,
1064 		    "Invalid SCSI command index (%x).\n", index);
1065 
1066 		if (IS_QLA82XX(ha))
1067 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1068 		else
1069 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1070 		return;
1071 	}
1072 
1073 	sp = req->outstanding_cmds[index];
1074 	if (sp) {
1075 		/* Free outstanding command slot. */
1076 		req->outstanding_cmds[index] = NULL;
1077 
1078 		/* Save ISP completion status */
1079 		sp->done(ha, sp, DID_OK << 16);
1080 	} else {
1081 		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
1082 
1083 		if (IS_QLA82XX(ha))
1084 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1085 		else
1086 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1087 	}
1088 }
1089 
1090 srb_t *
1091 qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
1092     struct req_que *req, void *iocb)
1093 {
1094 	struct qla_hw_data *ha = vha->hw;
1095 	sts_entry_t *pkt = iocb;
1096 	srb_t *sp = NULL;
1097 	uint16_t index;
1098 
1099 	index = LSW(pkt->handle);
1100 	if (index >= req->num_outstanding_cmds) {
1101 		ql_log(ql_log_warn, vha, 0x5031,
1102 		    "Invalid command index (%x).\n", index);
1103 		if (IS_QLA82XX(ha))
1104 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1105 		else
1106 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1107 		goto done;
1108 	}
1109 	sp = req->outstanding_cmds[index];
1110 	if (!sp) {
1111 		ql_log(ql_log_warn, vha, 0x5032,
1112 		    "Invalid completion handle (%x) -- timed-out.\n", index);
1113 		return sp;
1114 	}
1115 	if (sp->handle != index) {
1116 		ql_log(ql_log_warn, vha, 0x5033,
1117 		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
1118 		return NULL;
1119 	}
1120 
1121 	req->outstanding_cmds[index] = NULL;
1122 
1123 done:
1124 	return sp;
1125 }
1126 
1127 static void
1128 qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1129     struct mbx_entry *mbx)
1130 {
1131 	const char func[] = "MBX-IOCB";
1132 	const char *type;
1133 	fc_port_t *fcport;
1134 	srb_t *sp;
1135 	struct srb_iocb *lio;
1136 	uint16_t *data;
1137 	uint16_t status;
1138 
1139 	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
1140 	if (!sp)
1141 		return;
1142 
1143 	lio = &sp->u.iocb_cmd;
1144 	type = sp->name;
1145 	fcport = sp->fcport;
1146 	data = lio->u.logio.data;
1147 
1148 	data[0] = MBS_COMMAND_ERROR;
1149 	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1150 	    QLA_LOGIO_LOGIN_RETRIED : 0;
1151 	if (mbx->entry_status) {
1152 		ql_dbg(ql_dbg_async, vha, 0x5043,
1153 		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
1154 		    "entry-status=%x status=%x state-flag=%x "
1155 		    "status-flags=%x.\n", type, sp->handle,
1156 		    fcport->d_id.b.domain, fcport->d_id.b.area,
1157 		    fcport->d_id.b.al_pa, mbx->entry_status,
1158 		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
1159 		    le16_to_cpu(mbx->status_flags));
1160 
1161 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
1162 		    (uint8_t *)mbx, sizeof(*mbx));
1163 
1164 		goto logio_done;
1165 	}
1166 
1167 	status = le16_to_cpu(mbx->status);
1168 	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
1169 	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
1170 		status = 0;
1171 	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
1172 		ql_dbg(ql_dbg_async, vha, 0x5045,
1173 		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
1174 		    type, sp->handle, fcport->d_id.b.domain,
1175 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
1176 		    le16_to_cpu(mbx->mb1));
1177 
1178 		data[0] = MBS_COMMAND_COMPLETE;
1179 		if (sp->type == SRB_LOGIN_CMD) {
1180 			fcport->port_type = FCT_TARGET;
1181 			if (le16_to_cpu(mbx->mb1) & BIT_0)
1182 				fcport->port_type = FCT_INITIATOR;
1183 			else if (le16_to_cpu(mbx->mb1) & BIT_1)
1184 				fcport->flags |= FCF_FCP2_DEVICE;
1185 		}
1186 		goto logio_done;
1187 	}
1188 
1189 	data[0] = le16_to_cpu(mbx->mb0);
1190 	switch (data[0]) {
1191 	case MBS_PORT_ID_USED:
1192 		data[1] = le16_to_cpu(mbx->mb1);
1193 		break;
1194 	case MBS_LOOP_ID_USED:
1195 		break;
1196 	default:
1197 		data[0] = MBS_COMMAND_ERROR;
1198 		break;
1199 	}
1200 
1201 	ql_log(ql_log_warn, vha, 0x5046,
1202 	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
1203 	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
1204 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
1205 	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
1206 	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
1207 	    le16_to_cpu(mbx->mb7));
1208 
1209 logio_done:
1210 	sp->done(vha, sp, 0);
1211 }
1212 
1213 static void
1214 qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1215     sts_entry_t *pkt, int iocb_type)
1216 {
1217 	const char func[] = "CT_IOCB";
1218 	const char *type;
1219 	srb_t *sp;
1220 	struct fc_bsg_job *bsg_job;
1221 	uint16_t comp_status;
1222 	int res;
1223 
1224 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1225 	if (!sp)
1226 		return;
1227 
1228 	bsg_job = sp->u.bsg_job;
1229 
1230 	type = "ct pass-through";
1231 
1232 	comp_status = le16_to_cpu(pkt->comp_status);
1233 
1234 	/* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1235 	 * FC payload to the caller.
1236 	 */
1237 	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1238 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1239 
1240 	if (comp_status != CS_COMPLETE) {
1241 		if (comp_status == CS_DATA_UNDERRUN) {
1242 			res = DID_OK << 16;
1243 			bsg_job->reply->reply_payload_rcv_len =
1244 			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);
1245 
1246 			ql_log(ql_log_warn, vha, 0x5048,
1247 			    "CT pass-through-%s error "
1248 			    "comp_status-status=0x%x total_byte = 0x%x.\n",
1249 			    type, comp_status,
1250 			    bsg_job->reply->reply_payload_rcv_len);
1251 		} else {
1252 			ql_log(ql_log_warn, vha, 0x5049,
1253 			    "CT pass-through-%s error "
1254 			    "comp_status-status=0x%x.\n", type, comp_status);
1255 			res = DID_ERROR << 16;
1256 			bsg_job->reply->reply_payload_rcv_len = 0;
1257 		}
1258 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
1259 		    (uint8_t *)pkt, sizeof(*pkt));
1260 	} else {
1261 		res = DID_OK << 16;
1262 		bsg_job->reply->reply_payload_rcv_len =
1263 		    bsg_job->reply_payload.payload_len;
1264 		bsg_job->reply_len = 0;
1265 	}
1266 
1267 	sp->done(vha, sp, res);
1268 }
1269 
1270 static void
1271 qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
1272     struct sts_entry_24xx *pkt, int iocb_type)
1273 {
1274 	const char func[] = "ELS_CT_IOCB";
1275 	const char *type;
1276 	srb_t *sp;
1277 	struct fc_bsg_job *bsg_job;
1278 	uint16_t comp_status;
1279 	uint32_t fw_status[3];
1280 	uint8_t *fw_sts_ptr;
1281 	int res;
1282 
1283 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
1284 	if (!sp)
1285 		return;
1286 	bsg_job = sp->u.bsg_job;
1287 
1288 	type = NULL;
1289 	switch (sp->type) {
1290 	case SRB_ELS_CMD_RPT:
1291 	case SRB_ELS_CMD_HST:
1292 		type = "els";
1293 		break;
1294 	case SRB_CT_CMD:
1295 		type = "ct pass-through";
1296 		break;
1297 	default:
1298 		ql_dbg(ql_dbg_user, vha, 0x503e,
1299 		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
1300 		return;
1301 	}
1302 
1303 	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
1304 	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_1);
1305 	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->error_subcode_2);
1306 
1307 	/* Return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
1308 	 * FC payload to the caller.
1309 	 */
1310 	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
1311 	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
1312 
1313 	if (comp_status != CS_COMPLETE) {
1314 		if (comp_status == CS_DATA_UNDERRUN) {
1315 			res = DID_OK << 16;
1316 			bsg_job->reply->reply_payload_rcv_len =
1317 			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);
1318 
1319 			ql_dbg(ql_dbg_user, vha, 0x503f,
1320 			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1321 			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
1322 			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
1323 			    le16_to_cpu(((struct els_sts_entry_24xx *)
1324 				pkt)->total_byte_count));
1325 			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1326 			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1327 		}
1328 		else {
1329 			ql_dbg(ql_dbg_user, vha, 0x5040,
1330 			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
1331 			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
1332 			    type, sp->handle, comp_status,
1333 			    le16_to_cpu(((struct els_sts_entry_24xx *)
1334 				pkt)->error_subcode_1),
1335 			    le16_to_cpu(((struct els_sts_entry_24xx *)
1336 				    pkt)->error_subcode_2));
1337 			res = DID_ERROR << 16;
1338 			bsg_job->reply->reply_payload_rcv_len = 0;
1339 			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
1340 			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
1341 		}
1342 		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
1343 				(uint8_t *)pkt, sizeof(*pkt));
1344 	}
1345 	else {
1346 		res =  DID_OK << 16;
1347 		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
1348 		bsg_job->reply_len = 0;
1349 	}
1350 
1351 	sp->done(vha, sp, res);
1352 }
1353 
1354 static void
1355 qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1356     struct logio_entry_24xx *logio)
1357 {
1358 	const char func[] = "LOGIO-IOCB";
1359 	const char *type;
1360 	fc_port_t *fcport;
1361 	srb_t *sp;
1362 	struct srb_iocb *lio;
1363 	uint16_t *data;
1364 	uint32_t iop[2];
1365 
1366 	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
1367 	if (!sp)
1368 		return;
1369 
1370 	lio = &sp->u.iocb_cmd;
1371 	type = sp->name;
1372 	fcport = sp->fcport;
1373 	data = lio->u.logio.data;
1374 
1375 	data[0] = MBS_COMMAND_ERROR;
1376 	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
1377 		QLA_LOGIO_LOGIN_RETRIED : 0;
1378 	if (logio->entry_status) {
1379 		ql_log(ql_log_warn, fcport->vha, 0x5034,
1380 		    "Async-%s error entry - hdl=%x "
1381 		    "portid=%02x%02x%02x entry-status=%x.\n",
1382 		    type, sp->handle, fcport->d_id.b.domain,
1383 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
1384 		    logio->entry_status);
1385 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
1386 		    (uint8_t *)logio, sizeof(*logio));
1387 
1388 		goto logio_done;
1389 	}
1390 
1391 	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
1392 		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
1393 		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
1394 		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1395 		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
1396 		    le32_to_cpu(logio->io_parameter[0]));
1397 
1398 		data[0] = MBS_COMMAND_COMPLETE;
1399 		if (sp->type != SRB_LOGIN_CMD)
1400 			goto logio_done;
1401 
1402 		iop[0] = le32_to_cpu(logio->io_parameter[0]);
1403 		if (iop[0] & BIT_4) {
1404 			fcport->port_type = FCT_TARGET;
1405 			if (iop[0] & BIT_8)
1406 				fcport->flags |= FCF_FCP2_DEVICE;
1407 		} else if (iop[0] & BIT_5)
1408 			fcport->port_type = FCT_INITIATOR;
1409 
1410 		if (iop[0] & BIT_7)
1411 			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1412 
1413 		if (logio->io_parameter[7] || logio->io_parameter[8])
1414 			fcport->supported_classes |= FC_COS_CLASS2;
1415 		if (logio->io_parameter[9] || logio->io_parameter[10])
1416 			fcport->supported_classes |= FC_COS_CLASS3;
1417 
1418 		goto logio_done;
1419 	}
1420 
1421 	iop[0] = le32_to_cpu(logio->io_parameter[0]);
1422 	iop[1] = le32_to_cpu(logio->io_parameter[1]);
1423 	switch (iop[0]) {
1424 	case LSC_SCODE_PORTID_USED:
1425 		data[0] = MBS_PORT_ID_USED;
1426 		data[1] = LSW(iop[1]);
1427 		break;
1428 	case LSC_SCODE_NPORT_USED:
1429 		data[0] = MBS_LOOP_ID_USED;
1430 		break;
1431 	default:
1432 		data[0] = MBS_COMMAND_ERROR;
1433 		break;
1434 	}
1435 
1436 	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
1437 	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
1438 	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
1439 	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
1440 	    le16_to_cpu(logio->comp_status),
1441 	    le32_to_cpu(logio->io_parameter[0]),
1442 	    le32_to_cpu(logio->io_parameter[1]));
1443 
1444 logio_done:
1445 	sp->done(vha, sp, 0);
1446 }
1447 
1448 static void
1449 qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
1450     struct tsk_mgmt_entry *tsk)
1451 {
1452 	const char func[] = "TMF-IOCB";
1453 	const char *type;
1454 	fc_port_t *fcport;
1455 	srb_t *sp;
1456 	struct srb_iocb *iocb;
1457 	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
1458 	int error = 1;
1459 
1460 	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
1461 	if (!sp)
1462 		return;
1463 
1464 	iocb = &sp->u.iocb_cmd;
1465 	type = sp->name;
1466 	fcport = sp->fcport;
1467 
1468 	if (sts->entry_status) {
1469 		ql_log(ql_log_warn, fcport->vha, 0x5038,
1470 		    "Async-%s error - hdl=%x entry-status(%x).\n",
1471 		    type, sp->handle, sts->entry_status);
1472 	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
1473 		ql_log(ql_log_warn, fcport->vha, 0x5039,
1474 		    "Async-%s error - hdl=%x completion status(%x).\n",
1475 		    type, sp->handle, sts->comp_status);
1476 	} else if (!(le16_to_cpu(sts->scsi_status) &
1477 	    SS_RESPONSE_INFO_LEN_VALID)) {
1478 		ql_log(ql_log_warn, fcport->vha, 0x503a,
1479 		    "Async-%s error - hdl=%x no response info(%x).\n",
1480 		    type, sp->handle, sts->scsi_status);
1481 	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
1482 		ql_log(ql_log_warn, fcport->vha, 0x503b,
1483 		    "Async-%s error - hdl=%x not enough response(%d).\n",
1484 		    type, sp->handle, sts->rsp_data_len);
1485 	} else if (sts->data[3]) {
1486 		ql_log(ql_log_warn, fcport->vha, 0x503c,
1487 		    "Async-%s error - hdl=%x response(%x).\n",
1488 		    type, sp->handle, sts->data[3]);
1489 	} else {
1490 		error = 0;
1491 	}
1492 
1493 	if (error) {
1494 		iocb->u.tmf.data = error;
1495 		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
1496 		    (uint8_t *)sts, sizeof(*sts));
1497 	}
1498 
1499 	sp->done(vha, sp, 0);
1500 }
1501 
1502 /**
1503  * qla2x00_process_response_queue() - Process response queue entries.
1504  * @rsp: response queue
1505  */
1506 void
1507 qla2x00_process_response_queue(struct rsp_que *rsp)
1508 {
1509 	struct scsi_qla_host *vha;
1510 	struct qla_hw_data *ha = rsp->hw;
1511 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1512 	sts_entry_t	*pkt;
1513 	uint16_t        handle_cnt;
1514 	uint16_t        cnt;
1515 
1516 	vha = pci_get_drvdata(ha->pdev);
1517 
1518 	if (!vha->flags.online)
1519 		return;
1520 
1521 	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
1522 		pkt = (sts_entry_t *)rsp->ring_ptr;
1523 
1524 		rsp->ring_index++;
1525 		if (rsp->ring_index == rsp->length) {
1526 			rsp->ring_index = 0;
1527 			rsp->ring_ptr = rsp->ring;
1528 		} else {
1529 			rsp->ring_ptr++;
1530 		}
1531 
1532 		if (pkt->entry_status != 0) {
1533 			qla2x00_error_entry(vha, rsp, pkt);
1534 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1535 			wmb();
1536 			continue;
1537 		}
1538 
1539 		switch (pkt->entry_type) {
1540 		case STATUS_TYPE:
1541 			qla2x00_status_entry(vha, rsp, pkt);
1542 			break;
1543 		case STATUS_TYPE_21:
1544 			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
1545 			for (cnt = 0; cnt < handle_cnt; cnt++) {
1546 				qla2x00_process_completed_request(vha, rsp->req,
1547 				    ((sts21_entry_t *)pkt)->handle[cnt]);
1548 			}
1549 			break;
1550 		case STATUS_TYPE_22:
1551 			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
1552 			for (cnt = 0; cnt < handle_cnt; cnt++) {
1553 				qla2x00_process_completed_request(vha, rsp->req,
1554 				    ((sts22_entry_t *)pkt)->handle[cnt]);
1555 			}
1556 			break;
1557 		case STATUS_CONT_TYPE:
1558 			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
1559 			break;
1560 		case MBX_IOCB_TYPE:
1561 			qla2x00_mbx_iocb_entry(vha, rsp->req,
1562 			    (struct mbx_entry *)pkt);
1563 			break;
1564 		case CT_IOCB_TYPE:
1565 			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
1566 			break;
1567 		default:
1568 			/* Type Not Supported. */
1569 			ql_log(ql_log_warn, vha, 0x504a,
1570 			    "Received unknown response pkt type %x "
1571 			    "entry status=%x.\n",
1572 			    pkt->entry_type, pkt->entry_status);
1573 			break;
1574 		}
1575 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1576 		wmb();
1577 	}
1578 
1579 	/* Adjust ring index */
1580 	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
1581 }
1582 
1583 static inline void
1584 qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
1585 		     uint32_t sense_len, struct rsp_que *rsp, int res)
1586 {
1587 	struct scsi_qla_host *vha = sp->fcport->vha;
1588 	struct scsi_cmnd *cp = GET_CMD_SP(sp);
1589 	uint32_t track_sense_len;
1590 
1591 	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
1592 		sense_len = SCSI_SENSE_BUFFERSIZE;
1593 
1594 	SET_CMD_SENSE_LEN(sp, sense_len);
1595 	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
1596 	track_sense_len = sense_len;
1597 
1598 	if (sense_len > par_sense_len)
1599 		sense_len = par_sense_len;
1600 
1601 	memcpy(cp->sense_buffer, sense_data, sense_len);
1602 
1603 	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
1604 	track_sense_len -= sense_len;
1605 	SET_CMD_SENSE_LEN(sp, track_sense_len);
1606 
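	/*
	 * Sense data that did not fit in this status IOCB arrives in
	 * status continuation entries; stash the SRB so that
	 * qla2x00_status_cont_entry() can copy the remainder.
	 */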
1607 	if (track_sense_len != 0) {
1608 		rsp->status_srb = sp;
1609 		cp->result = res;
1610 	}
1611 
1612 	if (sense_len) {
1613 		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
1614 		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
1615 		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
1616 		    cp);
1617 		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
1618 		    cp->sense_buffer, sense_len);
1619 	}
1620 }
1621 
1622 struct scsi_dif_tuple {
1623 	__be16 guard;       /* Checksum */
1624 	__be16 app_tag;         /* APPL identifier */
1625 	__be32 ref_tag;         /* Target LBA or indirect LBA */
1626 };
1627 
1628 /*
1629  * Checks the guard or meta-data for the type of error
1630  * detected by the HBA. In case of errors, we set the
1631  * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
1632  * to indicate to the kernel that the HBA detected error.
1633  */
1634 static inline int
1635 qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
1636 {
1637 	struct scsi_qla_host *vha = sp->fcport->vha;
1638 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1639 	uint8_t		*ap = &sts24->data[12];
1640 	uint8_t		*ep = &sts24->data[20];
1641 	uint32_t	e_ref_tag, a_ref_tag;
1642 	uint16_t	e_app_tag, a_app_tag;
1643 	uint16_t	e_guard, a_guard;
1644 
1645 	/*
1646 	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
1647 	 * would make guard field appear at offset 2
1648 	 */
1649 	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
1650 	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
1651 	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
1652 	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
1653 	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
1654 	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));
1655 
1656 	ql_dbg(ql_dbg_io, vha, 0x3023,
1657 	    "iocb(s) %p Returned STATUS.\n", sts24);
1658 
1659 	ql_dbg(ql_dbg_io, vha, 0x3024,
1660 	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
1661 	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
1662 	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
1663 	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
1664 	    a_app_tag, e_app_tag, a_guard, e_guard);
1665 
1666 	/*
1667 	 * Ignore sector if:
1668 	 * For type     3: ref & app tag is all 'f's
1669 	 * For type 0,1,2: app tag is all 'f's
1670 	 */
1671 	if ((a_app_tag == 0xffff) &&
1672 	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
1673 	     (a_ref_tag == 0xffffffff))) {
1674 		uint32_t blocks_done, resid;
1675 		sector_t lba_s = scsi_get_lba(cmd);
1676 
1677 		/* 2TB boundary case covered automatically with this */
1678 		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;
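		/*
		 * e.g. lba_s = 0x100 and e_ref_tag = 0x104 means the first
		 * five blocks completed before the ignorable sector was hit.
		 */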
1679 
1680 		resid = scsi_bufflen(cmd) - (blocks_done *
1681 		    cmd->device->sector_size);
1682 
1683 		scsi_set_resid(cmd, resid);
1684 		cmd->result = DID_OK << 16;
1685 
1686 		/* Update protection tag */
1687 		if (scsi_prot_sg_count(cmd)) {
1688 			uint32_t i, j = 0, k = 0, num_ent;
1689 			struct scatterlist *sg;
1690 			struct sd_dif_tuple *spt;
1691 
1692 			/* Patch the corresponding protection tags */
1693 			scsi_for_each_prot_sg(cmd, sg,
1694 			    scsi_prot_sg_count(cmd), i) {
1695 				num_ent = sg_dma_len(sg) / 8;
1696 				if (k + num_ent < blocks_done) {
1697 					k += num_ent;
1698 					continue;
1699 				}
1700 				j = blocks_done - k - 1;
1701 				k = blocks_done;
1702 				break;
1703 			}
1704 
1705 			if (k != blocks_done) {
1706 				ql_log(ql_log_warn, vha, 0x302f,
1707 				    "unexpected tag values tag:lba=%x:%llx\n",
1708 				    e_ref_tag, (unsigned long long)lba_s);
1709 				return 1;
1710 			}
1711 
1712 			spt = page_address(sg_page(sg)) + sg->offset;
1713 			spt += j;
1714 
1715 			spt->app_tag = 0xffff;
1716 			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
1717 				spt->ref_tag = 0xffffffff;
1718 		}
1719 
1720 		return 0;
1721 	}
1722 
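	/*
	 * ASC/ASCQ pairs below follow T10/SPC: 0x10/0x1 = logical block
	 * guard check failed, 0x10/0x2 = application tag check failed,
	 * 0x10/0x3 = reference tag check failed.
	 */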
1723 	/* check guard */
1724 	if (e_guard != a_guard) {
1725 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1726 		    0x10, 0x1);
1727 		set_driver_byte(cmd, DRIVER_SENSE);
1728 		set_host_byte(cmd, DID_ABORT);
1729 		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
1730 		return 1;
1731 	}
1732 
1733 	/* check ref tag */
1734 	if (e_ref_tag != a_ref_tag) {
1735 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1736 		    0x10, 0x3);
1737 		set_driver_byte(cmd, DRIVER_SENSE);
1738 		set_host_byte(cmd, DID_ABORT);
1739 		cmd->result |= SAM_STAT_CHECK_CONDITION;
1740 		return 1;
1741 	}
1742 
1743 	/* check appl tag */
1744 	if (e_app_tag != a_app_tag) {
1745 		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1746 		    0x10, 0x2);
1747 		set_driver_byte(cmd, DRIVER_SENSE);
1748 		set_host_byte(cmd, DID_ABORT);
1749 		cmd->result |= SAM_STAT_CHECK_CONDITION;
1750 		return 1;
1751 	}
1752 
1753 	return 1;
1754 }
1755 
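/**
 * qla25xx_process_bidir_status_iocb() - Process a bidirectional
 *	command Status IOCB.
 * @vha: SCSI driver HA context
 * @pkt: Entry pointer
 * @req: Request queue the command was issued on
 * @index: Outstanding command slot recovered from the handle
 */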
1756 static void
1757 qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
1758 				  struct req_que *req, uint32_t index)
1759 {
1760 	struct qla_hw_data *ha = vha->hw;
1761 	srb_t *sp;
1762 	uint16_t	comp_status;
1763 	uint16_t	scsi_status;
1764 	uint16_t thread_id;
1765 	uint32_t rval = EXT_STATUS_OK;
1766 	struct fc_bsg_job *bsg_job = NULL;
1767 	sts_entry_t *sts;
1768 	struct sts_entry_24xx *sts24;
1769 	sts = (sts_entry_t *) pkt;
1770 	sts24 = (struct sts_entry_24xx *) pkt;
1771 
1772 	/* Validate handle. */
1773 	if (index >= req->num_outstanding_cmds) {
1774 		ql_log(ql_log_warn, vha, 0x70af,
1775 		    "Invalid SCSI completion handle 0x%x.\n", index);
1776 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1777 		return;
1778 	}
1779 
1780 	sp = req->outstanding_cmds[index];
1781 	if (sp) {
1782 		/* Free outstanding command slot. */
1783 		req->outstanding_cmds[index] = NULL;
1784 		bsg_job = sp->u.bsg_job;
1785 	} else {
1786 		ql_log(ql_log_warn, vha, 0x70b0,
1787 		    "Req:%d: Invalid ISP SCSI completion handle (0x%x).\n",
1788 		    req->id, index);
1789 
1790 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1791 		return;
1792 	}
1793 
1794 	if (IS_FWI2_CAPABLE(ha)) {
1795 		comp_status = le16_to_cpu(sts24->comp_status);
1796 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1797 	} else {
1798 		comp_status = le16_to_cpu(sts->comp_status);
1799 		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1800 	}
1801 
1802 	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1803 	switch (comp_status) {
1804 	case CS_COMPLETE:
1805 		if (scsi_status == 0) {
1806 			bsg_job->reply->reply_payload_rcv_len =
1807 					bsg_job->reply_payload.payload_len;
1808 			rval = EXT_STATUS_OK;
1809 		}
1810 		goto done;
1811 
1812 	case CS_DATA_OVERRUN:
1813 		ql_dbg(ql_dbg_user, vha, 0x70b1,
1814 		    "Command completed with data overrun thread_id=%d\n",
1815 		    thread_id);
1816 		rval = EXT_STATUS_DATA_OVERRUN;
1817 		break;
1818 
1819 	case CS_DATA_UNDERRUN:
1820 		ql_dbg(ql_dbg_user, vha, 0x70b2,
1821 		    "Command completed with data underrun thread_id=%d\n",
1822 		    thread_id);
1823 		rval = EXT_STATUS_DATA_UNDERRUN;
1824 		break;
1825 	case CS_BIDIR_RD_OVERRUN:
1826 		ql_dbg(ql_dbg_user, vha, 0x70b3,
1827 		    "Command completed with read data overrun thread_id=%d\n",
1828 		    thread_id);
1829 		rval = EXT_STATUS_DATA_OVERRUN;
1830 		break;
1831 
1832 	case CS_BIDIR_RD_WR_OVERRUN:
1833 		ql_dbg(ql_dbg_user, vha, 0x70b4,
1834 		    "Command completed with read and write data overrun "
1835 		    "thread_id=%d\n", thread_id);
1836 		rval = EXT_STATUS_DATA_OVERRUN;
1837 		break;
1838 
1839 	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
1840 		ql_dbg(ql_dbg_user, vha, 0x70b5,
1841 		    "Command completed with read data over and write data "
1842 		    "underrun thread_id=%d\n", thread_id);
1843 		rval = EXT_STATUS_DATA_OVERRUN;
1844 		break;
1845 
1846 	case CS_BIDIR_RD_UNDERRUN:
1847 		ql_dbg(ql_dbg_user, vha, 0x70b6,
1848 		    "Command completed with read data underrun "
1849 		    "thread_id=%d\n", thread_id);
1850 		rval = EXT_STATUS_DATA_UNDERRUN;
1851 		break;
1852 
1853 	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
1854 		ql_dbg(ql_dbg_user, vha, 0x70b7,
1855 		    "Command completed with read data under and write data "
1856 		    "overrun thread_id=%d\n", thread_id);
1857 		rval = EXT_STATUS_DATA_UNDERRUN;
1858 		break;
1859 
1860 	case CS_BIDIR_RD_WR_UNDERRUN:
1861 		ql_dbg(ql_dbg_user, vha, 0x70b8,
1862 		    "Command completed with read and write data underrun "
1863 		    "thread_id=%d\n", thread_id);
1864 		rval = EXT_STATUS_DATA_UNDERRUN;
1865 		break;
1866 
1867 	case CS_BIDIR_DMA:
1868 		ql_dbg(ql_dbg_user, vha, 0x70b9,
1869 		    "Command completed with data DMA error thread_id=%d\n",
1870 		    thread_id);
1871 		rval = EXT_STATUS_DMA_ERR;
1872 		break;
1873 
1874 	case CS_TIMEOUT:
1875 		ql_dbg(ql_dbg_user, vha, 0x70ba,
1876 		    "Command completed with timeout thread_id=%d\n",
1877 		    thread_id);
1878 		rval = EXT_STATUS_TIMEOUT;
1879 		break;
1880 	default:
1881 		ql_dbg(ql_dbg_user, vha, 0x70bb,
1882 		    "Command completed with completion status=0x%x "
1883 		    "thread_id=%d\n", comp_status, thread_id);
1884 		rval = EXT_STATUS_ERR;
1885 		break;
1886 	}
1887 	bsg_job->reply->reply_payload_rcv_len = 0;
1888 
1889 done:
1890 	/* Return the vendor specific reply to API */
1891 	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1892 	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1893 	/* Always return DID_OK; bsg will send the vendor-specific
1894 	 * response in this case only. */
1895 	sp->done(vha, sp, (DID_OK << 16));
1896 
1897 }
1898 
1899 /**
1900  * qla2x00_status_entry() - Process a Status IOCB entry.
1901  * @vha: SCSI driver HA context
1902  * @pkt: Entry pointer
1903  */
1904 static void
1905 qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
1906 {
1907 	srb_t		*sp;
1908 	fc_port_t	*fcport;
1909 	struct scsi_cmnd *cp;
1910 	sts_entry_t *sts;
1911 	struct sts_entry_24xx *sts24;
1912 	uint16_t	comp_status;
1913 	uint16_t	scsi_status;
1914 	uint16_t	ox_id;
1915 	uint8_t		lscsi_status;
1916 	int32_t		resid;
1917 	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
1918 	    fw_resid_len;
1919 	uint8_t		*rsp_info, *sense_data;
1920 	struct qla_hw_data *ha = vha->hw;
1921 	uint32_t handle;
1922 	uint16_t que;
1923 	struct req_que *req;
1924 	int logit = 1;
1925 	int res = 0;
1926 	uint16_t state_flags = 0;
1927 
1928 	sts = (sts_entry_t *) pkt;
1929 	sts24 = (struct sts_entry_24xx *) pkt;
1930 	if (IS_FWI2_CAPABLE(ha)) {
1931 		comp_status = le16_to_cpu(sts24->comp_status);
1932 		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
1933 		state_flags = le16_to_cpu(sts24->state_flags);
1934 	} else {
1935 		comp_status = le16_to_cpu(sts->comp_status);
1936 		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
1937 	}
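	/* The completion handle packs the request queue number in its upper
	 * word and the outstanding command slot in its lower word. */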
1938 	handle = (uint32_t) LSW(sts->handle);
1939 	que = MSW(sts->handle);
1940 	req = ha->req_q_map[que];
1941 
1942 	/* Validate handle. */
1943 	if (handle < req->num_outstanding_cmds)
1944 		sp = req->outstanding_cmds[handle];
1945 	else
1946 		sp = NULL;
1947 
1948 	if (sp == NULL) {
1949 		ql_dbg(ql_dbg_io, vha, 0x3017,
1950 		    "Invalid status handle (0x%x).\n", sts->handle);
1951 
1952 		if (IS_QLA82XX(ha))
1953 			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
1954 		else
1955 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1956 		qla2xxx_wake_dpc(vha);
1957 		return;
1958 	}
1959 
1960 	if (unlikely((state_flags & BIT_1) && (sp->type == SRB_BIDI_CMD))) {
1961 		qla25xx_process_bidir_status_iocb(vha, pkt, req, handle);
1962 		return;
1963 	}
1964 
1965 	/* Fast path completion. */
1966 	if (comp_status == CS_COMPLETE && scsi_status == 0) {
1967 		qla2x00_do_host_ramp_up(vha);
1968 		qla2x00_process_completed_request(vha, req, handle);
1969 
1970 		return;
1971 	}
1972 
1973 	req->outstanding_cmds[handle] = NULL;
1974 	cp = GET_CMD_SP(sp);
1975 	if (cp == NULL) {
1976 		ql_dbg(ql_dbg_io, vha, 0x3018,
1977 		    "Command already returned (0x%x/%p).\n",
1978 		    sts->handle, sp);
1979 
1980 		return;
1981 	}
1982 
1983 	lscsi_status = scsi_status & STATUS_MASK;
1984 
1985 	fcport = sp->fcport;
1986 
1987 	ox_id = 0;
1988 	sense_len = par_sense_len = rsp_info_len = resid_len =
1989 	    fw_resid_len = 0;
1990 	if (IS_FWI2_CAPABLE(ha)) {
1991 		if (scsi_status & SS_SENSE_LEN_VALID)
1992 			sense_len = le32_to_cpu(sts24->sense_len);
1993 		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
1994 			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
1995 		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
1996 			resid_len = le32_to_cpu(sts24->rsp_residual_count);
1997 		if (comp_status == CS_DATA_UNDERRUN)
1998 			fw_resid_len = le32_to_cpu(sts24->residual_len);
1999 		rsp_info = sts24->data;
2000 		sense_data = sts24->data;
2001 		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
2002 		ox_id = le16_to_cpu(sts24->ox_id);
2003 		par_sense_len = sizeof(sts24->data);
2004 	} else {
2005 		if (scsi_status & SS_SENSE_LEN_VALID)
2006 			sense_len = le16_to_cpu(sts->req_sense_length);
2007 		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
2008 			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
2009 		resid_len = le32_to_cpu(sts->residual_length);
2010 		rsp_info = sts->rsp_info;
2011 		sense_data = sts->req_sense_data;
2012 		par_sense_len = sizeof(sts->req_sense_data);
2013 	}
2014 
2015 	/* Check for any FCP transport errors. */
2016 	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
2017 		/* Sense data lies beyond any FCP RESPONSE data. */
2018 		if (IS_FWI2_CAPABLE(ha)) {
2019 			sense_data += rsp_info_len;
2020 			par_sense_len -= rsp_info_len;
2021 		}
2022 		if (rsp_info_len > 3 && rsp_info[3]) {
2023 			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
2024 			    "FCP I/O protocol failure (0x%x/0x%x).\n",
2025 			    rsp_info_len, rsp_info[3]);
2026 
2027 			res = DID_BUS_BUSY << 16;
2028 			goto out;
2029 		}
2030 	}
2031 
2032 	/* Check for overrun. */
2033 	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
2034 	    scsi_status & SS_RESIDUAL_OVER)
2035 		comp_status = CS_DATA_OVERRUN;
2036 
2037 	/*
2038 	 * Based on the host and SCSI status, generate a status code for Linux.
2039 	 */
2040 	switch (comp_status) {
2041 	case CS_COMPLETE:
2042 	case CS_QUEUE_FULL:
2043 		if (scsi_status == 0) {
2044 			res = DID_OK << 16;
2045 			break;
2046 		}
2047 		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
2048 			resid = resid_len;
2049 			scsi_set_resid(cp, resid);
2050 
2051 			if (!lscsi_status &&
2052 			    ((unsigned)(scsi_bufflen(cp) - resid) <
2053 			     cp->underflow)) {
2054 				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
2055 				    "Mid-layer underflow "
2056 				    "detected (0x%x of 0x%x bytes).\n",
2057 				    resid, scsi_bufflen(cp));
2058 
2059 				res = DID_ERROR << 16;
2060 				break;
2061 			}
2062 		}
2063 		res = DID_OK << 16 | lscsi_status;
2064 
2065 		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2066 			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
2067 			    "QUEUE FULL detected.\n");
2068 			break;
2069 		}
2070 		logit = 0;
2071 		if (lscsi_status != SS_CHECK_CONDITION)
2072 			break;
2073 
2074 		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2075 		if (!(scsi_status & SS_SENSE_LEN_VALID))
2076 			break;
2077 
2078 		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2079 		    rsp, res);
2080 		break;
2081 
2082 	case CS_DATA_UNDERRUN:
2083 		/* Use F/W calculated residual length. */
2084 		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
2085 		scsi_set_resid(cp, resid);
2086 		if (scsi_status & SS_RESIDUAL_UNDER) {
2087 			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
2088 				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
2089 				    "Dropped frame(s) detected "
2090 				    "(0x%x of 0x%x bytes).\n",
2091 				    resid, scsi_bufflen(cp));
2092 
2093 				res = DID_ERROR << 16 | lscsi_status;
2094 				goto check_scsi_status;
2095 			}
2096 
2097 			if (!lscsi_status &&
2098 			    ((unsigned)(scsi_bufflen(cp) - resid) <
2099 			    cp->underflow)) {
2100 				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
2101 				    "Mid-layer underflow "
2102 				    "detected (0x%x of 0x%x bytes).\n",
2103 				    resid, scsi_bufflen(cp));
2104 
2105 				res = DID_ERROR << 16;
2106 				break;
2107 			}
2108 		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
2109 			    lscsi_status != SAM_STAT_BUSY) {
2110 			/*
2111 			 * A SCSI status of TASK SET FULL or BUSY means the
2112 			 * task did not complete.
2113 			 */
2114 
2115 			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
2116 			    "Dropped frame(s) detected (0x%x "
2117 			    "of 0x%x bytes).\n", resid,
2118 			    scsi_bufflen(cp));
2119 
2120 			res = DID_ERROR << 16 | lscsi_status;
2121 			goto check_scsi_status;
2122 		} else {
2123 			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
2124 			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2125 			    scsi_status, lscsi_status);
2126 		}
2127 
2128 		res = DID_OK << 16 | lscsi_status;
2129 		logit = 0;
2130 
2131 check_scsi_status:
2132 		/*
2133 		 * Check to see if the SCSI status is non-zero; if so,
2134 		 * report it.
2135 		 */
2136 		if (lscsi_status != 0) {
2137 			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
2138 				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
2139 				    "QUEUE FULL detected.\n");
2140 				logit = 1;
2141 				break;
2142 			}
2143 			if (lscsi_status != SS_CHECK_CONDITION)
2144 				break;
2145 
2146 			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2147 			if (!(scsi_status & SS_SENSE_LEN_VALID))
2148 				break;
2149 
2150 			qla2x00_handle_sense(sp, sense_data, par_sense_len,
2151 			    sense_len, rsp, res);
2152 		}
2153 		break;
2154 
2155 	case CS_PORT_LOGGED_OUT:
2156 	case CS_PORT_CONFIG_CHG:
2157 	case CS_PORT_BUSY:
2158 	case CS_INCOMPLETE:
2159 	case CS_PORT_UNAVAILABLE:
2160 	case CS_TIMEOUT:
2161 	case CS_RESET:
2162 
2163 		/*
2164 		 * We are going to have the fc class block the rport
2165 		 * while we try to recover so instruct the mid layer
2166 		 * to requeue until the class decides how to handle this.
2167 		 */
2168 		res = DID_TRANSPORT_DISRUPTED << 16;
2169 
2170 		if (comp_status == CS_TIMEOUT) {
2171 			if (IS_FWI2_CAPABLE(ha))
2172 				break;
2173 			else if ((le16_to_cpu(sts->status_flags) &
2174 			    SF_LOGOUT_SENT) == 0)
2175 				break;
2176 		}
2177 
2178 		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
2179 		    "Port down status: port-state=0x%x.\n",
2180 		    atomic_read(&fcport->state));
2181 
2182 		if (atomic_read(&fcport->state) == FCS_ONLINE)
2183 			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2184 		break;
2185 
2186 	case CS_ABORTED:
2187 		res = DID_RESET << 16;
2188 		break;
2189 
2190 	case CS_DIF_ERROR:
2191 		logit = qla2x00_handle_dif_error(sp, sts24);
2192 		res = cp->result;
2193 		break;
2194 
2195 	case CS_TRANSPORT:
2196 		res = DID_ERROR << 16;
2197 
2198 		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
2199 			break;
2200 
2201 		if (state_flags & BIT_4)
2202 			scmd_printk(KERN_WARNING, cp,
2203 			    "Unsupported device '%s' found.\n",
2204 			    cp->device->vendor);
2205 		break;
2206 
2207 	default:
2208 		res = DID_ERROR << 16;
2209 		break;
2210 	}
2211 
2212 out:
2213 	if (logit)
2214 		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
2215 		    "FCP command status: 0x%x-0x%x (0x%x) "
2216 		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
2217 		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
2218 		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
2219 		    comp_status, scsi_status, res, vha->host_no,
2220 		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
2221 		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
2222 		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
2223 		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
2224 		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
2225 		    resid_len, fw_resid_len);
2226 
2227 	if (!res)
2228 		qla2x00_do_host_ramp_up(vha);
2229 
2230 	if (rsp->status_srb == NULL)
2231 		sp->done(ha, sp, res);
2232 }
2233 
2234 /**
2235  * qla2x00_status_cont_entry() - Process a Status Continuation entry.
2236  * @rsp: Response queue
2237  * @pkt: Entry pointer
2238  *
2239  * Extended sense data.
2240  */
2241 static void
2242 qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2243 {
2244 	uint8_t	sense_sz = 0;
2245 	struct qla_hw_data *ha = rsp->hw;
2246 	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2247 	srb_t *sp = rsp->status_srb;
2248 	struct scsi_cmnd *cp;
2249 	uint32_t sense_len;
2250 	uint8_t *sense_ptr;
2251 
2252 	if (!sp || !GET_CMD_SENSE_LEN(sp))
2253 		return;
2254 
2255 	sense_len = GET_CMD_SENSE_LEN(sp);
2256 	sense_ptr = GET_CMD_SENSE_PTR(sp);
2257 
2258 	cp = GET_CMD_SP(sp);
2259 	if (cp == NULL) {
2260 		ql_log(ql_log_warn, vha, 0x3025,
2261 		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2262 
2263 		rsp->status_srb = NULL;
2264 		return;
2265 	}
2266 
2267 	if (sense_len > sizeof(pkt->data))
2268 		sense_sz = sizeof(pkt->data);
2269 	else
2270 		sense_sz = sense_len;
2271 
2272 	/* Move sense data. */
2273 	if (IS_FWI2_CAPABLE(ha))
2274 		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
2275 	memcpy(sense_ptr, pkt->data, sense_sz);
2276 	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
2277 		sense_ptr, sense_sz);
2278 
2279 	sense_len -= sense_sz;
2280 	sense_ptr += sense_sz;
2281 
2282 	SET_CMD_SENSE_PTR(sp, sense_ptr);
2283 	SET_CMD_SENSE_LEN(sp, sense_len);
2284 
2285 	/* Place command on done queue. */
2286 	if (sense_len == 0) {
2287 		rsp->status_srb = NULL;
2288 		sp->done(ha, sp, cp->result);
2289 	}
2290 }
2291 
2292 /**
2293  * qla2x00_error_entry() - Process an error entry.
2294  * @vha: SCSI driver HA context
2295  * @pkt: Entry pointer
2296  */
2297 static void
2298 qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
2299 {
2300 	srb_t *sp;
2301 	struct qla_hw_data *ha = vha->hw;
2302 	const char func[] = "ERROR-IOCB";
2303 	uint16_t que = MSW(pkt->handle);
2304 	struct req_que *req = NULL;
2305 	int res = DID_ERROR << 16;
2306 
2307 	ql_dbg(ql_dbg_async, vha, 0x502a,
2308 	    "type of error status in response: 0x%x\n", pkt->entry_status);
2309 
2310 	if (que >= ha->max_req_queues || !ha->req_q_map[que])
2311 		goto fatal;
2312 
2313 	req = ha->req_q_map[que];
2314 
2315 	if (pkt->entry_status & RF_BUSY)
2316 		res = DID_BUS_BUSY << 16;
2317 
2318 	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2319 	if (sp) {
2320 		sp->done(ha, sp, res);
2321 		return;
2322 	}
2323 fatal:
2324 	ql_log(ql_log_warn, vha, 0x5030,
2325 	    "Error entry - invalid handle/queue.\n");
2326 
2327 	if (IS_QLA82XX(ha))
2328 		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
2329 	else
2330 		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2331 	qla2xxx_wake_dpc(vha);
2332 }
2333 
2334 /**
2335  * qla24xx_mbx_completion() - Process mailbox command completions.
2336  * @vha: SCSI driver HA context
2337  * @mb0: Mailbox0 register
2338  */
2339 static void
2340 qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
2341 {
2342 	uint16_t	cnt;
2343 	uint32_t	mboxes;
2344 	uint16_t __iomem *wptr;
2345 	struct qla_hw_data *ha = vha->hw;
2346 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2347 
2348 	/* Read all mbox registers? */
2349 	mboxes = (1 << ha->mbx_count) - 1;
2350 	if (!ha->mcp)
2351 		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
2352 	else
2353 		mboxes = ha->mcp->in_mb;
2354 
2355 	/* Load return mailbox registers. */
2356 	ha->flags.mbox_int = 1;
2357 	ha->mailbox_out[0] = mb0;
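	/* Mailbox 0 was delivered in mb0, so drop its bit from the mask. */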
2358 	mboxes >>= 1;
2359 	wptr = (uint16_t __iomem *)&reg->mailbox1;
2360 
2361 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2362 		if (mboxes & BIT_0)
2363 			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
2364 
2365 		mboxes >>= 1;
2366 		wptr++;
2367 	}
2368 }
2369 
2370 /**
2371  * qla24xx_process_response_queue() - Process response queue entries.
2372  * @vha: SCSI driver HA context
2373  */
2374 void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2375 	struct rsp_que *rsp)
2376 {
2377 	struct sts_entry_24xx *pkt;
2378 	struct qla_hw_data *ha = vha->hw;
2379 
2380 	if (!vha->flags.online)
2381 		return;
2382 
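	/* Consume entries until we reach one already marked
	 * RESPONSE_PROCESSED on a previous pass. */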
2383 	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
2384 		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
2385 
2386 		rsp->ring_index++;
2387 		if (rsp->ring_index == rsp->length) {
2388 			rsp->ring_index = 0;
2389 			rsp->ring_ptr = rsp->ring;
2390 		} else {
2391 			rsp->ring_ptr++;
2392 		}
2393 
2394 		if (pkt->entry_status != 0) {
2395 			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2396 
2397 			(void)qlt_24xx_process_response_error(vha, pkt);
2398 
2399 			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2400 			wmb();
2401 			continue;
2402 		}
2403 
2404 		switch (pkt->entry_type) {
2405 		case STATUS_TYPE:
2406 			qla2x00_status_entry(vha, rsp, pkt);
2407 			break;
2408 		case STATUS_CONT_TYPE:
2409 			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2410 			break;
2411 		case VP_RPT_ID_IOCB_TYPE:
2412 			qla24xx_report_id_acquisition(vha,
2413 			    (struct vp_rpt_id_entry_24xx *)pkt);
2414 			break;
2415 		case LOGINOUT_PORT_IOCB_TYPE:
2416 			qla24xx_logio_entry(vha, rsp->req,
2417 			    (struct logio_entry_24xx *)pkt);
2418 			break;
2419 		case TSK_MGMT_IOCB_TYPE:
2420 			qla24xx_tm_iocb_entry(vha, rsp->req,
2421 			    (struct tsk_mgmt_entry *)pkt);
2422 			break;
2423 		case CT_IOCB_TYPE:
2424 			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
2425 			break;
2426 		case ELS_IOCB_TYPE:
2427 			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2428 			break;
2429 		case ABTS_RECV_24XX:
2430 			/* ensure that the ATIO queue is empty */
2431 			qlt_24xx_process_atio_queue(vha);
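			/* fall through */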
2432 		case ABTS_RESP_24XX:
2433 		case CTIO_TYPE7:
2434 		case NOTIFY_ACK_TYPE:
2435 			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2436 			break;
2437 		case MARKER_TYPE:
2438 			/* Do nothing in this case; this check only prevents
2439 			 * the entry from falling into the default case.
2440 			 */
2441 			break;
2442 		default:
2443 			/* Type Not Supported. */
2444 			ql_dbg(ql_dbg_async, vha, 0x5042,
2445 			    "Received unknown response pkt type %x "
2446 			    "entry status=%x.\n",
2447 			    pkt->entry_type, pkt->entry_status);
2448 			break;
2449 		}
2450 		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
2451 		wmb();
2452 	}
2453 
2454 	/* Adjust ring index */
2455 	if (IS_QLA82XX(ha)) {
2456 		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
2457 		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
2458 	} else
2459 		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2460 }
2461 
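/*
 * qla2xxx_check_risc_status() - Sanity-check RISC state through the
 * iobase window registers; called when the RISC reports PAUSED, before
 * the firmware dump is taken.
 */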
2462 static void
2463 qla2xxx_check_risc_status(scsi_qla_host_t *vha)
2464 {
2465 	int rval;
2466 	uint32_t cnt;
2467 	struct qla_hw_data *ha = vha->hw;
2468 	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2469 
2470 	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
2471 		return;
2472 
2473 	rval = QLA_SUCCESS;
2474 	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2475 	RD_REG_DWORD(&reg->iobase_addr);
2476 	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2477 	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2478 	    rval == QLA_SUCCESS; cnt--) {
2479 		if (cnt) {
2480 			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
2481 			udelay(10);
2482 		} else
2483 			rval = QLA_FUNCTION_TIMEOUT;
2484 	}
2485 	if (rval == QLA_SUCCESS)
2486 		goto next_test;
2487 
2488 	rval = QLA_SUCCESS;
2489 	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2490 	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
2491 	    rval == QLA_SUCCESS; cnt--) {
2492 		if (cnt) {
2493 			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
2494 			udelay(10);
2495 		} else
2496 			rval = QLA_FUNCTION_TIMEOUT;
2497 	}
2498 	if (rval != QLA_SUCCESS)
2499 		goto done;
2500 
2501 next_test:
2502 	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
2503 		ql_log(ql_log_info, vha, 0x504c,
2504 		    "Additional code -- 0x55AA.\n");
2505 
2506 done:
2507 	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
2508 	RD_REG_DWORD(&reg->iobase_window);
2509 }
2510 
2511 /**
2512  * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
2513  * @irq: interrupt number
2514  * @dev_id: SCSI driver HA context
2515  *
2516  * Called by system whenever the host adapter generates an interrupt.
2517  *
2518  * Returns handled flag.
2519  */
2520 irqreturn_t
2521 qla24xx_intr_handler(int irq, void *dev_id)
2522 {
2523 	scsi_qla_host_t	*vha;
2524 	struct qla_hw_data *ha;
2525 	struct device_reg_24xx __iomem *reg;
2526 	int		status;
2527 	unsigned long	iter;
2528 	uint32_t	stat;
2529 	uint32_t	hccr;
2530 	uint16_t	mb[8];
2531 	struct rsp_que *rsp;
2532 	unsigned long	flags;
2533 
2534 	rsp = (struct rsp_que *) dev_id;
2535 	if (!rsp) {
2536 		ql_log(ql_log_info, NULL, 0x5059,
2537 		    "%s: NULL response queue pointer.\n", __func__);
2538 		return IRQ_NONE;
2539 	}
2540 
2541 	ha = rsp->hw;
2542 	reg = &ha->iobase->isp24;
2543 	status = 0;
2544 
2545 	if (unlikely(pci_channel_offline(ha->pdev)))
2546 		return IRQ_HANDLED;
2547 
2548 	spin_lock_irqsave(&ha->hardware_lock, flags);
2549 	vha = pci_get_drvdata(ha->pdev);
2550 	for (iter = 50; iter--; ) {
2551 		stat = RD_REG_DWORD(&reg->host_status);
2552 		if (stat & HSRX_RISC_PAUSED) {
2553 			if (unlikely(pci_channel_offline(ha->pdev)))
2554 				break;
2555 
2556 			hccr = RD_REG_DWORD(&reg->hccr);
2557 
2558 			ql_log(ql_log_warn, vha, 0x504b,
2559 			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
2560 			    hccr);
2561 
2562 			qla2xxx_check_risc_status(vha);
2563 
2564 			ha->isp_ops->fw_dump(vha, 1);
2565 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2566 			break;
2567 		} else if ((stat & HSRX_RISC_INT) == 0)
2568 			break;
2569 
2570 		switch (stat & 0xff) {
2571 		case INTR_ROM_MB_SUCCESS:
2572 		case INTR_ROM_MB_FAILED:
2573 		case INTR_MB_SUCCESS:
2574 		case INTR_MB_FAILED:
2575 			qla24xx_mbx_completion(vha, MSW(stat));
2576 			status |= MBX_INTERRUPT;
2577 
2578 			break;
2579 		case INTR_ASYNC_EVENT:
2580 			mb[0] = MSW(stat);
2581 			mb[1] = RD_REG_WORD(&reg->mailbox1);
2582 			mb[2] = RD_REG_WORD(&reg->mailbox2);
2583 			mb[3] = RD_REG_WORD(&reg->mailbox3);
2584 			qla2x00_async_event(vha, rsp, mb);
2585 			break;
2586 		case INTR_RSP_QUE_UPDATE:
2587 		case INTR_RSP_QUE_UPDATE_83XX:
2588 			qla24xx_process_response_queue(vha, rsp);
2589 			break;
2590 		case INTR_ATIO_QUE_UPDATE:
2591 			qlt_24xx_process_atio_queue(vha);
2592 			break;
2593 		case INTR_ATIO_RSP_QUE_UPDATE:
2594 			qlt_24xx_process_atio_queue(vha);
2595 			qla24xx_process_response_queue(vha, rsp);
2596 			break;
2597 		default:
2598 			ql_dbg(ql_dbg_async, vha, 0x504f,
2599 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
2600 			break;
2601 		}
2602 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2603 		RD_REG_DWORD_RELAXED(&reg->hccr);
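		/* Early ISP83xx silicon (revision 1) appears to need a short
		 * settle delay after clearing the RISC interrupt. */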
2604 		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
2605 			ndelay(3500);
2606 	}
2607 	qla2x00_handle_mbx_completion(ha, status);
2608 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2609 
2610 	return IRQ_HANDLED;
2611 }
2612 
2613 static irqreturn_t
2614 qla24xx_msix_rsp_q(int irq, void *dev_id)
2615 {
2616 	struct qla_hw_data *ha;
2617 	struct rsp_que *rsp;
2618 	struct device_reg_24xx __iomem *reg;
2619 	struct scsi_qla_host *vha;
2620 	unsigned long flags;
2621 
2622 	rsp = (struct rsp_que *) dev_id;
2623 	if (!rsp) {
2624 		ql_log(ql_log_info, NULL, 0x505a,
2625 		    "%s: NULL response queue pointer.\n", __func__);
2626 		return IRQ_NONE;
2627 	}
2628 	ha = rsp->hw;
2629 	reg = &ha->iobase->isp24;
2630 
2631 	spin_lock_irqsave(&ha->hardware_lock, flags);
2632 
2633 	vha = pci_get_drvdata(ha->pdev);
2634 	qla24xx_process_response_queue(vha, rsp);
2635 	if (!ha->flags.disable_msix_handshake) {
2636 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2637 		RD_REG_DWORD_RELAXED(&reg->hccr);
2638 	}
2639 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2640 
2641 	return IRQ_HANDLED;
2642 }
2643 
2644 static irqreturn_t
2645 qla25xx_msix_rsp_q(int irq, void *dev_id)
2646 {
2647 	struct qla_hw_data *ha;
2648 	struct rsp_que *rsp;
2649 	struct device_reg_24xx __iomem *reg;
2650 	unsigned long flags;
2651 
2652 	rsp = (struct rsp_que *) dev_id;
2653 	if (!rsp) {
2654 		ql_log(ql_log_info, NULL, 0x505b,
2655 		    "%s: NULL response queue pointer.\n", __func__);
2656 		return IRQ_NONE;
2657 	}
2658 	ha = rsp->hw;
2659 
2660 	/* Clear the interrupt, if enabled, for this response queue */
2661 	if (!ha->flags.disable_msix_handshake) {
2662 		reg = &ha->iobase->isp24;
2663 		spin_lock_irqsave(&ha->hardware_lock, flags);
2664 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2665 		RD_REG_DWORD_RELAXED(&reg->hccr);
2666 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2667 	}
2668 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
2669 
2670 	return IRQ_HANDLED;
2671 }
2672 
2673 static irqreturn_t
2674 qla24xx_msix_default(int irq, void *dev_id)
2675 {
2676 	scsi_qla_host_t	*vha;
2677 	struct qla_hw_data *ha;
2678 	struct rsp_que *rsp;
2679 	struct device_reg_24xx __iomem *reg;
2680 	int		status;
2681 	uint32_t	stat;
2682 	uint32_t	hccr;
2683 	uint16_t	mb[8];
2684 	unsigned long flags;
2685 
2686 	rsp = (struct rsp_que *) dev_id;
2687 	if (!rsp) {
2688 		ql_log(ql_log_info, NULL, 0x505c,
2689 		    "%s: NULL response queue pointer.\n", __func__);
2690 		return IRQ_NONE;
2691 	}
2692 	ha = rsp->hw;
2693 	reg = &ha->iobase->isp24;
2694 	status = 0;
2695 
2696 	spin_lock_irqsave(&ha->hardware_lock, flags);
2697 	vha = pci_get_drvdata(ha->pdev);
2698 	do {
2699 		stat = RD_REG_DWORD(&reg->host_status);
2700 		if (stat & HSRX_RISC_PAUSED) {
2701 			if (unlikely(pci_channel_offline(ha->pdev)))
2702 				break;
2703 
2704 			hccr = RD_REG_DWORD(&reg->hccr);
2705 
2706 			ql_log(ql_log_info, vha, 0x5050,
2707 			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
2708 			    hccr);
2709 
2710 			qla2xxx_check_risc_status(vha);
2711 
2712 			ha->isp_ops->fw_dump(vha, 1);
2713 			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2714 			break;
2715 		} else if ((stat & HSRX_RISC_INT) == 0)
2716 			break;
2717 
2718 		switch (stat & 0xff) {
2719 		case INTR_ROM_MB_SUCCESS:
2720 		case INTR_ROM_MB_FAILED:
2721 		case INTR_MB_SUCCESS:
2722 		case INTR_MB_FAILED:
2723 			qla24xx_mbx_completion(vha, MSW(stat));
2724 			status |= MBX_INTERRUPT;
2725 
2726 			break;
2727 		case INTR_ASYNC_EVENT:
2728 			mb[0] = MSW(stat);
2729 			mb[1] = RD_REG_WORD(&reg->mailbox1);
2730 			mb[2] = RD_REG_WORD(&reg->mailbox2);
2731 			mb[3] = RD_REG_WORD(&reg->mailbox3);
2732 			qla2x00_async_event(vha, rsp, mb);
2733 			break;
2734 		case INTR_RSP_QUE_UPDATE:
2735 		case INTR_RSP_QUE_UPDATE_83XX:
2736 			qla24xx_process_response_queue(vha, rsp);
2737 			break;
2738 		case INTR_ATIO_QUE_UPDATE:
2739 			qlt_24xx_process_atio_queue(vha);
2740 			break;
2741 		case INTR_ATIO_RSP_QUE_UPDATE:
2742 			qlt_24xx_process_atio_queue(vha);
2743 			qla24xx_process_response_queue(vha, rsp);
2744 			break;
2745 		default:
2746 			ql_dbg(ql_dbg_async, vha, 0x5051,
2747 			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
2748 			break;
2749 		}
2750 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
2751 	} while (0);
2752 	qla2x00_handle_mbx_completion(ha, status);
2753 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2754 
2755 	return IRQ_HANDLED;
2756 }
2757 
2758 /* Interrupt handling helpers. */
2759 
2760 struct qla_init_msix_entry {
2761 	const char *name;
2762 	irq_handler_t handler;
2763 };
2764 
2765 static struct qla_init_msix_entry msix_entries[3] = {
2766 	{ "qla2xxx (default)", qla24xx_msix_default },
2767 	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2768 	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
2769 };
2770 
2771 static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
2772 	{ "qla2xxx (default)", qla82xx_msix_default },
2773 	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
2774 };
2775 
2776 static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
2777 	{ "qla2xxx (default)", qla24xx_msix_default },
2778 	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
2779 	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
2780 };
2781 
2782 static void
2783 qla24xx_disable_msix(struct qla_hw_data *ha)
2784 {
2785 	int i;
2786 	struct qla_msix_entry *qentry;
2787 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2788 
2789 	for (i = 0; i < ha->msix_count; i++) {
2790 		qentry = &ha->msix_entries[i];
2791 		if (qentry->have_irq)
2792 			free_irq(qentry->vector, qentry->rsp);
2793 	}
2794 	pci_disable_msix(ha->pdev);
2795 	kfree(ha->msix_entries);
2796 	ha->msix_entries = NULL;
2797 	ha->flags.msix_enabled = 0;
2798 	ql_dbg(ql_dbg_init, vha, 0x0042,
2799 	    "Disabled MSI-X.\n");
2800 }
2801 
2802 static int
2803 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
2804 {
2805 #define MIN_MSIX_COUNT	2
2806 	int i, ret;
2807 	struct msix_entry *entries;
2808 	struct qla_msix_entry *qentry;
2809 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2810 
2811 	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
2812 			GFP_KERNEL);
2813 	if (!entries) {
2814 		ql_log(ql_log_warn, vha, 0x00bc,
2815 		    "Failed to allocate memory for msix_entry.\n");
2816 		return -ENOMEM;
2817 	}
2818 
2819 	for (i = 0; i < ha->msix_count; i++)
2820 		entries[i].entry = i;
2821 
2822 	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2823 	if (ret) {
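		/* A positive return value is the number of vectors actually
		 * available; retry with that count unless it is below the
		 * minimum this driver can operate with. */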
2824 		if (ret < MIN_MSIX_COUNT)
2825 			goto msix_failed;
2826 
2827 		ql_log(ql_log_warn, vha, 0x00c6,
2828 		    "MSI-X: Failed to enable support -- %d/%d. "
2829 		    "Retrying with %d vectors.\n",
2830 		    ha->msix_count, ret, ret);
2831 		ha->msix_count = ret;
2832 		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
2833 		if (ret) {
2834 msix_failed:
2835 			ql_log(ql_log_fatal, vha, 0x00c7,
2836 			    "MSI-X: Failed to enable support, "
2837 			    "giving up -- %d/%d.\n",
2838 			    ha->msix_count, ret);
2839 			goto msix_out;
2840 		}
2841 		ha->max_rsp_queues = ha->msix_count - 1;
2842 	}
2843 	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
2844 				ha->msix_count, GFP_KERNEL);
2845 	if (!ha->msix_entries) {
2846 		ql_log(ql_log_fatal, vha, 0x00c8,
2847 		    "Failed to allocate memory for ha->msix_entries.\n");
2848 		ret = -ENOMEM;
2849 		goto msix_out;
2850 	}
2851 	ha->flags.msix_enabled = 1;
2852 
2853 	for (i = 0; i < ha->msix_count; i++) {
2854 		qentry = &ha->msix_entries[i];
2855 		qentry->vector = entries[i].vector;
2856 		qentry->entry = entries[i].entry;
2857 		qentry->have_irq = 0;
2858 		qentry->rsp = NULL;
2859 	}
2860 
2861 	/* Enable MSI-X vectors for the base queue */
2862 	for (i = 0; i < ha->msix_count; i++) {
2863 		qentry = &ha->msix_entries[i];
2864 		if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
2865 			ret = request_irq(qentry->vector,
2866 				qla83xx_msix_entries[i].handler,
2867 				0, qla83xx_msix_entries[i].name, rsp);
2868 		} else if (IS_QLA82XX(ha)) {
2869 			ret = request_irq(qentry->vector,
2870 				qla82xx_msix_entries[i].handler,
2871 				0, qla82xx_msix_entries[i].name, rsp);
2872 		} else {
2873 			ret = request_irq(qentry->vector,
2874 				msix_entries[i].handler,
2875 				0, msix_entries[i].name, rsp);
2876 		}
2877 		if (ret) {
2878 			ql_log(ql_log_fatal, vha, 0x00cb,
2879 			    "MSI-X: unable to register handler -- %x/%d.\n",
2880 			    qentry->vector, ret);
2881 			qla24xx_disable_msix(ha);
2882 			ha->mqenable = 0;
2883 			goto msix_out;
2884 		}
2885 		qentry->have_irq = 1;
2886 		qentry->rsp = rsp;
2887 		rsp->msix = qentry;
2888 	}
2889 
2890 	/* Enable multiqueue support if extra req/rsp queues are available */
2891 	if (IS_QLA83XX(ha)) {
2892 		if (ha->msixbase && ha->mqiobase &&
2893 		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
2894 			ha->mqenable = 1;
2895 	} else if (ha->mqiobase &&
2896 	    (ha->max_rsp_queues > 1 ||
2897 	     ha->max_req_queues > 1))
2898 		ha->mqenable = 1;
2899 	ql_dbg(ql_dbg_multiq, vha, 0xc005,
2900 	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2901 	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2902 	ql_dbg(ql_dbg_init, vha, 0x0055,
2903 	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
2904 	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
2905 
2906 msix_out:
2907 	kfree(entries);
2908 	return ret;
2909 }
2910 
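/**
 * qla2x00_request_irqs() - Enable MSI-X, MSI or INTx interrupts and
 *	install the interrupt handler(s).
 * @ha: HA context
 * @rsp: Base response queue, passed to the handler(s) as dev_id
 */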
2911 int
2912 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
2913 {
2914 	int ret;
2915 	device_reg_t __iomem *reg = ha->iobase;
2916 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
2917 
2918 	/* If possible, enable MSI-X. */
2919 	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2920 		!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
2921 		goto skip_msi;
2922 
2923 	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
2924 		(ha->pdev->subsystem_device == 0x7040 ||
2925 		ha->pdev->subsystem_device == 0x7041 ||
2926 		ha->pdev->subsystem_device == 0x1705)) {
2927 		ql_log(ql_log_warn, vha, 0x0034,
2928 		    "MSI-X: Unsupported ISP 2432 SSVID/SSDID (0x%X,0x%X).\n",
2929 			ha->pdev->subsystem_vendor,
2930 			ha->pdev->subsystem_device);
2931 		goto skip_msi;
2932 	}
2933 
2934 	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
2935 		ql_log(ql_log_warn, vha, 0x0035,
2936 		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
2937 		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
2938 		goto skip_msix;
2939 	}
2940 
2941 	ret = qla24xx_enable_msix(ha, rsp);
2942 	if (!ret) {
2943 		ql_dbg(ql_dbg_init, vha, 0x0036,
2944 		    "MSI-X: Enabled (0x%X, 0x%X).\n",
2945 		    ha->chip_revision, ha->fw_attributes);
2946 		goto clear_risc_ints;
2947 	}
2948 	ql_log(ql_log_info, vha, 0x0037,
2949 	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);
2950 skip_msix:
2951 
2952 	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
2953 	    !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
2954 		goto skip_msi;
2955 
2956 	ret = pci_enable_msi(ha->pdev);
2957 	if (!ret) {
2958 		ql_dbg(ql_dbg_init, vha, 0x0038,
2959 		    "MSI: Enabled.\n");
2960 		ha->flags.msi_enabled = 1;
2961 	} else
2962 		ql_log(ql_log_warn, vha, 0x0039,
2963 		    "MSI: Falling back to INTa mode -- %d.\n", ret);
2964 
2965 	/* Skip INTx on ISP82xx. */
2966 	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
2967 		return QLA_FUNCTION_FAILED;
2968 
2969 skip_msi:
2970 
2971 	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
2972 	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
2973 	    QLA2XXX_DRIVER_NAME, rsp);
2974 	if (ret) {
2975 		ql_log(ql_log_warn, vha, 0x003a,
2976 		    "Failed to reserve interrupt %d; already in use.\n",
2977 		    ha->pdev->irq);
2978 		goto fail;
2979 	} else if (!ha->flags.msi_enabled) {
2980 		ql_dbg(ql_dbg_init, vha, 0x0125,
2981 		    "INTa mode: Enabled.\n");
2982 		ha->flags.mr_intr_valid = 1;
2983 	}
2984 
2985 clear_risc_ints:
2986 
2987 	spin_lock_irq(&ha->hardware_lock);
2988 	if (!IS_FWI2_CAPABLE(ha))
2989 		WRT_REG_WORD(&reg->isp.semaphore, 0);
2990 	spin_unlock_irq(&ha->hardware_lock);
2991 
2992 fail:
2993 	return ret;
2994 }
2995 
2996 void
2997 qla2x00_free_irqs(scsi_qla_host_t *vha)
2998 {
2999 	struct qla_hw_data *ha = vha->hw;
3000 	struct rsp_que *rsp;
3001 
3002 	/*
3003 	 * We need to check that ha->rsp_q_map is valid in case we are called
3004 	 * from a probe failure context.
3005 	 */
3006 	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
3007 		return;
3008 	rsp = ha->rsp_q_map[0];
3009 
3010 	if (ha->flags.msix_enabled)
3011 		qla24xx_disable_msix(ha);
3012 	else if (ha->flags.msi_enabled) {
3013 		free_irq(ha->pdev->irq, rsp);
3014 		pci_disable_msi(ha->pdev);
3015 	} else
3016 		free_irq(ha->pdev->irq, rsp);
3017 }
3018 
3019 
3020 int qla25xx_request_irq(struct rsp_que *rsp)
3021 {
3022 	struct qla_hw_data *ha = rsp->hw;
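	/* msix_entries[2] is the "multiq" entry, qla25xx_msix_rsp_q. */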
3023 	struct qla_init_msix_entry *intr = &msix_entries[2];
3024 	struct qla_msix_entry *msix = rsp->msix;
3025 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
3026 	int ret;
3027 
3028 	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
3029 	if (ret) {
3030 		ql_log(ql_log_fatal, vha, 0x00e6,
3031 		    "MSI-X: Unable to register handler -- %x/%d.\n",
3032 		    msix->vector, ret);
3033 		return ret;
3034 	}
3035 	msix->have_irq = 1;
3036 	msix->rsp = rsp;
3037 	return ret;
3038 }
3039