xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_iocb.c (revision c4c3c32d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * QLogic Fibre Channel HBA Driver
4  * Copyright (c)  2003-2014 QLogic Corporation
5  */
6 #include "qla_def.h"
7 #include "qla_target.h"
8 
9 #include <linux/blkdev.h>
10 #include <linux/delay.h>
11 
12 #include <scsi/scsi_tcq.h>
13 
14 /**
15  * qla2x00_get_cmd_direction() - Determine control_flags data direction.
16  * @sp: SCSI command
17  *
18  * Returns the proper CF_* direction based on the command's data direction.
19  */
20 static inline uint16_t
21 qla2x00_get_cmd_direction(srb_t *sp)
22 {
23 	uint16_t cflags;
24 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
25 	struct scsi_qla_host *vha = sp->vha;
26 
27 	cflags = 0;
28 
29 	/* Set transfer direction */
30 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
31 		cflags = CF_WRITE;
32 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
33 		vha->qla_stats.output_requests++;
34 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
35 		cflags = CF_READ;
36 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
37 		vha->qla_stats.input_requests++;
38 	}
39 	return (cflags);
40 }
41 
42 /**
43  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
44  * Continuation Type 0 IOCBs to allocate.
45  *
46  * @dsds: number of data segment descriptors needed
47  *
48  * Returns the number of IOCB entries needed to store @dsds.
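 *
 * A Command Type 2 IOCB holds 3 DSDs and each Continuation Type 0 IOCB holds
 * 7 more, so e.g. @dsds = 12 needs 1 + ceil((12 - 3) / 7) = 3 entries.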
49  */
50 uint16_t
51 qla2x00_calc_iocbs_32(uint16_t dsds)
52 {
53 	uint16_t iocbs;
54 
55 	iocbs = 1;
56 	if (dsds > 3) {
57 		iocbs += (dsds - 3) / 7;
58 		if ((dsds - 3) % 7)
59 			iocbs++;
60 	}
61 	return (iocbs);
62 }
63 
64 /**
65  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
66  * Continuation Type 1 IOCBs to allocate.
67  *
68  * @dsds: number of data segment descriptors needed
69  *
70  * Returns the number of IOCB entries needed to store @dsds.
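 *
 * A Command Type 3 IOCB holds 2 DSDs and each Continuation Type 1 IOCB holds
 * 5 more, so e.g. @dsds = 12 needs 1 + ceil((12 - 2) / 5) = 3 entries.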
71  */
72 uint16_t
73 qla2x00_calc_iocbs_64(uint16_t dsds)
74 {
75 	uint16_t iocbs;
76 
77 	iocbs = 1;
78 	if (dsds > 2) {
79 		iocbs += (dsds - 2) / 5;
80 		if ((dsds - 2) % 5)
81 			iocbs++;
82 	}
83 	return (iocbs);
84 }
85 
86 /**
87  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
88  * @vha: HA context
89  *
90  * Returns a pointer to the Continuation Type 0 IOCB packet.
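 *
 * The caller must already have reserved ring space; this helper only advances
 * the ring index (wrapping at req->length) and stamps the entry type.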
91  */
92 static inline cont_entry_t *
93 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
94 {
95 	cont_entry_t *cont_pkt;
96 	struct req_que *req = vha->req;
97 	/* Adjust ring index. */
98 	req->ring_index++;
99 	if (req->ring_index == req->length) {
100 		req->ring_index = 0;
101 		req->ring_ptr = req->ring;
102 	} else {
103 		req->ring_ptr++;
104 	}
105 
106 	cont_pkt = (cont_entry_t *)req->ring_ptr;
107 
108 	/* Load packet defaults. */
109 	put_unaligned_le32(CONTINUE_TYPE, &cont_pkt->entry_type);
110 
111 	return (cont_pkt);
112 }
113 
114 /**
115  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
116  * @vha: HA context
117  * @req: request queue
118  *
119  * Returns a pointer to the continuation type 1 IOCB packet.
120  */
121 cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
123 {
124 	cont_a64_entry_t *cont_pkt;
125 
126 	/* Adjust ring index. */
127 	req->ring_index++;
128 	if (req->ring_index == req->length) {
129 		req->ring_index = 0;
130 		req->ring_ptr = req->ring;
131 	} else {
132 		req->ring_ptr++;
133 	}
134 
135 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136 
137 	/* Load packet defaults. */
138 	put_unaligned_le32(IS_QLAFX00(vha->hw) ? CONTINUE_A64_TYPE_FX00 :
139 			   CONTINUE_A64_TYPE, &cont_pkt->entry_type);
140 
141 	return (cont_pkt);
142 }
143 
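/*
 * qla24xx_configure_prot_mode() - Translate the SCSI protection operation of
 * @sp into the firmware PO_MODE_DIF_* option bits in @fw_prot_opts.
 *
 * Returns the number of protection scatter/gather entries for the command.
 */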
144 inline int
145 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
146 {
147 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148 
149 	/* We always use DIF bundling for best performance */
150 	*fw_prot_opts = 0;
151 
152 	/* Translate SCSI opcode to a protection opcode */
153 	switch (scsi_get_prot_op(cmd)) {
154 	case SCSI_PROT_READ_STRIP:
155 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
156 		break;
157 	case SCSI_PROT_WRITE_INSERT:
158 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
159 		break;
160 	case SCSI_PROT_READ_INSERT:
161 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
162 		break;
163 	case SCSI_PROT_WRITE_STRIP:
164 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
165 		break;
166 	case SCSI_PROT_READ_PASS:
167 	case SCSI_PROT_WRITE_PASS:
168 		if (cmd->prot_flags & SCSI_PROT_IP_CHECKSUM)
169 			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
170 		else
171 			*fw_prot_opts |= PO_MODE_DIF_PASS;
172 		break;
173 	default:	/* Normal Request */
174 		*fw_prot_opts |= PO_MODE_DIF_PASS;
175 		break;
176 	}
177 
178 	if (!(cmd->prot_flags & SCSI_PROT_GUARD_CHECK))
179 		*fw_prot_opts |= PO_DISABLE_GUARD_CHECK;
180 
181 	return scsi_prot_sg_count(cmd);
182 }
183 
184 /*
185  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
186  * capable IOCB types.
187  *
188  * @sp: SRB command to process
189  * @cmd_pkt: Command type 2 IOCB
190  * @tot_dsds: Total number of segments to transfer
191  */
192 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
193     uint16_t tot_dsds)
194 {
195 	uint16_t	avail_dsds;
196 	struct dsd32	*cur_dsd;
197 	scsi_qla_host_t	*vha;
198 	struct scsi_cmnd *cmd;
199 	struct scatterlist *sg;
200 	int i;
201 
202 	cmd = GET_CMD_SP(sp);
203 
204 	/* Update entry type to indicate Command Type 2 IOCB */
205 	put_unaligned_le32(COMMAND_TYPE, &cmd_pkt->entry_type);
206 
207 	/* No data transfer */
208 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
209 		cmd_pkt->byte_count = cpu_to_le32(0);
210 		return;
211 	}
212 
213 	vha = sp->vha;
214 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
215 
216 	/* Three DSDs are available in the Command Type 2 IOCB */
217 	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd32);
218 	cur_dsd = cmd_pkt->dsd32;
219 
220 	/* Load data segments */
221 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
222 		cont_entry_t *cont_pkt;
223 
224 		/* Allocate additional continuation packets? */
225 		if (avail_dsds == 0) {
226 			/*
227 			 * Seven DSDs are available in the Continuation
228 			 * Type 0 IOCB.
229 			 */
230 			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
231 			cur_dsd = cont_pkt->dsd;
232 			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
233 		}
234 
235 		append_dsd32(&cur_dsd, sg);
236 		avail_dsds--;
237 	}
238 }
239 
240 /**
241  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
242  * capable IOCB types.
243  *
244  * @sp: SRB command to process
245  * @cmd_pkt: Command type 3 IOCB
246  * @tot_dsds: Total number of segments to transfer
247  */
248 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
249     uint16_t tot_dsds)
250 {
251 	uint16_t	avail_dsds;
252 	struct dsd64	*cur_dsd;
253 	scsi_qla_host_t	*vha;
254 	struct scsi_cmnd *cmd;
255 	struct scatterlist *sg;
256 	int i;
257 
258 	cmd = GET_CMD_SP(sp);
259 
260 	/* Update entry type to indicate Command Type 3 IOCB */
261 	put_unaligned_le32(COMMAND_A64_TYPE, &cmd_pkt->entry_type);
262 
263 	/* No data transfer */
264 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
265 		cmd_pkt->byte_count = cpu_to_le32(0);
266 		return;
267 	}
268 
269 	vha = sp->vha;
270 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
271 
272 	/* Two DSDs are available in the Command Type 3 IOCB */
273 	avail_dsds = ARRAY_SIZE(cmd_pkt->dsd64);
274 	cur_dsd = cmd_pkt->dsd64;
275 
276 	/* Load data segments */
277 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
278 		cont_a64_entry_t *cont_pkt;
279 
280 		/* Allocate additional continuation packets? */
281 		if (avail_dsds == 0) {
282 			/*
283 			 * Five DSDs are available in the Continuation
284 			 * Type 1 IOCB.
285 			 */
286 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
287 			cur_dsd = cont_pkt->dsd;
288 			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
289 		}
290 
291 		append_dsd64(&cur_dsd, sg);
292 		avail_dsds--;
293 	}
294 }
295 
296 /*
297  * Find the first handle that is not in use, starting from
298  * req->current_outstanding_cmd + 1. The caller must hold the lock that is
299  * associated with @req.
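 *
 * Handle 0 is never handed out; it is reserved as the "no free handle"
 * return value, which is why the search wraps back to 1.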
300  */
301 uint32_t qla2xxx_get_next_handle(struct req_que *req)
302 {
303 	uint32_t index, handle = req->current_outstanding_cmd;
304 
305 	for (index = 1; index < req->num_outstanding_cmds; index++) {
306 		handle++;
307 		if (handle == req->num_outstanding_cmds)
308 			handle = 1;
309 		if (!req->outstanding_cmds[handle])
310 			return handle;
311 	}
312 
313 	return 0;
314 }
315 
316 /**
317  * qla2x00_start_scsi() - Send a SCSI command to the ISP
318  * @sp: command to send to the ISP
319  *
320  * Returns non-zero if a failure occurred, else zero.
321  */
322 int
323 qla2x00_start_scsi(srb_t *sp)
324 {
325 	int		nseg;
326 	unsigned long   flags;
327 	scsi_qla_host_t	*vha;
328 	struct scsi_cmnd *cmd;
329 	uint32_t	*clr_ptr;
330 	uint32_t	handle;
331 	cmd_entry_t	*cmd_pkt;
332 	uint16_t	cnt;
333 	uint16_t	req_cnt;
334 	uint16_t	tot_dsds;
335 	struct device_reg_2xxx __iomem *reg;
336 	struct qla_hw_data *ha;
337 	struct req_que *req;
338 	struct rsp_que *rsp;
339 
340 	/* Setup device pointers. */
341 	vha = sp->vha;
342 	ha = vha->hw;
343 	reg = &ha->iobase->isp;
344 	cmd = GET_CMD_SP(sp);
345 	req = ha->req_q_map[0];
346 	rsp = ha->rsp_q_map[0];
347 	/* So we know we haven't pci_map'ed anything yet */
348 	tot_dsds = 0;
349 
350 	/* Send marker if required */
351 	if (vha->marker_needed != 0) {
352 		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
353 		    QLA_SUCCESS) {
354 			return (QLA_FUNCTION_FAILED);
355 		}
356 		vha->marker_needed = 0;
357 	}
358 
359 	/* Acquire ring specific lock */
360 	spin_lock_irqsave(&ha->hardware_lock, flags);
361 
362 	handle = qla2xxx_get_next_handle(req);
363 	if (handle == 0)
364 		goto queuing_error;
365 
366 	/* Map the sg table so we have an accurate count of sg entries needed */
367 	if (scsi_sg_count(cmd)) {
368 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
369 		    scsi_sg_count(cmd), cmd->sc_data_direction);
370 		if (unlikely(!nseg))
371 			goto queuing_error;
372 	} else
373 		nseg = 0;
374 
375 	tot_dsds = nseg;
376 
377 	/* Calculate the number of request entries needed. */
378 	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
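	/*
	 * If the cached free count looks too small, re-read the firmware's
	 * request-queue OUT pointer and recompute it; two spare entries are
	 * kept as headroom.
	 */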
379 	if (req->cnt < (req_cnt + 2)) {
380 		cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg));
381 		if (req->ring_index < cnt)
382 			req->cnt = cnt - req->ring_index;
383 		else
384 			req->cnt = req->length -
385 			    (req->ring_index - cnt);
386 		/* If still no head room then bail out */
387 		if (req->cnt < (req_cnt + 2))
388 			goto queuing_error;
389 	}
390 
391 	/* Build command packet */
392 	req->current_outstanding_cmd = handle;
393 	req->outstanding_cmds[handle] = sp;
394 	sp->handle = handle;
395 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
396 	req->cnt -= req_cnt;
397 
398 	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
399 	cmd_pkt->handle = handle;
400 	/* Zero out remaining portion of packet. */
401 	clr_ptr = (uint32_t *)cmd_pkt + 2;
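	/* Skip the first 8 bytes: the entry header and the handle set above. */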
402 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
403 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
404 
405 	/* Set target ID and LUN number*/
406 	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
407 	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
408 	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
409 
410 	/* Load SCSI command packet. */
411 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
412 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
413 
414 	/* Build IOCB segments */
415 	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
416 
417 	/* Set total data segment count. */
418 	cmd_pkt->entry_count = (uint8_t)req_cnt;
419 	wmb();
420 
421 	/* Adjust ring index. */
422 	req->ring_index++;
423 	if (req->ring_index == req->length) {
424 		req->ring_index = 0;
425 		req->ring_ptr = req->ring;
426 	} else
427 		req->ring_ptr++;
428 
429 	sp->flags |= SRB_DMA_VALID;
430 
431 	/* Set chip new ring index. */
432 	wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index);
433 	rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
434 
435 	/* Manage unprocessed RIO/ZIO commands in response queue. */
436 	if (vha->flags.process_response_queue &&
437 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
438 		qla2x00_process_response_queue(rsp);
439 
440 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
441 	return (QLA_SUCCESS);
442 
443 queuing_error:
444 	if (tot_dsds)
445 		scsi_dma_unmap(cmd);
446 
447 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
448 
449 	return (QLA_FUNCTION_FAILED);
450 }
451 
452 /**
453  * qla2x00_start_iocbs() - Execute the IOCB command
454  * @vha: HA context
455  * @req: request queue
456  */
457 void
458 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
459 {
460 	struct qla_hw_data *ha = vha->hw;
461 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
462 
463 	if (IS_P3P_TYPE(ha)) {
464 		qla82xx_start_iocbs(vha);
465 	} else {
466 		/* Adjust ring index. */
467 		req->ring_index++;
468 		if (req->ring_index == req->length) {
469 			req->ring_index = 0;
470 			req->ring_ptr = req->ring;
471 		} else
472 			req->ring_ptr++;
473 
474 		/* Set chip new ring index. */
475 		if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
476 			wrt_reg_dword(req->req_q_in, req->ring_index);
477 		} else if (IS_QLA83XX(ha)) {
478 			wrt_reg_dword(req->req_q_in, req->ring_index);
479 			rd_reg_dword_relaxed(&ha->iobase->isp24.hccr);
480 		} else if (IS_QLAFX00(ha)) {
481 			wrt_reg_dword(&reg->ispfx00.req_q_in, req->ring_index);
482 			rd_reg_dword_relaxed(&reg->ispfx00.req_q_in);
483 			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
484 		} else if (IS_FWI2_CAPABLE(ha)) {
485 			wrt_reg_dword(&reg->isp24.req_q_in, req->ring_index);
486 			rd_reg_dword_relaxed(&reg->isp24.req_q_in);
487 		} else {
488 			wrt_reg_word(ISP_REQ_Q_IN(ha, &reg->isp),
489 				req->ring_index);
490 			rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, &reg->isp));
491 		}
492 	}
493 }
494 
495 /**
496  * __qla2x00_marker() - Send a marker IOCB to the firmware.
497  * @vha: HA context
498  * @qpair: queue pair pointer
499  * @loop_id: loop ID
500  * @lun: LUN
501  * @type: marker modifier
502  *
503  * Can be called from both normal and interrupt context.
504  *
505  * Returns non-zero if a failure occurred, else zero.
506  */
507 static int
508 __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
509     uint16_t loop_id, uint64_t lun, uint8_t type)
510 {
511 	mrk_entry_t *mrk;
512 	struct mrk_entry_24xx *mrk24 = NULL;
513 	struct req_que *req = qpair->req;
514 	struct qla_hw_data *ha = vha->hw;
515 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
516 
517 	mrk = (mrk_entry_t *)__qla2x00_alloc_iocbs(qpair, NULL);
518 	if (mrk == NULL) {
519 		ql_log(ql_log_warn, base_vha, 0x3026,
520 		    "Failed to allocate Marker IOCB.\n");
521 
522 		return (QLA_FUNCTION_FAILED);
523 	}
524 
525 	mrk24 = (struct mrk_entry_24xx *)mrk;
526 
527 	mrk->entry_type = MARKER_TYPE;
528 	mrk->modifier = type;
529 	if (type != MK_SYNC_ALL) {
530 		if (IS_FWI2_CAPABLE(ha)) {
531 			mrk24->nport_handle = cpu_to_le16(loop_id);
532 			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
533 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
534 			mrk24->vp_index = vha->vp_idx;
535 		} else {
536 			SET_TARGET_ID(ha, mrk->target, loop_id);
537 			mrk->lun = cpu_to_le16((uint16_t)lun);
538 		}
539 	}
540 
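	/*
	 * A marker has no associated SRB; QLA_SKIP_HANDLE flags the IOCB so
	 * its completion is not matched against an outstanding command.
	 */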
541 	if (IS_FWI2_CAPABLE(ha))
542 		mrk24->handle = QLA_SKIP_HANDLE;
543 
544 	wmb();
545 
546 	qla2x00_start_iocbs(vha, req);
547 
548 	return (QLA_SUCCESS);
549 }
550 
551 int
552 qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair,
553     uint16_t loop_id, uint64_t lun, uint8_t type)
554 {
555 	int ret;
556 	unsigned long flags = 0;
557 
558 	spin_lock_irqsave(qpair->qp_lock_ptr, flags);
559 	ret = __qla2x00_marker(vha, qpair, loop_id, lun, type);
560 	spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
561 
562 	return (ret);
563 }
564 
565 /*
566  * qla2x00_issue_marker
567  *
568  * Issue marker
569  * Caller CAN have hardware lock held as specified by ha_locked parameter.
570  * Might release it, then reacquire.
571  */
572 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
573 {
574 	if (ha_locked) {
575 		if (__qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
576 					MK_SYNC_ALL) != QLA_SUCCESS)
577 			return QLA_FUNCTION_FAILED;
578 	} else {
579 		if (qla2x00_marker(vha, vha->hw->base_qpair, 0, 0,
580 					MK_SYNC_ALL) != QLA_SUCCESS)
581 			return QLA_FUNCTION_FAILED;
582 	}
583 	vha->marker_needed = 0;
584 
585 	return QLA_SUCCESS;
586 }
587 
588 static inline int
589 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
590 	uint16_t tot_dsds)
591 {
592 	struct dsd64 *cur_dsd = NULL, *next_dsd;
593 	scsi_qla_host_t	*vha;
594 	struct qla_hw_data *ha;
595 	struct scsi_cmnd *cmd;
596 	struct	scatterlist *cur_seg;
597 	uint8_t avail_dsds;
598 	uint8_t first_iocb = 1;
599 	uint32_t dsd_list_len;
600 	struct dsd_dma *dsd_ptr;
601 	struct ct6_dsd *ctx;
602 	struct qla_qpair *qpair = sp->qpair;
603 
604 	cmd = GET_CMD_SP(sp);
605 
606 	/* Update entry type to indicate Command Type 6 IOCB */
607 	put_unaligned_le32(COMMAND_TYPE_6, &cmd_pkt->entry_type);
608 
609 	/* No data transfer */
610 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE ||
611 	    tot_dsds == 0) {
612 		cmd_pkt->byte_count = cpu_to_le32(0);
613 		return 0;
614 	}
615 
616 	vha = sp->vha;
617 	ha = vha->hw;
618 
619 	/* Set transfer direction */
620 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
621 		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
622 		qpair->counters.output_bytes += scsi_bufflen(cmd);
623 		qpair->counters.output_requests++;
624 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
625 		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
626 		qpair->counters.input_bytes += scsi_bufflen(cmd);
627 		qpair->counters.input_requests++;
628 	}
629 
630 	cur_seg = scsi_sglist(cmd);
631 	ctx = &sp->u.scmd.ct6_ctx;
632 
633 	while (tot_dsds) {
634 		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
635 		    QLA_DSDS_PER_IOCB : tot_dsds;
636 		tot_dsds -= avail_dsds;
637 		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
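		/*
		 * Each DSD list carries one slot beyond its data DSDs; it is
		 * used either to chain to the next list or, on the last list,
		 * for the null terminator appended after the loop.
		 */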
638 
639 		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
640 		    struct dsd_dma, list);
641 		next_dsd = dsd_ptr->dsd_addr;
642 		list_del(&dsd_ptr->list);
643 		ha->gbl_dsd_avail--;
644 		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
645 		ctx->dsd_use_cnt++;
646 		ha->gbl_dsd_inuse++;
647 
648 		if (first_iocb) {
649 			first_iocb = 0;
650 			put_unaligned_le64(dsd_ptr->dsd_list_dma,
651 					   &cmd_pkt->fcp_dsd.address);
652 			cmd_pkt->fcp_dsd.length = cpu_to_le32(dsd_list_len);
653 		} else {
654 			put_unaligned_le64(dsd_ptr->dsd_list_dma,
655 					   &cur_dsd->address);
656 			cur_dsd->length = cpu_to_le32(dsd_list_len);
657 			cur_dsd++;
658 		}
659 		cur_dsd = next_dsd;
660 		while (avail_dsds) {
661 			append_dsd64(&cur_dsd, cur_seg);
662 			cur_seg = sg_next(cur_seg);
663 			avail_dsds--;
664 		}
665 	}
666 
667 	/* Null termination */
668 	cur_dsd->address = 0;
669 	cur_dsd->length = 0;
670 	cur_dsd++;
671 	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
672 	return 0;
673 }
674 
675 /*
676  * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
677  * for Command Type 6.
678  *
679  * @dsds: number of data segment descriptors needed
680  *
681  * Returns the number of DSD lists needed to store @dsds.
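 * This is simply DIV_ROUND_UP(@dsds, QLA_DSDS_PER_IOCB).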
682  */
683 static inline uint16_t
684 qla24xx_calc_dsd_lists(uint16_t dsds)
685 {
686 	uint16_t dsd_lists = 0;
687 
688 	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
689 	if (dsds % QLA_DSDS_PER_IOCB)
690 		dsd_lists++;
691 	return dsd_lists;
692 }
693 
694 
695 /**
696  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
697  * IOCB types.
698  *
699  * @sp: SRB command to process
700  * @cmd_pkt: Command Type 7 IOCB
701  * @tot_dsds: Total number of segments to transfer
702  * @req: pointer to request queue
703  */
704 inline void
705 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
706 	uint16_t tot_dsds, struct req_que *req)
707 {
708 	uint16_t	avail_dsds;
709 	struct dsd64	*cur_dsd;
710 	scsi_qla_host_t	*vha;
711 	struct scsi_cmnd *cmd;
712 	struct scatterlist *sg;
713 	int i;
714 	struct qla_qpair *qpair = sp->qpair;
715 
716 	cmd = GET_CMD_SP(sp);
717 
718 	/* Update entry type to indicate Command Type 7 IOCB */
719 	put_unaligned_le32(COMMAND_TYPE_7, &cmd_pkt->entry_type);
720 
721 	/* No data transfer */
722 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
723 		cmd_pkt->byte_count = cpu_to_le32(0);
724 		return;
725 	}
726 
727 	vha = sp->vha;
728 
729 	/* Set transfer direction */
730 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
731 		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
732 		qpair->counters.output_bytes += scsi_bufflen(cmd);
733 		qpair->counters.output_requests++;
734 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
735 		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
736 		qpair->counters.input_bytes += scsi_bufflen(cmd);
737 		qpair->counters.input_requests++;
738 	}
739 
740 	/* One DSD is available in the Command Type 7 IOCB */
741 	avail_dsds = 1;
742 	cur_dsd = &cmd_pkt->dsd;
743 
744 	/* Load data segments */
745 
746 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
747 		cont_a64_entry_t *cont_pkt;
748 
749 		/* Allocate additional continuation packets? */
750 		if (avail_dsds == 0) {
751 			/*
752 			 * Five DSDs are available in the Continuation
753 			 * Type 1 IOCB.
754 			 */
755 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
756 			cur_dsd = cont_pkt->dsd;
757 			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
758 		}
759 
760 		append_dsd64(&cur_dsd, sg);
761 		avail_dsds--;
762 	}
763 }
764 
765 struct fw_dif_context {
766 	__le32	ref_tag;
767 	__le16	app_tag;
768 	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
769 	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
770 };
771 
772 /*
773  * qla24xx_set_t10dif_tags() - Extract ref and app tags from the SCSI command
774  *
775  */
776 static inline void
777 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
778     unsigned int protcnt)
779 {
780 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
781 
782 	pkt->ref_tag = cpu_to_le32(scsi_prot_ref_tag(cmd));
783 
784 	if (cmd->prot_flags & SCSI_PROT_REF_CHECK &&
785 	    qla2x00_hba_err_chk_enabled(sp)) {
786 		pkt->ref_tag_mask[0] = 0xff;
787 		pkt->ref_tag_mask[1] = 0xff;
788 		pkt->ref_tag_mask[2] = 0xff;
789 		pkt->ref_tag_mask[3] = 0xff;
790 	}
791 
792 	pkt->app_tag = cpu_to_le16(0);
793 	pkt->app_tag_mask[0] = 0x0;
794 	pkt->app_tag_mask[1] = 0x0;
795 }
796 
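/*
 * qla24xx_get_one_block_sg() - Walk the data scatterlist one protection
 * interval (@blk_sz bytes) at a time.  Each call fills sgx->dma_addr and
 * sgx->dma_len with the next chunk and sets *partial when a scatterlist
 * element ends before the interval is complete.  Returns 0 once all bytes
 * have been consumed.
 */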
797 int
798 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
799 	uint32_t *partial)
800 {
801 	struct scatterlist *sg;
802 	uint32_t cumulative_partial, sg_len;
803 	dma_addr_t sg_dma_addr;
804 
805 	if (sgx->num_bytes == sgx->tot_bytes)
806 		return 0;
807 
808 	sg = sgx->cur_sg;
809 	cumulative_partial = sgx->tot_partial;
810 
811 	sg_dma_addr = sg_dma_address(sg);
812 	sg_len = sg_dma_len(sg);
813 
814 	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
815 
816 	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
817 		sgx->dma_len = (blk_sz - cumulative_partial);
818 		sgx->tot_partial = 0;
819 		sgx->num_bytes += blk_sz;
820 		*partial = 0;
821 	} else {
822 		sgx->dma_len = sg_len - sgx->bytes_consumed;
823 		sgx->tot_partial += sgx->dma_len;
824 		*partial = 1;
825 	}
826 
827 	sgx->bytes_consumed += sgx->dma_len;
828 
829 	if (sg_len == sgx->bytes_consumed) {
830 		sg = sg_next(sg);
831 		sgx->num_sg++;
832 		sgx->cur_sg = sg;
833 		sgx->bytes_consumed = 0;
834 	}
835 
836 	return 1;
837 }
838 
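/*
 * qla24xx_walk_and_build_sglist_no_difb() - Build the data DSD lists for DIF
 * commands that carry no separate protection bundle: the data is split on
 * protection-interval boundaries and an 8-byte protection DSD is interleaved
 * after every complete interval.
 */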
839 int
840 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
841 	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
842 {
843 	void *next_dsd;
844 	uint8_t avail_dsds = 0;
845 	uint32_t dsd_list_len;
846 	struct dsd_dma *dsd_ptr;
847 	struct scatterlist *sg_prot;
848 	struct dsd64 *cur_dsd = dsd;
849 	uint16_t	used_dsds = tot_dsds;
850 	uint32_t	prot_int; /* protection interval */
851 	uint32_t	partial;
852 	struct qla2_sgx sgx;
853 	dma_addr_t	sle_dma;
854 	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
855 	struct scsi_cmnd *cmd;
856 
857 	memset(&sgx, 0, sizeof(struct qla2_sgx));
858 	if (sp) {
859 		cmd = GET_CMD_SP(sp);
860 		prot_int = scsi_prot_interval(cmd);
861 
862 		sgx.tot_bytes = scsi_bufflen(cmd);
863 		sgx.cur_sg = scsi_sglist(cmd);
864 		sgx.sp = sp;
865 
866 		sg_prot = scsi_prot_sglist(cmd);
867 	} else if (tc) {
868 		prot_int      = tc->blk_sz;
869 		sgx.tot_bytes = tc->bufflen;
870 		sgx.cur_sg    = tc->sg;
871 		sg_prot	      = tc->prot_sg;
872 	} else {
873 		BUG();
874 		return 1;
875 	}
876 
877 	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
878 
879 		sle_dma = sgx.dma_addr;
880 		sle_dma_len = sgx.dma_len;
881 alloc_and_fill:
882 		/* Allocate additional continuation packets? */
883 		if (avail_dsds == 0) {
884 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
885 					QLA_DSDS_PER_IOCB : used_dsds;
886 			dsd_list_len = (avail_dsds + 1) * 12;
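			/* 12 bytes per DSD (8-byte address + 4-byte length). */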
887 			used_dsds -= avail_dsds;
888 
889 			/* allocate tracking DS */
890 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
891 			if (!dsd_ptr)
892 				return 1;
893 
894 			/* allocate new list */
895 			dsd_ptr->dsd_addr = next_dsd =
896 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
897 				&dsd_ptr->dsd_list_dma);
898 
899 			if (!next_dsd) {
900 				/*
901 				 * Need to cleanup only this dsd_ptr, rest
902 				 * will be done by sp_free_dma()
903 				 */
904 				kfree(dsd_ptr);
905 				return 1;
906 			}
907 
908 			if (sp) {
909 				list_add_tail(&dsd_ptr->list,
910 					      &sp->u.scmd.crc_ctx->dsd_list);
911 
912 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
913 			} else {
914 				list_add_tail(&dsd_ptr->list,
915 				    &(tc->ctx->dsd_list));
916 				*tc->ctx_dsd_alloced = 1;
917 			}
918 
919 
920 			/* add new list to cmd iocb or last list */
921 			put_unaligned_le64(dsd_ptr->dsd_list_dma,
922 					   &cur_dsd->address);
923 			cur_dsd->length = cpu_to_le32(dsd_list_len);
924 			cur_dsd = next_dsd;
925 		}
926 		put_unaligned_le64(sle_dma, &cur_dsd->address);
927 		cur_dsd->length = cpu_to_le32(sle_dma_len);
928 		cur_dsd++;
929 		avail_dsds--;
930 
931 		if (partial == 0) {
932 			/* Got a full protection interval */
933 			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
934 			sle_dma_len = 8;
935 
936 			tot_prot_dma_len += sle_dma_len;
937 			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
938 				tot_prot_dma_len = 0;
939 				sg_prot = sg_next(sg_prot);
940 			}
941 
942 			partial = 1; /* So as to not re-enter this block */
943 			goto alloc_and_fill;
944 		}
945 	}
946 	/* Null termination */
947 	cur_dsd->address = 0;
948 	cur_dsd->length = 0;
949 	cur_dsd++;
950 	return 0;
951 }
952 
953 int
954 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
955 	struct dsd64 *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
956 {
957 	void *next_dsd;
958 	uint8_t avail_dsds = 0;
959 	uint32_t dsd_list_len;
960 	struct dsd_dma *dsd_ptr;
961 	struct scatterlist *sg, *sgl;
962 	struct dsd64 *cur_dsd = dsd;
963 	int	i;
964 	uint16_t	used_dsds = tot_dsds;
965 	struct scsi_cmnd *cmd;
966 
967 	if (sp) {
968 		cmd = GET_CMD_SP(sp);
969 		sgl = scsi_sglist(cmd);
970 	} else if (tc) {
971 		sgl = tc->sg;
972 	} else {
973 		BUG();
974 		return 1;
975 	}
976 
977 
978 	for_each_sg(sgl, sg, tot_dsds, i) {
979 		/* Allocate additional continuation packets? */
980 		if (avail_dsds == 0) {
981 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
982 					QLA_DSDS_PER_IOCB : used_dsds;
983 			dsd_list_len = (avail_dsds + 1) * 12;
984 			used_dsds -= avail_dsds;
985 
986 			/* allocate tracking DS */
987 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
988 			if (!dsd_ptr)
989 				return 1;
990 
991 			/* allocate new list */
992 			dsd_ptr->dsd_addr = next_dsd =
993 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
994 				&dsd_ptr->dsd_list_dma);
995 
996 			if (!next_dsd) {
997 				/*
998 				 * Need to cleanup only this dsd_ptr, rest
999 				 * will be done by sp_free_dma()
1000 				 */
1001 				kfree(dsd_ptr);
1002 				return 1;
1003 			}
1004 
1005 			if (sp) {
1006 				list_add_tail(&dsd_ptr->list,
1007 					      &sp->u.scmd.crc_ctx->dsd_list);
1008 
1009 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
1010 			} else {
1011 				list_add_tail(&dsd_ptr->list,
1012 				    &(tc->ctx->dsd_list));
1013 				*tc->ctx_dsd_alloced = 1;
1014 			}
1015 
1016 			/* add new list to cmd iocb or last list */
1017 			put_unaligned_le64(dsd_ptr->dsd_list_dma,
1018 					   &cur_dsd->address);
1019 			cur_dsd->length = cpu_to_le32(dsd_list_len);
1020 			cur_dsd = next_dsd;
1021 		}
1022 		append_dsd64(&cur_dsd, sg);
1023 		avail_dsds--;
1024 
1025 	}
1026 	/* Null termination */
1027 	cur_dsd->address = 0;
1028 	cur_dsd->length = 0;
1029 	cur_dsd++;
1030 	return 0;
1031 }
1032 
1033 int
1034 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1035 	struct dsd64 *cur_dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1036 {
1037 	struct dsd_dma *dsd_ptr = NULL, *dif_dsd, *nxt_dsd;
1038 	struct scatterlist *sg, *sgl;
1039 	struct crc_context *difctx = NULL;
1040 	struct scsi_qla_host *vha;
1041 	uint dsd_list_len;
1042 	uint avail_dsds = 0;
1043 	uint used_dsds = tot_dsds;
1044 	bool dif_local_dma_alloc = false;
1045 	bool direction_to_device = false;
1046 	int i;
1047 
1048 	if (sp) {
1049 		struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1050 
1051 		sgl = scsi_prot_sglist(cmd);
1052 		vha = sp->vha;
1053 		difctx = sp->u.scmd.crc_ctx;
1054 		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
1055 		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1056 		  "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
1057 			__func__, cmd, difctx, sp);
1058 	} else if (tc) {
1059 		vha = tc->vha;
1060 		sgl = tc->prot_sg;
1061 		difctx = tc->ctx;
1062 		direction_to_device = tc->dma_data_direction == DMA_TO_DEVICE;
1063 	} else {
1064 		BUG();
1065 		return 1;
1066 	}
1067 
1068 	ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
1069 	    "%s: enter (write=%u)\n", __func__, direction_to_device);
1070 
1071 	/* if initiator doing write or target doing read */
1072 	if (direction_to_device) {
1073 		for_each_sg(sgl, sg, tot_dsds, i) {
1074 			u64 sle_phys = sg_phys(sg);
1075 
1076 			/* If SGE addr + len flips bits in upper 32-bits */
1077 			if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
1078 				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe022,
1079 				    "%s: page boundary crossing (phys=%llx len=%x)\n",
1080 				    __func__, sle_phys, sg->length);
1081 
1082 				if (difctx) {
1083 					ha->dif_bundle_crossed_pages++;
1084 					dif_local_dma_alloc = true;
1085 				} else {
1086 					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1087 					    vha, 0xe022,
1088 					    "%s: difctx pointer is NULL\n",
1089 					    __func__);
1090 				}
1091 				break;
1092 			}
1093 		}
1094 		ha->dif_bundle_writes++;
1095 	} else {
1096 		ha->dif_bundle_reads++;
1097 	}
1098 
1099 	if (ql2xdifbundlinginternalbuffers)
1100 		dif_local_dma_alloc = direction_to_device;
1101 
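	/*
	 * When bouncing is needed (a protection SGE crosses a 4GB address
	 * boundary on a write, or ql2xdifbundlinginternalbuffers forces it),
	 * the DIF data is copied into driver-allocated DMA buffers below and
	 * those buffers are what get described to the firmware.
	 */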
1102 	if (dif_local_dma_alloc) {
1103 		u32 track_difbundl_buf = 0;
1104 		u32 ldma_sg_len = 0;
1105 		u8 ldma_needed = 1;
1106 
1107 		difctx->no_dif_bundl = 0;
1108 		difctx->dif_bundl_len = 0;
1109 
1110 		/* Track DSD buffers */
1111 		INIT_LIST_HEAD(&difctx->ldif_dsd_list);
1112 		/* Track local DMA buffers */
1113 		INIT_LIST_HEAD(&difctx->ldif_dma_hndl_list);
1114 
1115 		for_each_sg(sgl, sg, tot_dsds, i) {
1116 			u32 sglen = sg_dma_len(sg);
1117 
1118 			ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1119 			    "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1120 			    __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1121 			    difctx->dif_bundl_len, ldma_needed);
1122 
1123 			while (sglen) {
1124 				u32 xfrlen = 0;
1125 
1126 				if (ldma_needed) {
1127 					/*
1128 					 * Allocate list item to store
1129 					 * the DMA buffers
1130 					 */
1131 					dsd_ptr = kzalloc(sizeof(*dsd_ptr),
1132 					    GFP_ATOMIC);
1133 					if (!dsd_ptr) {
1134 						ql_dbg(ql_dbg_tgt, vha, 0xe024,
1135 						    "%s: failed alloc dsd_ptr\n",
1136 						    __func__);
1137 						return 1;
1138 					}
1139 					ha->dif_bundle_kallocs++;
1140 
1141 					/* allocate dma buffer */
1142 					dsd_ptr->dsd_addr = dma_pool_alloc
1143 						(ha->dif_bundl_pool, GFP_ATOMIC,
1144 						 &dsd_ptr->dsd_list_dma);
1145 					if (!dsd_ptr->dsd_addr) {
1146 						ql_dbg(ql_dbg_tgt, vha, 0xe024,
1147 						    "%s: failed alloc ->dsd_ptr\n",
1148 						    __func__);
1149 						/*
1150 						 * need to cleanup only this
1151 						 * dsd_ptr rest will be done
1152 						 * by sp_free_dma()
1153 						 */
1154 						kfree(dsd_ptr);
1155 						ha->dif_bundle_kallocs--;
1156 						return 1;
1157 					}
1158 					ha->dif_bundle_dma_allocs++;
1159 					ldma_needed = 0;
1160 					difctx->no_dif_bundl++;
1161 					list_add_tail(&dsd_ptr->list,
1162 					    &difctx->ldif_dma_hndl_list);
1163 				}
1164 
1165 				/* xfrlen is min of dma pool size and sglen */
1166 				xfrlen = (sglen >
1167 				   (DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len)) ?
1168 				    DIF_BUNDLING_DMA_POOL_SIZE - ldma_sg_len :
1169 				    sglen;
1170 
1171 				/* replace with local allocated dma buffer */
1172 				sg_pcopy_to_buffer(sgl, sg_nents(sgl),
1173 				    dsd_ptr->dsd_addr + ldma_sg_len, xfrlen,
1174 				    difctx->dif_bundl_len);
1175 				difctx->dif_bundl_len += xfrlen;
1176 				sglen -= xfrlen;
1177 				ldma_sg_len += xfrlen;
1178 				if (ldma_sg_len == DIF_BUNDLING_DMA_POOL_SIZE ||
1179 				    sg_is_last(sg)) {
1180 					ldma_needed = 1;
1181 					ldma_sg_len = 0;
1182 				}
1183 			}
1184 		}
1185 
1186 		track_difbundl_buf = used_dsds = difctx->no_dif_bundl;
1187 		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe025,
1188 		    "dif_bundl_len=%x, no_dif_bundl=%x track_difbundl_buf: %x\n",
1189 		    difctx->dif_bundl_len, difctx->no_dif_bundl,
1190 		    track_difbundl_buf);
1191 
1192 		if (sp)
1193 			sp->flags |= SRB_DIF_BUNDL_DMA_VALID;
1194 		else
1195 			tc->prot_flags = DIF_BUNDL_DMA_VALID;
1196 
1197 		list_for_each_entry_safe(dif_dsd, nxt_dsd,
1198 		    &difctx->ldif_dma_hndl_list, list) {
1199 			u32 sglen = (difctx->dif_bundl_len >
1200 			    DIF_BUNDLING_DMA_POOL_SIZE) ?
1201 			    DIF_BUNDLING_DMA_POOL_SIZE : difctx->dif_bundl_len;
1202 
1203 			BUG_ON(track_difbundl_buf == 0);
1204 
1205 			/* Allocate additional continuation packets? */
1206 			if (avail_dsds == 0) {
1207 				ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha,
1208 				    0xe024,
1209 				    "%s: adding continuation iocb's\n",
1210 				    __func__);
1211 				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1212 				    QLA_DSDS_PER_IOCB : used_dsds;
1213 				dsd_list_len = (avail_dsds + 1) * 12;
1214 				used_dsds -= avail_dsds;
1215 
1216 				/* allocate tracking DS */
1217 				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1218 				if (!dsd_ptr) {
1219 					ql_dbg(ql_dbg_tgt, vha, 0xe026,
1220 					    "%s: failed alloc dsd_ptr\n",
1221 					    __func__);
1222 					return 1;
1223 				}
1224 				ha->dif_bundle_kallocs++;
1225 
1226 				difctx->no_ldif_dsd++;
1227 				/* allocate new list */
1228 				dsd_ptr->dsd_addr =
1229 				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1230 					&dsd_ptr->dsd_list_dma);
1231 				if (!dsd_ptr->dsd_addr) {
1232 					ql_dbg(ql_dbg_tgt, vha, 0xe026,
1233 					    "%s: failed alloc ->dsd_addr\n",
1234 					    __func__);
1235 					/*
1236 					 * need to cleanup only this dsd_ptr
1237 					 *  rest will be done by sp_free_dma()
1238 					 */
1239 					kfree(dsd_ptr);
1240 					ha->dif_bundle_kallocs--;
1241 					return 1;
1242 				}
1243 				ha->dif_bundle_dma_allocs++;
1244 
1245 				if (sp) {
1246 					list_add_tail(&dsd_ptr->list,
1247 					    &difctx->ldif_dsd_list);
1248 					sp->flags |= SRB_CRC_CTX_DSD_VALID;
1249 				} else {
1250 					list_add_tail(&dsd_ptr->list,
1251 					    &difctx->ldif_dsd_list);
1252 					tc->ctx_dsd_alloced = 1;
1253 				}
1254 
1255 				/* add new list to cmd iocb or last list */
1256 				put_unaligned_le64(dsd_ptr->dsd_list_dma,
1257 						   &cur_dsd->address);
1258 				cur_dsd->length = cpu_to_le32(dsd_list_len);
1259 				cur_dsd = dsd_ptr->dsd_addr;
1260 			}
1261 			put_unaligned_le64(dif_dsd->dsd_list_dma,
1262 					   &cur_dsd->address);
1263 			cur_dsd->length = cpu_to_le32(sglen);
1264 			cur_dsd++;
1265 			avail_dsds--;
1266 			difctx->dif_bundl_len -= sglen;
1267 			track_difbundl_buf--;
1268 		}
1269 
1270 		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe026,
1271 		    "%s: no_ldif_dsd:%x, no_dif_bundl:%x\n", __func__,
1272 			difctx->no_ldif_dsd, difctx->no_dif_bundl);
1273 	} else {
1274 		for_each_sg(sgl, sg, tot_dsds, i) {
1275 			/* Allocate additional continuation packets? */
1276 			if (avail_dsds == 0) {
1277 				avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1278 				    QLA_DSDS_PER_IOCB : used_dsds;
1279 				dsd_list_len = (avail_dsds + 1) * 12;
1280 				used_dsds -= avail_dsds;
1281 
1282 				/* allocate tracking DS */
1283 				dsd_ptr = kzalloc(sizeof(*dsd_ptr), GFP_ATOMIC);
1284 				if (!dsd_ptr) {
1285 					ql_dbg(ql_dbg_tgt + ql_dbg_verbose,
1286 					    vha, 0xe027,
1287 					    "%s: failed alloc dsd_dma...\n",
1288 					    __func__);
1289 					return 1;
1290 				}
1291 
1292 				/* allocate new list */
1293 				dsd_ptr->dsd_addr =
1294 				    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1295 					&dsd_ptr->dsd_list_dma);
1296 				if (!dsd_ptr->dsd_addr) {
1297 					/* need to cleanup only this dsd_ptr */
1298 					/* rest will be done by sp_free_dma() */
1299 					kfree(dsd_ptr);
1300 					return 1;
1301 				}
1302 
1303 				if (sp) {
1304 					list_add_tail(&dsd_ptr->list,
1305 					    &difctx->dsd_list);
1306 					sp->flags |= SRB_CRC_CTX_DSD_VALID;
1307 				} else {
1308 					list_add_tail(&dsd_ptr->list,
1309 					    &difctx->dsd_list);
1310 					tc->ctx_dsd_alloced = 1;
1311 				}
1312 
1313 				/* add new list to cmd iocb or last list */
1314 				put_unaligned_le64(dsd_ptr->dsd_list_dma,
1315 						   &cur_dsd->address);
1316 				cur_dsd->length = cpu_to_le32(dsd_list_len);
1317 				cur_dsd = dsd_ptr->dsd_addr;
1318 			}
1319 			append_dsd64(&cur_dsd, sg);
1320 			avail_dsds--;
1321 		}
1322 	}
1323 	/* Null termination */
1324 	cur_dsd->address = 0;
1325 	cur_dsd->length = 0;
1326 	cur_dsd++;
1327 	return 0;
1328 }
1329 
1330 /**
1331  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1332  *							Type CRC_2 IOCB types.
1333  *
1334  * @sp: SRB command to process
1335  * @cmd_pkt: Command Type CRC_2 IOCB
1336  * @tot_dsds: Total number of segments to transfer
1337  * @tot_prot_dsds: Total number of segments with protection information
1338  * @fw_prot_opts: Protection options to be passed to firmware
1339  */
1340 static inline int
1341 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1342     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1343 {
1344 	struct dsd64		*cur_dsd;
1345 	__be32			*fcp_dl;
1346 	scsi_qla_host_t		*vha;
1347 	struct scsi_cmnd	*cmd;
1348 	uint32_t		total_bytes = 0;
1349 	uint32_t		data_bytes;
1350 	uint32_t		dif_bytes;
1351 	uint8_t			bundling = 1;
1352 	uint16_t		blk_size;
1353 	struct crc_context	*crc_ctx_pkt = NULL;
1354 	struct qla_hw_data	*ha;
1355 	uint8_t			additional_fcpcdb_len;
1356 	uint16_t		fcp_cmnd_len;
1357 	struct fcp_cmnd		*fcp_cmnd;
1358 	dma_addr_t		crc_ctx_dma;
1359 
1360 	cmd = GET_CMD_SP(sp);
1361 
1362 	/* Update entry type to indicate Command Type CRC_2 IOCB */
1363 	put_unaligned_le32(COMMAND_TYPE_CRC_2, &cmd_pkt->entry_type);
1364 
1365 	vha = sp->vha;
1366 	ha = vha->hw;
1367 
1368 	/* No data transfer */
1369 	data_bytes = scsi_bufflen(cmd);
1370 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1371 		cmd_pkt->byte_count = cpu_to_le32(0);
1372 		return QLA_SUCCESS;
1373 	}
1374 
1375 	cmd_pkt->vp_index = sp->vha->vp_idx;
1376 
1377 	/* Set transfer direction */
1378 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1379 		cmd_pkt->control_flags =
1380 		    cpu_to_le16(CF_WRITE_DATA);
1381 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1382 		cmd_pkt->control_flags =
1383 		    cpu_to_le16(CF_READ_DATA);
1384 	}
1385 
1386 	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1387 	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1388 	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1389 	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1390 		bundling = 0;
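	/*
	 * DIF bundling (separate data and protection DSD lists) is only used
	 * for the pass-through protection ops; the insert/strip ops above are
	 * built without it.
	 */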
1391 
1392 	/* Allocate CRC context from global pool */
1393 	crc_ctx_pkt = sp->u.scmd.crc_ctx =
1394 	    dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1395 
1396 	if (!crc_ctx_pkt)
1397 		goto crc_queuing_error;
1398 
1399 	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1400 
1401 	sp->flags |= SRB_CRC_CTX_DMA_VALID;
1402 
1403 	/* Set handle */
1404 	crc_ctx_pkt->handle = cmd_pkt->handle;
1405 
1406 	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1407 
1408 	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1409 	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1410 
1411 	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
1412 	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
1413 
1414 	/* Determine SCSI command length -- align to 4 byte boundary */
1415 	if (cmd->cmd_len > 16) {
1416 		additional_fcpcdb_len = cmd->cmd_len - 16;
1417 		if ((cmd->cmd_len % 4) != 0) {
1418 			/* SCSI cmd > 16 bytes must be multiple of 4 */
1419 			goto crc_queuing_error;
1420 		}
1421 		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1422 	} else {
1423 		additional_fcpcdb_len = 0;
1424 		fcp_cmnd_len = 12 + 16 + 4;
1425 	}
1426 
1427 	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1428 
1429 	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1430 	if (cmd->sc_data_direction == DMA_TO_DEVICE)
1431 		fcp_cmnd->additional_cdb_len |= 1;
1432 	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1433 		fcp_cmnd->additional_cdb_len |= 2;
1434 
1435 	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1436 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1437 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1438 	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
1439 			   &cmd_pkt->fcp_cmnd_dseg_address);
1440 	fcp_cmnd->task_management = 0;
1441 	fcp_cmnd->task_attribute = TSK_SIMPLE;
1442 
1443 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1444 
1445 	/* Compute DIF length and adjust data length to include protection */
1446 	dif_bytes = 0;
1447 	blk_size = cmd->device->sector_size;
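	/* Each protection interval carries an 8-byte DIF tuple. */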
1448 	dif_bytes = (data_bytes / blk_size) * 8;
1449 
1450 	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1451 	case SCSI_PROT_READ_INSERT:
1452 	case SCSI_PROT_WRITE_STRIP:
1453 		total_bytes = data_bytes;
1454 		data_bytes += dif_bytes;
1455 		break;
1456 
1457 	case SCSI_PROT_READ_STRIP:
1458 	case SCSI_PROT_WRITE_INSERT:
1459 	case SCSI_PROT_READ_PASS:
1460 	case SCSI_PROT_WRITE_PASS:
1461 		total_bytes = data_bytes + dif_bytes;
1462 		break;
1463 	default:
1464 		BUG();
1465 	}
1466 
1467 	if (!qla2x00_hba_err_chk_enabled(sp))
1468 		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1469 	/* HBA error checking enabled */
1470 	else if (IS_PI_UNINIT_CAPABLE(ha)) {
1471 		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1472 		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1473 			SCSI_PROT_DIF_TYPE2))
1474 			fw_prot_opts |= BIT_10;
1475 		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1476 		    SCSI_PROT_DIF_TYPE3)
1477 			fw_prot_opts |= BIT_11;
1478 	}
1479 
1480 	if (!bundling) {
1481 		cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
1482 	} else {
1483 		/*
1484 		 * Configure bundling if we need to fetch interleaving
1485 		 * protection data via separate PCI accesses.
1486 		 */
1487 		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1488 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1489 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1490 							tot_prot_dsds);
1491 		cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
1492 	}
1493 
1494 	/* Finish the common fields of CRC pkt */
1495 	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1496 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1497 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1498 	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1499 	/* Fibre channel byte count */
1500 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1501 	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1502 	    additional_fcpcdb_len);
1503 	*fcp_dl = htonl(total_bytes);
1504 
1505 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1506 		cmd_pkt->byte_count = cpu_to_le32(0);
1507 		return QLA_SUCCESS;
1508 	}
1509 	/* Walks data segments */
1510 
1511 	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1512 
1513 	if (!bundling && tot_prot_dsds) {
1514 		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1515 			cur_dsd, tot_dsds, NULL))
1516 			goto crc_queuing_error;
1517 	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1518 			(tot_dsds - tot_prot_dsds), NULL))
1519 		goto crc_queuing_error;
1520 
1521 	if (bundling && tot_prot_dsds) {
1522 		/* Walks dif segments */
1523 		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1524 		cur_dsd = &crc_ctx_pkt->u.bundling.dif_dsd;
1525 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1526 				tot_prot_dsds, NULL))
1527 			goto crc_queuing_error;
1528 	}
1529 	return QLA_SUCCESS;
1530 
1531 crc_queuing_error:
1532 	/* Cleanup will be performed by the caller */
1533 
1534 	return QLA_FUNCTION_FAILED;
1535 }
1536 
1537 /**
1538  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1539  * @sp: command to send to the ISP
1540  *
1541  * Returns non-zero if a failure occurred, else zero.
1542  */
1543 int
1544 qla24xx_start_scsi(srb_t *sp)
1545 {
1546 	int		nseg;
1547 	unsigned long   flags;
1548 	uint32_t	*clr_ptr;
1549 	uint32_t	handle;
1550 	struct cmd_type_7 *cmd_pkt;
1551 	uint16_t	cnt;
1552 	uint16_t	req_cnt;
1553 	uint16_t	tot_dsds;
1554 	struct req_que *req = NULL;
1555 	struct rsp_que *rsp;
1556 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1557 	struct scsi_qla_host *vha = sp->vha;
1558 	struct qla_hw_data *ha = vha->hw;
1559 
1560 	if (sp->fcport->edif.enable  && (sp->fcport->flags & FCF_FCSP_DEVICE))
1561 		return qla28xx_start_scsi_edif(sp);
1562 
1563 	/* Setup device pointers. */
1564 	req = vha->req;
1565 	rsp = req->rsp;
1566 
1567 	/* So we know we haven't pci_map'ed anything yet */
1568 	tot_dsds = 0;
1569 
1570 	/* Send marker if required */
1571 	if (vha->marker_needed != 0) {
1572 		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1573 		    QLA_SUCCESS)
1574 			return QLA_FUNCTION_FAILED;
1575 		vha->marker_needed = 0;
1576 	}
1577 
1578 	/* Acquire ring specific lock */
1579 	spin_lock_irqsave(&ha->hardware_lock, flags);
1580 
1581 	handle = qla2xxx_get_next_handle(req);
1582 	if (handle == 0)
1583 		goto queuing_error;
1584 
1585 	/* Map the sg table so we have an accurate count of sg entries needed */
1586 	if (scsi_sg_count(cmd)) {
1587 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1588 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1589 		if (unlikely(!nseg))
1590 			goto queuing_error;
1591 	} else
1592 		nseg = 0;
1593 
1594 	tot_dsds = nseg;
1595 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1596 
1597 	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
1598 	sp->iores.exch_cnt = 1;
1599 	sp->iores.iocb_cnt = req_cnt;
1600 	if (qla_get_fw_resources(sp->qpair, &sp->iores))
1601 		goto queuing_error;
1602 
1603 	if (req->cnt < (req_cnt + 2)) {
1604 		if (IS_SHADOW_REG_CAPABLE(ha)) {
1605 			cnt = *req->out_ptr;
1606 		} else {
1607 			cnt = rd_reg_dword_relaxed(req->req_q_out);
1608 			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1609 				goto queuing_error;
1610 		}
1611 
1612 		if (req->ring_index < cnt)
1613 			req->cnt = cnt - req->ring_index;
1614 		else
1615 			req->cnt = req->length -
1616 				(req->ring_index - cnt);
1617 		if (req->cnt < (req_cnt + 2))
1618 			goto queuing_error;
1619 	}
1620 
1621 	/* Build command packet. */
1622 	req->current_outstanding_cmd = handle;
1623 	req->outstanding_cmds[handle] = sp;
1624 	sp->handle = handle;
1625 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1626 	req->cnt -= req_cnt;
1627 
1628 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1629 	cmd_pkt->handle = make_handle(req->id, handle);
1630 
1631 	/* Zero out remaining portion of packet. */
1632 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1633 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1634 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1635 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1636 
1637 	/* Set NPORT-ID and LUN number*/
1638 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1639 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1640 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1641 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1642 	cmd_pkt->vp_index = sp->vha->vp_idx;
1643 
1644 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1645 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1646 
1647 	cmd_pkt->task = TSK_SIMPLE;
1648 
1649 	/* Load SCSI command packet. */
1650 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1651 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1652 
1653 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1654 
1655 	/* Build IOCB segments */
1656 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1657 
1658 	/* Set total data segment count. */
1659 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1660 	wmb();
1661 	/* Adjust ring index. */
1662 	req->ring_index++;
1663 	if (req->ring_index == req->length) {
1664 		req->ring_index = 0;
1665 		req->ring_ptr = req->ring;
1666 	} else
1667 		req->ring_ptr++;
1668 
1669 	sp->qpair->cmd_cnt++;
1670 	sp->flags |= SRB_DMA_VALID;
1671 
1672 	/* Set chip new ring index. */
1673 	wrt_reg_dword(req->req_q_in, req->ring_index);
1674 
1675 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1676 	if (vha->flags.process_response_queue &&
1677 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1678 		qla24xx_process_response_queue(vha, rsp);
1679 
1680 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1681 	return QLA_SUCCESS;
1682 
1683 queuing_error:
1684 	if (tot_dsds)
1685 		scsi_dma_unmap(cmd);
1686 
1687 	qla_put_fw_resources(sp->qpair, &sp->iores);
1688 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1689 
1690 	return QLA_FUNCTION_FAILED;
1691 }
1692 
1693 /**
1694  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1695  * @sp: command to send to the ISP
1696  *
1697  * Returns non-zero if a failure occurred, else zero.
1698  */
1699 int
1700 qla24xx_dif_start_scsi(srb_t *sp)
1701 {
1702 	int			nseg;
1703 	unsigned long		flags;
1704 	uint32_t		*clr_ptr;
1705 	uint32_t		handle;
1706 	uint16_t		cnt;
1707 	uint16_t		req_cnt = 0;
1708 	uint16_t		tot_dsds;
1709 	uint16_t		tot_prot_dsds;
1710 	uint16_t		fw_prot_opts = 0;
1711 	struct req_que		*req = NULL;
1712 	struct rsp_que		*rsp = NULL;
1713 	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
1714 	struct scsi_qla_host	*vha = sp->vha;
1715 	struct qla_hw_data	*ha = vha->hw;
1716 	struct cmd_type_crc_2	*cmd_pkt;
1717 	uint32_t		status = 0;
1718 
1719 #define QDSS_GOT_Q_SPACE	BIT_0
1720 
1721 	/* Only process protection commands or CDBs longer than 16 bytes here */
1722 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1723 		if (cmd->cmd_len <= 16)
1724 			return qla24xx_start_scsi(sp);
1725 	}
1726 
1727 	/* Setup device pointers. */
1728 	req = vha->req;
1729 	rsp = req->rsp;
1730 
1731 	/* So we know we haven't pci_map'ed anything yet */
1732 	tot_dsds = 0;
1733 
1734 	/* Send marker if required */
1735 	if (vha->marker_needed != 0) {
1736 		if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) !=
1737 		    QLA_SUCCESS)
1738 			return QLA_FUNCTION_FAILED;
1739 		vha->marker_needed = 0;
1740 	}
1741 
1742 	/* Acquire ring specific lock */
1743 	spin_lock_irqsave(&ha->hardware_lock, flags);
1744 
1745 	handle = qla2xxx_get_next_handle(req);
1746 	if (handle == 0)
1747 		goto queuing_error;
1748 
1749 	/* Compute number of required data segments */
1750 	/* Map the sg table so we have an accurate count of sg entries needed */
1751 	if (scsi_sg_count(cmd)) {
1752 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1753 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1754 		if (unlikely(!nseg))
1755 			goto queuing_error;
1756 		else
1757 			sp->flags |= SRB_DMA_VALID;
1758 
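		/*
		 * For HBA insert/strip ops the data DSDs are later built in
		 * sector-sized chunks (see the *_no_difb path), so recount the
		 * segments per sector rather than using the raw sg count.
		 */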
1759 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1760 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1761 			struct qla2_sgx sgx;
1762 			uint32_t	partial;
1763 
1764 			memset(&sgx, 0, sizeof(struct qla2_sgx));
1765 			sgx.tot_bytes = scsi_bufflen(cmd);
1766 			sgx.cur_sg = scsi_sglist(cmd);
1767 			sgx.sp = sp;
1768 
1769 			nseg = 0;
1770 			while (qla24xx_get_one_block_sg(
1771 			    cmd->device->sector_size, &sgx, &partial))
1772 				nseg++;
1773 		}
1774 	} else
1775 		nseg = 0;
1776 
1777 	/* number of required data segments */
1778 	tot_dsds = nseg;
1779 
1780 	/* Compute number of required protection segments */
1781 	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1782 		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1783 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1784 		if (unlikely(!nseg))
1785 			goto queuing_error;
1786 		else
1787 			sp->flags |= SRB_CRC_PROT_DMA_VALID;
1788 
1789 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1790 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1791 			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1792 		}
1793 	} else {
1794 		nseg = 0;
1795 	}
1796 
1797 	req_cnt = 1;
1798 	/* Total Data and protection sg segment(s) */
1799 	tot_prot_dsds = nseg;
1800 	tot_dsds += nseg;
1801 
1802 	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
1803 	sp->iores.exch_cnt = 1;
1804 	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1805 	if (qla_get_fw_resources(sp->qpair, &sp->iores))
1806 		goto queuing_error;
1807 
1808 	if (req->cnt < (req_cnt + 2)) {
1809 		if (IS_SHADOW_REG_CAPABLE(ha)) {
1810 			cnt = *req->out_ptr;
1811 		} else {
1812 			cnt = rd_reg_dword_relaxed(req->req_q_out);
1813 			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1814 				goto queuing_error;
1815 		}
1816 		if (req->ring_index < cnt)
1817 			req->cnt = cnt - req->ring_index;
1818 		else
1819 			req->cnt = req->length -
1820 				(req->ring_index - cnt);
1821 		if (req->cnt < (req_cnt + 2))
1822 			goto queuing_error;
1823 	}
1824 
1825 	status |= QDSS_GOT_Q_SPACE;
1826 
1827 	/* Build header part of command packet (excluding the OPCODE). */
1828 	req->current_outstanding_cmd = handle;
1829 	req->outstanding_cmds[handle] = sp;
1830 	sp->handle = handle;
1831 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1832 	req->cnt -= req_cnt;
1833 
1834 	/* Fill-in common area */
1835 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1836 	cmd_pkt->handle = make_handle(req->id, handle);
1837 
1838 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1839 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1840 
1841 	/* Set NPORT-ID and LUN number*/
1842 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1843 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1844 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1845 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1846 
1847 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1848 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1849 
1850 	/* Total Data and protection segment(s) */
1851 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1852 
1853 	/* Build IOCB segments and adjust for data protection segments */
1854 	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1855 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1856 		QLA_SUCCESS)
1857 		goto queuing_error;
1858 
1859 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1860 	/* Specify response queue number where completion should happen */
1861 	cmd_pkt->entry_status = (uint8_t) rsp->id;
1862 	cmd_pkt->timeout = cpu_to_le16(0);
1863 	wmb();
1864 
1865 	/* Adjust ring index. */
1866 	req->ring_index++;
1867 	if (req->ring_index == req->length) {
1868 		req->ring_index = 0;
1869 		req->ring_ptr = req->ring;
1870 	} else
1871 		req->ring_ptr++;
1872 
1873 	sp->qpair->cmd_cnt++;
1874 	/* Set chip new ring index. */
1875 	wrt_reg_dword(req->req_q_in, req->ring_index);
1876 
1877 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1878 	if (vha->flags.process_response_queue &&
1879 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1880 		qla24xx_process_response_queue(vha, rsp);
1881 
1882 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1883 
1884 	return QLA_SUCCESS;
1885 
1886 queuing_error:
1887 	if (status & QDSS_GOT_Q_SPACE) {
1888 		req->outstanding_cmds[handle] = NULL;
1889 		req->cnt += req_cnt;
1890 	}
1891 	/* Cleanup will be performed by the caller (queuecommand) */
1892 
1893 	qla_put_fw_resources(sp->qpair, &sp->iores);
1894 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1895 
1896 	return QLA_FUNCTION_FAILED;
1897 }
1898 
1899 /**
1900  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1901  * @sp: command to send to the ISP
1902  *
1903  * Returns non-zero if a failure occurred, else zero.
1904  */
1905 static int
1906 qla2xxx_start_scsi_mq(srb_t *sp)
1907 {
1908 	int		nseg;
1909 	unsigned long   flags;
1910 	uint32_t	*clr_ptr;
1911 	uint32_t	handle;
1912 	struct cmd_type_7 *cmd_pkt;
1913 	uint16_t	cnt;
1914 	uint16_t	req_cnt;
1915 	uint16_t	tot_dsds;
1916 	struct req_que *req = NULL;
1917 	struct rsp_que *rsp;
1918 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1919 	struct scsi_qla_host *vha = sp->fcport->vha;
1920 	struct qla_hw_data *ha = vha->hw;
1921 	struct qla_qpair *qpair = sp->qpair;
1922 
1923 	if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE))
1924 		return qla28xx_start_scsi_edif(sp);
1925 
1926 	/* Acquire qpair specific lock */
1927 	spin_lock_irqsave(&qpair->qp_lock, flags);
1928 
1929 	/* Setup qpair pointers */
1930 	req = qpair->req;
1931 	rsp = qpair->rsp;
1932 
1933 	/* So we know we haven't pci_map'ed anything yet */
1934 	tot_dsds = 0;
1935 
1936 	/* Send marker if required */
1937 	if (vha->marker_needed != 0) {
1938 		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
1939 		    QLA_SUCCESS) {
1940 			spin_unlock_irqrestore(&qpair->qp_lock, flags);
1941 			return QLA_FUNCTION_FAILED;
1942 		}
1943 		vha->marker_needed = 0;
1944 	}
1945 
1946 	handle = qla2xxx_get_next_handle(req);
1947 	if (handle == 0)
1948 		goto queuing_error;
1949 
1950 	/* Map the sg table so we have an accurate count of sg entries needed */
1951 	if (scsi_sg_count(cmd)) {
1952 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1953 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1954 		if (unlikely(!nseg))
1955 			goto queuing_error;
1956 	} else
1957 		nseg = 0;
1958 
1959 	tot_dsds = nseg;
1960 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1961 
1962 	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
1963 	sp->iores.exch_cnt = 1;
1964 	sp->iores.iocb_cnt = req_cnt;
1965 	if (qla_get_fw_resources(sp->qpair, &sp->iores))
1966 		goto queuing_error;
1967 
1968 	if (req->cnt < (req_cnt + 2)) {
1969 		if (IS_SHADOW_REG_CAPABLE(ha)) {
1970 			cnt = *req->out_ptr;
1971 		} else {
1972 			cnt = rd_reg_dword_relaxed(req->req_q_out);
1973 			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
1974 				goto queuing_error;
1975 		}
1976 
1977 		if (req->ring_index < cnt)
1978 			req->cnt = cnt - req->ring_index;
1979 		else
1980 			req->cnt = req->length -
1981 				(req->ring_index - cnt);
1982 		if (req->cnt < (req_cnt + 2))
1983 			goto queuing_error;
1984 	}
1985 
1986 	/* Build command packet. */
1987 	req->current_outstanding_cmd = handle;
1988 	req->outstanding_cmds[handle] = sp;
1989 	sp->handle = handle;
1990 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1991 	req->cnt -= req_cnt;
1992 
1993 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1994 	cmd_pkt->handle = make_handle(req->id, handle);
1995 
1996 	/* Zero out remaining portion of packet. */
1997 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1998 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1999 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2000 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2001 
2002 	/* Set NPORT-ID and LUN number*/
2003 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2004 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2005 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2006 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2007 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2008 
2009 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2010 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2011 
2012 	cmd_pkt->task = TSK_SIMPLE;
2013 
2014 	/* Load SCSI command packet. */
2015 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2016 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2017 
2018 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2019 
2020 	/* Build IOCB segments */
2021 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2022 
2023 	/* Set total data segment count. */
2024 	cmd_pkt->entry_count = (uint8_t)req_cnt;
2025 	wmb();
2026 	/* Adjust ring index. */
2027 	req->ring_index++;
2028 	if (req->ring_index == req->length) {
2029 		req->ring_index = 0;
2030 		req->ring_ptr = req->ring;
2031 	} else
2032 		req->ring_ptr++;
2033 
2034 	sp->qpair->cmd_cnt++;
2035 	sp->flags |= SRB_DMA_VALID;
2036 
2037 	/* Set chip new ring index. */
2038 	wrt_reg_dword(req->req_q_in, req->ring_index);
2039 
2040 	/* Manage unprocessed RIO/ZIO commands in response queue. */
2041 	if (vha->flags.process_response_queue &&
2042 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2043 		qla24xx_process_response_queue(vha, rsp);
2044 
2045 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2046 	return QLA_SUCCESS;
2047 
2048 queuing_error:
2049 	if (tot_dsds)
2050 		scsi_dma_unmap(cmd);
2051 
2052 	qla_put_fw_resources(sp->qpair, &sp->iores);
2053 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2054 
2055 	return QLA_FUNCTION_FAILED;
2056 }
2057 
2058 
2059 /**
2060  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
2061  * @sp: command to send to the ISP
2062  *
2063  * Returns non-zero if a failure occurred, else zero.
2064  */
2065 int
2066 qla2xxx_dif_start_scsi_mq(srb_t *sp)
2067 {
2068 	int			nseg;
2069 	unsigned long		flags;
2070 	uint32_t		*clr_ptr;
2071 	uint32_t		handle;
2072 	uint16_t		cnt;
2073 	uint16_t		req_cnt = 0;
2074 	uint16_t		tot_dsds;
2075 	uint16_t		tot_prot_dsds;
2076 	uint16_t		fw_prot_opts = 0;
2077 	struct req_que		*req = NULL;
2078 	struct rsp_que		*rsp = NULL;
2079 	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
2080 	struct scsi_qla_host	*vha = sp->fcport->vha;
2081 	struct qla_hw_data	*ha = vha->hw;
2082 	struct cmd_type_crc_2	*cmd_pkt;
2083 	uint32_t		status = 0;
2084 	struct qla_qpair	*qpair = sp->qpair;
2085 
2086 #define QDSS_GOT_Q_SPACE	BIT_0
2087 
2088 	/* Check for host side state */
2089 	if (!qpair->online) {
2090 		cmd->result = DID_NO_CONNECT << 16;
2091 		return QLA_INTERFACE_ERROR;
2092 	}
2093 
2094 	if (!qpair->difdix_supported &&
2095 		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
2096 		cmd->result = DID_NO_CONNECT << 16;
2097 		return QLA_INTERFACE_ERROR;
2098 	}
2099 
2100 	/* Only process protection I/O or CDBs longer than 16 bytes in this routine */
2101 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
2102 		if (cmd->cmd_len <= 16)
2103 			return qla2xxx_start_scsi_mq(sp);
2104 	}
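	/*
	 * Everything below builds a Command Type CRC_2 IOCB; unprotected
	 * commands with a CDB of 16 bytes or less have already been handed
	 * off to the leaner qla2xxx_start_scsi_mq() path above.
	 */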
2105 
2106 	spin_lock_irqsave(&qpair->qp_lock, flags);
2107 
2108 	/* Setup qpair pointers */
2109 	rsp = qpair->rsp;
2110 	req = qpair->req;
2111 
2112 	/* So we know we haven't pci_map'ed anything yet */
2113 	tot_dsds = 0;
2114 
2115 	/* Send marker if required */
2116 	if (vha->marker_needed != 0) {
2117 		if (__qla2x00_marker(vha, qpair, 0, 0, MK_SYNC_ALL) !=
2118 		    QLA_SUCCESS) {
2119 			spin_unlock_irqrestore(&qpair->qp_lock, flags);
2120 			return QLA_FUNCTION_FAILED;
2121 		}
2122 		vha->marker_needed = 0;
2123 	}
2124 
2125 	handle = qla2xxx_get_next_handle(req);
2126 	if (handle == 0)
2127 		goto queuing_error;
2128 
2129 	/* Compute number of required data segments */
2130 	/* Map the sg table so we have an accurate count of sg entries needed */
2131 	if (scsi_sg_count(cmd)) {
2132 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2133 		    scsi_sg_count(cmd), cmd->sc_data_direction);
2134 		if (unlikely(!nseg))
2135 			goto queuing_error;
2136 		else
2137 			sp->flags |= SRB_DMA_VALID;
2138 
2139 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
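		/*
		 * When the HBA itself inserts or strips the protection data,
		 * the data buffer is walked one logical block at a time via
		 * qla24xx_get_one_block_sg(), so nseg ends up counting
		 * blocks rather than scatterlist entries.
		 */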
2140 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2141 			struct qla2_sgx sgx;
2142 			uint32_t	partial;
2143 
2144 			memset(&sgx, 0, sizeof(struct qla2_sgx));
2145 			sgx.tot_bytes = scsi_bufflen(cmd);
2146 			sgx.cur_sg = scsi_sglist(cmd);
2147 			sgx.sp = sp;
2148 
2149 			nseg = 0;
2150 			while (qla24xx_get_one_block_sg(
2151 			    cmd->device->sector_size, &sgx, &partial))
2152 				nseg++;
2153 		}
2154 	} else
2155 		nseg = 0;
2156 
2157 	/* number of required data segments */
2158 	tot_dsds = nseg;
2159 
2160 	/* Compute number of required protection segments */
2161 	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2162 		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2163 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2164 		if (unlikely(!nseg))
2165 			goto queuing_error;
2166 		else
2167 			sp->flags |= SRB_CRC_PROT_DMA_VALID;
2168 
2169 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2170 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2171 			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2172 		}
2173 	} else {
2174 		nseg = 0;
2175 	}
2176 
2177 	req_cnt = 1;
2178 	/* Total Data and protection sg segment(s) */
2179 	tot_prot_dsds = nseg;
2180 	tot_dsds += nseg;
2181 
2182 	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
2183 	sp->iores.exch_cnt = 1;
2184 	sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2185 	if (qla_get_fw_resources(sp->qpair, &sp->iores))
2186 		goto queuing_error;
2187 
2188 	if (req->cnt < (req_cnt + 2)) {
2189 		if (IS_SHADOW_REG_CAPABLE(ha)) {
2190 			cnt = *req->out_ptr;
2191 		} else {
2192 			cnt = rd_reg_dword_relaxed(req->req_q_out);
2193 			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
2194 				goto queuing_error;
2195 		}
2196 
2197 		if (req->ring_index < cnt)
2198 			req->cnt = cnt - req->ring_index;
2199 		else
2200 			req->cnt = req->length -
2201 				(req->ring_index - cnt);
2202 		if (req->cnt < (req_cnt + 2))
2203 			goto queuing_error;
2204 	}
2205 
2206 	status |= QDSS_GOT_Q_SPACE;
2207 
2208 	/* Build header part of command packet (excluding the OPCODE). */
2209 	req->current_outstanding_cmd = handle;
2210 	req->outstanding_cmds[handle] = sp;
2211 	sp->handle = handle;
2212 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2213 	req->cnt -= req_cnt;
2214 
2215 	/* Fill-in common area */
2216 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2217 	cmd_pkt->handle = make_handle(req->id, handle);
2218 
2219 	clr_ptr = (uint32_t *)cmd_pkt + 2;
2220 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2221 
2222 	/* Set NPORT-ID and LUN number*/
2223 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2224 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2225 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2226 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2227 
2228 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2229 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2230 
2231 	/* Total Data and protection segment(s) */
2232 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2233 
2234 	/* Build IOCB segments and adjust for data protection segments */
2235 	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2236 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2237 		QLA_SUCCESS)
2238 		goto queuing_error;
2239 
2240 	cmd_pkt->entry_count = (uint8_t)req_cnt;
2241 	cmd_pkt->timeout = cpu_to_le16(0);
2242 	wmb();
2243 
2244 	/* Adjust ring index. */
2245 	req->ring_index++;
2246 	if (req->ring_index == req->length) {
2247 		req->ring_index = 0;
2248 		req->ring_ptr = req->ring;
2249 	} else
2250 		req->ring_ptr++;
2251 
2252 	sp->qpair->cmd_cnt++;
2253 	/* Set chip new ring index. */
2254 	wrt_reg_dword(req->req_q_in, req->ring_index);
2255 
2256 	/* Manage unprocessed RIO/ZIO commands in response queue. */
2257 	if (vha->flags.process_response_queue &&
2258 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2259 		qla24xx_process_response_queue(vha, rsp);
2260 
2261 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2262 
2263 	return QLA_SUCCESS;
2264 
2265 queuing_error:
2266 	if (status & QDSS_GOT_Q_SPACE) {
2267 		req->outstanding_cmds[handle] = NULL;
2268 		req->cnt += req_cnt;
2269 	}
2270 	/* Cleanup will be performed by the caller (queuecommand) */
2271 
2272 	qla_put_fw_resources(sp->qpair, &sp->iores);
2273 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2274 
2275 	return QLA_FUNCTION_FAILED;
2276 }
2277 
2278 /* Generic Control-SRB manipulation functions. */
2279 
2280 /* hardware_lock assumed to be held. */
2281 
2282 void *
2283 __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
2284 {
2285 	scsi_qla_host_t *vha = qpair->vha;
2286 	struct qla_hw_data *ha = vha->hw;
2287 	struct req_que *req = qpair->req;
2288 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2289 	uint32_t handle;
2290 	request_t *pkt;
2291 	uint16_t cnt, req_cnt;
2292 
2293 	pkt = NULL;
2294 	req_cnt = 1;
2295 	handle = 0;
2296 
2297 	if (sp && (sp->type != SRB_SCSI_CMD)) {
2298 		/* Adjust entry-counts as needed. */
2299 		req_cnt = sp->iocbs;
2300 	}
2301 
2302 	/* Check for room on request queue. */
2303 	if (req->cnt < req_cnt + 2) {
2304 		if (qpair->use_shadow_reg)
2305 			cnt = *req->out_ptr;
2306 		else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
2307 		    IS_QLA28XX(ha))
2308 			cnt = rd_reg_dword(&reg->isp25mq.req_q_out);
2309 		else if (IS_P3P_TYPE(ha))
2310 			cnt = rd_reg_dword(reg->isp82.req_q_out);
2311 		else if (IS_FWI2_CAPABLE(ha))
2312 			cnt = rd_reg_dword(&reg->isp24.req_q_out);
2313 		else if (IS_QLAFX00(ha))
2314 			cnt = rd_reg_dword(&reg->ispfx00.req_q_out);
2315 		else
2316 			cnt = qla2x00_debounce_register(
2317 			    ISP_REQ_Q_OUT(ha, &reg->isp));
2318 
2319 		if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) {
2320 			qla_schedule_eeh_work(vha);
2321 			return NULL;
2322 		}
2323 
2324 		if (req->ring_index < cnt)
2325 			req->cnt = cnt - req->ring_index;
2326 		else
2327 			req->cnt = req->length -
2328 			    (req->ring_index - cnt);
2329 	}
2330 	if (req->cnt < req_cnt + 2)
2331 		goto queuing_error;
2332 
2333 	if (sp) {
2334 		handle = qla2xxx_get_next_handle(req);
2335 		if (handle == 0) {
2336 			ql_log(ql_log_warn, vha, 0x700b,
2337 			    "No room on outstanding cmd array.\n");
2338 			goto queuing_error;
2339 		}
2340 
2341 		/* Prep command array. */
2342 		req->current_outstanding_cmd = handle;
2343 		req->outstanding_cmds[handle] = sp;
2344 		sp->handle = handle;
2345 	}
2346 
2347 	/* Prep packet */
2348 	req->cnt -= req_cnt;
2349 	pkt = req->ring_ptr;
2350 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
2351 	if (IS_QLAFX00(ha)) {
2352 		wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt);
2353 		wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle);
2354 	} else {
2355 		pkt->entry_count = req_cnt;
2356 		pkt->handle = handle;
2357 	}
2358 
2359 	return pkt;
2360 
2361 queuing_error:
2362 	qpair->tgt_counters.num_alloc_iocb_failed++;
2363 	return pkt;
2364 }
2365 
2366 void *
2367 qla2x00_alloc_iocbs_ready(struct qla_qpair *qpair, srb_t *sp)
2368 {
2369 	scsi_qla_host_t *vha = qpair->vha;
2370 
2371 	if (qla2x00_reset_active(vha))
2372 		return NULL;
2373 
2374 	return __qla2x00_alloc_iocbs(qpair, sp);
2375 }
2376 
2377 void *
2378 qla2x00_alloc_iocbs(struct scsi_qla_host *vha, srb_t *sp)
2379 {
2380 	return __qla2x00_alloc_iocbs(vha->hw->base_qpair, sp);
2381 }
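/*
 * Illustrative use of the allocators above -- a sketch of the pattern used by
 * qla2x00_start_sp(), not a verbatim copy of it.  The caller must already
 * hold the appropriate qpair/hardware lock:
 *
 *	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
 *	if (!pkt)
 *		return -EAGAIN;			// ring or handle array full
 *	qla24xx_login_iocb(sp, pkt);		// or any other IOCB builder
 *	wmb();
 *	qla2x00_start_iocbs(sp->vha, sp->qpair->req);
 */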
2382 
2383 static void
2384 qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2385 {
2386 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2387 
2388 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2389 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2390 	if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) {
2391 		logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI);
2392 		if (sp->vha->flags.nvme_first_burst)
2393 			logio->io_parameter[0] =
2394 				cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
2395 		if (sp->vha->flags.nvme2_enabled) {
2396 			/* Set service parameter BIT_7 for NVME CONF support */
2397 			logio->io_parameter[0] |=
2398 				cpu_to_le32(NVME_PRLI_SP_CONF);
2399 			/* Set service parameter BIT_8 for SLER support */
2400 			logio->io_parameter[0] |=
2401 				cpu_to_le32(NVME_PRLI_SP_SLER);
2402 			/* Set service parameter BIT_9 for PI control support */
2403 			logio->io_parameter[0] |=
2404 				cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
2405 		}
2406 	}
2407 
2408 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2409 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2410 	logio->port_id[1] = sp->fcport->d_id.b.area;
2411 	logio->port_id[2] = sp->fcport->d_id.b.domain;
2412 	logio->vp_index = sp->vha->vp_idx;
2413 }
2414 
2415 static void
2416 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2417 {
2418 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2419 
2420 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2421 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2422 
2423 	if (lio->u.logio.flags & SRB_LOGIN_PRLI_ONLY) {
2424 		logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI);
2425 	} else {
2426 		logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2427 		if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2428 			logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2429 		if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2430 			logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2431 		if (lio->u.logio.flags & SRB_LOGIN_FCSP) {
2432 			logio->control_flags |=
2433 			    cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI);
2434 			logio->io_parameter[0] =
2435 			    cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO);
2436 		}
2437 	}
2438 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2439 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2440 	logio->port_id[1] = sp->fcport->d_id.b.area;
2441 	logio->port_id[2] = sp->fcport->d_id.b.domain;
2442 	logio->vp_index = sp->vha->vp_idx;
2443 }
2444 
2445 static void
2446 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2447 {
2448 	struct qla_hw_data *ha = sp->vha->hw;
2449 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2450 	uint16_t opts;
2451 
2452 	mbx->entry_type = MBX_IOCB_TYPE;
2453 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2454 	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2455 	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2456 	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2457 	if (HAS_EXTENDED_IDS(ha)) {
2458 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2459 		mbx->mb10 = cpu_to_le16(opts);
2460 	} else {
2461 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2462 	}
2463 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2464 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2465 	    sp->fcport->d_id.b.al_pa);
2466 	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2467 }
2468 
2469 static void
2470 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2471 {
2472 	u16 control_flags = LCF_COMMAND_LOGO;
2473 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2474 
2475 	if (sp->fcport->explicit_logout) {
2476 		control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
2477 	} else {
2478 		control_flags |= LCF_IMPL_LOGO;
2479 
2480 		if (!sp->fcport->keep_nport_handle)
2481 			control_flags |= LCF_FREE_NPORT;
2482 	}
2483 
2484 	logio->control_flags = cpu_to_le16(control_flags);
2485 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2486 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2487 	logio->port_id[1] = sp->fcport->d_id.b.area;
2488 	logio->port_id[2] = sp->fcport->d_id.b.domain;
2489 	logio->vp_index = sp->vha->vp_idx;
2490 }
2491 
2492 static void
2493 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2494 {
2495 	struct qla_hw_data *ha = sp->vha->hw;
2496 
2497 	mbx->entry_type = MBX_IOCB_TYPE;
2498 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2499 	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2500 	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2501 	    cpu_to_le16(sp->fcport->loop_id) :
2502 	    cpu_to_le16(sp->fcport->loop_id << 8);
2503 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2504 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2505 	    sp->fcport->d_id.b.al_pa);
2506 	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2507 	/* Implicit: mbx->mb10 = 0. */
2508 }
2509 
2510 static void
2511 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2512 {
2513 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2514 	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2515 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2516 	logio->vp_index = sp->vha->vp_idx;
2517 }
2518 
2519 static void
2520 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2521 {
2522 	struct qla_hw_data *ha = sp->vha->hw;
2523 
2524 	mbx->entry_type = MBX_IOCB_TYPE;
2525 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2526 	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2527 	if (HAS_EXTENDED_IDS(ha)) {
2528 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2529 		mbx->mb10 = cpu_to_le16(BIT_0);
2530 	} else {
2531 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2532 	}
2533 	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2534 	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2535 	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2536 	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2537 	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2538 }
2539 
2540 static void
2541 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2542 {
2543 	uint32_t flags;
2544 	uint64_t lun;
2545 	struct fc_port *fcport = sp->fcport;
2546 	scsi_qla_host_t *vha = fcport->vha;
2547 	struct qla_hw_data *ha = vha->hw;
2548 	struct srb_iocb *iocb = &sp->u.iocb_cmd;
2549 	struct req_que *req = sp->qpair->req;
2550 
2551 	flags = iocb->u.tmf.flags;
2552 	lun = iocb->u.tmf.lun;
2553 
2554 	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2555 	tsk->entry_count = 1;
2556 	tsk->handle = make_handle(req->id, tsk->handle);
2557 	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
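	/*
	 * Timeout is twice R_A_TOV; ha->r_a_tov appears to be kept in 100 ms
	 * units here, so the divide by 10 converts it to seconds before
	 * doubling.
	 */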
2558 	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2559 	tsk->control_flags = cpu_to_le32(flags);
2560 	tsk->port_id[0] = fcport->d_id.b.al_pa;
2561 	tsk->port_id[1] = fcport->d_id.b.area;
2562 	tsk->port_id[2] = fcport->d_id.b.domain;
2563 	tsk->vp_index = fcport->vha->vp_idx;
2564 
2565 	if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET |
2566 	    TCF_CLEAR_TASK_SET | TCF_CLEAR_ACA)) {
2567 		int_to_scsilun(lun, &tsk->lun);
2568 		host_to_fcp_swap((uint8_t *)&tsk->lun,
2569 			sizeof(tsk->lun));
2570 	}
2571 }
2572 
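/*
 * Completion shim installed by qla2x00_init_async_sp(): when the timeout
 * timer is still pending it is cancelled and its reference (ref: TMR) is
 * dropped; if that was the last reference the SRB is already freed and the
 * caller-supplied ->async_done() is not invoked.
 */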
2573 static void
2574 qla2x00_async_done(struct srb *sp, int res)
2575 {
2576 	if (del_timer(&sp->u.iocb_cmd.timer)) {
2577 		/*
2578 		 * Successfully cancelled the timeout handler
2579 		 * ref: TMR
2580 		 */
2581 		if (kref_put(&sp->cmd_kref, qla2x00_sp_release))
2582 			return;
2583 	}
2584 	sp->async_done(sp, res);
2585 }
2586 
2587 void
2588 qla2x00_sp_release(struct kref *kref)
2589 {
2590 	struct srb *sp = container_of(kref, struct srb, cmd_kref);
2591 
2592 	sp->free(sp);
2593 }
2594 
2595 void
2596 qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
2597 		     void (*done)(struct srb *sp, int res))
2598 {
2599 	timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0);
2600 	sp->done = qla2x00_async_done;
2601 	sp->async_done = done;
2602 	sp->free = qla2x00_sp_free;
2603 	sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
2604 	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
2605 	if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
2606 		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
2607 	sp->start_timer = 1;
2608 }
2609 
2610 static void qla2x00_els_dcmd_sp_free(srb_t *sp)
2611 {
2612 	struct srb_iocb *elsio = &sp->u.iocb_cmd;
2613 
2614 	kfree(sp->fcport);
2615 
2616 	if (elsio->u.els_logo.els_logo_pyld)
2617 		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2618 		    elsio->u.els_logo.els_logo_pyld,
2619 		    elsio->u.els_logo.els_logo_pyld_dma);
2620 
2621 	del_timer(&elsio->timer);
2622 	qla2x00_rel_sp(sp);
2623 }
2624 
2625 static void
2626 qla2x00_els_dcmd_iocb_timeout(void *data)
2627 {
2628 	srb_t *sp = data;
2629 	fc_port_t *fcport = sp->fcport;
2630 	struct scsi_qla_host *vha = sp->vha;
2631 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2632 	unsigned long flags = 0;
2633 	int res, h;
2634 
2635 	ql_dbg(ql_dbg_io, vha, 0x3069,
2636 	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2637 	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2638 	    fcport->d_id.b.al_pa);
2639 
2640 	/* Abort the exchange */
2641 	res = qla24xx_async_abort_cmd(sp, false);
2642 	if (res) {
2643 		ql_dbg(ql_dbg_io, vha, 0x3070,
2644 		    "mbx abort_command failed.\n");
2645 		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2646 		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2647 			if (sp->qpair->req->outstanding_cmds[h] == sp) {
2648 				sp->qpair->req->outstanding_cmds[h] = NULL;
2649 				break;
2650 			}
2651 		}
2652 		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2653 		complete(&lio->u.els_logo.comp);
2654 	} else {
2655 		ql_dbg(ql_dbg_io, vha, 0x3071,
2656 		    "mbx abort_command success.\n");
2657 	}
2658 }
2659 
2660 static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
2661 {
2662 	fc_port_t *fcport = sp->fcport;
2663 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2664 	struct scsi_qla_host *vha = sp->vha;
2665 
2666 	ql_dbg(ql_dbg_io, vha, 0x3072,
2667 	    "%s hdl=%x, portid=%02x%02x%02x done\n",
2668 	    sp->name, sp->handle, fcport->d_id.b.domain,
2669 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2670 
2671 	complete(&lio->u.els_logo.comp);
2672 }
2673 
2674 int
2675 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2676     port_id_t remote_did)
2677 {
2678 	srb_t *sp;
2679 	fc_port_t *fcport = NULL;
2680 	struct srb_iocb *elsio = NULL;
2681 	struct qla_hw_data *ha = vha->hw;
2682 	struct els_logo_payload logo_pyld;
2683 	int rval = QLA_SUCCESS;
2684 
2685 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2686 	if (!fcport) {
2687 		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2688 		return -ENOMEM;
2689 	}
2690 
2691 	/* Alloc SRB structure
2692 	 * ref: INIT
2693 	 */
2694 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2695 	if (!sp) {
2696 		kfree(fcport);
2697 		ql_log(ql_log_info, vha, 0x70e6,
2698 		    "SRB allocation failed\n");
2699 		return -ENOMEM;
2700 	}
2701 
2702 	elsio = &sp->u.iocb_cmd;
2703 	fcport->loop_id = 0xFFFF;
2704 	fcport->d_id.b.domain = remote_did.b.domain;
2705 	fcport->d_id.b.area = remote_did.b.area;
2706 	fcport->d_id.b.al_pa = remote_did.b.al_pa;
2707 
2708 	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2709 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2710 
2711 	sp->type = SRB_ELS_DCMD;
2712 	sp->name = "ELS_DCMD";
2713 	sp->fcport = fcport;
2714 	qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT,
2715 			      qla2x00_els_dcmd_sp_done);
2716 	sp->free = qla2x00_els_dcmd_sp_free;
2717 	sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout;
2718 	init_completion(&sp->u.iocb_cmd.u.els_logo.comp);
2719 
2720 	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2721 			    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2722 			    GFP_KERNEL);
2723 
2724 	if (!elsio->u.els_logo.els_logo_pyld) {
2725 		/* ref: INIT */
2726 		kref_put(&sp->cmd_kref, qla2x00_sp_release);
2727 		return QLA_FUNCTION_FAILED;
2728 	}
2729 
2730 	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2731 
2732 	elsio->u.els_logo.els_cmd = els_opcode;
2733 	logo_pyld.opcode = els_opcode;
2734 	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2735 	logo_pyld.s_id[1] = vha->d_id.b.area;
2736 	logo_pyld.s_id[2] = vha->d_id.b.domain;
2737 	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2738 	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2739 
2740 	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2741 	    sizeof(struct els_logo_payload));
2742 	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
2743 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
2744 		       elsio->u.els_logo.els_logo_pyld,
2745 		       sizeof(*elsio->u.els_logo.els_logo_pyld));
2746 
2747 	rval = qla2x00_start_sp(sp);
2748 	if (rval != QLA_SUCCESS) {
2749 		/* ref: INIT */
2750 		kref_put(&sp->cmd_kref, qla2x00_sp_release);
2751 		return QLA_FUNCTION_FAILED;
2752 	}
2753 
2754 	ql_dbg(ql_dbg_io, vha, 0x3074,
2755 	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2756 	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2757 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2758 
2759 	wait_for_completion(&elsio->u.els_logo.comp);
2760 
2761 	/* ref: INIT */
2762 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
2763 	return rval;
2764 }
2765 
2766 static void
2767 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2768 {
2769 	scsi_qla_host_t *vha = sp->vha;
2770 	struct srb_iocb *elsio = &sp->u.iocb_cmd;
2771 
2772 	els_iocb->entry_type = ELS_IOCB_TYPE;
2773 	els_iocb->entry_count = 1;
2774 	els_iocb->sys_define = 0;
2775 	els_iocb->entry_status = 0;
2776 	els_iocb->handle = sp->handle;
2777 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2778 	els_iocb->tx_dsd_count = cpu_to_le16(1);
2779 	els_iocb->vp_index = vha->vp_idx;
2780 	els_iocb->sof_type = EST_SOFI3;
2781 	els_iocb->rx_dsd_count = 0;
2782 	els_iocb->opcode = elsio->u.els_logo.els_cmd;
2783 
2784 	els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
2785 	els_iocb->d_id[1] = sp->fcport->d_id.b.area;
2786 	els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
2787 	/* For the SID the byte order is different from the DID */
2788 	els_iocb->s_id[1] = vha->d_id.b.al_pa;
2789 	els_iocb->s_id[2] = vha->d_id.b.area;
2790 	els_iocb->s_id[0] = vha->d_id.b.domain;
2791 
2792 	if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
2793 		if (vha->hw->flags.edif_enabled)
2794 			els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN);
2795 		else
2796 			els_iocb->control_flags = 0;
2797 		els_iocb->tx_byte_count = els_iocb->tx_len =
2798 			cpu_to_le32(sizeof(struct els_plogi_payload));
2799 		put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
2800 				   &els_iocb->tx_address);
2801 		els_iocb->rx_dsd_count = cpu_to_le16(1);
2802 		els_iocb->rx_byte_count = els_iocb->rx_len =
2803 			cpu_to_le32(sizeof(struct els_plogi_payload));
2804 		put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
2805 				   &els_iocb->rx_address);
2806 
2807 		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
2808 		    "PLOGI ELS IOCB:\n");
2809 		ql_dump_buffer(ql_log_info, vha, 0x0109,
2810 		    (uint8_t *)els_iocb,
2811 		    sizeof(*els_iocb));
2812 	} else {
2813 		els_iocb->tx_byte_count =
2814 			cpu_to_le32(sizeof(struct els_logo_payload));
2815 		put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
2816 				   &els_iocb->tx_address);
2817 		els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2818 
2819 		els_iocb->rx_byte_count = 0;
2820 		els_iocb->rx_address = 0;
2821 		els_iocb->rx_len = 0;
2822 		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
2823 		       "LOGO ELS IOCB:");
2824 		ql_dump_buffer(ql_log_info, vha, 0x010b,
2825 			       els_iocb,
2826 			       sizeof(*els_iocb));
2827 	}
2828 
2829 	sp->vha->qla_stats.control_requests++;
2830 }
2831 
2832 void
2833 qla2x00_els_dcmd2_iocb_timeout(void *data)
2834 {
2835 	srb_t *sp = data;
2836 	fc_port_t *fcport = sp->fcport;
2837 	struct scsi_qla_host *vha = sp->vha;
2838 	unsigned long flags = 0;
2839 	int res, h;
2840 
2841 	ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
2842 	    "%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
2843 	    sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
2844 
2845 	/* Abort the exchange */
2846 	res = qla24xx_async_abort_cmd(sp, false);
2847 	ql_dbg(ql_dbg_io, vha, 0x3070,
2848 	    "mbx abort_command %s\n",
2849 	    (res == QLA_SUCCESS) ? "successful" : "failed");
2850 	if (res) {
2851 		spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
2852 		for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
2853 			if (sp->qpair->req->outstanding_cmds[h] == sp) {
2854 				sp->qpair->req->outstanding_cmds[h] = NULL;
2855 				break;
2856 			}
2857 		}
2858 		spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
2859 		sp->done(sp, QLA_FUNCTION_TIMEOUT);
2860 	}
2861 }
2862 
2863 void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
2864 {
2865 	if (els_plogi->els_plogi_pyld)
2866 		dma_free_coherent(&vha->hw->pdev->dev,
2867 				  els_plogi->tx_size,
2868 				  els_plogi->els_plogi_pyld,
2869 				  els_plogi->els_plogi_pyld_dma);
2870 
2871 	if (els_plogi->els_resp_pyld)
2872 		dma_free_coherent(&vha->hw->pdev->dev,
2873 				  els_plogi->rx_size,
2874 				  els_plogi->els_resp_pyld,
2875 				  els_plogi->els_resp_pyld_dma);
2876 }
2877 
2878 static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
2879 {
2880 	fc_port_t *fcport = sp->fcport;
2881 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2882 	struct scsi_qla_host *vha = sp->vha;
2883 	struct event_arg ea;
2884 	struct qla_work_evt *e;
2885 	struct fc_port *conflict_fcport;
2886 	port_id_t cid;	/* conflict Nport id */
2887 	const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
2888 	u16 lid;
2889 
2890 	ql_dbg(ql_dbg_disc, vha, 0x3072,
2891 	    "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
2892 	    sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name);
2893 
2894 	fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
2895 	/* For edif, set logout on delete to ensure any residual key from FW is flushed.*/
2896 	/* For edif, set logout on delete to ensure any residual key from FW is flushed. */
2897 	fcport->chip_reset = vha->hw->base_qpair->chip_reset;
2898 
2899 	if (sp->flags & SRB_WAKEUP_ON_COMP)
2900 		complete(&lio->u.els_plogi.comp);
2901 	else {
2902 		switch (le32_to_cpu(fw_status[0])) {
2903 		case CS_DATA_UNDERRUN:
2904 		case CS_COMPLETE:
2905 			memset(&ea, 0, sizeof(ea));
2906 			ea.fcport = fcport;
2907 			ea.rc = res;
2908 			qla_handle_els_plogi_done(vha, &ea);
2909 			break;
2910 
2911 		case CS_IOCB_ERROR:
2912 			switch (le32_to_cpu(fw_status[1])) {
2913 			case LSC_SCODE_PORTID_USED:
2914 				lid = le32_to_cpu(fw_status[2]) & 0xffff;
2915 				qlt_find_sess_invalidate_other(vha,
2916 				    wwn_to_u64(fcport->port_name),
2917 				    fcport->d_id, lid, &conflict_fcport);
2918 				if (conflict_fcport) {
2919 					/*
2920 					 * Another fcport shares the same
2921 					 * loop_id & nport id; conflict
2922 					 * fcport needs to finish cleanup
2923 					 * before this fcport can proceed
2924 					 * to login.
2925 					 */
2926 					conflict_fcport->conflict = fcport;
2927 					fcport->login_pause = 1;
2928 					ql_dbg(ql_dbg_disc, vha, 0x20ed,
2929 					    "%s %d %8phC pid %06x inuse with lid %#x.\n",
2930 					    __func__, __LINE__,
2931 					    fcport->port_name,
2932 					    fcport->d_id.b24, lid);
2933 				} else {
2934 					ql_dbg(ql_dbg_disc, vha, 0x20ed,
2935 					    "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
2936 					    __func__, __LINE__,
2937 					    fcport->port_name,
2938 					    fcport->d_id.b24, lid);
2939 					qla2x00_clear_loop_id(fcport);
2940 					set_bit(lid, vha->hw->loop_id_map);
2941 					fcport->loop_id = lid;
2942 					fcport->keep_nport_handle = 0;
2943 					qlt_schedule_sess_for_deletion(fcport);
2944 				}
2945 				break;
2946 
2947 			case LSC_SCODE_NPORT_USED:
2948 				cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16)
2949 					& 0xff;
2950 				cid.b.area   = (le32_to_cpu(fw_status[2]) >>  8)
2951 					& 0xff;
2952 				cid.b.al_pa  = le32_to_cpu(fw_status[2]) & 0xff;
2953 				cid.b.rsvd_1 = 0;
2954 
2955 				ql_dbg(ql_dbg_disc, vha, 0x20ec,
2956 				    "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
2957 				    __func__, __LINE__, fcport->port_name,
2958 				    fcport->loop_id, cid.b24);
2959 				set_bit(fcport->loop_id,
2960 				    vha->hw->loop_id_map);
2961 				fcport->loop_id = FC_NO_LOOP_ID;
2962 				qla24xx_post_gnl_work(vha, fcport);
2963 				break;
2964 
2965 			case LSC_SCODE_NOXCB:
2966 				vha->hw->exch_starvation++;
2967 				if (vha->hw->exch_starvation > 5) {
2968 					ql_log(ql_log_warn, vha, 0xd046,
2969 					    "Exchange starvation. Resetting RISC\n");
2970 					vha->hw->exch_starvation = 0;
2971 					set_bit(ISP_ABORT_NEEDED,
2972 					    &vha->dpc_flags);
2973 					qla2xxx_wake_dpc(vha);
2974 					break;
2975 				}
2976 				fallthrough;
2977 			default:
2978 				ql_dbg(ql_dbg_disc, vha, 0x20eb,
2979 				    "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
2980 				    __func__, sp->fcport->port_name,
2981 				    fw_status[0], fw_status[1], fw_status[2]);
2982 
2983 				fcport->flags &= ~FCF_ASYNC_SENT;
2984 				qlt_schedule_sess_for_deletion(fcport);
2985 				break;
2986 			}
2987 			break;
2988 
2989 		default:
2990 			ql_dbg(ql_dbg_disc, vha, 0x20eb,
2991 			    "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
2992 			    __func__, sp->fcport->port_name,
2993 			    fw_status[0], fw_status[1], fw_status[2]);
2994 
2995 			sp->fcport->flags &= ~FCF_ASYNC_SENT;
2996 			qlt_schedule_sess_for_deletion(fcport);
2997 			break;
2998 		}
2999 
3000 		e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3001 		if (!e) {
3002 			struct srb_iocb *elsio = &sp->u.iocb_cmd;
3003 
3004 			qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3005 			/* ref: INIT */
3006 			kref_put(&sp->cmd_kref, qla2x00_sp_release);
3007 			return;
3008 		}
3009 		e->u.iosb.sp = sp;
3010 		qla2x00_post_work(vha, e);
3011 	}
3012 }
3013 
3014 int
3015 qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
3016     fc_port_t *fcport, bool wait)
3017 {
3018 	srb_t *sp;
3019 	struct srb_iocb *elsio = NULL;
3020 	struct qla_hw_data *ha = vha->hw;
3021 	int rval = QLA_SUCCESS;
3022 	void	*ptr, *resp_ptr;
3023 
3024 	/* Alloc SRB structure
3025 	 * ref: INIT
3026 	 */
3027 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3028 	if (!sp) {
3029 		ql_log(ql_log_info, vha, 0x70e6,
3030 		    "SRB allocation failed\n");
3031 		fcport->flags &= ~FCF_ASYNC_ACTIVE;
3032 		return -ENOMEM;
3033 	}
3034 
3035 	fcport->flags |= FCF_ASYNC_SENT;
3036 	qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
3037 	elsio = &sp->u.iocb_cmd;
3038 	ql_dbg(ql_dbg_io, vha, 0x3073,
3039 	       "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24);
3040 
3041 	if (wait)
3042 		sp->flags = SRB_WAKEUP_ON_COMP;
3043 
3044 	sp->type = SRB_ELS_DCMD;
3045 	sp->name = "ELS_DCMD";
3046 	sp->fcport = fcport;
3047 	qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2,
3048 			     qla2x00_els_dcmd2_sp_done);
3049 	sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout;
3050 
3051 	elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE;
3052 
3053 	ptr = elsio->u.els_plogi.els_plogi_pyld =
3054 	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.tx_size,
3055 		&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
3056 
3057 	if (!elsio->u.els_plogi.els_plogi_pyld) {
3058 		rval = QLA_FUNCTION_FAILED;
3059 		goto out;
3060 	}
3061 
3062 	resp_ptr = elsio->u.els_plogi.els_resp_pyld =
3063 	    dma_alloc_coherent(&ha->pdev->dev, elsio->u.els_plogi.rx_size,
3064 		&elsio->u.els_plogi.els_resp_pyld_dma, GFP_KERNEL);
3065 
3066 	if (!elsio->u.els_plogi.els_resp_pyld) {
3067 		rval = QLA_FUNCTION_FAILED;
3068 		goto out;
3069 	}
3070 
3071 	ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr);
3072 
3073 	memset(ptr, 0, sizeof(struct els_plogi_payload));
3074 	memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
3075 	memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
3076 	       (void *)&ha->plogi_els_payld + offsetof(struct fc_els_flogi, fl_csp),
3077 	       sizeof(ha->plogi_els_payld) - offsetof(struct fc_els_flogi, fl_csp));
3078 
3079 	elsio->u.els_plogi.els_cmd = els_opcode;
3080 	elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
3081 
3082 	if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) {
3083 		struct fc_els_flogi *p = ptr;
3084 
3085 		p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC);
3086 	}
3087 
3088 	ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
3089 	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
3090 	    (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
3091 	    sizeof(*elsio->u.els_plogi.els_plogi_pyld));
3092 
3093 	init_completion(&elsio->u.els_plogi.comp);
3094 	rval = qla2x00_start_sp(sp);
3095 	if (rval != QLA_SUCCESS) {
3096 		rval = QLA_FUNCTION_FAILED;
3097 	} else {
3098 		ql_dbg(ql_dbg_disc, vha, 0x3074,
3099 		    "%s PLOGI sent, hdl=%x, loopid=%x, to port_id %06x from port_id %06x\n",
3100 		    sp->name, sp->handle, fcport->loop_id,
3101 		    fcport->d_id.b24, vha->d_id.b24);
3102 	}
3103 
3104 	if (wait) {
3105 		wait_for_completion(&elsio->u.els_plogi.comp);
3106 
3107 		if (elsio->u.els_plogi.comp_status != CS_COMPLETE)
3108 			rval = QLA_FUNCTION_FAILED;
3109 	} else {
3110 		goto done;
3111 	}
3112 
3113 out:
3114 	fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3115 	qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
3116 	/* ref: INIT */
3117 	kref_put(&sp->cmd_kref, qla2x00_sp_release);
3118 done:
3119 	return rval;
3120 }
3121 
3122 /* it is assumed the qpair lock is held */
3123 void qla_els_pt_iocb(struct scsi_qla_host *vha,
3124 	struct els_entry_24xx *els_iocb,
3125 	struct qla_els_pt_arg *a)
3126 {
3127 	els_iocb->entry_type = ELS_IOCB_TYPE;
3128 	els_iocb->entry_count = 1;
3129 	els_iocb->sys_define = 0;
3130 	els_iocb->entry_status = 0;
3131 	els_iocb->handle = QLA_SKIP_HANDLE;
3132 	els_iocb->nport_handle = a->nport_handle;
3133 	els_iocb->rx_xchg_address = a->rx_xchg_address;
3134 	els_iocb->tx_dsd_count = cpu_to_le16(1);
3135 	els_iocb->vp_index = a->vp_idx;
3136 	els_iocb->sof_type = EST_SOFI3;
3137 	els_iocb->rx_dsd_count = cpu_to_le16(0);
3138 	els_iocb->opcode = a->els_opcode;
3139 
3140 	els_iocb->d_id[0] = a->did.b.al_pa;
3141 	els_iocb->d_id[1] = a->did.b.area;
3142 	els_iocb->d_id[2] = a->did.b.domain;
3143 	/* For the SID the byte order is different from the DID */
3144 	els_iocb->s_id[1] = vha->d_id.b.al_pa;
3145 	els_iocb->s_id[2] = vha->d_id.b.area;
3146 	els_iocb->s_id[0] = vha->d_id.b.domain;
3147 
3148 	els_iocb->control_flags = cpu_to_le16(a->control_flags);
3149 
3150 	els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count);
3151 	els_iocb->tx_len = cpu_to_le32(a->tx_len);
3152 	put_unaligned_le64(a->tx_addr, &els_iocb->tx_address);
3153 
3154 	els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count);
3155 	els_iocb->rx_len = cpu_to_le32(a->rx_len);
3156 	put_unaligned_le64(a->rx_addr, &els_iocb->rx_address);
3157 }
3158 
3159 static void
3160 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
3161 {
3162 	struct bsg_job *bsg_job = sp->u.bsg_job;
3163 	struct fc_bsg_request *bsg_request = bsg_job->request;
3164 
3165 	els_iocb->entry_type = ELS_IOCB_TYPE;
3166 	els_iocb->entry_count = 1;
3167 	els_iocb->sys_define = 0;
3168 	els_iocb->entry_status = 0;
3169 	els_iocb->handle = sp->handle;
3170 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3171 	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3172 	els_iocb->vp_index = sp->vha->vp_idx;
3173 	els_iocb->sof_type = EST_SOFI3;
3174 	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3175 
3176 	els_iocb->opcode =
3177 	    sp->type == SRB_ELS_CMD_RPT ?
3178 	    bsg_request->rqst_data.r_els.els_code :
3179 	    bsg_request->rqst_data.h_els.command_code;
3180 	els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa;
3181 	els_iocb->d_id[1] = sp->fcport->d_id.b.area;
3182 	els_iocb->d_id[2] = sp->fcport->d_id.b.domain;
3183 	els_iocb->control_flags = 0;
3184 	els_iocb->rx_byte_count =
3185 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
3186 	els_iocb->tx_byte_count =
3187 	    cpu_to_le32(bsg_job->request_payload.payload_len);
3188 
3189 	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3190 			   &els_iocb->tx_address);
3191 	els_iocb->tx_len = cpu_to_le32(sg_dma_len
3192 	    (bsg_job->request_payload.sg_list));
3193 
3194 	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3195 			   &els_iocb->rx_address);
3196 	els_iocb->rx_len = cpu_to_le32(sg_dma_len
3197 	    (bsg_job->reply_payload.sg_list));
3198 
3199 	sp->vha->qla_stats.control_requests++;
3200 }
3201 
3202 static void
3203 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
3204 {
3205 	uint16_t        avail_dsds;
3206 	struct dsd64	*cur_dsd;
3207 	struct scatterlist *sg;
3208 	int index;
3209 	uint16_t tot_dsds;
3210 	scsi_qla_host_t *vha = sp->vha;
3211 	struct qla_hw_data *ha = vha->hw;
3212 	struct bsg_job *bsg_job = sp->u.bsg_job;
3213 	int entry_count = 1;
3214 
3215 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
3216 	ct_iocb->entry_type = CT_IOCB_TYPE;
3217 	ct_iocb->entry_status = 0;
3218 	ct_iocb->handle1 = sp->handle;
3219 	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
3220 	ct_iocb->status = cpu_to_le16(0);
3221 	ct_iocb->control_flags = cpu_to_le16(0);
3222 	ct_iocb->timeout = 0;
3223 	ct_iocb->cmd_dsd_count =
3224 	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
3225 	ct_iocb->total_dsd_count =
3226 	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
3227 	ct_iocb->req_bytecount =
3228 	    cpu_to_le32(bsg_job->request_payload.payload_len);
3229 	ct_iocb->rsp_bytecount =
3230 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
3231 
3232 	put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list),
3233 			   &ct_iocb->req_dsd.address);
3234 	ct_iocb->req_dsd.length = ct_iocb->req_bytecount;
3235 
3236 	put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list),
3237 			   &ct_iocb->rsp_dsd.address);
3238 	ct_iocb->rsp_dsd.length = ct_iocb->rsp_bytecount;
3239 
3240 	avail_dsds = 1;
3241 	cur_dsd = &ct_iocb->rsp_dsd;
3242 	index = 0;
3243 	tot_dsds = bsg_job->reply_payload.sg_cnt;
3244 
3245 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
3246 		cont_a64_entry_t *cont_pkt;
3247 
3248 		/* Allocate additional continuation packets? */
3249 		if (avail_dsds == 0) {
3250 			/*
3251 			 * Five DSDs are available in the Cont.
3252 			 * Type 1 IOCB.
3253 			 */
3254 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3255 			    vha->hw->req_q_map[0]);
3256 			cur_dsd = cont_pkt->dsd;
3257 			avail_dsds = 5;
3258 			entry_count++;
3259 		}
3260 
3261 		append_dsd64(&cur_dsd, sg);
3262 		avail_dsds--;
3263 	}
3264 	ct_iocb->entry_count = entry_count;
3265 
3266 	sp->vha->qla_stats.control_requests++;
3267 }
3268 
3269 static void
3270 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
3271 {
3272 	uint16_t        avail_dsds;
3273 	struct dsd64	*cur_dsd;
3274 	struct scatterlist *sg;
3275 	int index;
3276 	uint16_t cmd_dsds, rsp_dsds;
3277 	scsi_qla_host_t *vha = sp->vha;
3278 	struct qla_hw_data *ha = vha->hw;
3279 	struct bsg_job *bsg_job = sp->u.bsg_job;
3280 	int entry_count = 1;
3281 	cont_a64_entry_t *cont_pkt = NULL;
3282 
3283 	ct_iocb->entry_type = CT_IOCB_TYPE;
3284 	ct_iocb->entry_status = 0;
3285 	ct_iocb->sys_define = 0;
3286 	ct_iocb->handle = sp->handle;
3287 
3288 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3289 	ct_iocb->vp_index = sp->vha->vp_idx;
3290 	ct_iocb->comp_status = cpu_to_le16(0);
3291 
3292 	cmd_dsds = bsg_job->request_payload.sg_cnt;
3293 	rsp_dsds = bsg_job->reply_payload.sg_cnt;
3294 
3295 	ct_iocb->cmd_dsd_count = cpu_to_le16(cmd_dsds);
3296 	ct_iocb->timeout = 0;
3297 	ct_iocb->rsp_dsd_count = cpu_to_le16(rsp_dsds);
3298 	ct_iocb->cmd_byte_count =
3299 	    cpu_to_le32(bsg_job->request_payload.payload_len);
3300 
3301 	avail_dsds = 2;
3302 	cur_dsd = ct_iocb->dsd;
3303 	index = 0;
3304 
3305 	for_each_sg(bsg_job->request_payload.sg_list, sg, cmd_dsds, index) {
3306 		/* Allocate additional continuation packets? */
3307 		if (avail_dsds == 0) {
3308 			/*
3309 			 * Five DSDs are available in the Cont.
3310 			 * Type 1 IOCB.
3311 			 */
3312 			cont_pkt = qla2x00_prep_cont_type1_iocb(
3313 			    vha, ha->req_q_map[0]);
3314 			cur_dsd = cont_pkt->dsd;
3315 			avail_dsds = 5;
3316 			entry_count++;
3317 		}
3318 
3319 		append_dsd64(&cur_dsd, sg);
3320 		avail_dsds--;
3321 	}
3322 
3323 	index = 0;
3324 
3325 	for_each_sg(bsg_job->reply_payload.sg_list, sg, rsp_dsds, index) {
3326 		/* Allocate additional continuation packets? */
3327 		if (avail_dsds == 0) {
3328 			/*
3329 			 * Five DSDs are available in the Cont.
3330 			 * Type 1 IOCB.
3331 			 */
3332 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
3333 			    ha->req_q_map[0]);
3334 			cur_dsd = cont_pkt->dsd;
3335 			avail_dsds = 5;
3336 			entry_count++;
3337 		}
3338 
3339 		append_dsd64(&cur_dsd, sg);
3340 		avail_dsds--;
3341 	}
3342 	ct_iocb->entry_count = entry_count;
3343 }
3344 
3345 /**
3346  * qla82xx_start_scsi() - Send a SCSI command to the ISP
3347  * @sp: command to send to the ISP
3348  *
3349  * Returns non-zero if a failure occurred, else zero.
3350  */
3351 int
3352 qla82xx_start_scsi(srb_t *sp)
3353 {
3354 	int		nseg;
3355 	unsigned long   flags;
3356 	struct scsi_cmnd *cmd;
3357 	uint32_t	*clr_ptr;
3358 	uint32_t	handle;
3359 	uint16_t	cnt;
3360 	uint16_t	req_cnt;
3361 	uint16_t	tot_dsds;
3362 	struct device_reg_82xx __iomem *reg;
3363 	uint32_t dbval;
3364 	__be32 *fcp_dl;
3365 	uint8_t additional_cdb_len;
3366 	struct ct6_dsd *ctx;
3367 	struct scsi_qla_host *vha = sp->vha;
3368 	struct qla_hw_data *ha = vha->hw;
3369 	struct req_que *req = NULL;
3370 	struct rsp_que *rsp = NULL;
3371 
3372 	/* Setup device pointers. */
3373 	reg = &ha->iobase->isp82;
3374 	cmd = GET_CMD_SP(sp);
3375 	req = vha->req;
3376 	rsp = ha->rsp_q_map[0];
3377 
3378 	/* So we know we haven't pci_map'ed anything yet */
3379 	tot_dsds = 0;
3380 
3381 	dbval = 0x04 | (ha->portnum << 5);
3382 
3383 	/* Send marker if required */
3384 	if (vha->marker_needed != 0) {
3385 		if (qla2x00_marker(vha, ha->base_qpair,
3386 			0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
3387 			ql_log(ql_log_warn, vha, 0x300c,
3388 			    "qla2x00_marker failed for cmd=%p.\n", cmd);
3389 			return QLA_FUNCTION_FAILED;
3390 		}
3391 		vha->marker_needed = 0;
3392 	}
3393 
3394 	/* Acquire ring specific lock */
3395 	spin_lock_irqsave(&ha->hardware_lock, flags);
3396 
3397 	handle = qla2xxx_get_next_handle(req);
3398 	if (handle == 0)
3399 		goto queuing_error;
3400 
3401 	/* Map the sg table so we have an accurate count of sg entries needed */
3402 	if (scsi_sg_count(cmd)) {
3403 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3404 		    scsi_sg_count(cmd), cmd->sc_data_direction);
3405 		if (unlikely(!nseg))
3406 			goto queuing_error;
3407 	} else
3408 		nseg = 0;
3409 
3410 	tot_dsds = nseg;
3411 
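	/*
	 * Commands needing more DSDs than ql2xshiftctondsd are sent as
	 * Command Type 6 IOCBs: the FCP_CMND IU goes into a separately
	 * DMA-mapped buffer and the data descriptors are chained through DSD
	 * lists drawn from the driver's global pool.  Smaller commands use
	 * the inline Command Type 7 format in the else-branch further down.
	 */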
3412 	if (tot_dsds > ql2xshiftctondsd) {
3413 		struct cmd_type_6 *cmd_pkt;
3414 		uint16_t more_dsd_lists = 0;
3415 		struct dsd_dma *dsd_ptr;
3416 		uint16_t i;
3417 
3418 		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
3419 		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
3420 			ql_dbg(ql_dbg_io, vha, 0x300d,
3421 			    "Num of DSD lists %d is more than %d for cmd=%p.\n",
3422 			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
3423 			    cmd);
3424 			goto queuing_error;
3425 		}
3426 
3427 		if (more_dsd_lists <= ha->gbl_dsd_avail)
3428 			goto sufficient_dsds;
3429 		else
3430 			more_dsd_lists -= ha->gbl_dsd_avail;
3431 
3432 		for (i = 0; i < more_dsd_lists; i++) {
3433 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
3434 			if (!dsd_ptr) {
3435 				ql_log(ql_log_fatal, vha, 0x300e,
3436 				    "Failed to allocate memory for dsd_dma "
3437 				    "for cmd=%p.\n", cmd);
3438 				goto queuing_error;
3439 			}
3440 
3441 			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
3442 				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
3443 			if (!dsd_ptr->dsd_addr) {
3444 				kfree(dsd_ptr);
3445 				ql_log(ql_log_fatal, vha, 0x300f,
3446 				    "Failed to allocate memory for dsd_addr "
3447 				    "for cmd=%p.\n", cmd);
3448 				goto queuing_error;
3449 			}
3450 			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
3451 			ha->gbl_dsd_avail++;
3452 		}
3453 
3454 sufficient_dsds:
3455 		req_cnt = 1;
3456 
3457 		if (req->cnt < (req_cnt + 2)) {
3458 			cnt = (uint16_t)rd_reg_dword_relaxed(
3459 				&reg->req_q_out[0]);
3460 			if (req->ring_index < cnt)
3461 				req->cnt = cnt - req->ring_index;
3462 			else
3463 				req->cnt = req->length -
3464 					(req->ring_index - cnt);
3465 			if (req->cnt < (req_cnt + 2))
3466 				goto queuing_error;
3467 		}
3468 
3469 		ctx = &sp->u.scmd.ct6_ctx;
3470 
3471 		memset(ctx, 0, sizeof(struct ct6_dsd));
3472 		ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
3473 			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
3474 		if (!ctx->fcp_cmnd) {
3475 			ql_log(ql_log_fatal, vha, 0x3011,
3476 			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
3477 			goto queuing_error;
3478 		}
3479 
3480 		/* Initialize the DSD list and dma handle */
3481 		INIT_LIST_HEAD(&ctx->dsd_list);
3482 		ctx->dsd_use_cnt = 0;
3483 
3484 		if (cmd->cmd_len > 16) {
3485 			additional_cdb_len = cmd->cmd_len - 16;
3486 			if ((cmd->cmd_len % 4) != 0) {
3487 				/* SCSI commands bigger than 16 bytes must be a
3488 				 * multiple of 4
3489 				 */
3490 				ql_log(ql_log_warn, vha, 0x3012,
3491 				    "scsi cmd len %d not multiple of 4 "
3492 				    "for cmd=%p.\n", cmd->cmd_len, cmd);
3493 				goto queuing_error_fcp_cmnd;
3494 			}
3495 			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
3496 		} else {
3497 			additional_cdb_len = 0;
3498 			ctx->fcp_cmnd_len = 12 + 16 + 4;
3499 		}
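		/*
		 * fcp_cmnd_len = 12-byte fixed FCP_CMND header (LUN plus
		 * control fields) + CDB bytes + 4-byte FCP_DL; the FCP_DL
		 * word itself is filled in further below once the transfer
		 * length is known.
		 */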
3500 
3501 		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
3502 		cmd_pkt->handle = make_handle(req->id, handle);
3503 
3504 		/* Zero out remaining portion of packet. */
3505 		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
3506 		clr_ptr = (uint32_t *)cmd_pkt + 2;
3507 		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3508 		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3509 
3510 		/* Set NPORT-ID and LUN number*/
3511 		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3512 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3513 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3514 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3515 		cmd_pkt->vp_index = sp->vha->vp_idx;
3516 
3517 		/* Build IOCB segments */
3518 		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
3519 			goto queuing_error_fcp_cmnd;
3520 
3521 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3522 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
3523 
3524 		/* build FCP_CMND IU */
3525 		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
3526 		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
3527 
3528 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
3529 			ctx->fcp_cmnd->additional_cdb_len |= 1;
3530 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
3531 			ctx->fcp_cmnd->additional_cdb_len |= 2;
3532 
3533 		/* Populate the FCP_PRIO. */
3534 		if (ha->flags.fcp_prio_enabled)
3535 			ctx->fcp_cmnd->task_attribute |=
3536 			    sp->fcport->fcp_prio << 3;
3537 
3538 		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
3539 
3540 		fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
3541 		    additional_cdb_len);
3542 		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
3543 
3544 		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
3545 		put_unaligned_le64(ctx->fcp_cmnd_dma,
3546 				   &cmd_pkt->fcp_cmnd_dseg_address);
3547 
3548 		sp->flags |= SRB_FCP_CMND_DMA_VALID;
3549 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3550 		/* Set total data segment count. */
3551 		cmd_pkt->entry_count = (uint8_t)req_cnt;
3552 		/* Specify response queue number where
3553 		 * completion should happen
3554 		 */
3555 		cmd_pkt->entry_status = (uint8_t) rsp->id;
3556 	} else {
3557 		struct cmd_type_7 *cmd_pkt;
3558 
3559 		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3560 		if (req->cnt < (req_cnt + 2)) {
3561 			cnt = (uint16_t)rd_reg_dword_relaxed(
3562 			    &reg->req_q_out[0]);
3563 			if (req->ring_index < cnt)
3564 				req->cnt = cnt - req->ring_index;
3565 			else
3566 				req->cnt = req->length -
3567 					(req->ring_index - cnt);
3568 		}
3569 		if (req->cnt < (req_cnt + 2))
3570 			goto queuing_error;
3571 
3572 		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
3573 		cmd_pkt->handle = make_handle(req->id, handle);
3574 
3575 		/* Zero out remaining portion of packet. */
3576 		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3577 		clr_ptr = (uint32_t *)cmd_pkt + 2;
3578 		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3579 		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
3580 
3581 		/* Set NPORT-ID and LUN number. */
3582 		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3583 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
3584 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
3585 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
3586 		cmd_pkt->vp_index = sp->vha->vp_idx;
3587 
3588 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
3589 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
3590 		    sizeof(cmd_pkt->lun));
3591 
3592 		/* Populate the FCP_PRIO. */
3593 		if (ha->flags.fcp_prio_enabled)
3594 			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
3595 
3596 		/* Load SCSI command packet. */
3597 		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
3598 		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
3599 
3600 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3601 
3602 		/* Build IOCB segments */
3603 		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
3604 
3605 		/* Set total IOCB entry count. */
3606 		cmd_pkt->entry_count = (uint8_t)req_cnt;
3607 		/* Specify response queue number where
3608 		 * completion should happen.
3609 		 */
3610 		cmd_pkt->entry_status = (uint8_t) rsp->id;
3611 
3612 	}
3613 	/* Build command packet. */
3614 	req->current_outstanding_cmd = handle;
3615 	req->outstanding_cmds[handle] = sp;
3616 	sp->handle = handle;
3617 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3618 	req->cnt -= req_cnt;
3619 	wmb();
3620 
3621 	/* Adjust ring index. */
3622 	req->ring_index++;
3623 	if (req->ring_index == req->length) {
3624 		req->ring_index = 0;
3625 		req->ring_ptr = req->ring;
3626 	} else
3627 		req->ring_ptr++;
3628 
3629 	sp->flags |= SRB_DMA_VALID;
3630 
3631 	/* Set chip new ring index. */
3632 	/* write, read and verify logic */
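	/*
	 * The doorbell value carries the request queue id in bits 15:8 and
	 * the new ring index in bits 31:16.  Without ql2xdbwr the write is
	 * re-issued until the read-back register reflects it.
	 */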
3633 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
3634 	if (ql2xdbwr)
3635 		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3636 	else {
3637 		wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3638 		wmb();
3639 		while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
3640 			wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
3641 			wmb();
3642 		}
3643 	}
3644 
3645 	/* Manage unprocessed RIO/ZIO commands in response queue. */
3646 	if (vha->flags.process_response_queue &&
3647 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3648 		qla24xx_process_response_queue(vha, rsp);
3649 
3650 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3651 	return QLA_SUCCESS;
3652 
3653 queuing_error_fcp_cmnd:
3654 	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3655 queuing_error:
3656 	if (tot_dsds)
3657 		scsi_dma_unmap(cmd);
3658 
3659 	if (sp->u.scmd.crc_ctx) {
3660 		mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
3661 		sp->u.scmd.crc_ctx = NULL;
3662 	}
3663 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3664 
3665 	return QLA_FUNCTION_FAILED;
3666 }
3667 
3668 static void
3669 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3670 {
3671 	struct srb_iocb *aio = &sp->u.iocb_cmd;
3672 	scsi_qla_host_t *vha = sp->vha;
3673 	struct req_que *req = sp->qpair->req;
3674 	srb_t *orig_sp = sp->cmd_sp;
3675 
3676 	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3677 	abt_iocb->entry_type = ABORT_IOCB_TYPE;
3678 	abt_iocb->entry_count = 1;
3679 	abt_iocb->handle = make_handle(req->id, sp->handle);
3680 	if (sp->fcport) {
3681 		abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3682 		abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3683 		abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3684 		abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3685 	}
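	/* handle_to_abort identifies the command being aborted by its
	 * request queue number and IOCB handle.
	 */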
3686 	abt_iocb->handle_to_abort =
3687 		make_handle(le16_to_cpu(aio->u.abt.req_que_no),
3688 			    aio->u.abt.cmd_hndl);
3689 	abt_iocb->vp_index = vha->vp_idx;
3690 	abt_iocb->req_que_no = aio->u.abt.req_que_no;
3691 
3692 	/* need to pass original sp */
3693 	if (orig_sp)
3694 		qla_nvme_abort_set_option(abt_iocb, orig_sp);
3695 
3696 	/* Send the command to the firmware */
3697 	wmb();
3698 }
3699 
3700 static void
3701 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3702 {
3703 	int i, sz;
3704 
3705 	mbx->entry_type = MBX_IOCB_TYPE;
3706 	mbx->handle = sp->handle;
3707 	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3708 
3709 	for (i = 0; i < sz; i++)
3710 		mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i];
3711 }
3712 
3713 static void
3714 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3715 {
3716 	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3717 	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3718 	ct_pkt->handle = sp->handle;
3719 }
3720 
3721 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3722 	struct nack_to_isp *nack)
3723 {
3724 	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3725 
3726 	nack->entry_type = NOTIFY_ACK_TYPE;
3727 	nack->entry_count = 1;
3728 	nack->ox_id = ntfy->ox_id;
3729 
3730 	nack->u.isp24.handle = sp->handle;
3731 	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3732 	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3733 		nack->u.isp24.flags = ntfy->u.isp24.flags &
3734 			cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB);
3735 	}
3736 	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3737 	nack->u.isp24.status = ntfy->u.isp24.status;
3738 	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3739 	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3740 	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3741 	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3742 	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3743 	nack->u.isp24.srr_flags = 0;
3744 	nack->u.isp24.srr_reject_code = 0;
3745 	nack->u.isp24.srr_reject_code_expl = 0;
3746 	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3747 
3748 	if (ntfy->u.isp24.status_subcode == ELS_PLOGI &&
3749 	    (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP) &&
3750 	    sp->vha->hw->flags.edif_enabled) {
3751 		ql_dbg(ql_dbg_disc, sp->vha, 0x3074,
3752 		    "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %06x\n",
3753 		    sp->name, sp->handle, sp->fcport->loop_id,
3754 		    sp->fcport->d_id.b24);
3755 		nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP);
3756 	}
3757 }
3758 
3759 /*
3760  * Build NVME LS request
3761  */
3762 static void
3763 qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
3764 {
3765 	struct srb_iocb *nvme;
3766 
3767 	nvme = &sp->u.iocb_cmd;
3768 	cmd_pkt->entry_type = PT_LS4_REQUEST;
3769 	cmd_pkt->entry_count = 1;
3770 	cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
3771 
3772 	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
3773 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3774 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
3775 
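	/* dsd[0] describes the LS request payload to transmit; dsd[1] the
	 * buffer that receives the LS response.
	 */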
3776 	cmd_pkt->tx_dseg_count = cpu_to_le16(1);
3777 	cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
3778 	cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len);
3779 	put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address);
3780 
3781 	cmd_pkt->rx_dseg_count = cpu_to_le16(1);
3782 	cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
3783 	cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len);
3784 	put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address);
3785 }
3786 
3787 static void
3788 qla25xx_ctrlvp_iocb(srb_t *sp, struct vp_ctrl_entry_24xx *vce)
3789 {
3790 	int map, pos;
3791 
3792 	vce->entry_type = VP_CTRL_IOCB_TYPE;
3793 	vce->handle = sp->handle;
3794 	vce->entry_count = 1;
3795 	vce->command = cpu_to_le16(sp->u.iocb_cmd.u.ctrlvp.cmd);
3796 	vce->vp_count = cpu_to_le16(1);
3797 
3798 	/*
3799 	 * The index map in firmware starts at 1, so decrement the index;
3800 	 * this is OK because index 0 is never used.
3801 	 */
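	/* map selects the byte and pos the bit within vp_idx_map. */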
3802 	map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8;
3803 	pos = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) & 7;
3804 	vce->vp_idx_map[map] |= 1 << pos;
3805 }
3806 
3807 static void
3808 qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
3809 {
3810 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
3811 	logio->control_flags =
3812 	    cpu_to_le16(LCF_COMMAND_PRLO|LCF_IMPL_PRLO);
3813 
3814 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3815 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
3816 	logio->port_id[1] = sp->fcport->d_id.b.area;
3817 	logio->port_id[2] = sp->fcport->d_id.b.domain;
3818 	logio->vp_index = sp->fcport->vha->vp_idx;
3819 }
3820 
3821 static int qla_get_iocbs_resource(struct srb *sp)
3822 {
3823 	bool get_exch;
3824 	bool push_it_through = false;
3825 
3826 	if (!ql2xenforce_iocb_limit) {
3827 		sp->iores.res_type = RESOURCE_NONE;
3828 		return 0;
3829 	}
3830 	sp->iores.res_type = RESOURCE_NONE;
3831 
3832 	switch (sp->type) {
3833 	case SRB_TM_CMD:
3834 	case SRB_PRLI_CMD:
3835 	case SRB_ADISC_CMD:
3836 		push_it_through = true;
3837 		fallthrough;
3838 	case SRB_LOGIN_CMD:
3839 	case SRB_ELS_CMD_RPT:
3840 	case SRB_ELS_CMD_HST:
3841 	case SRB_ELS_CMD_HST_NOLOGIN:
3842 	case SRB_CT_CMD:
3843 	case SRB_NVME_LS:
3844 	case SRB_ELS_DCMD:
3845 		get_exch = true;
3846 		break;
3847 
3848 	case SRB_FXIOCB_DCMD:
3849 	case SRB_FXIOCB_BCMD:
3850 		sp->iores.res_type = RESOURCE_NONE;
3851 		return 0;
3852 
3853 	case SRB_SA_UPDATE:
3854 	case SRB_SA_REPLACE:
3855 	case SRB_MB_IOCB:
3856 	case SRB_ABT_CMD:
3857 	case SRB_NACK_PLOGI:
3858 	case SRB_NACK_PRLI:
3859 	case SRB_NACK_LOGO:
3860 	case SRB_LOGOUT_CMD:
3861 	case SRB_CTRL_VP:
3862 	case SRB_MARKER:
3863 	default:
3864 		push_it_through = true;
3865 		get_exch = false;
3866 	}
3867 
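	/*
	 * Every request is charged one IOCB; exchange-consuming commands
	 * (logins, ELS, CT, NVMe LS) are charged an exchange as well.
	 * RESOURCE_FORCE presumably lets qla_get_fw_resources() admit the
	 * request even when the configured limits are exhausted.
	 */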
3868 	sp->iores.res_type |= RESOURCE_IOCB;
3869 	sp->iores.iocb_cnt = 1;
3870 	if (get_exch) {
3871 		sp->iores.res_type |= RESOURCE_EXCH;
3872 		sp->iores.exch_cnt = 1;
3873 	}
3874 	if (push_it_through)
3875 		sp->iores.res_type |= RESOURCE_FORCE;
3876 
3877 	return qla_get_fw_resources(sp->qpair, &sp->iores);
3878 }
3879 
3880 static void
3881 qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk)
3882 {
3883 	mrk->entry_type = MARKER_TYPE;
3884 	mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier;
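	/* For MK_SYNC_ALL the marker applies to all targets and LUNs, so no
	 * nport handle, LUN or vp_index needs to be filled in.
	 */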
3885 	if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) {
3886 		mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id);
3887 		int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun);
3888 		host_to_fcp_swap(mrk->lun, sizeof(mrk->lun));
3889 		mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index;
3890 	}
3891 }
3892 
3893 int
3894 qla2x00_start_sp(srb_t *sp)
3895 {
3896 	int rval = QLA_SUCCESS;
3897 	scsi_qla_host_t *vha = sp->vha;
3898 	struct qla_hw_data *ha = vha->hw;
3899 	struct qla_qpair *qp = sp->qpair;
3900 	void *pkt;
3901 	unsigned long flags;
3902 
3903 	if (vha->hw->flags.eeh_busy)
3904 		return -EIO;
3905 
3906 	spin_lock_irqsave(qp->qp_lock_ptr, flags);
3907 	rval = qla_get_iocbs_resource(sp);
3908 	if (rval) {
3909 		spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
3910 		return -EAGAIN;
3911 	}
3912 
3913 	pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
3914 	if (!pkt) {
3915 		rval = -EAGAIN;
3916 		ql_log(ql_log_warn, vha, 0x700c,
3917 		    "qla2x00_alloc_iocbs failed.\n");
3918 		goto done;
3919 	}
3920 
3921 	switch (sp->type) {
3922 	case SRB_LOGIN_CMD:
3923 		IS_FWI2_CAPABLE(ha) ?
3924 		    qla24xx_login_iocb(sp, pkt) :
3925 		    qla2x00_login_iocb(sp, pkt);
3926 		break;
3927 	case SRB_PRLI_CMD:
3928 		qla24xx_prli_iocb(sp, pkt);
3929 		break;
3930 	case SRB_LOGOUT_CMD:
3931 		IS_FWI2_CAPABLE(ha) ?
3932 		    qla24xx_logout_iocb(sp, pkt) :
3933 		    qla2x00_logout_iocb(sp, pkt);
3934 		break;
3935 	case SRB_ELS_CMD_RPT:
3936 	case SRB_ELS_CMD_HST:
3937 		qla24xx_els_iocb(sp, pkt);
3938 		break;
3939 	case SRB_ELS_CMD_HST_NOLOGIN:
3940 		qla_els_pt_iocb(sp->vha, pkt,  &sp->u.bsg_cmd.u.els_arg);
3941 		((struct els_entry_24xx *)pkt)->handle = sp->handle;
3942 		break;
3943 	case SRB_CT_CMD:
3944 		IS_FWI2_CAPABLE(ha) ?
3945 		    qla24xx_ct_iocb(sp, pkt) :
3946 		    qla2x00_ct_iocb(sp, pkt);
3947 		break;
3948 	case SRB_ADISC_CMD:
3949 		IS_FWI2_CAPABLE(ha) ?
3950 		    qla24xx_adisc_iocb(sp, pkt) :
3951 		    qla2x00_adisc_iocb(sp, pkt);
3952 		break;
3953 	case SRB_TM_CMD:
3954 		IS_QLAFX00(ha) ?
3955 		    qlafx00_tm_iocb(sp, pkt) :
3956 		    qla24xx_tm_iocb(sp, pkt);
3957 		break;
3958 	case SRB_FXIOCB_DCMD:
3959 	case SRB_FXIOCB_BCMD:
3960 		qlafx00_fxdisc_iocb(sp, pkt);
3961 		break;
3962 	case SRB_NVME_LS:
3963 		qla_nvme_ls(sp, pkt);
3964 		break;
3965 	case SRB_ABT_CMD:
3966 		IS_QLAFX00(ha) ?
3967 			qlafx00_abort_iocb(sp, pkt) :
3968 			qla24xx_abort_iocb(sp, pkt);
3969 		break;
3970 	case SRB_ELS_DCMD:
3971 		qla24xx_els_logo_iocb(sp, pkt);
3972 		break;
3973 	case SRB_CT_PTHRU_CMD:
3974 		qla2x00_ctpthru_cmd_iocb(sp, pkt);
3975 		break;
3976 	case SRB_MB_IOCB:
3977 		qla2x00_mb_iocb(sp, pkt);
3978 		break;
3979 	case SRB_NACK_PLOGI:
3980 	case SRB_NACK_PRLI:
3981 	case SRB_NACK_LOGO:
3982 		qla2x00_send_notify_ack_iocb(sp, pkt);
3983 		break;
3984 	case SRB_CTRL_VP:
3985 		qla25xx_ctrlvp_iocb(sp, pkt);
3986 		break;
3987 	case SRB_PRLO_CMD:
3988 		qla24xx_prlo_iocb(sp, pkt);
3989 		break;
3990 	case SRB_SA_UPDATE:
3991 		qla24xx_sa_update_iocb(sp, pkt);
3992 		break;
3993 	case SRB_SA_REPLACE:
3994 		qla24xx_sa_replace_iocb(sp, pkt);
3995 		break;
3996 	case SRB_MARKER:
3997 		qla_marker_iocb(sp, pkt);
3998 		break;
3999 	default:
4000 		break;
4001 	}
4002 
4003 	if (sp->start_timer) {
4004 		/* ref: TMR timer ref
4005 		 * This code must sit just before the start_iocbs call; it
4006 		 * ensures that the caller does not have to do a kref_put
4007 		 * even on failure.
4008 		 */
4009 		kref_get(&sp->cmd_kref);
4010 		add_timer(&sp->u.iocb_cmd.timer);
4011 	}
4012 
4013 	wmb();
4014 	qla2x00_start_iocbs(vha, qp->req);
4015 done:
4016 	if (rval)
4017 		qla_put_fw_resources(sp->qpair, &sp->iores);
4018 	spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
4019 	return rval;
4020 }
4021 
4022 static void
4023 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
4024 				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
4025 {
4026 	uint16_t avail_dsds;
4027 	struct dsd64 *cur_dsd;
4028 	uint32_t req_data_len = 0;
4029 	uint32_t rsp_data_len = 0;
4030 	struct scatterlist *sg;
4031 	int index;
4032 	int entry_count = 1;
4033 	struct bsg_job *bsg_job = sp->u.bsg_job;
4034 
4035 	/* Update entry type to indicate a bidirectional command. */
4036 	put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type);
4037 
4038 	/* Set the transfer direction; for a bidirectional command both the
4039 	 * read and write flags are set.  Also set the BD_WRAP_BACK flag; the
4040 	 * firmware takes care of assigning DID=SID for outgoing packets.
4041 	 */
4042 	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
4043 	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
4044 	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
4045 							BD_WRAP_BACK);
4046 
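	/*
	 * With BD_WRAP_BACK the outgoing data is looped back, so the read
	 * and write byte counts are both taken from the request payload.
	 */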
4047 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
4048 	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
4049 	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
4050 	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
4051 
4052 	vha->bidi_stats.transfer_bytes += req_data_len;
4053 	vha->bidi_stats.io_count++;
4054 
4055 	vha->qla_stats.output_bytes += req_data_len;
4056 	vha->qla_stats.output_requests++;
4057 
4058 	/* Only one DSD is available in the bidirectional IOCB itself; the
4059 	 * remaining DSDs are bundled into continuation IOCBs.
4060 	 */
4061 	avail_dsds = 1;
4062 	cur_dsd = &cmd_pkt->fcp_dsd;
4063 
4064 	index = 0;
4065 
4066 	for_each_sg(bsg_job->request_payload.sg_list, sg,
4067 				bsg_job->request_payload.sg_cnt, index) {
4068 		cont_a64_entry_t *cont_pkt;
4069 
4070 		/* Allocate additional continuation packets */
4071 		if (avail_dsds == 0) {
4072 			/* A Continuation Type 1 IOCB can accommodate
4073 			 * 5 DSDs.
4074 			 */
4075 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4076 			cur_dsd = cont_pkt->dsd;
4077 			avail_dsds = 5;
4078 			entry_count++;
4079 		}
4080 		append_dsd64(&cur_dsd, sg);
4081 		avail_dsds--;
4082 	}
4083 	/* The read-request DSDs always go to a continuation IOCB and
4084 	 * follow the write DSDs.  If there is room in the current IOCB
4085 	 * they are added to it; otherwise a new continuation IOCB is
4086 	 * allocated.
4087 	 */
4088 	for_each_sg(bsg_job->reply_payload.sg_list, sg,
4089 				bsg_job->reply_payload.sg_cnt, index) {
4090 		cont_a64_entry_t *cont_pkt;
4091 
4092 		/* Allocate additional continuation packets */
4093 		if (avail_dsds == 0) {
4094 			/* A Continuation Type 1 IOCB can accommodate
4095 			 * 5 DSDs.
4096 			 */
4097 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
4098 			cur_dsd = cont_pkt->dsd;
4099 			avail_dsds = 5;
4100 			entry_count++;
4101 		}
4102 		append_dsd64(&cur_dsd, sg);
4103 		avail_dsds--;
4104 	}
4105 	/* This value must match the number of IOCBs required for this command. */
4106 	cmd_pkt->entry_count = entry_count;
4107 }
4108 
4109 int
4110 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
4111 {
4112 
4113 	struct qla_hw_data *ha = vha->hw;
4114 	unsigned long flags;
4115 	uint32_t handle;
4116 	uint16_t req_cnt;
4117 	uint16_t cnt;
4118 	uint32_t *clr_ptr;
4119 	struct cmd_bidir *cmd_pkt = NULL;
4120 	struct rsp_que *rsp;
4121 	struct req_que *req;
4122 	int rval = EXT_STATUS_OK;
4123 
4124 	rval = QLA_SUCCESS;
4125 
4126 	rsp = ha->rsp_q_map[0];
4127 	req = vha->req;
4128 
4129 	/* Send marker if required */
4130 	if (vha->marker_needed != 0) {
4131 		if (qla2x00_marker(vha, ha->base_qpair,
4132 			0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
4133 			return EXT_STATUS_MAILBOX;
4134 		vha->marker_needed = 0;
4135 	}
4136 
4137 	/* Acquire ring specific lock */
4138 	spin_lock_irqsave(&ha->hardware_lock, flags);
4139 
4140 	handle = qla2xxx_get_next_handle(req);
4141 	if (handle == 0) {
4142 		rval = EXT_STATUS_BUSY;
4143 		goto queuing_error;
4144 	}
4145 
4146 	/* Calculate the number of IOCBs required. */
4147 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
4148 
4149 	/* Check for room on request queue. */
4150 	if (req->cnt < req_cnt + 2) {
4151 		if (IS_SHADOW_REG_CAPABLE(ha)) {
4152 			cnt = *req->out_ptr;
4153 		} else {
4154 			cnt = rd_reg_dword_relaxed(req->req_q_out);
4155 			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
4156 				goto queuing_error;
4157 		}
4158 
4159 		if  (req->ring_index < cnt)
4160 			req->cnt = cnt - req->ring_index;
4161 		else
4162 			req->cnt = req->length -
4163 				(req->ring_index - cnt);
4164 	}
4165 	if (req->cnt < req_cnt + 2) {
4166 		rval = EXT_STATUS_BUSY;
4167 		goto queuing_error;
4168 	}
4169 
4170 	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
4171 	cmd_pkt->handle = make_handle(req->id, handle);
4172 
4173 	/* Zero out remaining portion of packet. */
4174 	/* Tagged queuing modifier -- default is TSK_SIMPLE (0). */
4175 	clr_ptr = (uint32_t *)cmd_pkt + 2;
4176 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
4177 
4178 	/* Set NPORT-ID (of the vha). */
4179 	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
4180 	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
4181 	cmd_pkt->port_id[1] = vha->d_id.b.area;
4182 	cmd_pkt->port_id[2] = vha->d_id.b.domain;
4183 
4184 	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
4185 	cmd_pkt->entry_status = (uint8_t) rsp->id;
4186 	/* Build command packet. */
4187 	req->current_outstanding_cmd = handle;
4188 	req->outstanding_cmds[handle] = sp;
4189 	sp->handle = handle;
4190 	req->cnt -= req_cnt;
4191 
4192 	/* Send the command to the firmware */
4193 	wmb();
4194 	qla2x00_start_iocbs(vha, req);
4195 queuing_error:
4196 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
4197 
4198 	return rval;
4199 }
4200