xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_iocb.c (revision 97da55fc)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12 
13 #include <scsi/scsi_tcq.h>
14 
15 static void qla25xx_set_que(srb_t *, struct rsp_que **);
16 /**
17  * qla2x00_get_cmd_direction() - Determine control_flags data direction.
18  * @sp: SRB command to process
19  *
20  * Returns the proper CF_* direction based on the command's data direction.
21  */
22 static inline uint16_t
23 qla2x00_get_cmd_direction(srb_t *sp)
24 {
25 	uint16_t cflags;
26 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27 	struct scsi_qla_host *vha = sp->fcport->vha;
28 
29 	cflags = 0;
30 
31 	/* Set transfer direction */
32 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
33 		cflags = CF_WRITE;
34 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
35 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 		cflags = CF_READ;
37 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38 	}
39 	return (cflags);
40 }
41 
42 /**
43  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
44  * Continuation Type 0 IOCBs to allocate.
45  *
46  * @dsds: number of data segment descriptors needed
47  *
48  * Returns the number of IOCB entries needed to store @dsds.
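 * A Command Type 2 IOCB carries 3 DSDs and each Continuation Type 0 IOCB
 * carries 7 more, so for example @dsds = 10 needs 1 + ceil((10 - 3) / 7) = 2
 * entries.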
49  */
50 uint16_t
51 qla2x00_calc_iocbs_32(uint16_t dsds)
52 {
53 	uint16_t iocbs;
54 
55 	iocbs = 1;
56 	if (dsds > 3) {
57 		iocbs += (dsds - 3) / 7;
58 		if ((dsds - 3) % 7)
59 			iocbs++;
60 	}
61 	return (iocbs);
62 }
63 
64 /**
65  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
66  * Continuation Type 1 IOCBs to allocate.
67  *
68  * @dsds: number of data segment descriptors needed
69  *
70  * Returns the number of IOCB entries needed to store @dsds.
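 * A Command Type 3 IOCB carries 2 DSDs and each Continuation Type 1 IOCB
 * carries 5 more, so for example @dsds = 12 needs 1 + ceil((12 - 2) / 5) = 3
 * entries.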
71  */
72 uint16_t
73 qla2x00_calc_iocbs_64(uint16_t dsds)
74 {
75 	uint16_t iocbs;
76 
77 	iocbs = 1;
78 	if (dsds > 2) {
79 		iocbs += (dsds - 2) / 5;
80 		if ((dsds - 2) % 5)
81 			iocbs++;
82 	}
83 	return (iocbs);
84 }
85 
86 /**
87  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
88  * @vha: HA context
89  *
90  * Returns a pointer to the Continuation Type 0 IOCB packet.
91  */
92 static inline cont_entry_t *
93 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
94 {
95 	cont_entry_t *cont_pkt;
96 	struct req_que *req = vha->req;
97 	/* Adjust ring index. */
98 	req->ring_index++;
99 	if (req->ring_index == req->length) {
100 		req->ring_index = 0;
101 		req->ring_ptr = req->ring;
102 	} else {
103 		req->ring_ptr++;
104 	}
105 
106 	cont_pkt = (cont_entry_t *)req->ring_ptr;
107 
108 	/* Load packet defaults. */
109 	*((uint32_t *)(&cont_pkt->entry_type)) =
110 	    __constant_cpu_to_le32(CONTINUE_TYPE);
111 
112 	return (cont_pkt);
113 }
114 
115 /**
116  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117  * @vha: HA context
 * @req: request queue
118  *
119  * Returns a pointer to the continuation type 1 IOCB packet.
120  */
121 static inline cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
123 {
124 	cont_a64_entry_t *cont_pkt;
125 
126 	/* Adjust ring index. */
127 	req->ring_index++;
128 	if (req->ring_index == req->length) {
129 		req->ring_index = 0;
130 		req->ring_ptr = req->ring;
131 	} else {
132 		req->ring_ptr++;
133 	}
134 
135 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136 
137 	/* Load packet defaults. */
138 	*((uint32_t *)(&cont_pkt->entry_type)) =
139 	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
140 
141 	return (cont_pkt);
142 }
143 
144 static inline int
145 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
146 {
147 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
148 	uint8_t	guard = scsi_host_get_guard(cmd->device->host);
149 
150 	/* We always use DIF Bundling for best performance */
151 	*fw_prot_opts = 0;
152 
153 	/* Translate SCSI opcode to a protection opcode */
154 	switch (scsi_get_prot_op(cmd)) {
155 	case SCSI_PROT_READ_STRIP:
156 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
157 		break;
158 	case SCSI_PROT_WRITE_INSERT:
159 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
160 		break;
161 	case SCSI_PROT_READ_INSERT:
162 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
163 		break;
164 	case SCSI_PROT_WRITE_STRIP:
165 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
166 		break;
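	/*
	 * Pass-through operations keep the protection data end to end: use
	 * the IP-checksum DIF mode when the host's DIX guard type is IP
	 * checksum, otherwise pass the standard CRC guard through unchanged.
	 */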
167 	case SCSI_PROT_READ_PASS:
168 	case SCSI_PROT_WRITE_PASS:
169 		if (guard & SHOST_DIX_GUARD_IP)
170 			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
171 		else
172 			*fw_prot_opts |= PO_MODE_DIF_PASS;
173 		break;
174 	default:	/* Normal Request */
175 		*fw_prot_opts |= PO_MODE_DIF_PASS;
176 		break;
177 	}
178 
179 	return scsi_prot_sg_count(cmd);
180 }
181 
182 /*
183  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
184  * capable IOCB types.
185  *
186  * @sp: SRB command to process
187  * @cmd_pkt: Command type 2 IOCB
188  * @tot_dsds: Total number of segments to transfer
189  */
190 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
191     uint16_t tot_dsds)
192 {
193 	uint16_t	avail_dsds;
194 	uint32_t	*cur_dsd;
195 	scsi_qla_host_t	*vha;
196 	struct scsi_cmnd *cmd;
197 	struct scatterlist *sg;
198 	int i;
199 
200 	cmd = GET_CMD_SP(sp);
201 
202 	/* Update entry type to indicate Command Type 2 IOCB */
203 	*((uint32_t *)(&cmd_pkt->entry_type)) =
204 	    __constant_cpu_to_le32(COMMAND_TYPE);
205 
206 	/* No data transfer */
207 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
208 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
209 		return;
210 	}
211 
212 	vha = sp->fcport->vha;
213 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
214 
215 	/* Three DSDs are available in the Command Type 2 IOCB */
216 	avail_dsds = 3;
217 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
218 
219 	/* Load data segments */
220 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
221 		cont_entry_t *cont_pkt;
222 
223 		/* Allocate additional continuation packets? */
224 		if (avail_dsds == 0) {
225 			/*
226 			 * Seven DSDs are available in the Continuation
227 			 * Type 0 IOCB.
228 			 */
229 			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
230 			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
231 			avail_dsds = 7;
232 		}
233 
234 		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
235 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
236 		avail_dsds--;
237 	}
238 }
239 
240 /**
241  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
242  * capable IOCB types.
243  *
244  * @sp: SRB command to process
245  * @cmd_pkt: Command type 3 IOCB
246  * @tot_dsds: Total number of segments to transfer
247  */
248 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
249     uint16_t tot_dsds)
250 {
251 	uint16_t	avail_dsds;
252 	uint32_t	*cur_dsd;
253 	scsi_qla_host_t	*vha;
254 	struct scsi_cmnd *cmd;
255 	struct scatterlist *sg;
256 	int i;
257 
258 	cmd = GET_CMD_SP(sp);
259 
260 	/* Update entry type to indicate Command Type 3 IOCB */
261 	*((uint32_t *)(&cmd_pkt->entry_type)) =
262 	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
263 
264 	/* No data transfer */
265 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
266 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
267 		return;
268 	}
269 
270 	vha = sp->fcport->vha;
271 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
272 
273 	/* Two DSDs are available in the Command Type 3 IOCB */
274 	avail_dsds = 2;
275 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
276 
277 	/* Load data segments */
278 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
279 		dma_addr_t	sle_dma;
280 		cont_a64_entry_t *cont_pkt;
281 
282 		/* Allocate additional continuation packets? */
283 		if (avail_dsds == 0) {
284 			/*
285 			 * Five DSDs are available in the Continuation
286 			 * Type 1 IOCB.
287 			 */
288 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
289 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
290 			avail_dsds = 5;
291 		}
292 
293 		sle_dma = sg_dma_address(sg);
294 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
295 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
296 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
297 		avail_dsds--;
298 	}
299 }
300 
301 /**
302  * qla2x00_start_scsi() - Send a SCSI command to the ISP
303  * @sp: command to send to the ISP
304  *
305  * Returns non-zero if a failure occurred, else zero.
306  */
307 int
308 qla2x00_start_scsi(srb_t *sp)
309 {
310 	int		ret, nseg;
311 	unsigned long   flags;
312 	scsi_qla_host_t	*vha;
313 	struct scsi_cmnd *cmd;
314 	uint32_t	*clr_ptr;
315 	uint32_t        index;
316 	uint32_t	handle;
317 	cmd_entry_t	*cmd_pkt;
318 	uint16_t	cnt;
319 	uint16_t	req_cnt;
320 	uint16_t	tot_dsds;
321 	struct device_reg_2xxx __iomem *reg;
322 	struct qla_hw_data *ha;
323 	struct req_que *req;
324 	struct rsp_que *rsp;
325 	char		tag[2];
326 
327 	/* Setup device pointers. */
328 	ret = 0;
329 	vha = sp->fcport->vha;
330 	ha = vha->hw;
331 	reg = &ha->iobase->isp;
332 	cmd = GET_CMD_SP(sp);
333 	req = ha->req_q_map[0];
334 	rsp = ha->rsp_q_map[0];
335 	/* So we know we haven't pci_map'ed anything yet */
336 	tot_dsds = 0;
337 
338 	/* Send marker if required */
339 	if (vha->marker_needed != 0) {
340 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
341 		    QLA_SUCCESS) {
342 			return (QLA_FUNCTION_FAILED);
343 		}
344 		vha->marker_needed = 0;
345 	}
346 
347 	/* Acquire ring specific lock */
348 	spin_lock_irqsave(&ha->hardware_lock, flags);
349 
350 	/* Check for room in outstanding command list. */
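	/*
	 * Find a free handle by scanning the outstanding-command array
	 * circularly, starting just after the most recently issued handle;
	 * slot 0 is never used, so a zero handle never names a live command.
	 */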
351 	handle = req->current_outstanding_cmd;
352 	for (index = 1; index < req->num_outstanding_cmds; index++) {
353 		handle++;
354 		if (handle == req->num_outstanding_cmds)
355 			handle = 1;
356 		if (!req->outstanding_cmds[handle])
357 			break;
358 	}
359 	if (index == req->num_outstanding_cmds)
360 		goto queuing_error;
361 
362 	/* Map the sg table so we have an accurate count of sg entries needed */
363 	if (scsi_sg_count(cmd)) {
364 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
365 		    scsi_sg_count(cmd), cmd->sc_data_direction);
366 		if (unlikely(!nseg))
367 			goto queuing_error;
368 	} else
369 		nseg = 0;
370 
371 	tot_dsds = nseg;
372 
373 	/* Calculate the number of request entries needed. */
374 	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
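	/*
	 * req->cnt caches the number of free request-ring entries.  When it
	 * looks insufficient, re-read the firmware's out-pointer and compute
	 * the real headroom; the "+ 2" keeps a small reserve so the ring is
	 * never driven completely full.
	 */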
375 	if (req->cnt < (req_cnt + 2)) {
376 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
377 		if (req->ring_index < cnt)
378 			req->cnt = cnt - req->ring_index;
379 		else
380 			req->cnt = req->length -
381 			    (req->ring_index - cnt);
382 		/* If still no head room then bail out */
383 		if (req->cnt < (req_cnt + 2))
384 			goto queuing_error;
385 	}
386 
387 	/* Build command packet */
388 	req->current_outstanding_cmd = handle;
389 	req->outstanding_cmds[handle] = sp;
390 	sp->handle = handle;
391 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
392 	req->cnt -= req_cnt;
393 
394 	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
395 	cmd_pkt->handle = handle;
396 	/* Zero out remaining portion of packet. */
397 	clr_ptr = (uint32_t *)cmd_pkt + 2;
398 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
399 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
400 
401 	/* Set target ID and LUN number*/
402 	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
403 	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
404 
405 	/* Update tagged queuing modifier */
406 	if (scsi_populate_tag_msg(cmd, tag)) {
407 		switch (tag[0]) {
408 		case HEAD_OF_QUEUE_TAG:
409 			cmd_pkt->control_flags =
410 			    __constant_cpu_to_le16(CF_HEAD_TAG);
411 			break;
412 		case ORDERED_QUEUE_TAG:
413 			cmd_pkt->control_flags =
414 			    __constant_cpu_to_le16(CF_ORDERED_TAG);
415 			break;
416 		default:
417 			cmd_pkt->control_flags =
418 			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
419 			break;
420 		}
421 	}
422 
423 	/* Load SCSI command packet. */
424 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
425 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
426 
427 	/* Build IOCB segments */
428 	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
429 
430 	/* Set total data segment count. */
431 	cmd_pkt->entry_count = (uint8_t)req_cnt;
432 	wmb();
433 
434 	/* Adjust ring index. */
435 	req->ring_index++;
436 	if (req->ring_index == req->length) {
437 		req->ring_index = 0;
438 		req->ring_ptr = req->ring;
439 	} else
440 		req->ring_ptr++;
441 
442 	sp->flags |= SRB_DMA_VALID;
443 
444 	/* Set chip new ring index. */
445 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
446 	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
447 
448 	/* Manage unprocessed RIO/ZIO commands in response queue. */
449 	if (vha->flags.process_response_queue &&
450 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
451 		qla2x00_process_response_queue(rsp);
452 
453 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
454 	return (QLA_SUCCESS);
455 
456 queuing_error:
457 	if (tot_dsds)
458 		scsi_dma_unmap(cmd);
459 
460 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
461 
462 	return (QLA_FUNCTION_FAILED);
463 }
464 
465 /**
466  * qla2x00_start_iocbs() - Execute the IOCB command
 * @vha: HA context
 * @req: request queue holding the IOCB(s) to execute
467  */
468 void
469 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
470 {
471 	struct qla_hw_data *ha = vha->hw;
472 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
473 
474 	if (IS_QLA82XX(ha)) {
475 		qla82xx_start_iocbs(vha);
476 	} else {
477 		/* Adjust ring index. */
478 		req->ring_index++;
479 		if (req->ring_index == req->length) {
480 			req->ring_index = 0;
481 			req->ring_ptr = req->ring;
482 		} else
483 			req->ring_ptr++;
484 
485 		/* Set chip new ring index. */
486 		if (ha->mqenable || IS_QLA83XX(ha)) {
487 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
488 			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
489 		} else if (IS_FWI2_CAPABLE(ha)) {
490 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
491 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
492 		} else {
493 			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
494 				req->ring_index);
495 			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
496 		}
497 	}
498 }
499 
500 /**
501  * qla2x00_marker() - Send a marker IOCB to the firmware.
502  * @vha: HA context
 * @req: request queue
 * @rsp: response queue
503  * @loop_id: loop ID
504  * @lun: LUN
505  * @type: marker modifier
506  *
507  * Can be called from both normal and interrupt context.
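 * A marker resynchronizes the firmware's command stream (for example after
 * reset or loop-event handling has set vha->marker_needed) before new SCSI
 * commands are queued.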
508  *
509  * Returns non-zero if a failure occurred, else zero.
510  */
511 static int
512 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
513 			struct rsp_que *rsp, uint16_t loop_id,
514 			uint16_t lun, uint8_t type)
515 {
516 	mrk_entry_t *mrk;
517 	struct mrk_entry_24xx *mrk24;
518 	struct qla_hw_data *ha = vha->hw;
519 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
520 
521 	mrk24 = NULL;
522 	req = ha->req_q_map[0];
523 	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
524 	if (mrk == NULL) {
525 		ql_log(ql_log_warn, base_vha, 0x3026,
526 		    "Failed to allocate Marker IOCB.\n");
527 
528 		return (QLA_FUNCTION_FAILED);
529 	}
530 
531 	mrk->entry_type = MARKER_TYPE;
532 	mrk->modifier = type;
533 	if (type != MK_SYNC_ALL) {
534 		if (IS_FWI2_CAPABLE(ha)) {
535 			mrk24 = (struct mrk_entry_24xx *) mrk;
536 			mrk24->nport_handle = cpu_to_le16(loop_id);
537 			mrk24->lun[1] = LSB(lun);
538 			mrk24->lun[2] = MSB(lun);
539 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
540 			mrk24->vp_index = vha->vp_idx;
541 			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
542 		} else {
543 			SET_TARGET_ID(ha, mrk->target, loop_id);
544 			mrk->lun = cpu_to_le16(lun);
545 		}
546 	}
547 	wmb();
548 
549 	qla2x00_start_iocbs(vha, req);
550 
551 	return (QLA_SUCCESS);
552 }
553 
554 int
555 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
556 		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
557 		uint8_t type)
558 {
559 	int ret;
560 	unsigned long flags = 0;
561 
562 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
563 	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
564 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
565 
566 	return (ret);
567 }
568 
569 /*
570  * qla2x00_issue_marker
571  *
572  * Issue marker
573  * Caller CAN have hardware lock held as specified by ha_locked parameter.
574  * Might release it, then reacquire.
575  */
576 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
577 {
578 	if (ha_locked) {
579 		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
580 					MK_SYNC_ALL) != QLA_SUCCESS)
581 			return QLA_FUNCTION_FAILED;
582 	} else {
583 		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
584 					MK_SYNC_ALL) != QLA_SUCCESS)
585 			return QLA_FUNCTION_FAILED;
586 	}
587 	vha->marker_needed = 0;
588 
589 	return QLA_SUCCESS;
590 }
591 
592 /**
593  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
594  * Continuation Type 1 IOCBs to allocate.
595  *
596  * @dsds: number of data segment descriptors needed
597  *
598  * Returns the number of IOCB entries needed to store @dsds.
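 * A Command Type 7 IOCB carries a single DSD and each Continuation Type 1
 * IOCB carries 5 more, so for example @dsds = 11 needs
 * 1 + ceil((11 - 1) / 5) = 3 entries.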
599  */
600 inline uint16_t
601 qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
602 {
603 	uint16_t iocbs;
604 
605 	iocbs = 1;
606 	if (dsds > 1) {
607 		iocbs += (dsds - 1) / 5;
608 		if ((dsds - 1) % 5)
609 			iocbs++;
610 	}
611 	return iocbs;
612 }
613 
614 static inline int
615 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
616 	uint16_t tot_dsds)
617 {
618 	uint32_t *cur_dsd = NULL;
619 	scsi_qla_host_t	*vha;
620 	struct qla_hw_data *ha;
621 	struct scsi_cmnd *cmd;
622 	struct	scatterlist *cur_seg;
623 	uint32_t *dsd_seg;
624 	void *next_dsd;
625 	uint8_t avail_dsds;
626 	uint8_t first_iocb = 1;
627 	uint32_t dsd_list_len;
628 	struct dsd_dma *dsd_ptr;
629 	struct ct6_dsd *ctx;
630 
631 	cmd = GET_CMD_SP(sp);
632 
633 	/* Update entry type to indicate Command Type 6 IOCB */
634 	*((uint32_t *)(&cmd_pkt->entry_type)) =
635 		__constant_cpu_to_le32(COMMAND_TYPE_6);
636 
637 	/* No data transfer */
638 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
639 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
640 		return 0;
641 	}
642 
643 	vha = sp->fcport->vha;
644 	ha = vha->hw;
645 
646 	/* Set transfer direction */
647 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
648 		cmd_pkt->control_flags =
649 		    __constant_cpu_to_le16(CF_WRITE_DATA);
650 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
651 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
652 		cmd_pkt->control_flags =
653 		    __constant_cpu_to_le16(CF_READ_DATA);
654 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
655 	}
656 
657 	cur_seg = scsi_sglist(cmd);
658 	ctx = GET_CMD_CTX_SP(sp);
659 
660 	while (tot_dsds) {
661 		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
662 		    QLA_DSDS_PER_IOCB : tot_dsds;
663 		tot_dsds -= avail_dsds;
664 		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
665 
666 		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
667 		    struct dsd_dma, list);
668 		next_dsd = dsd_ptr->dsd_addr;
669 		list_del(&dsd_ptr->list);
670 		ha->gbl_dsd_avail--;
671 		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
672 		ctx->dsd_use_cnt++;
673 		ha->gbl_dsd_inuse++;
674 
675 		if (first_iocb) {
676 			first_iocb = 0;
677 			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
678 			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
679 			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
680 			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
681 		} else {
682 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
683 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
684 			*cur_dsd++ = cpu_to_le32(dsd_list_len);
685 		}
686 		cur_dsd = (uint32_t *)next_dsd;
687 		while (avail_dsds) {
688 			dma_addr_t	sle_dma;
689 
690 			sle_dma = sg_dma_address(cur_seg);
691 			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
692 			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
693 			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
694 			cur_seg = sg_next(cur_seg);
695 			avail_dsds--;
696 		}
697 	}
698 
699 	/* Null termination */
700 	*cur_dsd++ =  0;
701 	*cur_dsd++ = 0;
702 	*cur_dsd++ = 0;
703 	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
704 	return 0;
705 }
706 
707 /*
708  * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
709  * for Command Type 6.
710  *
711  * @dsds: number of data segment descriptors needed
712  *
713  * Returns the number of DSD lists needed to store @dsds.
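 * Each DSD list holds QLA_DSDS_PER_IOCB descriptors, so this is simply
 * ceil(@dsds / QLA_DSDS_PER_IOCB); for example 2 * QLA_DSDS_PER_IOCB + 1
 * descriptors need three lists.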
714  */
715 inline uint16_t
716 qla24xx_calc_dsd_lists(uint16_t dsds)
717 {
718 	uint16_t dsd_lists = 0;
719 
720 	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
721 	if (dsds % QLA_DSDS_PER_IOCB)
722 		dsd_lists++;
723 	return dsd_lists;
724 }
725 
726 
727 /**
728  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
729  * IOCB types.
730  *
731  * @sp: SRB command to process
732  * @cmd_pkt: Command Type 7 IOCB
733  * @tot_dsds: Total number of segments to transfer
734  */
735 inline void
736 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
737     uint16_t tot_dsds)
738 {
739 	uint16_t	avail_dsds;
740 	uint32_t	*cur_dsd;
741 	scsi_qla_host_t	*vha;
742 	struct scsi_cmnd *cmd;
743 	struct scatterlist *sg;
744 	int i;
745 	struct req_que *req;
746 
747 	cmd = GET_CMD_SP(sp);
748 
749 	/* Update entry type to indicate Command Type 7 IOCB */
750 	*((uint32_t *)(&cmd_pkt->entry_type)) =
751 	    __constant_cpu_to_le32(COMMAND_TYPE_7);
752 
753 	/* No data transfer */
754 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
755 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
756 		return;
757 	}
758 
759 	vha = sp->fcport->vha;
760 	req = vha->req;
761 
762 	/* Set transfer direction */
763 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
764 		cmd_pkt->task_mgmt_flags =
765 		    __constant_cpu_to_le16(TMF_WRITE_DATA);
766 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
767 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
768 		cmd_pkt->task_mgmt_flags =
769 		    __constant_cpu_to_le16(TMF_READ_DATA);
770 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
771 	}
772 
773 	/* One DSD is available in the Command Type 7 IOCB */
774 	avail_dsds = 1;
775 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
776 
777 	/* Load data segments */
778 
779 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
780 		dma_addr_t	sle_dma;
781 		cont_a64_entry_t *cont_pkt;
782 
783 		/* Allocate additional continuation packets? */
784 		if (avail_dsds == 0) {
785 			/*
786 			 * Five DSDs are available in the Continuation
787 			 * Type 1 IOCB.
788 			 */
789 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
790 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
791 			avail_dsds = 5;
792 		}
793 
794 		sle_dma = sg_dma_address(sg);
795 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
796 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
797 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
798 		avail_dsds--;
799 	}
800 }
801 
802 struct fw_dif_context {
803 	uint32_t ref_tag;
804 	uint16_t app_tag;
805 	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
806 	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
807 };
808 
809 /*
810  * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
811  *
812  */
813 static inline void
814 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
815     unsigned int protcnt)
816 {
817 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
818 
819 	switch (scsi_get_prot_type(cmd)) {
820 	case SCSI_PROT_DIF_TYPE0:
821 		/*
822 		 * No check for ql2xenablehba_err_chk, as it would be an
823 		 * I/O error if hba tag generation is not done.
824 		 */
825 		pkt->ref_tag = cpu_to_le32((uint32_t)
826 		    (0xffffffff & scsi_get_lba(cmd)));
827 
828 		if (!qla2x00_hba_err_chk_enabled(sp))
829 			break;
830 
831 		pkt->ref_tag_mask[0] = 0xff;
832 		pkt->ref_tag_mask[1] = 0xff;
833 		pkt->ref_tag_mask[2] = 0xff;
834 		pkt->ref_tag_mask[3] = 0xff;
835 		break;
836 
837 	/*
838 	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
839 	 * match LBA in CDB + N
840 	 */
841 	case SCSI_PROT_DIF_TYPE2:
842 		pkt->app_tag = __constant_cpu_to_le16(0);
843 		pkt->app_tag_mask[0] = 0x0;
844 		pkt->app_tag_mask[1] = 0x0;
845 
846 		pkt->ref_tag = cpu_to_le32((uint32_t)
847 		    (0xffffffff & scsi_get_lba(cmd)));
848 
849 		if (!qla2x00_hba_err_chk_enabled(sp))
850 			break;
851 
852 		/* enable ALL bytes of the ref tag */
853 		pkt->ref_tag_mask[0] = 0xff;
854 		pkt->ref_tag_mask[1] = 0xff;
855 		pkt->ref_tag_mask[2] = 0xff;
856 		pkt->ref_tag_mask[3] = 0xff;
857 		break;
858 
859 	/* For Type 3 protection: 16 bit GUARD only */
860 	case SCSI_PROT_DIF_TYPE3:
861 		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
862 			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
863 								0x00;
864 		break;
865 
866 	/*
867 	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
868 	 * 16 bit app tag.
869 	 */
870 	case SCSI_PROT_DIF_TYPE1:
871 		pkt->ref_tag = cpu_to_le32((uint32_t)
872 		    (0xffffffff & scsi_get_lba(cmd)));
873 		pkt->app_tag = __constant_cpu_to_le16(0);
874 		pkt->app_tag_mask[0] = 0x0;
875 		pkt->app_tag_mask[1] = 0x0;
876 
877 		if (!qla2x00_hba_err_chk_enabled(sp))
878 			break;
879 
880 		/* enable ALL bytes of the ref tag */
881 		pkt->ref_tag_mask[0] = 0xff;
882 		pkt->ref_tag_mask[1] = 0xff;
883 		pkt->ref_tag_mask[2] = 0xff;
884 		pkt->ref_tag_mask[3] = 0xff;
885 		break;
886 	}
887 }
888 
889 struct qla2_sgx {
890 	dma_addr_t		dma_addr;	/* OUT */
891 	uint32_t		dma_len;	/* OUT */
892 
893 	uint32_t		tot_bytes;	/* IN */
894 	struct scatterlist	*cur_sg;	/* IN */
895 
896 	/* for bookkeeping; zeroed on initial invocation */
897 	uint32_t		bytes_consumed;
898 	uint32_t		num_bytes;
899 	uint32_t		tot_partial;
900 
901 	/* for debugging */
902 	uint32_t		num_sg;
903 	srb_t			*sp;
904 };
905 
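/*
 * qla24xx_get_one_block_sg() - Hand back the next DMA chunk, at most one
 * protection interval (blk_sz bytes) long, from the command's data SG list.
 *
 * Each call fills sgx->dma_addr/dma_len.  *partial is set when the chunk
 * stops short of a full interval because the current SG element ran out;
 * the next call continues in the following element.  Returns 0 once all of
 * sgx->tot_bytes has been consumed, non-zero otherwise.
 */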
906 static int
907 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
908 	uint32_t *partial)
909 {
910 	struct scatterlist *sg;
911 	uint32_t cumulative_partial, sg_len;
912 	dma_addr_t sg_dma_addr;
913 
914 	if (sgx->num_bytes == sgx->tot_bytes)
915 		return 0;
916 
917 	sg = sgx->cur_sg;
918 	cumulative_partial = sgx->tot_partial;
919 
920 	sg_dma_addr = sg_dma_address(sg);
921 	sg_len = sg_dma_len(sg);
922 
923 	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
924 
925 	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
926 		sgx->dma_len = (blk_sz - cumulative_partial);
927 		sgx->tot_partial = 0;
928 		sgx->num_bytes += blk_sz;
929 		*partial = 0;
930 	} else {
931 		sgx->dma_len = sg_len - sgx->bytes_consumed;
932 		sgx->tot_partial += sgx->dma_len;
933 		*partial = 1;
934 	}
935 
936 	sgx->bytes_consumed += sgx->dma_len;
937 
938 	if (sg_len == sgx->bytes_consumed) {
939 		sg = sg_next(sg);
940 		sgx->num_sg++;
941 		sgx->cur_sg = sg;
942 		sgx->bytes_consumed = 0;
943 	}
944 
945 	return 1;
946 }
947 
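/*
 * qla24xx_walk_and_build_sglist_no_difb() - Build the DSD list for commands
 * that run without DIF bundling (insert/strip operations).  The data SG list
 * is walked one protection interval at a time and, after every complete
 * interval, the matching 8-byte protection tuple from the protection SG list
 * is appended, producing a single interleaved data+PI list for the firmware.
 * Returns non-zero on allocation failure.
 */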
948 static int
949 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
950 	uint32_t *dsd, uint16_t tot_dsds)
951 {
952 	void *next_dsd;
953 	uint8_t avail_dsds = 0;
954 	uint32_t dsd_list_len;
955 	struct dsd_dma *dsd_ptr;
956 	struct scatterlist *sg_prot;
957 	uint32_t *cur_dsd = dsd;
958 	uint16_t	used_dsds = tot_dsds;
959 
960 	uint32_t	prot_int;
961 	uint32_t	partial;
962 	struct qla2_sgx sgx;
963 	dma_addr_t	sle_dma;
964 	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
965 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
966 
967 	prot_int = cmd->device->sector_size;
968 
969 	memset(&sgx, 0, sizeof(struct qla2_sgx));
970 	sgx.tot_bytes = scsi_bufflen(cmd);
971 	sgx.cur_sg = scsi_sglist(cmd);
972 	sgx.sp = sp;
973 
974 	sg_prot = scsi_prot_sglist(cmd);
975 
976 	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
977 
978 		sle_dma = sgx.dma_addr;
979 		sle_dma_len = sgx.dma_len;
980 alloc_and_fill:
981 		/* Allocate additional continuation packets? */
982 		if (avail_dsds == 0) {
983 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
984 					QLA_DSDS_PER_IOCB : used_dsds;
985 			dsd_list_len = (avail_dsds + 1) * 12;
986 			used_dsds -= avail_dsds;
987 
988 			/* allocate tracking DS */
989 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
990 			if (!dsd_ptr)
991 				return 1;
992 
993 			/* allocate new list */
994 			dsd_ptr->dsd_addr = next_dsd =
995 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
996 				&dsd_ptr->dsd_list_dma);
997 
998 			if (!next_dsd) {
999 				/*
1000 				 * Need to cleanup only this dsd_ptr, rest
1001 				 * will be done by sp_free_dma()
1002 				 */
1003 				kfree(dsd_ptr);
1004 				return 1;
1005 			}
1006 
1007 			list_add_tail(&dsd_ptr->list,
1008 			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1009 
1010 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
1011 
1012 			/* add new list to cmd iocb or last list */
1013 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1014 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1015 			*cur_dsd++ = dsd_list_len;
1016 			cur_dsd = (uint32_t *)next_dsd;
1017 		}
1018 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1019 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1020 		*cur_dsd++ = cpu_to_le32(sle_dma_len);
1021 		avail_dsds--;
1022 
1023 		if (partial == 0) {
1024 			/* Got a full protection interval */
1025 			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
1026 			sle_dma_len = 8;
1027 
1028 			tot_prot_dma_len += sle_dma_len;
1029 			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
1030 				tot_prot_dma_len = 0;
1031 				sg_prot = sg_next(sg_prot);
1032 			}
1033 
1034 			partial = 1; /* So as to not re-enter this block */
1035 			goto alloc_and_fill;
1036 		}
1037 	}
1038 	/* Null termination */
1039 	*cur_dsd++ = 0;
1040 	*cur_dsd++ = 0;
1041 	*cur_dsd++ = 0;
1042 	return 0;
1043 }
1044 
1045 static int
1046 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1047 	uint16_t tot_dsds)
1048 {
1049 	void *next_dsd;
1050 	uint8_t avail_dsds = 0;
1051 	uint32_t dsd_list_len;
1052 	struct dsd_dma *dsd_ptr;
1053 	struct scatterlist *sg;
1054 	uint32_t *cur_dsd = dsd;
1055 	int	i;
1056 	uint16_t	used_dsds = tot_dsds;
1057 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1058 
1059 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1060 		dma_addr_t	sle_dma;
1061 
1062 		/* Allocate additional continuation packets? */
1063 		if (avail_dsds == 0) {
1064 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1065 					QLA_DSDS_PER_IOCB : used_dsds;
1066 			dsd_list_len = (avail_dsds + 1) * 12;
1067 			used_dsds -= avail_dsds;
1068 
1069 			/* allocate tracking DS */
1070 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1071 			if (!dsd_ptr)
1072 				return 1;
1073 
1074 			/* allocate new list */
1075 			dsd_ptr->dsd_addr = next_dsd =
1076 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1077 				&dsd_ptr->dsd_list_dma);
1078 
1079 			if (!next_dsd) {
1080 				/*
1081 				 * Need to cleanup only this dsd_ptr, rest
1082 				 * will be done by sp_free_dma()
1083 				 */
1084 				kfree(dsd_ptr);
1085 				return 1;
1086 			}
1087 
1088 			list_add_tail(&dsd_ptr->list,
1089 			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1090 
1091 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
1092 
1093 			/* add new list to cmd iocb or last list */
1094 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1095 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1096 			*cur_dsd++ = dsd_list_len;
1097 			cur_dsd = (uint32_t *)next_dsd;
1098 		}
1099 		sle_dma = sg_dma_address(sg);
1100 
1101 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1102 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1103 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1104 		avail_dsds--;
1105 
1106 	}
1107 	/* Null termination */
1108 	*cur_dsd++ = 0;
1109 	*cur_dsd++ = 0;
1110 	*cur_dsd++ = 0;
1111 	return 0;
1112 }
1113 
1114 static int
1115 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1116 							uint32_t *dsd,
1117 	uint16_t tot_dsds)
1118 {
1119 	void *next_dsd;
1120 	uint8_t avail_dsds = 0;
1121 	uint32_t dsd_list_len;
1122 	struct dsd_dma *dsd_ptr;
1123 	struct scatterlist *sg;
1124 	int	i;
1125 	struct scsi_cmnd *cmd;
1126 	uint32_t *cur_dsd = dsd;
1127 	uint16_t	used_dsds = tot_dsds;
1128 
1129 	cmd = GET_CMD_SP(sp);
1130 	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1131 		dma_addr_t	sle_dma;
1132 
1133 		/* Allocate additional continuation packets? */
1134 		if (avail_dsds == 0) {
1135 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1136 						QLA_DSDS_PER_IOCB : used_dsds;
1137 			dsd_list_len = (avail_dsds + 1) * 12;
1138 			used_dsds -= avail_dsds;
1139 
1140 			/* allocate tracking DS */
1141 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1142 			if (!dsd_ptr)
1143 				return 1;
1144 
1145 			/* allocate new list */
1146 			dsd_ptr->dsd_addr = next_dsd =
1147 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1148 				&dsd_ptr->dsd_list_dma);
1149 
1150 			if (!next_dsd) {
1151 				/*
1152 				 * Need to cleanup only this dsd_ptr, rest
1153 				 * will be done by sp_free_dma()
1154 				 */
1155 				kfree(dsd_ptr);
1156 				return 1;
1157 			}
1158 
1159 			list_add_tail(&dsd_ptr->list,
1160 			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1161 
1162 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
1163 
1164 			/* add new list to cmd iocb or last list */
1165 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1166 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1167 			*cur_dsd++ = dsd_list_len;
1168 			cur_dsd = (uint32_t *)next_dsd;
1169 		}
1170 		sle_dma = sg_dma_address(sg);
1171 
1172 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1173 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1174 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1175 
1176 		avail_dsds--;
1177 	}
1178 	/* Null termination */
1179 	*cur_dsd++ = 0;
1180 	*cur_dsd++ = 0;
1181 	*cur_dsd++ = 0;
1182 	return 0;
1183 }
1184 
1185 /**
1186  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1187  *							Type CRC_2 IOCB types.
1188  *
1189  * @sp: SRB command to process
1190  * @cmd_pkt: Command Type CRC_2 IOCB
1191  * @tot_dsds: Total number of segments to transfer
 * @tot_prot_dsds: Total number of protection segments to transfer
 * @fw_prot_opts: Protection operation flags passed to the firmware
1192  */
1193 static inline int
1194 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1195     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1196 {
1197 	uint32_t		*cur_dsd, *fcp_dl;
1198 	scsi_qla_host_t		*vha;
1199 	struct scsi_cmnd	*cmd;
1200 	struct scatterlist	*cur_seg;
1201 	int			sgc;
1202 	uint32_t		total_bytes = 0;
1203 	uint32_t		data_bytes;
1204 	uint32_t		dif_bytes;
1205 	uint8_t			bundling = 1;
1206 	uint16_t		blk_size;
1207 	uint8_t			*clr_ptr;
1208 	struct crc_context	*crc_ctx_pkt = NULL;
1209 	struct qla_hw_data	*ha;
1210 	uint8_t			additional_fcpcdb_len;
1211 	uint16_t		fcp_cmnd_len;
1212 	struct fcp_cmnd		*fcp_cmnd;
1213 	dma_addr_t		crc_ctx_dma;
1214 	char			tag[2];
1215 
1216 	cmd = GET_CMD_SP(sp);
1217 
1218 	sgc = 0;
1219 	/* Update entry type to indicate Command Type CRC_2 IOCB */
1220 	*((uint32_t *)(&cmd_pkt->entry_type)) =
1221 	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1222 
1223 	vha = sp->fcport->vha;
1224 	ha = vha->hw;
1225 
1226 	/* No data transfer */
1227 	data_bytes = scsi_bufflen(cmd);
1228 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1229 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1230 		return QLA_SUCCESS;
1231 	}
1232 
1233 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1234 
1235 	/* Set transfer direction */
1236 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1237 		cmd_pkt->control_flags =
1238 		    __constant_cpu_to_le16(CF_WRITE_DATA);
1239 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1240 		cmd_pkt->control_flags =
1241 		    __constant_cpu_to_le16(CF_READ_DATA);
1242 	}
1243 
1244 	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1245 	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1246 	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1247 	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1248 		bundling = 0;
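	/*
	 * Insert and strip operations do not use the firmware's DIF bundling
	 * (separate data and protection DSD lists); if protection segments
	 * are present, qla24xx_walk_and_build_sglist_no_difb() below builds
	 * a single interleaved data+PI list instead.
	 */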
1249 
1250 	/* Allocate CRC context from global pool */
1251 	crc_ctx_pkt = sp->u.scmd.ctx =
1252 	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1253 
1254 	if (!crc_ctx_pkt)
1255 		goto crc_queuing_error;
1256 
1257 	/* Zero out CTX area. */
1258 	clr_ptr = (uint8_t *)crc_ctx_pkt;
1259 	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1260 
1261 	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1262 
1263 	sp->flags |= SRB_CRC_CTX_DMA_VALID;
1264 
1265 	/* Set handle */
1266 	crc_ctx_pkt->handle = cmd_pkt->handle;
1267 
1268 	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1269 
1270 	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1271 	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1272 
1273 	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1274 	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1275 	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1276 
1277 	/* Determine SCSI command length -- align to 4 byte boundary */
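	/*
	 * The FCP_CMND IU built below is the 8-byte LUN plus 4 bytes of task
	 * attribute/flag fields (12 bytes), the CDB (16 bytes plus any
	 * additional CDB length) and a trailing 4-byte FCP_DL field that is
	 * filled in once the total transfer length is known.
	 */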
1278 	if (cmd->cmd_len > 16) {
1279 		additional_fcpcdb_len = cmd->cmd_len - 16;
1280 		if ((cmd->cmd_len % 4) != 0) {
1281 			/* SCSI cmd > 16 bytes must be multiple of 4 */
1282 			goto crc_queuing_error;
1283 		}
1284 		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1285 	} else {
1286 		additional_fcpcdb_len = 0;
1287 		fcp_cmnd_len = 12 + 16 + 4;
1288 	}
1289 
1290 	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1291 
1292 	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1293 	if (cmd->sc_data_direction == DMA_TO_DEVICE)
1294 		fcp_cmnd->additional_cdb_len |= 1;
1295 	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1296 		fcp_cmnd->additional_cdb_len |= 2;
1297 
1298 	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1299 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1300 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1301 	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1302 	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1303 	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1304 	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1305 	fcp_cmnd->task_management = 0;
1306 
1307 	/*
1308 	 * Update tagged queuing modifier if using command tag queuing
1309 	 */
1310 	if (scsi_populate_tag_msg(cmd, tag)) {
1311 		switch (tag[0]) {
1312 		case HEAD_OF_QUEUE_TAG:
1313 		    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1314 		    break;
1315 		case ORDERED_QUEUE_TAG:
1316 		    fcp_cmnd->task_attribute = TSK_ORDERED;
1317 		    break;
1318 		default:
1319 		    fcp_cmnd->task_attribute = 0;
1320 		    break;
1321 		}
1322 	} else {
1323 		fcp_cmnd->task_attribute = 0;
1324 	}
1325 
1326 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1327 
1328 	/* Compute dif len and adjust data len to include protection */
1329 	dif_bytes = 0;
1330 	blk_size = cmd->device->sector_size;
1331 	dif_bytes = (data_bytes / blk_size) * 8;
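	/* Each logical block carries one 8-byte DIF tuple (guard, app and ref tags). */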
1332 
1333 	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1334 	case SCSI_PROT_READ_INSERT:
1335 	case SCSI_PROT_WRITE_STRIP:
1336 	    total_bytes = data_bytes;
1337 	    data_bytes += dif_bytes;
1338 	    break;
1339 
1340 	case SCSI_PROT_READ_STRIP:
1341 	case SCSI_PROT_WRITE_INSERT:
1342 	case SCSI_PROT_READ_PASS:
1343 	case SCSI_PROT_WRITE_PASS:
1344 	    total_bytes = data_bytes + dif_bytes;
1345 	    break;
1346 	default:
1347 	    BUG();
1348 	}
1349 
1350 	if (!qla2x00_hba_err_chk_enabled(sp))
1351 		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1352 	/* HBA error checking enabled */
1353 	else if (IS_PI_UNINIT_CAPABLE(ha)) {
1354 		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1355 		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1356 			SCSI_PROT_DIF_TYPE2))
1357 			fw_prot_opts |= BIT_10;
1358 		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1359 		    SCSI_PROT_DIF_TYPE3)
1360 			fw_prot_opts |= BIT_11;
1361 	}
1362 
1363 	if (!bundling) {
1364 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1365 	} else {
1366 		/*
1367 		 * Configure bundling so the firmware fetches the interleaved
1368 		 * protection data with separate PCI accesses.
1369 		 */
1370 		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1371 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1372 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1373 							tot_prot_dsds);
1374 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1375 	}
1376 
1377 	/* Finish the common fields of CRC pkt */
1378 	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1379 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1380 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1381 	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1382 	/* Fibre channel byte count */
1383 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1384 	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1385 	    additional_fcpcdb_len);
1386 	*fcp_dl = htonl(total_bytes);
1387 
1388 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1389 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1390 		return QLA_SUCCESS;
1391 	}
1392 	/* Walks data segments */
1393 
1394 	cmd_pkt->control_flags |=
1395 	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1396 
1397 	if (!bundling && tot_prot_dsds) {
1398 		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1399 		    cur_dsd, tot_dsds))
1400 			goto crc_queuing_error;
1401 	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1402 	    (tot_dsds - tot_prot_dsds)))
1403 		goto crc_queuing_error;
1404 
1405 	if (bundling && tot_prot_dsds) {
1406 		/* Walks dif segments */
1407 		cur_seg = scsi_prot_sglist(cmd);
1408 		cmd_pkt->control_flags |=
1409 			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1410 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1411 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1412 		    tot_prot_dsds))
1413 			goto crc_queuing_error;
1414 	}
1415 	return QLA_SUCCESS;
1416 
1417 crc_queuing_error:
1418 	/* Cleanup will be performed by the caller */
1419 
1420 	return QLA_FUNCTION_FAILED;
1421 }
1422 
1423 /**
1424  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1425  * @sp: command to send to the ISP
1426  *
1427  * Returns non-zero if a failure occurred, else zero.
1428  */
1429 int
1430 qla24xx_start_scsi(srb_t *sp)
1431 {
1432 	int		ret, nseg;
1433 	unsigned long   flags;
1434 	uint32_t	*clr_ptr;
1435 	uint32_t        index;
1436 	uint32_t	handle;
1437 	struct cmd_type_7 *cmd_pkt;
1438 	uint16_t	cnt;
1439 	uint16_t	req_cnt;
1440 	uint16_t	tot_dsds;
1441 	struct req_que *req = NULL;
1442 	struct rsp_que *rsp = NULL;
1443 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1444 	struct scsi_qla_host *vha = sp->fcport->vha;
1445 	struct qla_hw_data *ha = vha->hw;
1446 	char		tag[2];
1447 
1448 	/* Setup device pointers. */
1449 	ret = 0;
1450 
1451 	qla25xx_set_que(sp, &rsp);
1452 	req = vha->req;
1453 
1454 	/* So we know we haven't pci_map'ed anything yet */
1455 	tot_dsds = 0;
1456 
1457 	/* Send marker if required */
1458 	if (vha->marker_needed != 0) {
1459 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1460 		    QLA_SUCCESS)
1461 			return QLA_FUNCTION_FAILED;
1462 		vha->marker_needed = 0;
1463 	}
1464 
1465 	/* Acquire ring specific lock */
1466 	spin_lock_irqsave(&ha->hardware_lock, flags);
1467 
1468 	/* Check for room in outstanding command list. */
1469 	handle = req->current_outstanding_cmd;
1470 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1471 		handle++;
1472 		if (handle == req->num_outstanding_cmds)
1473 			handle = 1;
1474 		if (!req->outstanding_cmds[handle])
1475 			break;
1476 	}
1477 	if (index == req->num_outstanding_cmds)
1478 		goto queuing_error;
1479 
1480 	/* Map the sg table so we have an accurate count of sg entries needed */
1481 	if (scsi_sg_count(cmd)) {
1482 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1483 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1484 		if (unlikely(!nseg))
1485 			goto queuing_error;
1486 	} else
1487 		nseg = 0;
1488 
1489 	tot_dsds = nseg;
1490 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1491 	if (req->cnt < (req_cnt + 2)) {
1492 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1493 
1494 		if (req->ring_index < cnt)
1495 			req->cnt = cnt - req->ring_index;
1496 		else
1497 			req->cnt = req->length -
1498 				(req->ring_index - cnt);
1499 		if (req->cnt < (req_cnt + 2))
1500 			goto queuing_error;
1501 	}
1502 
1503 	/* Build command packet. */
1504 	req->current_outstanding_cmd = handle;
1505 	req->outstanding_cmds[handle] = sp;
1506 	sp->handle = handle;
1507 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1508 	req->cnt -= req_cnt;
1509 
1510 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1511 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1512 
1513 	/* Zero out remaining portion of packet. */
1514 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1515 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1516 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1517 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1518 
1519 	/* Set NPORT-ID and LUN number*/
1520 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1521 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1522 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1523 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1524 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1525 
1526 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1527 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1528 
1529 	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1530 	if (scsi_populate_tag_msg(cmd, tag)) {
1531 		switch (tag[0]) {
1532 		case HEAD_OF_QUEUE_TAG:
1533 			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1534 			break;
1535 		case ORDERED_QUEUE_TAG:
1536 			cmd_pkt->task = TSK_ORDERED;
1537 			break;
1538 		}
1539 	}
1540 
1541 	/* Load SCSI command packet. */
1542 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1543 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1544 
1545 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1546 
1547 	/* Build IOCB segments */
1548 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1549 
1550 	/* Set total data segment count. */
1551 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1552 	/* Specify response queue number where completion should happen */
1553 	cmd_pkt->entry_status = (uint8_t) rsp->id;
1554 	wmb();
1555 	/* Adjust ring index. */
1556 	req->ring_index++;
1557 	if (req->ring_index == req->length) {
1558 		req->ring_index = 0;
1559 		req->ring_ptr = req->ring;
1560 	} else
1561 		req->ring_ptr++;
1562 
1563 	sp->flags |= SRB_DMA_VALID;
1564 
1565 	/* Set chip new ring index. */
1566 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1567 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1568 
1569 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1570 	if (vha->flags.process_response_queue &&
1571 		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1572 		qla24xx_process_response_queue(vha, rsp);
1573 
1574 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1575 	return QLA_SUCCESS;
1576 
1577 queuing_error:
1578 	if (tot_dsds)
1579 		scsi_dma_unmap(cmd);
1580 
1581 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1582 
1583 	return QLA_FUNCTION_FAILED;
1584 }
1585 
1586 
1587 /**
1588  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1589  * @sp: command to send to the ISP
1590  *
1591  * Returns non-zero if a failure occurred, else zero.
1592  */
1593 int
1594 qla24xx_dif_start_scsi(srb_t *sp)
1595 {
1596 	int			nseg;
1597 	unsigned long		flags;
1598 	uint32_t		*clr_ptr;
1599 	uint32_t		index;
1600 	uint32_t		handle;
1601 	uint16_t		cnt;
1602 	uint16_t		req_cnt = 0;
1603 	uint16_t		tot_dsds;
1604 	uint16_t		tot_prot_dsds;
1605 	uint16_t		fw_prot_opts = 0;
1606 	struct req_que		*req = NULL;
1607 	struct rsp_que		*rsp = NULL;
1608 	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
1609 	struct scsi_qla_host	*vha = sp->fcport->vha;
1610 	struct qla_hw_data	*ha = vha->hw;
1611 	struct cmd_type_crc_2	*cmd_pkt;
1612 	uint32_t		status = 0;
1613 
1614 #define QDSS_GOT_Q_SPACE	BIT_0
1615 
1616 	/* Only process protection I/O or CDBs longer than 16 bytes in this routine */
1617 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1618 		if (cmd->cmd_len <= 16)
1619 			return qla24xx_start_scsi(sp);
1620 	}
1621 
1622 	/* Setup device pointers. */
1623 
1624 	qla25xx_set_que(sp, &rsp);
1625 	req = vha->req;
1626 
1627 	/* So we know we haven't pci_map'ed anything yet */
1628 	tot_dsds = 0;
1629 
1630 	/* Send marker if required */
1631 	if (vha->marker_needed != 0) {
1632 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1633 		    QLA_SUCCESS)
1634 			return QLA_FUNCTION_FAILED;
1635 		vha->marker_needed = 0;
1636 	}
1637 
1638 	/* Acquire ring specific lock */
1639 	spin_lock_irqsave(&ha->hardware_lock, flags);
1640 
1641 	/* Check for room in outstanding command list. */
1642 	handle = req->current_outstanding_cmd;
1643 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1644 		handle++;
1645 		if (handle == req->num_outstanding_cmds)
1646 			handle = 1;
1647 		if (!req->outstanding_cmds[handle])
1648 			break;
1649 	}
1650 
1651 	if (index == req->num_outstanding_cmds)
1652 		goto queuing_error;
1653 
1654 	/* Compute number of required data segments */
1655 	/* Map the sg table so we have an accurate count of sg entries needed */
1656 	if (scsi_sg_count(cmd)) {
1657 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1658 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1659 		if (unlikely(!nseg))
1660 			goto queuing_error;
1661 		else
1662 			sp->flags |= SRB_DMA_VALID;
1663 
1664 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1665 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
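			/*
			 * For insert/strip the data list is later rebuilt in
			 * protection-interval sized pieces, so recount how
			 * many data DSDs that walk will produce (an SG element
			 * straddling an interval boundary gets split).
			 */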
1666 			struct qla2_sgx sgx;
1667 			uint32_t	partial;
1668 
1669 			memset(&sgx, 0, sizeof(struct qla2_sgx));
1670 			sgx.tot_bytes = scsi_bufflen(cmd);
1671 			sgx.cur_sg = scsi_sglist(cmd);
1672 			sgx.sp = sp;
1673 
1674 			nseg = 0;
1675 			while (qla24xx_get_one_block_sg(
1676 			    cmd->device->sector_size, &sgx, &partial))
1677 				nseg++;
1678 		}
1679 	} else
1680 		nseg = 0;
1681 
1682 	/* number of required data segments */
1683 	tot_dsds = nseg;
1684 
1685 	/* Compute number of required protection segments */
1686 	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1687 		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1688 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1689 		if (unlikely(!nseg))
1690 			goto queuing_error;
1691 		else
1692 			sp->flags |= SRB_CRC_PROT_DMA_VALID;
1693 
1694 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1695 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1696 			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1697 		}
1698 	} else {
1699 		nseg = 0;
1700 	}
1701 
1702 	req_cnt = 1;
1703 	/* Total Data and protection sg segment(s) */
1704 	tot_prot_dsds = nseg;
1705 	tot_dsds += nseg;
1706 	if (req->cnt < (req_cnt + 2)) {
1707 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1708 
1709 		if (req->ring_index < cnt)
1710 			req->cnt = cnt - req->ring_index;
1711 		else
1712 			req->cnt = req->length -
1713 				(req->ring_index - cnt);
1714 		if (req->cnt < (req_cnt + 2))
1715 			goto queuing_error;
1716 	}
1717 
1718 	status |= QDSS_GOT_Q_SPACE;
1719 
1720 	/* Build header part of command packet (excluding the OPCODE). */
1721 	req->current_outstanding_cmd = handle;
1722 	req->outstanding_cmds[handle] = sp;
1723 	sp->handle = handle;
1724 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1725 	req->cnt -= req_cnt;
1726 
1727 	/* Fill-in common area */
1728 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1729 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1730 
1731 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1732 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1733 
1734 	/* Set NPORT-ID and LUN number*/
1735 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1736 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1737 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1738 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1739 
1740 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1741 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1742 
1743 	/* Total Data and protection segment(s) */
1744 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1745 
1746 	/* Build IOCB segments and adjust for data protection segments */
1747 	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1748 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1749 		QLA_SUCCESS)
1750 		goto queuing_error;
1751 
1752 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1753 	/* Specify response queue number where completion should happen */
1754 	cmd_pkt->entry_status = (uint8_t) rsp->id;
1755 	cmd_pkt->timeout = __constant_cpu_to_le16(0);
1756 	wmb();
1757 
1758 	/* Adjust ring index. */
1759 	req->ring_index++;
1760 	if (req->ring_index == req->length) {
1761 		req->ring_index = 0;
1762 		req->ring_ptr = req->ring;
1763 	} else
1764 		req->ring_ptr++;
1765 
1766 	/* Set chip new ring index. */
1767 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1768 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1769 
1770 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1771 	if (vha->flags.process_response_queue &&
1772 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1773 		qla24xx_process_response_queue(vha, rsp);
1774 
1775 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1776 
1777 	return QLA_SUCCESS;
1778 
1779 queuing_error:
1780 	if (status & QDSS_GOT_Q_SPACE) {
1781 		req->outstanding_cmds[handle] = NULL;
1782 		req->cnt += req_cnt;
1783 	}
1784 	/* Cleanup will be performed by the caller (queuecommand) */
1785 
1786 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1787 	return QLA_FUNCTION_FAILED;
1788 }
1789 
1790 
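/*
 * Pick the response queue on which this command should complete.  With CPU
 * affinity enabled the command is steered to the response queue associated
 * with the submitting CPU; otherwise (or for out-of-range CPUs) the default
 * response queue 0 is used.
 */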
1791 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1792 {
1793 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1794 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1795 	int affinity = cmd->request->cpu;
1796 
1797 	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1798 		affinity < ha->max_rsp_queues - 1)
1799 		*rsp = ha->rsp_q_map[affinity + 1];
1800 	 else
1801 		*rsp = ha->rsp_q_map[0];
1802 }
1803 
1804 /* Generic Control-SRB manipulation functions. */
1805 void *
1806 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1807 {
1808 	struct qla_hw_data *ha = vha->hw;
1809 	struct req_que *req = ha->req_q_map[0];
1810 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1811 	uint32_t index, handle;
1812 	request_t *pkt;
1813 	uint16_t cnt, req_cnt;
1814 
1815 	pkt = NULL;
1816 	req_cnt = 1;
1817 	handle = 0;
1818 
1819 	if (!sp)
1820 		goto skip_cmd_array;
1821 
1822 	/* Check for room in outstanding command list. */
1823 	handle = req->current_outstanding_cmd;
1824 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1825 		handle++;
1826 		if (handle == req->num_outstanding_cmds)
1827 			handle = 1;
1828 		if (!req->outstanding_cmds[handle])
1829 			break;
1830 	}
1831 	if (index == req->num_outstanding_cmds) {
1832 		ql_log(ql_log_warn, vha, 0x700b,
1833 		    "No room on outstanding cmd array.\n");
1834 		goto queuing_error;
1835 	}
1836 
1837 	/* Prep command array. */
1838 	req->current_outstanding_cmd = handle;
1839 	req->outstanding_cmds[handle] = sp;
1840 	sp->handle = handle;
1841 
1842 	/* Adjust entry-counts as needed. */
1843 	if (sp->type != SRB_SCSI_CMD)
1844 		req_cnt = sp->iocbs;
1845 
1846 skip_cmd_array:
1847 	/* Check for room on request queue. */
1848 	if (req->cnt < req_cnt) {
1849 		if (ha->mqenable || IS_QLA83XX(ha))
1850 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1851 		else if (IS_QLA82XX(ha))
1852 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1853 		else if (IS_FWI2_CAPABLE(ha))
1854 			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1855 		else
1856 			cnt = qla2x00_debounce_register(
1857 			    ISP_REQ_Q_OUT(ha, &reg->isp));
1858 
1859 		if  (req->ring_index < cnt)
1860 			req->cnt = cnt - req->ring_index;
1861 		else
1862 			req->cnt = req->length -
1863 			    (req->ring_index - cnt);
1864 	}
1865 	if (req->cnt < req_cnt)
1866 		goto queuing_error;
1867 
1868 	/* Prep packet */
1869 	req->cnt -= req_cnt;
1870 	pkt = req->ring_ptr;
1871 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
1872 	pkt->entry_count = req_cnt;
1873 	pkt->handle = handle;
1874 
1875 queuing_error:
1876 	return pkt;
1877 }
1878 
1879 static void
1880 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1881 {
1882 	struct srb_iocb *lio = &sp->u.iocb_cmd;
1883 
1884 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1885 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1886 	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1887 		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1888 	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1889 		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1890 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1891 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1892 	logio->port_id[1] = sp->fcport->d_id.b.area;
1893 	logio->port_id[2] = sp->fcport->d_id.b.domain;
1894 	logio->vp_index = sp->fcport->vha->vp_idx;
1895 }
1896 
1897 static void
1898 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1899 {
1900 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1901 	struct srb_iocb *lio = &sp->u.iocb_cmd;
1902 	uint16_t opts;
1903 
1904 	mbx->entry_type = MBX_IOCB_TYPE;
1905 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1906 	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1907 	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1908 	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1909 	if (HAS_EXTENDED_IDS(ha)) {
1910 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1911 		mbx->mb10 = cpu_to_le16(opts);
1912 	} else {
1913 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1914 	}
1915 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1916 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1917 	    sp->fcport->d_id.b.al_pa);
1918 	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1919 }
1920 
1921 static void
1922 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1923 {
1924 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1925 	logio->control_flags =
1926 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1927 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1928 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1929 	logio->port_id[1] = sp->fcport->d_id.b.area;
1930 	logio->port_id[2] = sp->fcport->d_id.b.domain;
1931 	logio->vp_index = sp->fcport->vha->vp_idx;
1932 }
1933 
1934 static void
1935 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1936 {
1937 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1938 
1939 	mbx->entry_type = MBX_IOCB_TYPE;
1940 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1941 	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1942 	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1943 	    cpu_to_le16(sp->fcport->loop_id):
1944 	    cpu_to_le16(sp->fcport->loop_id << 8);
1945 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1946 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1947 	    sp->fcport->d_id.b.al_pa);
1948 	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1949 	/* Implicit: mbx->mbx10 = 0. */
1950 }
1951 
1952 static void
1953 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1954 {
1955 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1956 	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1957 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1958 	logio->vp_index = sp->fcport->vha->vp_idx;
1959 }
1960 
1961 static void
1962 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1963 {
1964 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1965 
1966 	mbx->entry_type = MBX_IOCB_TYPE;
1967 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1968 	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1969 	if (HAS_EXTENDED_IDS(ha)) {
1970 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1971 		mbx->mb10 = cpu_to_le16(BIT_0);
1972 	} else {
1973 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1974 	}
1975 	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1976 	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1977 	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1978 	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1979 	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1980 }
1981 
1982 static void
1983 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1984 {
1985 	uint32_t flags;
1986 	unsigned int lun;
1987 	struct fc_port *fcport = sp->fcport;
1988 	scsi_qla_host_t *vha = fcport->vha;
1989 	struct qla_hw_data *ha = vha->hw;
1990 	struct srb_iocb *iocb = &sp->u.iocb_cmd;
1991 	struct req_que *req = vha->req;
1992 
1993 	flags = iocb->u.tmf.flags;
1994 	lun = iocb->u.tmf.lun;
1995 
1996 	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1997 	tsk->entry_count = 1;
1998 	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1999 	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
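	/*
	 * ha->r_a_tov appears to be kept in 100 ms units, so this works out
	 * to twice R_A_TOV in seconds (e.g. r_a_tov == 100 gives 20 s).
	 */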
2000 	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2001 	tsk->control_flags = cpu_to_le32(flags);
2002 	tsk->port_id[0] = fcport->d_id.b.al_pa;
2003 	tsk->port_id[1] = fcport->d_id.b.area;
2004 	tsk->port_id[2] = fcport->d_id.b.domain;
2005 	tsk->vp_index = fcport->vha->vp_idx;
2006 
2007 	if (flags == TCF_LUN_RESET) {
2008 		int_to_scsilun(lun, &tsk->lun);
2009 		host_to_fcp_swap((uint8_t *)&tsk->lun,
2010 			sizeof(tsk->lun));
2011 	}
2012 }
2013 
2014 static void
2015 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2016 {
2017 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2018 
2019 	els_iocb->entry_type = ELS_IOCB_TYPE;
2020 	els_iocb->entry_count = 1;
2021 	els_iocb->sys_define = 0;
2022 	els_iocb->entry_status = 0;
2023 	els_iocb->handle = sp->handle;
2024 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2025 	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2026 	els_iocb->vp_index = sp->fcport->vha->vp_idx;
2027 	els_iocb->sof_type = EST_SOFI3;
2028 	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2029 
2030 	els_iocb->opcode =
2031 	    sp->type == SRB_ELS_CMD_RPT ?
2032 	    bsg_job->request->rqst_data.r_els.els_code :
2033 	    bsg_job->request->rqst_data.h_els.command_code;
2034 	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2035 	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2036 	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2037 	els_iocb->control_flags = 0;
2038 	els_iocb->rx_byte_count =
2039 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2040 	els_iocb->tx_byte_count =
2041 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2042 
2043 	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2044 	    (bsg_job->request_payload.sg_list)));
2045 	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2046 	    (bsg_job->request_payload.sg_list)));
2047 	els_iocb->tx_len = cpu_to_le32(sg_dma_len
2048 	    (bsg_job->request_payload.sg_list));
2049 
2050 	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2051 	    (bsg_job->reply_payload.sg_list)));
2052 	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2053 	    (bsg_job->reply_payload.sg_list)));
2054 	els_iocb->rx_len = cpu_to_le32(sg_dma_len
2055 	    (bsg_job->reply_payload.sg_list));
2056 }
2057 
2058 static void
2059 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2060 {
2061 	uint16_t        avail_dsds;
2062 	uint32_t        *cur_dsd;
2063 	struct scatterlist *sg;
2064 	int index;
2065 	uint16_t tot_dsds;
2066 	scsi_qla_host_t *vha = sp->fcport->vha;
2067 	struct qla_hw_data *ha = vha->hw;
2068 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2069 	int loop_iterartion = 0;
2070 	int cont_iocb_prsnt = 0;
2071 	int entry_count = 1;
2072 
2073 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2074 	ct_iocb->entry_type = CT_IOCB_TYPE;
2075 	ct_iocb->entry_status = 0;
2076 	ct_iocb->handle1 = sp->handle;
2077 	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2078 	ct_iocb->status = __constant_cpu_to_le16(0);
2079 	ct_iocb->control_flags = __constant_cpu_to_le16(0);
2080 	ct_iocb->timeout = 0;
2081 	ct_iocb->cmd_dsd_count =
2082 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2083 	ct_iocb->total_dsd_count =
2084 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2085 	ct_iocb->req_bytecount =
2086 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2087 	ct_iocb->rsp_bytecount =
2088 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2089 
2090 	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2091 	    (bsg_job->request_payload.sg_list)));
2092 	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2093 	    (bsg_job->request_payload.sg_list)));
2094 	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2095 
2096 	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2097 	    (bsg_job->reply_payload.sg_list)));
2098 	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2099 	    (bsg_job->reply_payload.sg_list)));
2100 	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2101 
2102 	avail_dsds = 1;
2103 	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2104 	index = 0;
2105 	tot_dsds = bsg_job->reply_payload.sg_cnt;
2106 
2107 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2108 		dma_addr_t       sle_dma;
2109 		cont_a64_entry_t *cont_pkt;
2110 
2111 		/* Allocate additional continuation packets? */
2112 		if (avail_dsds == 0) {
2113 			/*
2114 			 * Five DSDs are available in the Cont.
2115 			 * Type 1 IOCB.
2116 			 */
2117 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2118 			    vha->hw->req_q_map[0]);
2119 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2120 			avail_dsds = 5;
2121 			cont_iocb_prsnt = 1;
2122 			entry_count++;
2123 		}
2124 
2125 		sle_dma = sg_dma_address(sg);
2126 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2127 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2128 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2129 		loop_iterartion++;
2130 		avail_dsds--;
2131 	}
2132 	ct_iocb->entry_count = entry_count;
2133 }
2134 
2135 static void
2136 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2137 {
2138 	uint16_t        avail_dsds;
2139 	uint32_t        *cur_dsd;
2140 	struct scatterlist *sg;
2141 	int index;
2142 	uint16_t tot_dsds;
2143 	scsi_qla_host_t *vha = sp->fcport->vha;
2144 	struct qla_hw_data *ha = vha->hw;
2145 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2146 	int loop_iterartion = 0;
2147 	int cont_iocb_prsnt = 0;
2148 	int entry_count = 1;
2149 
2150 	ct_iocb->entry_type = CT_IOCB_TYPE;
2151 	ct_iocb->entry_status = 0;
2152 	ct_iocb->sys_define = 0;
2153 	ct_iocb->handle = sp->handle;
2154 
2155 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2156 	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2157 	ct_iocb->comp_status = __constant_cpu_to_le16(0);
2158 
2159 	ct_iocb->cmd_dsd_count =
2160 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2161 	ct_iocb->timeout = 0;
2162 	ct_iocb->rsp_dsd_count =
2163 	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2164 	ct_iocb->rsp_byte_count =
2165 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2166 	ct_iocb->cmd_byte_count =
2167 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2168 	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2169 	    (bsg_job->request_payload.sg_list)));
2170 	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2171 	    (bsg_job->request_payload.sg_list)));
2172 	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2173 	    (bsg_job->request_payload.sg_list));
2174 
2175 	avail_dsds = 1;
2176 	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2177 	index = 0;
2178 	tot_dsds = bsg_job->reply_payload.sg_cnt;
2179 
2180 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2181 		dma_addr_t       sle_dma;
2182 		cont_a64_entry_t *cont_pkt;
2183 
2184 		/* Allocate additional continuation packets? */
2185 		if (avail_dsds == 0) {
2186 			/*
2187 			 * Five DSDs are available in the Cont.
2188 			 * Type 1 IOCB.
2189 			 */
2190 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2191 			    ha->req_q_map[0]);
2192 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2193 			avail_dsds = 5;
2194 			cont_iocb_prsnt = 1;
2195 			entry_count++;
2196 		}
2197 
2198 		sle_dma = sg_dma_address(sg);
2199 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2200 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2201 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2202 		loop_iterartion++;
2203 		avail_dsds--;
2204 	}
2205 	ct_iocb->entry_count = entry_count;
2206 }
2207 
2208 /*
2209  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2210  * @sp: command to send to the ISP
2211  *
2212  * Returns non-zero if a failure occurred, else zero.
2213  */
2214 int
2215 qla82xx_start_scsi(srb_t *sp)
2216 {
2217 	int		ret, nseg;
2218 	unsigned long   flags;
2219 	struct scsi_cmnd *cmd;
2220 	uint32_t	*clr_ptr;
2221 	uint32_t        index;
2222 	uint32_t	handle;
2223 	uint16_t	cnt;
2224 	uint16_t	req_cnt;
2225 	uint16_t	tot_dsds;
2226 	struct device_reg_82xx __iomem *reg;
2227 	uint32_t dbval;
2228 	uint32_t *fcp_dl;
2229 	uint8_t additional_cdb_len;
2230 	struct ct6_dsd *ctx;
2231 	struct scsi_qla_host *vha = sp->fcport->vha;
2232 	struct qla_hw_data *ha = vha->hw;
2233 	struct req_que *req = NULL;
2234 	struct rsp_que *rsp = NULL;
2235 	char tag[2];
2236 
2237 	/* Setup device pointers. */
2238 	ret = 0;
2239 	reg = &ha->iobase->isp82;
2240 	cmd = GET_CMD_SP(sp);
2241 	req = vha->req;
2242 	rsp = ha->rsp_q_map[0];
2243 
2244 	/* So we know we haven't pci_map'ed anything yet */
2245 	tot_dsds = 0;
2246 
2247 	dbval = 0x04 | (ha->portnum << 5);
2248 
2249 	/* Send marker if required */
2250 	if (vha->marker_needed != 0) {
2251 		if (qla2x00_marker(vha, req,
2252 			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2253 			ql_log(ql_log_warn, vha, 0x300c,
2254 			    "qla2x00_marker failed for cmd=%p.\n", cmd);
2255 			return QLA_FUNCTION_FAILED;
2256 		}
2257 		vha->marker_needed = 0;
2258 	}
2259 
2260 	/* Acquire ring specific lock */
2261 	spin_lock_irqsave(&ha->hardware_lock, flags);
2262 
2263 	/* Check for room in outstanding command list. */
2264 	handle = req->current_outstanding_cmd;
2265 	for (index = 1; index < req->num_outstanding_cmds; index++) {
2266 		handle++;
2267 		if (handle == req->num_outstanding_cmds)
2268 			handle = 1;
2269 		if (!req->outstanding_cmds[handle])
2270 			break;
2271 	}
2272 	if (index == req->num_outstanding_cmds)
2273 		goto queuing_error;
2274 
2275 	/* Map the sg table so we have an accurate count of sg entries needed */
2276 	if (scsi_sg_count(cmd)) {
2277 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2278 		    scsi_sg_count(cmd), cmd->sc_data_direction);
2279 		if (unlikely(!nseg))
2280 			goto queuing_error;
2281 	} else
2282 		nseg = 0;
2283 
2284 	tot_dsds = nseg;
2285 
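	/*
	 * Commands with more than ql2xshiftctondsd segments are built as
	 * Command Type 6 IOCBs: the FCP_CMND IU and the DSD lists live in
	 * separate DMA buffers. Smaller commands use an inline Command
	 * Type 7 IOCB instead.
	 */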
2286 	if (tot_dsds > ql2xshiftctondsd) {
2287 		struct cmd_type_6 *cmd_pkt;
2288 		uint16_t more_dsd_lists = 0;
2289 		struct dsd_dma *dsd_ptr;
2290 		uint16_t i;
2291 
2292 		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2293 		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2294 			ql_dbg(ql_dbg_io, vha, 0x300d,
2295 			    "Num of DSD lists %d is more than %d for cmd=%p.\n",
2296 			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2297 			    cmd);
2298 			goto queuing_error;
2299 		}
2300 
2301 		if (more_dsd_lists <= ha->gbl_dsd_avail)
2302 			goto sufficient_dsds;
2303 		else
2304 			more_dsd_lists -= ha->gbl_dsd_avail;
2305 
2306 		for (i = 0; i < more_dsd_lists; i++) {
2307 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2308 			if (!dsd_ptr) {
2309 				ql_log(ql_log_fatal, vha, 0x300e,
2310 				    "Failed to allocate memory for dsd_dma "
2311 				    "for cmd=%p.\n", cmd);
2312 				goto queuing_error;
2313 			}
2314 
2315 			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2316 				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2317 			if (!dsd_ptr->dsd_addr) {
2318 				kfree(dsd_ptr);
2319 				ql_log(ql_log_fatal, vha, 0x300f,
2320 				    "Failed to allocate memory for dsd_addr "
2321 				    "for cmd=%p.\n", cmd);
2322 				goto queuing_error;
2323 			}
2324 			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2325 			ha->gbl_dsd_avail++;
2326 		}
2327 
2328 sufficient_dsds:
2329 		req_cnt = 1;
2330 
2331 		if (req->cnt < (req_cnt + 2)) {
2332 			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2333 				&reg->req_q_out[0]);
2334 			if (req->ring_index < cnt)
2335 				req->cnt = cnt - req->ring_index;
2336 			else
2337 				req->cnt = req->length -
2338 					(req->ring_index - cnt);
2339 			if (req->cnt < (req_cnt + 2))
2340 				goto queuing_error;
2341 		}
2342 
2343 		ctx = sp->u.scmd.ctx =
2344 		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2345 		if (!ctx) {
2346 			ql_log(ql_log_fatal, vha, 0x3010,
2347 			    "Failed to allocate ctx for cmd=%p.\n", cmd);
2348 			goto queuing_error;
2349 		}
2350 
2351 		memset(ctx, 0, sizeof(struct ct6_dsd));
2352 		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2353 			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2354 		if (!ctx->fcp_cmnd) {
2355 			ql_log(ql_log_fatal, vha, 0x3011,
2356 			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2357 			goto queuing_error;
2358 		}
2359 
2360 		/* Initialize the DSD list and dma handle */
2361 		INIT_LIST_HEAD(&ctx->dsd_list);
2362 		ctx->dsd_use_cnt = 0;
2363 
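		/*
		 * Size of the FCP_CMND IU that will be DMA'd to the firmware:
		 * 12 bytes of LUN/control fields ahead of the CDB (16 bytes
		 * plus any additional CDB bytes) and a trailing 4-byte FCP_DL,
		 * e.g. 12 + 16 + 4 = 32 bytes for a plain 16-byte CDB.
		 */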
2364 		if (cmd->cmd_len > 16) {
2365 			additional_cdb_len = cmd->cmd_len - 16;
2366 			if ((cmd->cmd_len % 4) != 0) {
2367 				/* SCSI command bigger than 16 bytes must be
2368 				 * multiple of 4
2369 				 */
2370 				ql_log(ql_log_warn, vha, 0x3012,
2371 				    "scsi cmd len %d not multiple of 4 "
2372 				    "for cmd=%p.\n", cmd->cmd_len, cmd);
2373 				goto queuing_error_fcp_cmnd;
2374 			}
2375 			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2376 		} else {
2377 			additional_cdb_len = 0;
2378 			ctx->fcp_cmnd_len = 12 + 16 + 4;
2379 		}
2380 
2381 		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2382 		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2383 
2384 		/* Zero out remaining portion of packet. */
2385 		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2386 		clr_ptr = (uint32_t *)cmd_pkt + 2;
2387 		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2388 		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2389 
2390 		/* Set NPORT-ID and LUN number*/
2391 		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2392 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2393 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2394 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2395 		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2396 
2397 		/* Build IOCB segments */
2398 		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2399 			goto queuing_error_fcp_cmnd;
2400 
2401 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2402 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2403 
2404 		/* build FCP_CMND IU */
2405 		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2406 		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2407 		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2408 
2409 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
2410 			ctx->fcp_cmnd->additional_cdb_len |= 1;
2411 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2412 			ctx->fcp_cmnd->additional_cdb_len |= 2;
2413 
2414 		/*
2415 		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2416 		 */
2417 		if (scsi_populate_tag_msg(cmd, tag)) {
2418 			switch (tag[0]) {
2419 			case HEAD_OF_QUEUE_TAG:
2420 				ctx->fcp_cmnd->task_attribute =
2421 				    TSK_HEAD_OF_QUEUE;
2422 				break;
2423 			case ORDERED_QUEUE_TAG:
2424 				ctx->fcp_cmnd->task_attribute =
2425 				    TSK_ORDERED;
2426 				break;
2427 			}
2428 		}
2429 
2430 		/* Populate the FCP_PRIO. */
2431 		if (ha->flags.fcp_prio_enabled)
2432 			ctx->fcp_cmnd->task_attribute |=
2433 			    sp->fcport->fcp_prio << 3;
2434 
2435 		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2436 
2437 		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2438 		    additional_cdb_len);
2439 		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2440 
2441 		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2442 		cmd_pkt->fcp_cmnd_dseg_address[0] =
2443 		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2444 		cmd_pkt->fcp_cmnd_dseg_address[1] =
2445 		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2446 
2447 		sp->flags |= SRB_FCP_CMND_DMA_VALID;
2448 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2449 		/* Set total data segment count. */
2450 		cmd_pkt->entry_count = (uint8_t)req_cnt;
2451 		/* Specify response queue number where
2452 		 * completion should happen
2453 		 */
2454 		cmd_pkt->entry_status = (uint8_t) rsp->id;
2455 	} else {
2456 		struct cmd_type_7 *cmd_pkt;
2457 		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2458 		if (req->cnt < (req_cnt + 2)) {
2459 			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2460 			    &reg->req_q_out[0]);
2461 			if (req->ring_index < cnt)
2462 				req->cnt = cnt - req->ring_index;
2463 			else
2464 				req->cnt = req->length -
2465 					(req->ring_index - cnt);
2466 		}
2467 		if (req->cnt < (req_cnt + 2))
2468 			goto queuing_error;
2469 
2470 		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2471 		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2472 
2473 		/* Zero out remaining portion of packet. */
2474 		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2475 		clr_ptr = (uint32_t *)cmd_pkt + 2;
2476 		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2477 		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2478 
2479 		/* Set NPORT-ID and LUN number*/
2480 		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2481 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2482 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2483 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2484 		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2485 
2486 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2487 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2488 		    sizeof(cmd_pkt->lun));
2489 
2490 		/*
2491 		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2492 		 */
2493 		if (scsi_populate_tag_msg(cmd, tag)) {
2494 			switch (tag[0]) {
2495 			case HEAD_OF_QUEUE_TAG:
2496 				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2497 				break;
2498 			case ORDERED_QUEUE_TAG:
2499 				cmd_pkt->task = TSK_ORDERED;
2500 				break;
2501 			}
2502 		}
2503 
2504 		/* Populate the FCP_PRIO. */
2505 		if (ha->flags.fcp_prio_enabled)
2506 			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2507 
2508 		/* Load SCSI command packet. */
2509 		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2510 		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2511 
2512 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2513 
2514 		/* Build IOCB segments */
2515 		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2516 
2517 		/* Set total data segment count. */
2518 		cmd_pkt->entry_count = (uint8_t)req_cnt;
2519 		/* Specify response queue number where
2520 		 * completion should happen.
2521 		 */
2522 		cmd_pkt->entry_status = (uint8_t) rsp->id;
2523 
2524 	}
2525 	/* Build command packet. */
2526 	req->current_outstanding_cmd = handle;
2527 	req->outstanding_cmds[handle] = sp;
2528 	sp->handle = handle;
2529 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2530 	req->cnt -= req_cnt;
2531 	wmb();
2532 
2533 	/* Adjust ring index. */
2534 	req->ring_index++;
2535 	if (req->ring_index == req->length) {
2536 		req->ring_index = 0;
2537 		req->ring_ptr = req->ring;
2538 	} else
2539 		req->ring_ptr++;
2540 
2541 	sp->flags |= SRB_DMA_VALID;
2542 
2543 	/* Set chip new ring index. */
2544 	/* write, read and verify logic */
2545 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
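	/*
	 * The doorbell value now holds the command/port bits set up earlier
	 * in its low byte, the request queue id in bits 8-15 and the new ring
	 * index in bits 16-31; e.g. port 0, queue id 0, ring index 5 gives
	 * dbval == 0x00050004. Without ql2xdbwr the write is repeated until
	 * the read-back pointer matches it.
	 */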
2546 	if (ql2xdbwr)
2547 		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2548 	else {
2549 		WRT_REG_DWORD(
2550 			(unsigned long __iomem *)ha->nxdb_wr_ptr,
2551 			dbval);
2552 		wmb();
2553 		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2554 			WRT_REG_DWORD(
2555 				(unsigned long __iomem *)ha->nxdb_wr_ptr,
2556 				dbval);
2557 			wmb();
2558 		}
2559 	}
2560 
2561 	/* Manage unprocessed RIO/ZIO commands in response queue. */
2562 	if (vha->flags.process_response_queue &&
2563 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2564 		qla24xx_process_response_queue(vha, rsp);
2565 
2566 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2567 	return QLA_SUCCESS;
2568 
2569 queuing_error_fcp_cmnd:
2570 	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2571 queuing_error:
2572 	if (tot_dsds)
2573 		scsi_dma_unmap(cmd);
2574 
2575 	if (sp->u.scmd.ctx) {
2576 		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2577 		sp->u.scmd.ctx = NULL;
2578 	}
2579 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2580 
2581 	return QLA_FUNCTION_FAILED;
2582 }
2583 
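/**
 * qla2x00_start_sp() - Build and issue the IOCB for a control-path SRB.
 * @sp: SRB of type SRB_LOGIN_CMD, SRB_LOGOUT_CMD, SRB_ELS_CMD_*, SRB_CT_CMD,
 *	SRB_ADISC_CMD or SRB_TM_CMD
 *
 * Returns QLA_SUCCESS if the IOCB was queued, QLA_FUNCTION_FAILED otherwise.
 */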
2584 int
2585 qla2x00_start_sp(srb_t *sp)
2586 {
2587 	int rval;
2588 	struct qla_hw_data *ha = sp->fcport->vha->hw;
2589 	void *pkt;
2590 	unsigned long flags;
2591 
2592 	rval = QLA_FUNCTION_FAILED;
2593 	spin_lock_irqsave(&ha->hardware_lock, flags);
2594 	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2595 	if (!pkt) {
2596 		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2597 		    "qla2x00_alloc_iocbs failed.\n");
2598 		goto done;
2599 	}
2600 
2601 	rval = QLA_SUCCESS;
2602 	switch (sp->type) {
2603 	case SRB_LOGIN_CMD:
2604 		IS_FWI2_CAPABLE(ha) ?
2605 		    qla24xx_login_iocb(sp, pkt) :
2606 		    qla2x00_login_iocb(sp, pkt);
2607 		break;
2608 	case SRB_LOGOUT_CMD:
2609 		IS_FWI2_CAPABLE(ha) ?
2610 		    qla24xx_logout_iocb(sp, pkt) :
2611 		    qla2x00_logout_iocb(sp, pkt);
2612 		break;
2613 	case SRB_ELS_CMD_RPT:
2614 	case SRB_ELS_CMD_HST:
2615 		qla24xx_els_iocb(sp, pkt);
2616 		break;
2617 	case SRB_CT_CMD:
2618 		IS_FWI2_CAPABLE(ha) ?
2619 		    qla24xx_ct_iocb(sp, pkt) :
2620 		    qla2x00_ct_iocb(sp, pkt);
2621 		break;
2622 	case SRB_ADISC_CMD:
2623 		IS_FWI2_CAPABLE(ha) ?
2624 		    qla24xx_adisc_iocb(sp, pkt) :
2625 		    qla2x00_adisc_iocb(sp, pkt);
2626 		break;
2627 	case SRB_TM_CMD:
2628 		qla24xx_tm_iocb(sp, pkt);
2629 		break;
2630 	default:
2631 		break;
2632 	}
2633 
2634 	wmb();
2635 	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2636 done:
2637 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2638 	return rval;
2639 }
2640 
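/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB.
 * @sp: SRB carrying the bsg_job
 * @vha: virtual host the command is issued on
 * @cmd_pkt: request-ring entry to fill in
 * @tot_dsds: total number of data segment descriptors for the transfer
 */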
2641 static void
2642 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2643 				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2644 {
2645 	uint16_t avail_dsds;
2646 	uint32_t *cur_dsd;
2647 	uint32_t req_data_len = 0;
2648 	uint32_t rsp_data_len = 0;
2649 	struct scatterlist *sg;
2650 	int index;
2651 	int entry_count = 1;
2652 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2653 
2654 	/*Update entry type to indicate bidir command */
2655 	*((uint32_t *)(&cmd_pkt->entry_type)) =
2656 		__constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2657 
2658 	/* Set the transfer direction; in this case set both flags.
2659 	 * Also set the BD_WRAP_BACK flag; the firmware takes care of
2660 	 * assigning DID=SID for outgoing pkts.
2661 	 */
2662 	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2663 	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2664 	cmd_pkt->control_flags =
2665 			__constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2666 							BD_WRAP_BACK);
2667 
2668 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2669 	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2670 	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2671 	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2672 
2673 	vha->bidi_stats.transfer_bytes += req_data_len;
2674 	vha->bidi_stats.io_count++;
2675 
2676 	/* Only one DSD is available in the bidirectional IOCB; the remaining
2677 	 * DSDs are bundled in continuation IOCBs.
2678 	 */
2679 	avail_dsds = 1;
2680 	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2681 
2682 	index = 0;
2683 
2684 	for_each_sg(bsg_job->request_payload.sg_list, sg,
2685 				bsg_job->request_payload.sg_cnt, index) {
2686 		dma_addr_t sle_dma;
2687 		cont_a64_entry_t *cont_pkt;
2688 
2689 		/* Allocate additional continuation packets */
2690 		if (avail_dsds == 0) {
2691 			/* A Continuation Type 1 IOCB can accommodate
2692 			 * 5 DSDs.
2693 			 */
2694 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2695 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2696 			avail_dsds = 5;
2697 			entry_count++;
2698 		}
2699 		sle_dma = sg_dma_address(sg);
2700 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2701 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2702 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2703 		avail_dsds--;
2704 	}
2705 	/* For the read request the DSDs always go to a continuation IOCB
2706 	 * and follow the write DSDs. If there is room on the current IOCB
2707 	 * they are added to it, otherwise a new continuation IOCB is
2708 	 * allocated.
2709 	 */
2710 	for_each_sg(bsg_job->reply_payload.sg_list, sg,
2711 				bsg_job->reply_payload.sg_cnt, index) {
2712 		dma_addr_t sle_dma;
2713 		cont_a64_entry_t *cont_pkt;
2714 
2715 		/* Allocate additional continuation packets */
2716 		if (avail_dsds == 0) {
2717 			/* A Continuation Type 1 IOCB can accommodate
2718 			 * 5 DSDs.
2719 			 */
2720 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2721 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2722 			avail_dsds = 5;
2723 			entry_count++;
2724 		}
2725 		sle_dma = sg_dma_address(sg);
2726 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2727 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2728 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2729 		avail_dsds--;
2730 	}
2731 	/* This value should match the number of IOCBs required for this cmd. */
2732 	cmd_pkt->entry_count = entry_count;
2733 }
2734 
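/**
 * qla2x00_start_bidir() - Queue a bidirectional (loopback) command.
 * @sp: SRB carrying the bsg_job
 * @vha: virtual host issuing the command
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_OK when the command has been queued, EXT_STATUS_MAILBOX
 * if the marker could not be sent, or EXT_STATUS_BUSY if no handle or ring
 * space is available.
 */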
2735 int
2736 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2737 {
2738 
2739 	struct qla_hw_data *ha = vha->hw;
2740 	unsigned long flags;
2741 	uint32_t handle;
2742 	uint32_t index;
2743 	uint16_t req_cnt;
2744 	uint16_t cnt;
2745 	uint32_t *clr_ptr;
2746 	struct cmd_bidir *cmd_pkt = NULL;
2747 	struct rsp_que *rsp;
2748 	struct req_que *req;
2749 	int rval = EXT_STATUS_OK;
2750 
2751 	rval = QLA_SUCCESS;
2752 
2753 	rsp = ha->rsp_q_map[0];
2754 	req = vha->req;
2755 
2756 	/* Send marker if required */
2757 	if (vha->marker_needed != 0) {
2758 		if (qla2x00_marker(vha, req,
2759 			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2760 			return EXT_STATUS_MAILBOX;
2761 		vha->marker_needed = 0;
2762 	}
2763 
2764 	/* Acquire ring specific lock */
2765 	spin_lock_irqsave(&ha->hardware_lock, flags);
2766 
2767 	/* Check for room in outstanding command list. */
2768 	handle = req->current_outstanding_cmd;
2769 	for (index = 1; index < req->num_outstanding_cmds; index++) {
2770 		handle++;
2771 		if (handle == req->num_outstanding_cmds)
2772 			handle = 1;
2773 		if (!req->outstanding_cmds[handle])
2774 			break;
2775 	}
2776 
2777 	if (index == req->num_outstanding_cmds) {
2778 		rval = EXT_STATUS_BUSY;
2779 		goto queuing_error;
2780 	}
2781 
2782 	/* Calculate number of IOCB required */
2783 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2784 
2785 	/* Check for room on request queue. */
2786 	if (req->cnt < req_cnt + 2) {
2787 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
2788 
2789 		if (req->ring_index < cnt)
2790 			req->cnt = cnt - req->ring_index;
2791 		else
2792 			req->cnt = req->length -
2793 				(req->ring_index - cnt);
2794 	}
2795 	if (req->cnt < req_cnt + 2) {
2796 		rval = EXT_STATUS_BUSY;
2797 		goto queuing_error;
2798 	}
2799 
2800 	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2801 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2802 
2803 	/* Zero out remaining portion of packet. */
2804 	/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2805 	clr_ptr = (uint32_t *)cmd_pkt + 2;
2806 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2807 
2808 	/* Set NPORT-ID  (of vha)*/
2809 	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2810 	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2811 	cmd_pkt->port_id[1] = vha->d_id.b.area;
2812 	cmd_pkt->port_id[2] = vha->d_id.b.domain;
2813 
2814 	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2815 	cmd_pkt->entry_status = (uint8_t) rsp->id;
2816 	/* Build command packet. */
2817 	req->current_outstanding_cmd = handle;
2818 	req->outstanding_cmds[handle] = sp;
2819 	sp->handle = handle;
2820 	req->cnt -= req_cnt;
2821 
2822 	/* Send the command to the firmware */
2823 	wmb();
2824 	qla2x00_start_iocbs(vha, req);
2825 queuing_error:
2826 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2827 	return rval;
2828 }
2829