xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_iocb.c (revision f7018c21)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2013 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12 
13 #include <scsi/scsi_tcq.h>
14 
15 static void qla25xx_set_que(srb_t *, struct rsp_que **);
16 /**
17  * qla2x00_get_cmd_direction() - Determine control_flags data direction.
18  * @sp: SRB command to process
19  *
20  * Returns the proper CF_* direction based on CDB.
21  */
22 static inline uint16_t
23 qla2x00_get_cmd_direction(srb_t *sp)
24 {
25 	uint16_t cflags;
26 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27 	struct scsi_qla_host *vha = sp->fcport->vha;
28 
29 	cflags = 0;
30 
31 	/* Set transfer direction */
32 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
33 		cflags = CF_WRITE;
34 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
35 		vha->qla_stats.output_requests++;
36 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
37 		cflags = CF_READ;
38 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
39 		vha->qla_stats.input_requests++;
40 	}
41 	return (cflags);
42 }
43 
44 /**
45  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
46  * Continuation Type 0 IOCBs to allocate.
47  *
48  * @dsds: number of data segment descriptors needed
49  *
50  * Returns the number of IOCB entries needed to store @dsds.
51  */
52 uint16_t
53 qla2x00_calc_iocbs_32(uint16_t dsds)
54 {
55 	uint16_t iocbs;
56 
57 	iocbs = 1;
58 	if (dsds > 3) {
59 		iocbs += (dsds - 3) / 7;
60 		if ((dsds - 3) % 7)
61 			iocbs++;
62 	}
63 	return (iocbs);
64 }
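/*
 * Worked example: the Command Type 2 IOCB itself carries up to three DSDs
 * and each Continuation Type 0 IOCB carries seven more, so dsds = 3 fits in
 * a single IOCB, dsds = 10 needs two (3 + 7), and dsds = 11 needs three.
 */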
65 
66 /**
67  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
68  * Continuation Type 1 IOCBs to allocate.
69  *
70  * @dsds: number of data segment descriptors needed
71  *
72  * Returns the number of IOCB entries needed to store @dsds.
73  */
74 uint16_t
75 qla2x00_calc_iocbs_64(uint16_t dsds)
76 {
77 	uint16_t iocbs;
78 
79 	iocbs = 1;
80 	if (dsds > 2) {
81 		iocbs += (dsds - 2) / 5;
82 		if ((dsds - 2) % 5)
83 			iocbs++;
84 	}
85 	return (iocbs);
86 }
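/*
 * Worked example: the Command Type 3 IOCB carries up to two DSDs and each
 * Continuation Type 1 IOCB carries five more, so dsds = 2 fits in a single
 * IOCB, dsds = 7 needs two (2 + 5), and dsds = 8 needs three.
 */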
87 
88 /**
89  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
90  * @vha: HA context
91  *
92  * Returns a pointer to the Continuation Type 0 IOCB packet.
93  */
94 static inline cont_entry_t *
95 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
96 {
97 	cont_entry_t *cont_pkt;
98 	struct req_que *req = vha->req;
99 	/* Adjust ring index. */
100 	req->ring_index++;
101 	if (req->ring_index == req->length) {
102 		req->ring_index = 0;
103 		req->ring_ptr = req->ring;
104 	} else {
105 		req->ring_ptr++;
106 	}
107 
108 	cont_pkt = (cont_entry_t *)req->ring_ptr;
109 
110 	/* Load packet defaults. */
111 	*((uint32_t *)(&cont_pkt->entry_type)) =
112 	    __constant_cpu_to_le32(CONTINUE_TYPE);
113 
114 	return (cont_pkt);
115 }
116 
117 /**
118  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
119  * @vha: HA context
120  *
121  * Returns a pointer to the Continuation Type 1 IOCB packet.
122  */
123 static inline cont_a64_entry_t *
124 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
125 {
126 	cont_a64_entry_t *cont_pkt;
127 
128 	/* Adjust ring index. */
129 	req->ring_index++;
130 	if (req->ring_index == req->length) {
131 		req->ring_index = 0;
132 		req->ring_ptr = req->ring;
133 	} else {
134 		req->ring_ptr++;
135 	}
136 
137 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
138 
139 	/* Load packet defaults. */
140 	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
141 	    __constant_cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
142 	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);
143 
144 	return (cont_pkt);
145 }
146 
147 static inline int
148 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
149 {
150 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
151 	uint8_t	guard = scsi_host_get_guard(cmd->device->host);
152 
153 	/* We always use DIF bundling for best performance */
154 	*fw_prot_opts = 0;
155 
156 	/* Translate SCSI opcode to a protection opcode */
157 	switch (scsi_get_prot_op(cmd)) {
158 	case SCSI_PROT_READ_STRIP:
159 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
160 		break;
161 	case SCSI_PROT_WRITE_INSERT:
162 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
163 		break;
164 	case SCSI_PROT_READ_INSERT:
165 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
166 		break;
167 	case SCSI_PROT_WRITE_STRIP:
168 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
169 		break;
170 	case SCSI_PROT_READ_PASS:
171 	case SCSI_PROT_WRITE_PASS:
172 		if (guard & SHOST_DIX_GUARD_IP)
173 			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
174 		else
175 			*fw_prot_opts |= PO_MODE_DIF_PASS;
176 		break;
177 	default:	/* Normal Request */
178 		*fw_prot_opts |= PO_MODE_DIF_PASS;
179 		break;
180 	}
181 
182 	return scsi_prot_sg_count(cmd);
183 }
184 
185 /*
186  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
187  * capable IOCB types.
188  *
189  * @sp: SRB command to process
190  * @cmd_pkt: Command type 2 IOCB
191  * @tot_dsds: Total number of segments to transfer
192  */
193 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
194     uint16_t tot_dsds)
195 {
196 	uint16_t	avail_dsds;
197 	uint32_t	*cur_dsd;
198 	scsi_qla_host_t	*vha;
199 	struct scsi_cmnd *cmd;
200 	struct scatterlist *sg;
201 	int i;
202 
203 	cmd = GET_CMD_SP(sp);
204 
205 	/* Update entry type to indicate Command Type 2 IOCB */
206 	*((uint32_t *)(&cmd_pkt->entry_type)) =
207 	    __constant_cpu_to_le32(COMMAND_TYPE);
208 
209 	/* No data transfer */
210 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
211 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
212 		return;
213 	}
214 
215 	vha = sp->fcport->vha;
216 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
217 
218 	/* Three DSDs are available in the Command Type 2 IOCB */
219 	avail_dsds = 3;
220 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
221 
222 	/* Load data segments */
223 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
224 		cont_entry_t *cont_pkt;
225 
226 		/* Allocate additional continuation packets? */
227 		if (avail_dsds == 0) {
228 			/*
229 			 * Seven DSDs are available in the Continuation
230 			 * Type 0 IOCB.
231 			 */
232 			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
233 			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
234 			avail_dsds = 7;
235 		}
236 
237 		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
238 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
239 		avail_dsds--;
240 	}
241 }
242 
243 /**
244  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
245  * capable IOCB types.
246  *
247  * @sp: SRB command to process
248  * @cmd_pkt: Command type 3 IOCB
249  * @tot_dsds: Total number of segments to transfer
250  */
251 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
252     uint16_t tot_dsds)
253 {
254 	uint16_t	avail_dsds;
255 	uint32_t	*cur_dsd;
256 	scsi_qla_host_t	*vha;
257 	struct scsi_cmnd *cmd;
258 	struct scatterlist *sg;
259 	int i;
260 
261 	cmd = GET_CMD_SP(sp);
262 
263 	/* Update entry type to indicate Command Type 3 IOCB */
264 	*((uint32_t *)(&cmd_pkt->entry_type)) =
265 	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
266 
267 	/* No data transfer */
268 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
269 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
270 		return;
271 	}
272 
273 	vha = sp->fcport->vha;
274 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
275 
276 	/* Two DSDs are available in the Command Type 3 IOCB */
277 	avail_dsds = 2;
278 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
279 
280 	/* Load data segments */
281 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
282 		dma_addr_t	sle_dma;
283 		cont_a64_entry_t *cont_pkt;
284 
285 		/* Allocate additional continuation packets? */
286 		if (avail_dsds == 0) {
287 			/*
288 			 * Five DSDs are available in the Continuation
289 			 * Type 1 IOCB.
290 			 */
291 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
292 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
293 			avail_dsds = 5;
294 		}
295 
296 		sle_dma = sg_dma_address(sg);
297 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
298 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
299 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
300 		avail_dsds--;
301 	}
302 }
303 
304 /**
305  * qla2x00_start_scsi() - Send a SCSI command to the ISP
306  * @sp: command to send to the ISP
307  *
308  * Returns non-zero if a failure occurred, else zero.
309  */
310 int
311 qla2x00_start_scsi(srb_t *sp)
312 {
313 	int		ret, nseg;
314 	unsigned long   flags;
315 	scsi_qla_host_t	*vha;
316 	struct scsi_cmnd *cmd;
317 	uint32_t	*clr_ptr;
318 	uint32_t        index;
319 	uint32_t	handle;
320 	cmd_entry_t	*cmd_pkt;
321 	uint16_t	cnt;
322 	uint16_t	req_cnt;
323 	uint16_t	tot_dsds;
324 	struct device_reg_2xxx __iomem *reg;
325 	struct qla_hw_data *ha;
326 	struct req_que *req;
327 	struct rsp_que *rsp;
328 	char		tag[2];
329 
330 	/* Setup device pointers. */
331 	ret = 0;
332 	vha = sp->fcport->vha;
333 	ha = vha->hw;
334 	reg = &ha->iobase->isp;
335 	cmd = GET_CMD_SP(sp);
336 	req = ha->req_q_map[0];
337 	rsp = ha->rsp_q_map[0];
338 	/* So we know we haven't pci_map'ed anything yet */
339 	tot_dsds = 0;
340 
341 	/* Send marker if required */
342 	if (vha->marker_needed != 0) {
343 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
344 		    QLA_SUCCESS) {
345 			return (QLA_FUNCTION_FAILED);
346 		}
347 		vha->marker_needed = 0;
348 	}
349 
350 	/* Acquire ring specific lock */
351 	spin_lock_irqsave(&ha->hardware_lock, flags);
352 
353 	/* Check for room in outstanding command list. */
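	/*
	 * The search starts just past the most recently issued handle and
	 * wraps back to 1, never 0, so slot 0 of outstanding_cmds is never
	 * handed out, presumably leaving 0 free as an invalid-handle value.
	 */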
354 	handle = req->current_outstanding_cmd;
355 	for (index = 1; index < req->num_outstanding_cmds; index++) {
356 		handle++;
357 		if (handle == req->num_outstanding_cmds)
358 			handle = 1;
359 		if (!req->outstanding_cmds[handle])
360 			break;
361 	}
362 	if (index == req->num_outstanding_cmds)
363 		goto queuing_error;
364 
365 	/* Map the sg table so we have an accurate count of sg entries needed */
366 	if (scsi_sg_count(cmd)) {
367 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
368 		    scsi_sg_count(cmd), cmd->sc_data_direction);
369 		if (unlikely(!nseg))
370 			goto queuing_error;
371 	} else
372 		nseg = 0;
373 
374 	tot_dsds = nseg;
375 
376 	/* Calculate the number of request entries needed. */
377 	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
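	/*
	 * If the cached free-entry count looks too small, re-read the
	 * firmware's request-out pointer and recompute the free space:
	 * out - in when out is ahead of in, otherwise length - (in - out).
	 * E.g. with length 128, ring_index 120 and an out pointer of 10,
	 * 18 entries remain free.
	 */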
378 	if (req->cnt < (req_cnt + 2)) {
379 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
380 		if (req->ring_index < cnt)
381 			req->cnt = cnt - req->ring_index;
382 		else
383 			req->cnt = req->length -
384 			    (req->ring_index - cnt);
385 		/* If there is still no headroom then bail out */
386 		if (req->cnt < (req_cnt + 2))
387 			goto queuing_error;
388 	}
389 
390 	/* Build command packet */
391 	req->current_outstanding_cmd = handle;
392 	req->outstanding_cmds[handle] = sp;
393 	sp->handle = handle;
394 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
395 	req->cnt -= req_cnt;
396 
397 	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
398 	cmd_pkt->handle = handle;
399 	/* Zero out remaining portion of packet. */
400 	clr_ptr = (uint32_t *)cmd_pkt + 2;
401 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
402 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
403 
404 	/* Set target ID and LUN number*/
405 	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
406 	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
407 
408 	/* Update tagged queuing modifier */
409 	if (scsi_populate_tag_msg(cmd, tag)) {
410 		switch (tag[0]) {
411 		case HEAD_OF_QUEUE_TAG:
412 			cmd_pkt->control_flags =
413 			    __constant_cpu_to_le16(CF_HEAD_TAG);
414 			break;
415 		case ORDERED_QUEUE_TAG:
416 			cmd_pkt->control_flags =
417 			    __constant_cpu_to_le16(CF_ORDERED_TAG);
418 			break;
419 		default:
420 			cmd_pkt->control_flags =
421 			    __constant_cpu_to_le16(CF_SIMPLE_TAG);
422 			break;
423 		}
424 	} else {
425 		cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
426 	}
427 
428 	/* Load SCSI command packet. */
429 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
430 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
431 
432 	/* Build IOCB segments */
433 	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
434 
435 	/* Set total data segment count. */
436 	cmd_pkt->entry_count = (uint8_t)req_cnt;
437 	wmb();
438 
439 	/* Adjust ring index. */
440 	req->ring_index++;
441 	if (req->ring_index == req->length) {
442 		req->ring_index = 0;
443 		req->ring_ptr = req->ring;
444 	} else
445 		req->ring_ptr++;
446 
447 	sp->flags |= SRB_DMA_VALID;
448 
449 	/* Set chip new ring index. */
450 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
451 	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
452 
453 	/* Manage unprocessed RIO/ZIO commands in response queue. */
454 	if (vha->flags.process_response_queue &&
455 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
456 		qla2x00_process_response_queue(rsp);
457 
458 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
459 	return (QLA_SUCCESS);
460 
461 queuing_error:
462 	if (tot_dsds)
463 		scsi_dma_unmap(cmd);
464 
465 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
466 
467 	return (QLA_FUNCTION_FAILED);
468 }
469 
470 /**
471  * qla2x00_start_iocbs() - Execute the IOCB command
472  */
473 void
474 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
475 {
476 	struct qla_hw_data *ha = vha->hw;
477 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
478 
479 	if (IS_P3P_TYPE(ha)) {
480 		qla82xx_start_iocbs(vha);
481 	} else {
482 		/* Adjust ring index. */
483 		req->ring_index++;
484 		if (req->ring_index == req->length) {
485 			req->ring_index = 0;
486 			req->ring_ptr = req->ring;
487 		} else
488 			req->ring_ptr++;
489 
490 		/* Set chip new ring index. */
491 		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
492 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
493 			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
494 		} else if (IS_QLAFX00(ha)) {
495 			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
496 			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
497 			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
498 		} else if (IS_FWI2_CAPABLE(ha)) {
499 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
500 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
501 		} else {
502 			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
503 				req->ring_index);
504 			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
505 		}
506 	}
507 }
508 
509 /**
510  * qla2x00_marker() - Send a marker IOCB to the firmware.
511  * @vha: HA context
512  * @loop_id: loop ID
513  * @lun: LUN
514  * @type: marker modifier
515  *
516  * Can be called from both normal and interrupt context.
517  *
518  * Returns non-zero if a failure occurred, else zero.
519  */
520 static int
521 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
522 			struct rsp_que *rsp, uint16_t loop_id,
523 			uint16_t lun, uint8_t type)
524 {
525 	mrk_entry_t *mrk;
526 	struct mrk_entry_24xx *mrk24 = NULL;
527 
528 	struct qla_hw_data *ha = vha->hw;
529 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
530 
531 	req = ha->req_q_map[0];
532 	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
533 	if (mrk == NULL) {
534 		ql_log(ql_log_warn, base_vha, 0x3026,
535 		    "Failed to allocate Marker IOCB.\n");
536 
537 		return (QLA_FUNCTION_FAILED);
538 	}
539 
540 	mrk->entry_type = MARKER_TYPE;
541 	mrk->modifier = type;
542 	if (type != MK_SYNC_ALL) {
543 		if (IS_FWI2_CAPABLE(ha)) {
544 			mrk24 = (struct mrk_entry_24xx *) mrk;
545 			mrk24->nport_handle = cpu_to_le16(loop_id);
546 			mrk24->lun[1] = LSB(lun);
547 			mrk24->lun[2] = MSB(lun);
548 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
549 			mrk24->vp_index = vha->vp_idx;
550 			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
551 		} else {
552 			SET_TARGET_ID(ha, mrk->target, loop_id);
553 			mrk->lun = cpu_to_le16(lun);
554 		}
555 	}
556 	wmb();
557 
558 	qla2x00_start_iocbs(vha, req);
559 
560 	return (QLA_SUCCESS);
561 }
562 
563 int
564 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
565 		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
566 		uint8_t type)
567 {
568 	int ret;
569 	unsigned long flags = 0;
570 
571 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
572 	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
573 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
574 
575 	return (ret);
576 }
577 
578 /*
579  * qla2x00_issue_marker
580  *
581  * Issue marker
582  * Caller CAN have hardware lock held as specified by ha_locked parameter.
583  * Might release it, then reacquire.
584  */
585 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
586 {
587 	if (ha_locked) {
588 		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
589 					MK_SYNC_ALL) != QLA_SUCCESS)
590 			return QLA_FUNCTION_FAILED;
591 	} else {
592 		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
593 					MK_SYNC_ALL) != QLA_SUCCESS)
594 			return QLA_FUNCTION_FAILED;
595 	}
596 	vha->marker_needed = 0;
597 
598 	return QLA_SUCCESS;
599 }
600 
601 static inline int
602 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
603 	uint16_t tot_dsds)
604 {
605 	uint32_t *cur_dsd = NULL;
606 	scsi_qla_host_t	*vha;
607 	struct qla_hw_data *ha;
608 	struct scsi_cmnd *cmd;
609 	struct	scatterlist *cur_seg;
610 	uint32_t *dsd_seg;
611 	void *next_dsd;
612 	uint8_t avail_dsds;
613 	uint8_t first_iocb = 1;
614 	uint32_t dsd_list_len;
615 	struct dsd_dma *dsd_ptr;
616 	struct ct6_dsd *ctx;
617 
618 	cmd = GET_CMD_SP(sp);
619 
620 	/* Update entry type to indicate Command Type 6 IOCB */
621 	*((uint32_t *)(&cmd_pkt->entry_type)) =
622 		__constant_cpu_to_le32(COMMAND_TYPE_6);
623 
624 	/* No data transfer */
625 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
626 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
627 		return 0;
628 	}
629 
630 	vha = sp->fcport->vha;
631 	ha = vha->hw;
632 
633 	/* Set transfer direction */
634 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
635 		cmd_pkt->control_flags =
636 		    __constant_cpu_to_le16(CF_WRITE_DATA);
637 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
638 		vha->qla_stats.output_requests++;
639 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
640 		cmd_pkt->control_flags =
641 		    __constant_cpu_to_le16(CF_READ_DATA);
642 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
643 		vha->qla_stats.input_requests++;
644 	}
645 
646 	cur_seg = scsi_sglist(cmd);
647 	ctx = GET_CMD_CTX_SP(sp);
648 
649 	while (tot_dsds) {
650 		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
651 		    QLA_DSDS_PER_IOCB : tot_dsds;
652 		tot_dsds -= avail_dsds;
653 		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
654 
655 		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
656 		    struct dsd_dma, list);
657 		next_dsd = dsd_ptr->dsd_addr;
658 		list_del(&dsd_ptr->list);
659 		ha->gbl_dsd_avail--;
660 		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
661 		ctx->dsd_use_cnt++;
662 		ha->gbl_dsd_inuse++;
663 
664 		if (first_iocb) {
665 			first_iocb = 0;
666 			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
667 			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
668 			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
669 			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
670 		} else {
671 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
672 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
673 			*cur_dsd++ = cpu_to_le32(dsd_list_len);
674 		}
675 		cur_dsd = (uint32_t *)next_dsd;
676 		while (avail_dsds) {
677 			dma_addr_t	sle_dma;
678 
679 			sle_dma = sg_dma_address(cur_seg);
680 			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
681 			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
682 			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
683 			cur_seg = sg_next(cur_seg);
684 			avail_dsds--;
685 		}
686 	}
687 
688 	/* Null termination */
689 	*cur_dsd++ =  0;
690 	*cur_dsd++ = 0;
691 	*cur_dsd++ = 0;
692 	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
693 	return 0;
694 }
695 
696 /*
697  * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
698  * for Command Type 6.
699  *
700  * @dsds: number of data segment descriptors needed
701  *
702  * Returns the number of DSD lists needed to store @dsds.
703  */
704 inline uint16_t
705 qla24xx_calc_dsd_lists(uint16_t dsds)
706 {
707 	uint16_t dsd_lists = 0;
708 
709 	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
710 	if (dsds % QLA_DSDS_PER_IOCB)
711 		dsd_lists++;
712 	return dsd_lists;
713 }
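/*
 * Worked example, assuming QLA_DSDS_PER_IOCB is 37 (its value in qla_def.h):
 * dsds = 37 needs a single DSD list, while dsds = 40 needs two.
 */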
714 
715 
716 /**
717  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
718  * IOCB types.
719  *
720  * @sp: SRB command to process
721  * @cmd_pkt: Command Type 7 IOCB
722  * @tot_dsds: Total number of segments to transfer
723  */
724 inline void
725 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
726     uint16_t tot_dsds)
727 {
728 	uint16_t	avail_dsds;
729 	uint32_t	*cur_dsd;
730 	scsi_qla_host_t	*vha;
731 	struct scsi_cmnd *cmd;
732 	struct scatterlist *sg;
733 	int i;
734 	struct req_que *req;
735 
736 	cmd = GET_CMD_SP(sp);
737 
738 	/* Update entry type to indicate Command Type 7 IOCB */
739 	*((uint32_t *)(&cmd_pkt->entry_type)) =
740 	    __constant_cpu_to_le32(COMMAND_TYPE_7);
741 
742 	/* No data transfer */
743 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
744 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
745 		return;
746 	}
747 
748 	vha = sp->fcport->vha;
749 	req = vha->req;
750 
751 	/* Set transfer direction */
752 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
753 		cmd_pkt->task_mgmt_flags =
754 		    __constant_cpu_to_le16(TMF_WRITE_DATA);
755 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
756 		vha->qla_stats.output_requests++;
757 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
758 		cmd_pkt->task_mgmt_flags =
759 		    __constant_cpu_to_le16(TMF_READ_DATA);
760 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
761 		vha->qla_stats.input_requests++;
762 	}
763 
764 	/* One DSD is available in the Command Type 7 IOCB */
765 	avail_dsds = 1;
766 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
767 
768 	/* Load data segments */
769 
770 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
771 		dma_addr_t	sle_dma;
772 		cont_a64_entry_t *cont_pkt;
773 
774 		/* Allocate additional continuation packets? */
775 		if (avail_dsds == 0) {
776 			/*
777 			 * Five DSDs are available in the Continuation
778 			 * Type 1 IOCB.
779 			 */
780 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
781 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
782 			avail_dsds = 5;
783 		}
784 
785 		sle_dma = sg_dma_address(sg);
786 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
787 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
788 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
789 		avail_dsds--;
790 	}
791 }
792 
793 struct fw_dif_context {
794 	uint32_t ref_tag;
795 	uint16_t app_tag;
796 	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
797 	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
798 };
799 
800 /*
801  * qla24xx_set_t10dif_tags() - Extract Ref and App tags from SCSI command
802  *
803  */
804 static inline void
805 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
806     unsigned int protcnt)
807 {
808 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
809 
810 	switch (scsi_get_prot_type(cmd)) {
811 	case SCSI_PROT_DIF_TYPE0:
812 		/*
813 		 * No check for ql2xenablehba_err_chk, as it would be an
814 		 * I/O error if hba tag generation is not done.
815 		 */
816 		pkt->ref_tag = cpu_to_le32((uint32_t)
817 		    (0xffffffff & scsi_get_lba(cmd)));
818 
819 		if (!qla2x00_hba_err_chk_enabled(sp))
820 			break;
821 
822 		pkt->ref_tag_mask[0] = 0xff;
823 		pkt->ref_tag_mask[1] = 0xff;
824 		pkt->ref_tag_mask[2] = 0xff;
825 		pkt->ref_tag_mask[3] = 0xff;
826 		break;
827 
828 	/*
829 	 * For Type 2 protection: 16 bit GUARD tag, plus a 32 bit REF tag
830 	 * that has to match the LBA in the CDB + N
831 	 */
832 	case SCSI_PROT_DIF_TYPE2:
833 		pkt->app_tag = __constant_cpu_to_le16(0);
834 		pkt->app_tag_mask[0] = 0x0;
835 		pkt->app_tag_mask[1] = 0x0;
836 
837 		pkt->ref_tag = cpu_to_le32((uint32_t)
838 		    (0xffffffff & scsi_get_lba(cmd)));
839 
840 		if (!qla2x00_hba_err_chk_enabled(sp))
841 			break;
842 
843 		/* enable ALL bytes of the ref tag */
844 		pkt->ref_tag_mask[0] = 0xff;
845 		pkt->ref_tag_mask[1] = 0xff;
846 		pkt->ref_tag_mask[2] = 0xff;
847 		pkt->ref_tag_mask[3] = 0xff;
848 		break;
849 
850 	/* For Type 3 protection: 16 bit GUARD only */
851 	case SCSI_PROT_DIF_TYPE3:
852 		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
853 			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
854 								0x00;
855 		break;
856 
857 	/*
858 	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
859 	 * 16 bit app tag.
860 	 */
861 	case SCSI_PROT_DIF_TYPE1:
862 		pkt->ref_tag = cpu_to_le32((uint32_t)
863 		    (0xffffffff & scsi_get_lba(cmd)));
864 		pkt->app_tag = __constant_cpu_to_le16(0);
865 		pkt->app_tag_mask[0] = 0x0;
866 		pkt->app_tag_mask[1] = 0x0;
867 
868 		if (!qla2x00_hba_err_chk_enabled(sp))
869 			break;
870 
871 		/* enable ALL bytes of the ref tag */
872 		pkt->ref_tag_mask[0] = 0xff;
873 		pkt->ref_tag_mask[1] = 0xff;
874 		pkt->ref_tag_mask[2] = 0xff;
875 		pkt->ref_tag_mask[3] = 0xff;
876 		break;
877 	}
878 }
879 
880 struct qla2_sgx {
881 	dma_addr_t		dma_addr;	/* OUT */
882 	uint32_t		dma_len;	/* OUT */
883 
884 	uint32_t		tot_bytes;	/* IN */
885 	struct scatterlist	*cur_sg;	/* IN */
886 
887 	/* for bookkeeping, bzero on initial invocation */
888 	uint32_t		bytes_consumed;
889 	uint32_t		num_bytes;
890 	uint32_t		tot_partial;
891 
892 	/* for debugging */
893 	uint32_t		num_sg;
894 	srb_t			*sp;
895 };
896 
897 static int
898 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
899 	uint32_t *partial)
900 {
901 	struct scatterlist *sg;
902 	uint32_t cumulative_partial, sg_len;
903 	dma_addr_t sg_dma_addr;
904 
905 	if (sgx->num_bytes == sgx->tot_bytes)
906 		return 0;
907 
908 	sg = sgx->cur_sg;
909 	cumulative_partial = sgx->tot_partial;
910 
911 	sg_dma_addr = sg_dma_address(sg);
912 	sg_len = sg_dma_len(sg);
913 
914 	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
915 
916 	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
917 		sgx->dma_len = (blk_sz - cumulative_partial);
918 		sgx->tot_partial = 0;
919 		sgx->num_bytes += blk_sz;
920 		*partial = 0;
921 	} else {
922 		sgx->dma_len = sg_len - sgx->bytes_consumed;
923 		sgx->tot_partial += sgx->dma_len;
924 		*partial = 1;
925 	}
926 
927 	sgx->bytes_consumed += sgx->dma_len;
928 
929 	if (sg_len == sgx->bytes_consumed) {
930 		sg = sg_next(sg);
931 		sgx->num_sg++;
932 		sgx->cur_sg = sg;
933 		sgx->bytes_consumed = 0;
934 	}
935 
936 	return 1;
937 }
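/*
 * Example of how the helper above walks the data scatterlist in
 * protection-interval-sized chunks: with blk_sz = 512 and SG elements of
 * 700 and 324 bytes, the first call returns a full 512-byte chunk, the
 * second returns the remaining 188 bytes of element one as a partial chunk,
 * and the third returns the 324 bytes of element two, completing the
 * interval (188 + 324 = 512).
 */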
938 
939 static int
940 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
941 	uint32_t *dsd, uint16_t tot_dsds)
942 {
943 	void *next_dsd;
944 	uint8_t avail_dsds = 0;
945 	uint32_t dsd_list_len;
946 	struct dsd_dma *dsd_ptr;
947 	struct scatterlist *sg_prot;
948 	uint32_t *cur_dsd = dsd;
949 	uint16_t	used_dsds = tot_dsds;
950 
951 	uint32_t	prot_int;
952 	uint32_t	partial;
953 	struct qla2_sgx sgx;
954 	dma_addr_t	sle_dma;
955 	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
956 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
957 
958 	prot_int = cmd->device->sector_size;
959 
960 	memset(&sgx, 0, sizeof(struct qla2_sgx));
961 	sgx.tot_bytes = scsi_bufflen(cmd);
962 	sgx.cur_sg = scsi_sglist(cmd);
963 	sgx.sp = sp;
964 
965 	sg_prot = scsi_prot_sglist(cmd);
966 
967 	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
968 
969 		sle_dma = sgx.dma_addr;
970 		sle_dma_len = sgx.dma_len;
971 alloc_and_fill:
972 		/* Allocate additional continuation packets? */
973 		if (avail_dsds == 0) {
974 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
975 					QLA_DSDS_PER_IOCB : used_dsds;
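			/*
			 * Each DSD is 12 bytes (address low/high plus length);
			 * the extra entry leaves room for the pointer to the
			 * next list or for the null terminator.
			 */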
976 			dsd_list_len = (avail_dsds + 1) * 12;
977 			used_dsds -= avail_dsds;
978 
979 			/* allocate tracking DS */
980 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
981 			if (!dsd_ptr)
982 				return 1;
983 
984 			/* allocate new list */
985 			dsd_ptr->dsd_addr = next_dsd =
986 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
987 				&dsd_ptr->dsd_list_dma);
988 
989 			if (!next_dsd) {
990 				/*
991 				 * Need to cleanup only this dsd_ptr, rest
992 				 * will be done by sp_free_dma()
993 				 */
994 				kfree(dsd_ptr);
995 				return 1;
996 			}
997 
998 			list_add_tail(&dsd_ptr->list,
999 			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1000 
1001 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
1002 
1003 			/* add new list to cmd iocb or last list */
1004 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1005 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1006 			*cur_dsd++ = dsd_list_len;
1007 			cur_dsd = (uint32_t *)next_dsd;
1008 		}
1009 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1010 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1011 		*cur_dsd++ = cpu_to_le32(sle_dma_len);
1012 		avail_dsds--;
1013 
1014 		if (partial == 0) {
1015 			/* Got a full protection interval */
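			/*
			 * Append the 8-byte DIF tuple for the interval just
			 * completed, taken from the protection scatterlist,
			 * then loop back via alloc_and_fill to emit it as a
			 * DSD of its own.
			 */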
1016 			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
1017 			sle_dma_len = 8;
1018 
1019 			tot_prot_dma_len += sle_dma_len;
1020 			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
1021 				tot_prot_dma_len = 0;
1022 				sg_prot = sg_next(sg_prot);
1023 			}
1024 
1025 			partial = 1; /* So as to not re-enter this block */
1026 			goto alloc_and_fill;
1027 		}
1028 	}
1029 	/* Null termination */
1030 	*cur_dsd++ = 0;
1031 	*cur_dsd++ = 0;
1032 	*cur_dsd++ = 0;
1033 	return 0;
1034 }
1035 
1036 static int
1037 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1038 	uint16_t tot_dsds)
1039 {
1040 	void *next_dsd;
1041 	uint8_t avail_dsds = 0;
1042 	uint32_t dsd_list_len;
1043 	struct dsd_dma *dsd_ptr;
1044 	struct scatterlist *sg;
1045 	uint32_t *cur_dsd = dsd;
1046 	int	i;
1047 	uint16_t	used_dsds = tot_dsds;
1048 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1049 
1050 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
1051 		dma_addr_t	sle_dma;
1052 
1053 		/* Allocate additional continuation packets? */
1054 		if (avail_dsds == 0) {
1055 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1056 					QLA_DSDS_PER_IOCB : used_dsds;
1057 			dsd_list_len = (avail_dsds + 1) * 12;
1058 			used_dsds -= avail_dsds;
1059 
1060 			/* allocate tracking DS */
1061 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1062 			if (!dsd_ptr)
1063 				return 1;
1064 
1065 			/* allocate new list */
1066 			dsd_ptr->dsd_addr = next_dsd =
1067 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1068 				&dsd_ptr->dsd_list_dma);
1069 
1070 			if (!next_dsd) {
1071 				/*
1072 				 * Need to cleanup only this dsd_ptr, rest
1073 				 * will be done by sp_free_dma()
1074 				 */
1075 				kfree(dsd_ptr);
1076 				return 1;
1077 			}
1078 
1079 			list_add_tail(&dsd_ptr->list,
1080 			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1081 
1082 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
1083 
1084 			/* add new list to cmd iocb or last list */
1085 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1086 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1087 			*cur_dsd++ = dsd_list_len;
1088 			cur_dsd = (uint32_t *)next_dsd;
1089 		}
1090 		sle_dma = sg_dma_address(sg);
1091 
1092 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1093 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1094 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1095 		avail_dsds--;
1096 
1097 	}
1098 	/* Null termination */
1099 	*cur_dsd++ = 0;
1100 	*cur_dsd++ = 0;
1101 	*cur_dsd++ = 0;
1102 	return 0;
1103 }
1104 
1105 static int
1106 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1107 							uint32_t *dsd,
1108 	uint16_t tot_dsds)
1109 {
1110 	void *next_dsd;
1111 	uint8_t avail_dsds = 0;
1112 	uint32_t dsd_list_len;
1113 	struct dsd_dma *dsd_ptr;
1114 	struct scatterlist *sg;
1115 	int	i;
1116 	struct scsi_cmnd *cmd;
1117 	uint32_t *cur_dsd = dsd;
1118 	uint16_t	used_dsds = tot_dsds;
1119 
1120 	cmd = GET_CMD_SP(sp);
1121 	scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
1122 		dma_addr_t	sle_dma;
1123 
1124 		/* Allocate additional continuation packets? */
1125 		if (avail_dsds == 0) {
1126 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1127 						QLA_DSDS_PER_IOCB : used_dsds;
1128 			dsd_list_len = (avail_dsds + 1) * 12;
1129 			used_dsds -= avail_dsds;
1130 
1131 			/* allocate tracking DS */
1132 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1133 			if (!dsd_ptr)
1134 				return 1;
1135 
1136 			/* allocate new list */
1137 			dsd_ptr->dsd_addr = next_dsd =
1138 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1139 				&dsd_ptr->dsd_list_dma);
1140 
1141 			if (!next_dsd) {
1142 				/*
1143 				 * Need to cleanup only this dsd_ptr, rest
1144 				 * will be done by sp_free_dma()
1145 				 */
1146 				kfree(dsd_ptr);
1147 				return 1;
1148 			}
1149 
1150 			list_add_tail(&dsd_ptr->list,
1151 			    &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
1152 
1153 			sp->flags |= SRB_CRC_CTX_DSD_VALID;
1154 
1155 			/* add new list to cmd iocb or last list */
1156 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1157 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1158 			*cur_dsd++ = dsd_list_len;
1159 			cur_dsd = (uint32_t *)next_dsd;
1160 		}
1161 		sle_dma = sg_dma_address(sg);
1162 
1163 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1164 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1165 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1166 
1167 		avail_dsds--;
1168 	}
1169 	/* Null termination */
1170 	*cur_dsd++ = 0;
1171 	*cur_dsd++ = 0;
1172 	*cur_dsd++ = 0;
1173 	return 0;
1174 }
1175 
1176 /**
1177  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1178  *							Type CRC_2 IOCB types.
1179  *
1180  * @sp: SRB command to process
1181  * @cmd_pkt: Command Type CRC_2 IOCB
1182  * @tot_dsds: Total number of segments to transfer
1183  */
1184 static inline int
1185 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1186     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1187 {
1188 	uint32_t		*cur_dsd, *fcp_dl;
1189 	scsi_qla_host_t		*vha;
1190 	struct scsi_cmnd	*cmd;
1191 	int			sgc;
1192 	uint32_t		total_bytes = 0;
1193 	uint32_t		data_bytes;
1194 	uint32_t		dif_bytes;
1195 	uint8_t			bundling = 1;
1196 	uint16_t		blk_size;
1197 	uint8_t			*clr_ptr;
1198 	struct crc_context	*crc_ctx_pkt = NULL;
1199 	struct qla_hw_data	*ha;
1200 	uint8_t			additional_fcpcdb_len;
1201 	uint16_t		fcp_cmnd_len;
1202 	struct fcp_cmnd		*fcp_cmnd;
1203 	dma_addr_t		crc_ctx_dma;
1204 	char			tag[2];
1205 
1206 	cmd = GET_CMD_SP(sp);
1207 
1208 	sgc = 0;
1209 	/* Update entry type to indicate Command Type CRC_2 IOCB */
1210 	*((uint32_t *)(&cmd_pkt->entry_type)) =
1211 	    __constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
1212 
1213 	vha = sp->fcport->vha;
1214 	ha = vha->hw;
1215 
1216 	/* No data transfer */
1217 	data_bytes = scsi_bufflen(cmd);
1218 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1219 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1220 		return QLA_SUCCESS;
1221 	}
1222 
1223 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1224 
1225 	/* Set transfer direction */
1226 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1227 		cmd_pkt->control_flags =
1228 		    __constant_cpu_to_le16(CF_WRITE_DATA);
1229 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1230 		cmd_pkt->control_flags =
1231 		    __constant_cpu_to_le16(CF_READ_DATA);
1232 	}
1233 
1234 	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1235 	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1236 	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1237 	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1238 		bundling = 0;
1239 
1240 	/* Allocate CRC context from global pool */
1241 	crc_ctx_pkt = sp->u.scmd.ctx =
1242 	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1243 
1244 	if (!crc_ctx_pkt)
1245 		goto crc_queuing_error;
1246 
1247 	/* Zero out CTX area. */
1248 	clr_ptr = (uint8_t *)crc_ctx_pkt;
1249 	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1250 
1251 	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1252 
1253 	sp->flags |= SRB_CRC_CTX_DMA_VALID;
1254 
1255 	/* Set handle */
1256 	crc_ctx_pkt->handle = cmd_pkt->handle;
1257 
1258 	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1259 
1260 	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1261 	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1262 
1263 	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1264 	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1265 	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1266 
1267 	/* Determine SCSI command length -- align to 4 byte boundary */
1268 	if (cmd->cmd_len > 16) {
1269 		additional_fcpcdb_len = cmd->cmd_len - 16;
1270 		if ((cmd->cmd_len % 4) != 0) {
1271 			/* SCSI cmd > 16 bytes must be multiple of 4 */
1272 			goto crc_queuing_error;
1273 		}
1274 		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1275 	} else {
1276 		additional_fcpcdb_len = 0;
1277 		fcp_cmnd_len = 12 + 16 + 4;
1278 	}
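	/*
	 * fcp_cmnd_len covers the whole FCP_CMND IU: an 8-byte LUN plus four
	 * task/control bytes (the 12), the CDB itself, and the trailing
	 * 4-byte FCP_DL field that is filled in with total_bytes below.
	 */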
1279 
1280 	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1281 
1282 	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1283 	if (cmd->sc_data_direction == DMA_TO_DEVICE)
1284 		fcp_cmnd->additional_cdb_len |= 1;
1285 	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1286 		fcp_cmnd->additional_cdb_len |= 2;
1287 
1288 	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1289 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1290 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1291 	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1292 	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1293 	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1294 	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1295 	fcp_cmnd->task_management = 0;
1296 
1297 	/*
1298 	 * Update tagged queuing modifier if using command tag queuing
1299 	 */
1300 	if (scsi_populate_tag_msg(cmd, tag)) {
1301 		switch (tag[0]) {
1302 		case HEAD_OF_QUEUE_TAG:
1303 		    fcp_cmnd->task_attribute = TSK_HEAD_OF_QUEUE;
1304 		    break;
1305 		case ORDERED_QUEUE_TAG:
1306 		    fcp_cmnd->task_attribute = TSK_ORDERED;
1307 		    break;
1308 		default:
1309 		    fcp_cmnd->task_attribute = TSK_SIMPLE;
1310 		    break;
1311 		}
1312 	} else {
1313 		fcp_cmnd->task_attribute = TSK_SIMPLE;
1314 	}
1315 
1316 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1317 
1318 	/* Compute dif len and adjust data len to include protection */
1319 	dif_bytes = 0;
1320 	blk_size = cmd->device->sector_size;
1321 	dif_bytes = (data_bytes / blk_size) * 8;
1322 
1323 	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1324 	case SCSI_PROT_READ_INSERT:
1325 	case SCSI_PROT_WRITE_STRIP:
1326 	    total_bytes = data_bytes;
1327 	    data_bytes += dif_bytes;
1328 	    break;
1329 
1330 	case SCSI_PROT_READ_STRIP:
1331 	case SCSI_PROT_WRITE_INSERT:
1332 	case SCSI_PROT_READ_PASS:
1333 	case SCSI_PROT_WRITE_PASS:
1334 	    total_bytes = data_bytes + dif_bytes;
1335 	    break;
1336 	default:
1337 	    BUG();
1338 	}
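	/*
	 * Example: a 4096-byte transfer with 512-byte sectors carries
	 * 8 * 8 = 64 bytes of DIF. When the protection data travels on the
	 * wire (READ_STRIP, WRITE_INSERT and the PASS cases) total_bytes
	 * becomes 4160; for READ_INSERT/WRITE_STRIP the DIF exists only on
	 * the host side, so total_bytes stays 4096 while data_bytes grows
	 * to 4160.
	 */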
1339 
1340 	if (!qla2x00_hba_err_chk_enabled(sp))
1341 		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1342 	/* HBA error checking enabled */
1343 	else if (IS_PI_UNINIT_CAPABLE(ha)) {
1344 		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1345 		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1346 			SCSI_PROT_DIF_TYPE2))
1347 			fw_prot_opts |= BIT_10;
1348 		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1349 		    SCSI_PROT_DIF_TYPE3)
1350 			fw_prot_opts |= BIT_11;
1351 	}
1352 
1353 	if (!bundling) {
1354 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1355 	} else {
1356 		/*
1357 		 * Configure bundling if the data and protection segments
1358 		 * must be fetched with interleaving PCI accesses
1359 		 */
1360 		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1361 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1362 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1363 							tot_prot_dsds);
1364 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1365 	}
1366 
1367 	/* Finish the common fields of CRC pkt */
1368 	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1369 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1370 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1371 	crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
1372 	/* Fibre channel byte count */
1373 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1374 	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1375 	    additional_fcpcdb_len);
1376 	*fcp_dl = htonl(total_bytes);
1377 
1378 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1379 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
1380 		return QLA_SUCCESS;
1381 	}
1382 	/* Walks data segments */
1383 
1384 	cmd_pkt->control_flags |=
1385 	    __constant_cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1386 
1387 	if (!bundling && tot_prot_dsds) {
1388 		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1389 		    cur_dsd, tot_dsds))
1390 			goto crc_queuing_error;
1391 	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1392 	    (tot_dsds - tot_prot_dsds)))
1393 		goto crc_queuing_error;
1394 
1395 	if (bundling && tot_prot_dsds) {
1396 		/* Walks dif segments */
1397 		cmd_pkt->control_flags |=
1398 			__constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1399 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1400 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1401 		    tot_prot_dsds))
1402 			goto crc_queuing_error;
1403 	}
1404 	return QLA_SUCCESS;
1405 
1406 crc_queuing_error:
1407 	/* Cleanup will be performed by the caller */
1408 
1409 	return QLA_FUNCTION_FAILED;
1410 }
1411 
1412 /**
1413  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1414  * @sp: command to send to the ISP
1415  *
1416  * Returns non-zero if a failure occurred, else zero.
1417  */
1418 int
1419 qla24xx_start_scsi(srb_t *sp)
1420 {
1421 	int		ret, nseg;
1422 	unsigned long   flags;
1423 	uint32_t	*clr_ptr;
1424 	uint32_t        index;
1425 	uint32_t	handle;
1426 	struct cmd_type_7 *cmd_pkt;
1427 	uint16_t	cnt;
1428 	uint16_t	req_cnt;
1429 	uint16_t	tot_dsds;
1430 	struct req_que *req = NULL;
1431 	struct rsp_que *rsp = NULL;
1432 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1433 	struct scsi_qla_host *vha = sp->fcport->vha;
1434 	struct qla_hw_data *ha = vha->hw;
1435 	char		tag[2];
1436 
1437 	/* Setup device pointers. */
1438 	ret = 0;
1439 
1440 	qla25xx_set_que(sp, &rsp);
1441 	req = vha->req;
1442 
1443 	/* So we know we haven't pci_map'ed anything yet */
1444 	tot_dsds = 0;
1445 
1446 	/* Send marker if required */
1447 	if (vha->marker_needed != 0) {
1448 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1449 		    QLA_SUCCESS)
1450 			return QLA_FUNCTION_FAILED;
1451 		vha->marker_needed = 0;
1452 	}
1453 
1454 	/* Acquire ring specific lock */
1455 	spin_lock_irqsave(&ha->hardware_lock, flags);
1456 
1457 	/* Check for room in outstanding command list. */
1458 	handle = req->current_outstanding_cmd;
1459 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1460 		handle++;
1461 		if (handle == req->num_outstanding_cmds)
1462 			handle = 1;
1463 		if (!req->outstanding_cmds[handle])
1464 			break;
1465 	}
1466 	if (index == req->num_outstanding_cmds)
1467 		goto queuing_error;
1468 
1469 	/* Map the sg table so we have an accurate count of sg entries needed */
1470 	if (scsi_sg_count(cmd)) {
1471 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1472 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1473 		if (unlikely(!nseg))
1474 			goto queuing_error;
1475 	} else
1476 		nseg = 0;
1477 
1478 	tot_dsds = nseg;
1479 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1480 	if (req->cnt < (req_cnt + 2)) {
1481 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1482 
1483 		if (req->ring_index < cnt)
1484 			req->cnt = cnt - req->ring_index;
1485 		else
1486 			req->cnt = req->length -
1487 				(req->ring_index - cnt);
1488 		if (req->cnt < (req_cnt + 2))
1489 			goto queuing_error;
1490 	}
1491 
1492 	/* Build command packet. */
1493 	req->current_outstanding_cmd = handle;
1494 	req->outstanding_cmds[handle] = sp;
1495 	sp->handle = handle;
1496 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1497 	req->cnt -= req_cnt;
1498 
1499 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1500 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1501 
1502 	/* Zero out remaining portion of packet. */
1503 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1504 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1505 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1506 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1507 
1508 	/* Set NPORT-ID and LUN number*/
1509 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1510 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1511 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1512 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1513 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1514 
1515 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1516 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1517 
1518 	/* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
1519 	if (scsi_populate_tag_msg(cmd, tag)) {
1520 		switch (tag[0]) {
1521 		case HEAD_OF_QUEUE_TAG:
1522 			cmd_pkt->task = TSK_HEAD_OF_QUEUE;
1523 			break;
1524 		case ORDERED_QUEUE_TAG:
1525 			cmd_pkt->task = TSK_ORDERED;
1526 			break;
1527 		default:
1528 			cmd_pkt->task = TSK_SIMPLE;
1529 			break;
1530 		}
1531 	} else {
1532 		cmd_pkt->task = TSK_SIMPLE;
1533 	}
1534 
1535 	/* Load SCSI command packet. */
1536 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1537 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1538 
1539 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1540 
1541 	/* Build IOCB segments */
1542 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
1543 
1544 	/* Set total data segment count. */
1545 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1546 	/* Specify response queue number where completion should happen */
1547 	cmd_pkt->entry_status = (uint8_t) rsp->id;
1548 	wmb();
1549 	/* Adjust ring index. */
1550 	req->ring_index++;
1551 	if (req->ring_index == req->length) {
1552 		req->ring_index = 0;
1553 		req->ring_ptr = req->ring;
1554 	} else
1555 		req->ring_ptr++;
1556 
1557 	sp->flags |= SRB_DMA_VALID;
1558 
1559 	/* Set chip new ring index. */
1560 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1561 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1562 
1563 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1564 	if (vha->flags.process_response_queue &&
1565 		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1566 		qla24xx_process_response_queue(vha, rsp);
1567 
1568 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1569 	return QLA_SUCCESS;
1570 
1571 queuing_error:
1572 	if (tot_dsds)
1573 		scsi_dma_unmap(cmd);
1574 
1575 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1576 
1577 	return QLA_FUNCTION_FAILED;
1578 }
1579 
1580 /**
1581  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1582  * @sp: command to send to the ISP
1583  *
1584  * Returns non-zero if a failure occurred, else zero.
1585  */
1586 int
1587 qla24xx_dif_start_scsi(srb_t *sp)
1588 {
1589 	int			nseg;
1590 	unsigned long		flags;
1591 	uint32_t		*clr_ptr;
1592 	uint32_t		index;
1593 	uint32_t		handle;
1594 	uint16_t		cnt;
1595 	uint16_t		req_cnt = 0;
1596 	uint16_t		tot_dsds;
1597 	uint16_t		tot_prot_dsds;
1598 	uint16_t		fw_prot_opts = 0;
1599 	struct req_que		*req = NULL;
1600 	struct rsp_que		*rsp = NULL;
1601 	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
1602 	struct scsi_qla_host	*vha = sp->fcport->vha;
1603 	struct qla_hw_data	*ha = vha->hw;
1604 	struct cmd_type_crc_2	*cmd_pkt;
1605 	uint32_t		status = 0;
1606 
1607 #define QDSS_GOT_Q_SPACE	BIT_0
1608 
1609 	/* Only process protection or >16 byte CDB commands in this routine */
1610 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1611 		if (cmd->cmd_len <= 16)
1612 			return qla24xx_start_scsi(sp);
1613 	}
1614 
1615 	/* Setup device pointers. */
1616 
1617 	qla25xx_set_que(sp, &rsp);
1618 	req = vha->req;
1619 
1620 	/* So we know we haven't pci_map'ed anything yet */
1621 	tot_dsds = 0;
1622 
1623 	/* Send marker if required */
1624 	if (vha->marker_needed != 0) {
1625 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1626 		    QLA_SUCCESS)
1627 			return QLA_FUNCTION_FAILED;
1628 		vha->marker_needed = 0;
1629 	}
1630 
1631 	/* Acquire ring specific lock */
1632 	spin_lock_irqsave(&ha->hardware_lock, flags);
1633 
1634 	/* Check for room in outstanding command list. */
1635 	handle = req->current_outstanding_cmd;
1636 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1637 		handle++;
1638 		if (handle == req->num_outstanding_cmds)
1639 			handle = 1;
1640 		if (!req->outstanding_cmds[handle])
1641 			break;
1642 	}
1643 
1644 	if (index == req->num_outstanding_cmds)
1645 		goto queuing_error;
1646 
1647 	/* Compute number of required data segments */
1648 	/* Map the sg table so we have an accurate count of sg entries needed */
1649 	if (scsi_sg_count(cmd)) {
1650 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1651 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1652 		if (unlikely(!nseg))
1653 			goto queuing_error;
1654 		else
1655 			sp->flags |= SRB_DMA_VALID;
1656 
1657 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1658 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1659 			struct qla2_sgx sgx;
1660 			uint32_t	partial;
1661 
1662 			memset(&sgx, 0, sizeof(struct qla2_sgx));
1663 			sgx.tot_bytes = scsi_bufflen(cmd);
1664 			sgx.cur_sg = scsi_sglist(cmd);
1665 			sgx.sp = sp;
1666 
1667 			nseg = 0;
1668 			while (qla24xx_get_one_block_sg(
1669 			    cmd->device->sector_size, &sgx, &partial))
1670 				nseg++;
1671 		}
1672 	} else
1673 		nseg = 0;
1674 
1675 	/* number of required data segments */
1676 	tot_dsds = nseg;
1677 
1678 	/* Compute number of required protection segments */
1679 	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1680 		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1681 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1682 		if (unlikely(!nseg))
1683 			goto queuing_error;
1684 		else
1685 			sp->flags |= SRB_CRC_PROT_DMA_VALID;
1686 
1687 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1688 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1689 			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1690 		}
1691 	} else {
1692 		nseg = 0;
1693 	}
1694 
1695 	req_cnt = 1;
1696 	/* Total Data and protection sg segment(s) */
1697 	tot_prot_dsds = nseg;
1698 	tot_dsds += nseg;
1699 	if (req->cnt < (req_cnt + 2)) {
1700 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
1701 
1702 		if (req->ring_index < cnt)
1703 			req->cnt = cnt - req->ring_index;
1704 		else
1705 			req->cnt = req->length -
1706 				(req->ring_index - cnt);
1707 		if (req->cnt < (req_cnt + 2))
1708 			goto queuing_error;
1709 	}
1710 
1711 	status |= QDSS_GOT_Q_SPACE;
1712 
1713 	/* Build header part of command packet (excluding the OPCODE). */
1714 	req->current_outstanding_cmd = handle;
1715 	req->outstanding_cmds[handle] = sp;
1716 	sp->handle = handle;
1717 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1718 	req->cnt -= req_cnt;
1719 
1720 	/* Fill-in common area */
1721 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1722 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1723 
1724 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1725 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1726 
1727 	/* Set NPORT-ID and LUN number*/
1728 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1729 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1730 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1731 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1732 
1733 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1734 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1735 
1736 	/* Total Data and protection segment(s) */
1737 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1738 
1739 	/* Build IOCB segments and adjust for data protection segments */
1740 	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1741 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1742 		QLA_SUCCESS)
1743 		goto queuing_error;
1744 
1745 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1746 	/* Specify response queue number where completion should happen */
1747 	cmd_pkt->entry_status = (uint8_t) rsp->id;
1748 	cmd_pkt->timeout = __constant_cpu_to_le16(0);
1749 	wmb();
1750 
1751 	/* Adjust ring index. */
1752 	req->ring_index++;
1753 	if (req->ring_index == req->length) {
1754 		req->ring_index = 0;
1755 		req->ring_ptr = req->ring;
1756 	} else
1757 		req->ring_ptr++;
1758 
1759 	/* Set chip new ring index. */
1760 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1761 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1762 
1763 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1764 	if (vha->flags.process_response_queue &&
1765 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1766 		qla24xx_process_response_queue(vha, rsp);
1767 
1768 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1769 
1770 	return QLA_SUCCESS;
1771 
1772 queuing_error:
1773 	if (status & QDSS_GOT_Q_SPACE) {
1774 		req->outstanding_cmds[handle] = NULL;
1775 		req->cnt += req_cnt;
1776 	}
1777 	/* Cleanup will be performed by the caller (queuecommand) */
1778 
1779 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1780 	return QLA_FUNCTION_FAILED;
1781 }
1782 
1783 
1784 static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
1785 {
1786 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1787 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1788 	int affinity = cmd->request->cpu;
1789 
1790 	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
1791 		affinity < ha->max_rsp_queues - 1)
1792 		*rsp = ha->rsp_q_map[affinity + 1];
1793 	else
1794 		*rsp = ha->rsp_q_map[0];
1795 }
1796 
1797 /* Generic Control-SRB manipulation functions. */
1798 void *
1799 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
1800 {
1801 	struct qla_hw_data *ha = vha->hw;
1802 	struct req_que *req = ha->req_q_map[0];
1803 	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
1804 	uint32_t index, handle;
1805 	request_t *pkt;
1806 	uint16_t cnt, req_cnt;
1807 
1808 	pkt = NULL;
1809 	req_cnt = 1;
1810 	handle = 0;
1811 
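	/*
	 * Callers that do not track the request, such as __qla2x00_marker()
	 * above, pass a NULL sp and skip the outstanding-command bookkeeping.
	 */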
1812 	if (!sp)
1813 		goto skip_cmd_array;
1814 
1815 	/* Check for room in outstanding command list. */
1816 	handle = req->current_outstanding_cmd;
1817 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1818 		handle++;
1819 		if (handle == req->num_outstanding_cmds)
1820 			handle = 1;
1821 		if (!req->outstanding_cmds[handle])
1822 			break;
1823 	}
1824 	if (index == req->num_outstanding_cmds) {
1825 		ql_log(ql_log_warn, vha, 0x700b,
1826 		    "No room on outstanding cmd array.\n");
1827 		goto queuing_error;
1828 	}
1829 
1830 	/* Prep command array. */
1831 	req->current_outstanding_cmd = handle;
1832 	req->outstanding_cmds[handle] = sp;
1833 	sp->handle = handle;
1834 
1835 	/* Adjust entry-counts as needed. */
1836 	if (sp->type != SRB_SCSI_CMD)
1837 		req_cnt = sp->iocbs;
1838 
1839 skip_cmd_array:
1840 	/* Check for room on request queue. */
1841 	if (req->cnt < req_cnt) {
1842 		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
1843 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
1844 		else if (IS_P3P_TYPE(ha))
1845 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
1846 		else if (IS_FWI2_CAPABLE(ha))
1847 			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
1848 		else if (IS_QLAFX00(ha))
1849 			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
1850 		else
1851 			cnt = qla2x00_debounce_register(
1852 			    ISP_REQ_Q_OUT(ha, &reg->isp));
1853 
1854 		if  (req->ring_index < cnt)
1855 			req->cnt = cnt - req->ring_index;
1856 		else
1857 			req->cnt = req->length -
1858 			    (req->ring_index - cnt);
1859 	}
1860 	if (req->cnt < req_cnt)
1861 		goto queuing_error;
1862 
1863 	/* Prep packet */
1864 	req->cnt -= req_cnt;
1865 	pkt = req->ring_ptr;
1866 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
1867 	if (IS_QLAFX00(ha)) {
1868 		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
1869 		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
1870 	} else {
1871 		pkt->entry_count = req_cnt;
1872 		pkt->handle = handle;
1873 	}
1874 
1875 queuing_error:
1876 	return pkt;
1877 }
1878 
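/**
 * qla24xx_login_iocb() - Build a PLOGI login IOCB (FWI2-capable adapters).
 * @sp: login SRB
 * @logio: login/logout IOCB to populate
 *
 * The SRB_LOGIN_COND_PLOGI and SRB_LOGIN_SKIP_PRLI flags from the SRB are
 * translated into the corresponding LCF_* control flags.
 */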
1879 static void
1880 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1881 {
1882 	struct srb_iocb *lio = &sp->u.iocb_cmd;
1883 
1884 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1885 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
1886 	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
1887 		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
1888 	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
1889 		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
1890 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1891 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1892 	logio->port_id[1] = sp->fcport->d_id.b.area;
1893 	logio->port_id[2] = sp->fcport->d_id.b.domain;
1894 	logio->vp_index = sp->fcport->vha->vp_idx;
1895 }
1896 
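/**
 * qla2x00_login_iocb() - Build a fabric login mailbox IOCB (pre-FWI2
 * adapters).
 * @sp: login SRB
 * @mbx: mailbox IOCB to populate with MBC_LOGIN_FABRIC_PORT
 *
 * The loop ID and option bits are packed differently depending on whether
 * the adapter supports extended IDs.
 */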
1897 static void
1898 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1899 {
1900 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1901 	struct srb_iocb *lio = &sp->u.iocb_cmd;
1902 	uint16_t opts;
1903 
1904 	mbx->entry_type = MBX_IOCB_TYPE;
1905 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1906 	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
1907 	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
1908 	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
1909 	if (HAS_EXTENDED_IDS(ha)) {
1910 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1911 		mbx->mb10 = cpu_to_le16(opts);
1912 	} else {
1913 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
1914 	}
1915 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1916 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1917 	    sp->fcport->d_id.b.al_pa);
1918 	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1919 }
1920 
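/**
 * qla24xx_logout_iocb() - Build an implicit LOGO IOCB (FWI2-capable
 * adapters).
 * @sp: logout SRB
 * @logio: login/logout IOCB to populate
 */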
1921 static void
1922 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1923 {
1924 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1925 	logio->control_flags =
1926 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1927 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1928 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1929 	logio->port_id[1] = sp->fcport->d_id.b.area;
1930 	logio->port_id[2] = sp->fcport->d_id.b.domain;
1931 	logio->vp_index = sp->fcport->vha->vp_idx;
1932 }
1933 
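/**
 * qla2x00_logout_iocb() - Build a fabric logout mailbox IOCB (pre-FWI2
 * adapters).
 * @sp: logout SRB
 * @mbx: mailbox IOCB to populate with MBC_LOGOUT_FABRIC_PORT
 */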
1934 static void
1935 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1936 {
1937 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1938 
1939 	mbx->entry_type = MBX_IOCB_TYPE;
1940 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1941 	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
1942 	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
1943 	    cpu_to_le16(sp->fcport->loop_id):
1944 	    cpu_to_le16(sp->fcport->loop_id << 8);
1945 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1946 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1947 	    sp->fcport->d_id.b.al_pa);
1948 	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1949 	/* Implicit: mbx->mbx10 = 0. */
1950 }
1951 
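/**
 * qla24xx_adisc_iocb() - Build an ADISC IOCB (FWI2-capable adapters).
 * @sp: ADISC SRB
 * @logio: login/logout IOCB to populate
 */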
1952 static void
1953 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1954 {
1955 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1956 	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1957 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1958 	logio->vp_index = sp->fcport->vha->vp_idx;
1959 }
1960 
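/**
 * qla2x00_adisc_iocb() - Build the pre-FWI2 equivalent of an ADISC request.
 * @sp: ADISC SRB
 * @mbx: mailbox IOCB to populate
 *
 * Issues MBC_GET_PORT_DATABASE with the port database DMA buffer
 * (ha->async_pd_dma) as the destination.
 */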
1961 static void
1962 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1963 {
1964 	struct qla_hw_data *ha = sp->fcport->vha->hw;
1965 
1966 	mbx->entry_type = MBX_IOCB_TYPE;
1967 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
1968 	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
1969 	if (HAS_EXTENDED_IDS(ha)) {
1970 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
1971 		mbx->mb10 = cpu_to_le16(BIT_0);
1972 	} else {
1973 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
1974 	}
1975 	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
1976 	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1977 	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1978 	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1979 	mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1980 }
1981 
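/**
 * qla24xx_tm_iocb() - Build a task management IOCB (FWI2-capable adapters).
 * @sp: task management SRB
 * @tsk: task management IOCB to populate
 *
 * The firmware timeout is derived from the adapter's R_A_TOV value, and the
 * LUN field is filled in only for a LUN reset.
 */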
1982 static void
1983 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
1984 {
1985 	uint32_t flags;
1986 	unsigned int lun;
1987 	struct fc_port *fcport = sp->fcport;
1988 	scsi_qla_host_t *vha = fcport->vha;
1989 	struct qla_hw_data *ha = vha->hw;
1990 	struct srb_iocb *iocb = &sp->u.iocb_cmd;
1991 	struct req_que *req = vha->req;
1992 
1993 	flags = iocb->u.tmf.flags;
1994 	lun = iocb->u.tmf.lun;
1995 
1996 	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
1997 	tsk->entry_count = 1;
1998 	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
1999 	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
2000 	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2001 	tsk->control_flags = cpu_to_le32(flags);
2002 	tsk->port_id[0] = fcport->d_id.b.al_pa;
2003 	tsk->port_id[1] = fcport->d_id.b.area;
2004 	tsk->port_id[2] = fcport->d_id.b.domain;
2005 	tsk->vp_index = fcport->vha->vp_idx;
2006 
2007 	if (flags == TCF_LUN_RESET) {
2008 		int_to_scsilun(lun, &tsk->lun);
2009 		host_to_fcp_swap((uint8_t *)&tsk->lun,
2010 			sizeof(tsk->lun));
2011 	}
2012 }
2013 
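/**
 * qla24xx_els_iocb() - Build an ELS pass-through IOCB from a bsg_job.
 * @sp: ELS SRB (SRB_ELS_CMD_RPT or SRB_ELS_CMD_HST)
 * @els_iocb: ELS IOCB to populate
 *
 * The bsg_job request payload provides the transmit buffer and the reply
 * payload provides the receive buffer.
 */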
2014 static void
2015 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2016 {
2017 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2018 
2019 	els_iocb->entry_type = ELS_IOCB_TYPE;
2020 	els_iocb->entry_count = 1;
2021 	els_iocb->sys_define = 0;
2022 	els_iocb->entry_status = 0;
2023 	els_iocb->handle = sp->handle;
2024 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2025 	els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2026 	els_iocb->vp_index = sp->fcport->vha->vp_idx;
2027 	els_iocb->sof_type = EST_SOFI3;
2028 	els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2029 
2030 	els_iocb->opcode =
2031 	    sp->type == SRB_ELS_CMD_RPT ?
2032 	    bsg_job->request->rqst_data.r_els.els_code :
2033 	    bsg_job->request->rqst_data.h_els.command_code;
2034 	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2035 	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2036 	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2037 	els_iocb->control_flags = 0;
2038 	els_iocb->rx_byte_count =
2039 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2040 	els_iocb->tx_byte_count =
2041 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2042 
2043 	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2044 	    (bsg_job->request_payload.sg_list)));
2045 	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2046 	    (bsg_job->request_payload.sg_list)));
2047 	els_iocb->tx_len = cpu_to_le32(sg_dma_len
2048 	    (bsg_job->request_payload.sg_list));
2049 
2050 	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2051 	    (bsg_job->reply_payload.sg_list)));
2052 	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2053 	    (bsg_job->reply_payload.sg_list)));
2054 	els_iocb->rx_len = cpu_to_le32(sg_dma_len
2055 	    (bsg_job->reply_payload.sg_list));
2056 
2057 	sp->fcport->vha->qla_stats.control_requests++;
2058 }
2059 
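/**
 * qla2x00_ct_iocb() - Build a CT pass-through IOCB (pre-FWI2 adapters).
 * @sp: CT SRB
 * @ct_iocb: MS IOCB to populate
 *
 * Reply-payload segments beyond the first are chained into Continuation
 * Type 1 IOCBs on request queue 0.
 */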
2060 static void
2061 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2062 {
2063 	uint16_t        avail_dsds;
2064 	uint32_t        *cur_dsd;
2065 	struct scatterlist *sg;
2066 	int index;
2067 	uint16_t tot_dsds;
2068 	scsi_qla_host_t *vha = sp->fcport->vha;
2069 	struct qla_hw_data *ha = vha->hw;
2070 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2071 	int loop_iteration = 0;
2072 	int cont_iocb_prsnt = 0;
2073 	int entry_count = 1;
2074 
2075 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2076 	ct_iocb->entry_type = CT_IOCB_TYPE;
2077 	ct_iocb->entry_status = 0;
2078 	ct_iocb->handle1 = sp->handle;
2079 	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2080 	ct_iocb->status = __constant_cpu_to_le16(0);
2081 	ct_iocb->control_flags = __constant_cpu_to_le16(0);
2082 	ct_iocb->timeout = 0;
2083 	ct_iocb->cmd_dsd_count =
2084 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2085 	ct_iocb->total_dsd_count =
2086 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2087 	ct_iocb->req_bytecount =
2088 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2089 	ct_iocb->rsp_bytecount =
2090 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2091 
2092 	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2093 	    (bsg_job->request_payload.sg_list)));
2094 	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2095 	    (bsg_job->request_payload.sg_list)));
2096 	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2097 
2098 	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2099 	    (bsg_job->reply_payload.sg_list)));
2100 	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2101 	    (bsg_job->reply_payload.sg_list)));
2102 	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2103 
2104 	avail_dsds = 1;
2105 	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2106 	index = 0;
2107 	tot_dsds = bsg_job->reply_payload.sg_cnt;
2108 
2109 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2110 		dma_addr_t       sle_dma;
2111 		cont_a64_entry_t *cont_pkt;
2112 
2113 		/* Allocate additional continuation packets? */
2114 		if (avail_dsds == 0) {
2115 			/*
2116 			 * Five DSDs are available in the Cont.
2117 			 * Type 1 IOCB.
2118 			 */
2119 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2120 			    vha->hw->req_q_map[0]);
2121 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2122 			avail_dsds = 5;
2123 			cont_iocb_prsnt = 1;
2124 			entry_count++;
2125 		}
2126 
2127 		sle_dma = sg_dma_address(sg);
2128 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2129 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2130 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2131 		loop_iteration++;
2132 		avail_dsds--;
2133 	}
2134 	ct_iocb->entry_count = entry_count;
2135 
2136 	sp->fcport->vha->qla_stats.control_requests++;
2137 }
2138 
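/**
 * qla24xx_ct_iocb() - Build a CT pass-through IOCB (FWI2-capable adapters).
 * @sp: CT SRB
 * @ct_iocb: CT IOCB to populate
 *
 * Reply-payload segments beyond the first are chained into Continuation
 * Type 1 IOCBs on request queue 0.
 */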
2139 static void
2140 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2141 {
2142 	uint16_t        avail_dsds;
2143 	uint32_t        *cur_dsd;
2144 	struct scatterlist *sg;
2145 	int index;
2146 	uint16_t tot_dsds;
2147 	scsi_qla_host_t *vha = sp->fcport->vha;
2148 	struct qla_hw_data *ha = vha->hw;
2149 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2150 	int loop_iteration = 0;
2151 	int cont_iocb_prsnt = 0;
2152 	int entry_count = 1;
2153 
2154 	ct_iocb->entry_type = CT_IOCB_TYPE;
2155 	ct_iocb->entry_status = 0;
2156 	ct_iocb->sys_define = 0;
2157 	ct_iocb->handle = sp->handle;
2158 
2159 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2160 	ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2161 	ct_iocb->comp_status = __constant_cpu_to_le16(0);
2162 
2163 	ct_iocb->cmd_dsd_count =
2164 	    __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2165 	ct_iocb->timeout = 0;
2166 	ct_iocb->rsp_dsd_count =
2167 	    __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2168 	ct_iocb->rsp_byte_count =
2169 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2170 	ct_iocb->cmd_byte_count =
2171 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2172 	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2173 	    (bsg_job->request_payload.sg_list)));
2174 	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2175 	    (bsg_job->request_payload.sg_list)));
2176 	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2177 	    (bsg_job->request_payload.sg_list));
2178 
2179 	avail_dsds = 1;
2180 	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2181 	index = 0;
2182 	tot_dsds = bsg_job->reply_payload.sg_cnt;
2183 
2184 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2185 		dma_addr_t       sle_dma;
2186 		cont_a64_entry_t *cont_pkt;
2187 
2188 		/* Allocate additional continuation packets? */
2189 		if (avail_dsds == 0) {
2190 			/*
2191 			 * Five DSDs are available in the Cont.
2192 			 * Type 1 IOCB.
2193 			 */
2194 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2195 			    ha->req_q_map[0]);
2196 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2197 			avail_dsds = 5;
2198 			cont_iocb_prsnt = 1;
2199 			entry_count++;
2200 		}
2201 
2202 		sle_dma = sg_dma_address(sg);
2203 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2204 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2205 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2206 		loop_iteration++;
2207 		avail_dsds--;
2208 	}
2209 	ct_iocb->entry_count = entry_count;
2210 }
2211 
2212 /**
2213  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2214  * @sp: command to send to the ISP
2215  *
2216  * Returns non-zero if a failure occurred, else zero.
2217  */
2218 int
2219 qla82xx_start_scsi(srb_t *sp)
2220 {
2221 	int		ret, nseg;
2222 	unsigned long   flags;
2223 	struct scsi_cmnd *cmd;
2224 	uint32_t	*clr_ptr;
2225 	uint32_t        index;
2226 	uint32_t	handle;
2227 	uint16_t	cnt;
2228 	uint16_t	req_cnt;
2229 	uint16_t	tot_dsds;
2230 	struct device_reg_82xx __iomem *reg;
2231 	uint32_t dbval;
2232 	uint32_t *fcp_dl;
2233 	uint8_t additional_cdb_len;
2234 	struct ct6_dsd *ctx;
2235 	struct scsi_qla_host *vha = sp->fcport->vha;
2236 	struct qla_hw_data *ha = vha->hw;
2237 	struct req_que *req = NULL;
2238 	struct rsp_que *rsp = NULL;
2239 	char tag[2];
2240 
2241 	/* Setup device pointers. */
2242 	ret = 0;
2243 	reg = &ha->iobase->isp82;
2244 	cmd = GET_CMD_SP(sp);
2245 	req = vha->req;
2246 	rsp = ha->rsp_q_map[0];
2247 
2248 	/* So we know we haven't pci_map'ed anything yet */
2249 	tot_dsds = 0;
2250 
2251 	dbval = 0x04 | (ha->portnum << 5);
2252 
2253 	/* Send marker if required */
2254 	if (vha->marker_needed != 0) {
2255 		if (qla2x00_marker(vha, req,
2256 			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2257 			ql_log(ql_log_warn, vha, 0x300c,
2258 			    "qla2x00_marker failed for cmd=%p.\n", cmd);
2259 			return QLA_FUNCTION_FAILED;
2260 		}
2261 		vha->marker_needed = 0;
2262 	}
2263 
2264 	/* Acquire ring specific lock */
2265 	spin_lock_irqsave(&ha->hardware_lock, flags);
2266 
2267 	/* Check for room in outstanding command list. */
2268 	handle = req->current_outstanding_cmd;
2269 	for (index = 1; index < req->num_outstanding_cmds; index++) {
2270 		handle++;
2271 		if (handle == req->num_outstanding_cmds)
2272 			handle = 1;
2273 		if (!req->outstanding_cmds[handle])
2274 			break;
2275 	}
2276 	if (index == req->num_outstanding_cmds)
2277 		goto queuing_error;
2278 
2279 	/* Map the sg table so we have an accurate count of sg entries needed */
2280 	if (scsi_sg_count(cmd)) {
2281 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2282 		    scsi_sg_count(cmd), cmd->sc_data_direction);
2283 		if (unlikely(!nseg))
2284 			goto queuing_error;
2285 	} else
2286 		nseg = 0;
2287 
2288 	tot_dsds = nseg;
2289 
2290 	if (tot_dsds > ql2xshiftctondsd) {
2291 		struct cmd_type_6 *cmd_pkt;
2292 		uint16_t more_dsd_lists = 0;
2293 		struct dsd_dma *dsd_ptr;
2294 		uint16_t i;
2295 
2296 		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2297 		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2298 			ql_dbg(ql_dbg_io, vha, 0x300d,
2299 			    "Num of DSD list %d is more than %d for cmd=%p.\n",
2300 			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2301 			    cmd);
2302 			goto queuing_error;
2303 		}
2304 
2305 		if (more_dsd_lists <= ha->gbl_dsd_avail)
2306 			goto sufficient_dsds;
2307 		else
2308 			more_dsd_lists -= ha->gbl_dsd_avail;
2309 
2310 		for (i = 0; i < more_dsd_lists; i++) {
2311 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2312 			if (!dsd_ptr) {
2313 				ql_log(ql_log_fatal, vha, 0x300e,
2314 				    "Failed to allocate memory for dsd_dma "
2315 				    "for cmd=%p.\n", cmd);
2316 				goto queuing_error;
2317 			}
2318 
2319 			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2320 				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2321 			if (!dsd_ptr->dsd_addr) {
2322 				kfree(dsd_ptr);
2323 				ql_log(ql_log_fatal, vha, 0x300f,
2324 				    "Failed to allocate memory for dsd_addr "
2325 				    "for cmd=%p.\n", cmd);
2326 				goto queuing_error;
2327 			}
2328 			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2329 			ha->gbl_dsd_avail++;
2330 		}
2331 
2332 sufficient_dsds:
2333 		req_cnt = 1;
2334 
2335 		if (req->cnt < (req_cnt + 2)) {
2336 			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2337 				&reg->req_q_out[0]);
2338 			if (req->ring_index < cnt)
2339 				req->cnt = cnt - req->ring_index;
2340 			else
2341 				req->cnt = req->length -
2342 					(req->ring_index - cnt);
2343 			if (req->cnt < (req_cnt + 2))
2344 				goto queuing_error;
2345 		}
2346 
2347 		ctx = sp->u.scmd.ctx =
2348 		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2349 		if (!ctx) {
2350 			ql_log(ql_log_fatal, vha, 0x3010,
2351 			    "Failed to allocate ctx for cmd=%p.\n", cmd);
2352 			goto queuing_error;
2353 		}
2354 
2355 		memset(ctx, 0, sizeof(struct ct6_dsd));
2356 		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2357 			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2358 		if (!ctx->fcp_cmnd) {
2359 			ql_log(ql_log_fatal, vha, 0x3011,
2360 			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2361 			goto queuing_error;
2362 		}
2363 
2364 		/* Initialize the DSD list and dma handle */
2365 		INIT_LIST_HEAD(&ctx->dsd_list);
2366 		ctx->dsd_use_cnt = 0;
2367 
2368 		if (cmd->cmd_len > 16) {
2369 			additional_cdb_len = cmd->cmd_len - 16;
2370 			if ((cmd->cmd_len % 4) != 0) {
2371 				/* SCSI commands longer than 16 bytes must be
2372 				 * a multiple of 4 bytes in length.
2373 				 */
2374 				ql_log(ql_log_warn, vha, 0x3012,
2375 				    "scsi cmd len %d not multiple of 4 "
2376 				    "for cmd=%p.\n", cmd->cmd_len, cmd);
2377 				goto queuing_error_fcp_cmnd;
2378 			}
2379 			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2380 		} else {
2381 			additional_cdb_len = 0;
2382 			ctx->fcp_cmnd_len = 12 + 16 + 4;
2383 		}
2384 
2385 		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2386 		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2387 
2388 		/* Zero out remaining portion of packet. */
2389 		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2390 		clr_ptr = (uint32_t *)cmd_pkt + 2;
2391 		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2392 		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2393 
2394 		/* Set NPORT-ID and LUN number*/
2395 		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2396 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2397 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2398 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2399 		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2400 
2401 		/* Build IOCB segments */
2402 		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2403 			goto queuing_error_fcp_cmnd;
2404 
2405 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2406 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2407 
2408 		/* build FCP_CMND IU */
2409 		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2410 		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2411 		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2412 
2413 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
2414 			ctx->fcp_cmnd->additional_cdb_len |= 1;
2415 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2416 			ctx->fcp_cmnd->additional_cdb_len |= 2;
2417 
2418 		/*
2419 		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2420 		 */
2421 		if (scsi_populate_tag_msg(cmd, tag)) {
2422 			switch (tag[0]) {
2423 			case HEAD_OF_QUEUE_TAG:
2424 				ctx->fcp_cmnd->task_attribute =
2425 				    TSK_HEAD_OF_QUEUE;
2426 				break;
2427 			case ORDERED_QUEUE_TAG:
2428 				ctx->fcp_cmnd->task_attribute =
2429 				    TSK_ORDERED;
2430 				break;
2431 			}
2432 		}
2433 
2434 		/* Populate the FCP_PRIO. */
2435 		if (ha->flags.fcp_prio_enabled)
2436 			ctx->fcp_cmnd->task_attribute |=
2437 			    sp->fcport->fcp_prio << 3;
2438 
2439 		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2440 
2441 		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2442 		    additional_cdb_len);
2443 		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2444 
2445 		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2446 		cmd_pkt->fcp_cmnd_dseg_address[0] =
2447 		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2448 		cmd_pkt->fcp_cmnd_dseg_address[1] =
2449 		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2450 
2451 		sp->flags |= SRB_FCP_CMND_DMA_VALID;
2452 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2453 		/* Set total data segment count. */
2454 		cmd_pkt->entry_count = (uint8_t)req_cnt;
2455 		/* Specify response queue number where
2456 		 * completion should happen
2457 		 */
2458 		cmd_pkt->entry_status = (uint8_t) rsp->id;
2459 	} else {
2460 		struct cmd_type_7 *cmd_pkt;
2461 		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2462 		if (req->cnt < (req_cnt + 2)) {
2463 			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2464 			    &reg->req_q_out[0]);
2465 			if (req->ring_index < cnt)
2466 				req->cnt = cnt - req->ring_index;
2467 			else
2468 				req->cnt = req->length -
2469 					(req->ring_index - cnt);
2470 		}
2471 		if (req->cnt < (req_cnt + 2))
2472 			goto queuing_error;
2473 
2474 		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2475 		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2476 
2477 		/* Zero out remaining portion of packet. */
2478 		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2479 		clr_ptr = (uint32_t *)cmd_pkt + 2;
2480 		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2481 		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2482 
2483 		/* Set NPORT-ID and LUN number*/
2484 		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2485 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2486 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2487 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2488 		cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2489 
2490 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2491 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2492 		    sizeof(cmd_pkt->lun));
2493 
2494 		/*
2495 		 * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
2496 		 */
2497 		if (scsi_populate_tag_msg(cmd, tag)) {
2498 			switch (tag[0]) {
2499 			case HEAD_OF_QUEUE_TAG:
2500 				cmd_pkt->task = TSK_HEAD_OF_QUEUE;
2501 				break;
2502 			case ORDERED_QUEUE_TAG:
2503 				cmd_pkt->task = TSK_ORDERED;
2504 				break;
2505 			}
2506 		}
2507 
2508 		/* Populate the FCP_PRIO. */
2509 		if (ha->flags.fcp_prio_enabled)
2510 			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2511 
2512 		/* Load SCSI command packet. */
2513 		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2514 		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2515 
2516 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2517 
2518 		/* Build IOCB segments */
2519 		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
2520 
2521 		/* Set total data segment count. */
2522 		cmd_pkt->entry_count = (uint8_t)req_cnt;
2523 		/* Specify response queue number where
2524 		 * completion should happen.
2525 		 */
2526 		cmd_pkt->entry_status = (uint8_t) rsp->id;
2527 
2528 	}
2529 	/* Build command packet. */
2530 	req->current_outstanding_cmd = handle;
2531 	req->outstanding_cmds[handle] = sp;
2532 	sp->handle = handle;
2533 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2534 	req->cnt -= req_cnt;
2535 	wmb();
2536 
2537 	/* Adjust ring index. */
2538 	req->ring_index++;
2539 	if (req->ring_index == req->length) {
2540 		req->ring_index = 0;
2541 		req->ring_ptr = req->ring;
2542 	} else
2543 		req->ring_ptr++;
2544 
2545 	sp->flags |= SRB_DMA_VALID;
2546 
2547 	/* Set chip new ring index. */
2548 	/* write, read and verify logic */
2549 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
2550 	if (ql2xdbwr)
2551 		qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
2552 	else {
2553 		WRT_REG_DWORD(
2554 			(unsigned long __iomem *)ha->nxdb_wr_ptr,
2555 			dbval);
2556 		wmb();
2557 		while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
2558 			WRT_REG_DWORD(
2559 				(unsigned long __iomem *)ha->nxdb_wr_ptr,
2560 				dbval);
2561 			wmb();
2562 		}
2563 	}
2564 
2565 	/* Manage unprocessed RIO/ZIO commands in response queue. */
2566 	if (vha->flags.process_response_queue &&
2567 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2568 		qla24xx_process_response_queue(vha, rsp);
2569 
2570 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2571 	return QLA_SUCCESS;
2572 
2573 queuing_error_fcp_cmnd:
2574 	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
2575 queuing_error:
2576 	if (tot_dsds)
2577 		scsi_dma_unmap(cmd);
2578 
2579 	if (sp->u.scmd.ctx) {
2580 		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
2581 		sp->u.scmd.ctx = NULL;
2582 	}
2583 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2584 
2585 	return QLA_FUNCTION_FAILED;
2586 }
2587 
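/**
 * qla24xx_abort_iocb() - Build an abort IOCB (FWI2-capable adapters).
 * @sp: abort SRB
 * @abt_iocb: abort IOCB to populate
 *
 * The handle of the command to abort is taken from sp->u.iocb_cmd.u.abt.
 */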
2588 void
2589 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
2590 {
2591 	struct srb_iocb *aio = &sp->u.iocb_cmd;
2592 	scsi_qla_host_t *vha = sp->fcport->vha;
2593 	struct req_que *req = vha->req;
2594 
2595 	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
2596 	abt_iocb->entry_type = ABORT_IOCB_TYPE;
2597 	abt_iocb->entry_count = 1;
2598 	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
2599 	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2600 	abt_iocb->handle_to_abort =
2601 	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
2602 	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2603 	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
2604 	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2605 	abt_iocb->vp_index = vha->vp_idx;
2606 	abt_iocb->req_que_no = cpu_to_le16(req->id);
2607 	/* Send the command to the firmware */
2608 	wmb();
2609 }
2610 
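/**
 * qla2x00_start_sp() - Build and issue the IOCB for a control SRB.
 * @sp: control SRB (login, logout, ELS, CT, ADISC, TM, abort, ...)
 *
 * Allocates IOCB space, builds the IOCB matching sp->type and starts the
 * IOCBs on request queue 0.  Returns QLA_SUCCESS or QLA_FUNCTION_FAILED.
 */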
2611 int
2612 qla2x00_start_sp(srb_t *sp)
2613 {
2614 	int rval;
2615 	struct qla_hw_data *ha = sp->fcport->vha->hw;
2616 	void *pkt;
2617 	unsigned long flags;
2618 
2619 	rval = QLA_FUNCTION_FAILED;
2620 	spin_lock_irqsave(&ha->hardware_lock, flags);
2621 	pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
2622 	if (!pkt) {
2623 		ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
2624 		    "qla2x00_alloc_iocbs failed.\n");
2625 		goto done;
2626 	}
2627 
2628 	rval = QLA_SUCCESS;
2629 	switch (sp->type) {
2630 	case SRB_LOGIN_CMD:
2631 		IS_FWI2_CAPABLE(ha) ?
2632 		    qla24xx_login_iocb(sp, pkt) :
2633 		    qla2x00_login_iocb(sp, pkt);
2634 		break;
2635 	case SRB_LOGOUT_CMD:
2636 		IS_FWI2_CAPABLE(ha) ?
2637 		    qla24xx_logout_iocb(sp, pkt) :
2638 		    qla2x00_logout_iocb(sp, pkt);
2639 		break;
2640 	case SRB_ELS_CMD_RPT:
2641 	case SRB_ELS_CMD_HST:
2642 		qla24xx_els_iocb(sp, pkt);
2643 		break;
2644 	case SRB_CT_CMD:
2645 		IS_FWI2_CAPABLE(ha) ?
2646 		    qla24xx_ct_iocb(sp, pkt) :
2647 		    qla2x00_ct_iocb(sp, pkt);
2648 		break;
2649 	case SRB_ADISC_CMD:
2650 		IS_FWI2_CAPABLE(ha) ?
2651 		    qla24xx_adisc_iocb(sp, pkt) :
2652 		    qla2x00_adisc_iocb(sp, pkt);
2653 		break;
2654 	case SRB_TM_CMD:
2655 		IS_QLAFX00(ha) ?
2656 		    qlafx00_tm_iocb(sp, pkt) :
2657 		    qla24xx_tm_iocb(sp, pkt);
2658 		break;
2659 	case SRB_FXIOCB_DCMD:
2660 	case SRB_FXIOCB_BCMD:
2661 		qlafx00_fxdisc_iocb(sp, pkt);
2662 		break;
2663 	case SRB_ABT_CMD:
2664 		IS_QLAFX00(ha) ?
2665 			qlafx00_abort_iocb(sp, pkt) :
2666 			qla24xx_abort_iocb(sp, pkt);
2667 		break;
2668 	default:
2669 		break;
2670 	}
2671 
2672 	wmb();
2673 	qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
2674 done:
2675 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2676 	return rval;
2677 }
2678 
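/**
 * qla25xx_build_bidir_iocb() - Populate a bidirectional command IOCB.
 * @sp: bidirectional SRB
 * @vha: HA context
 * @cmd_pkt: command IOCB to populate
 * @tot_dsds: total number of data segment descriptors
 *
 * Write DSDs (request payload) are placed first, followed by read DSDs
 * (reply payload); both spill into Continuation Type 1 IOCBs as needed.
 */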
2679 static void
2680 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
2681 				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
2682 {
2683 	uint16_t avail_dsds;
2684 	uint32_t *cur_dsd;
2685 	uint32_t req_data_len = 0;
2686 	uint32_t rsp_data_len = 0;
2687 	struct scatterlist *sg;
2688 	int index;
2689 	int entry_count = 1;
2690 	struct fc_bsg_job *bsg_job = sp->u.bsg_job;
2691 
2692 	/* Update entry type to indicate bidir command. */
2693 	*((uint32_t *)(&cmd_pkt->entry_type)) =
2694 		__constant_cpu_to_le32(COMMAND_BIDIRECTIONAL);
2695 
2696 	/* Set the transfer direction; for a bidirectional command both the
2697 	 * read and write flags are set.  Also set the BD_WRAP_BACK flag; the
2698 	 * firmware takes care of assigning DID=SID for outgoing packets.
2699 	 */
2700 	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2701 	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2702 	cmd_pkt->control_flags =
2703 			__constant_cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
2704 							BD_WRAP_BACK);
2705 
2706 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2707 	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
2708 	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
2709 	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
2710 
2711 	vha->bidi_stats.transfer_bytes += req_data_len;
2712 	vha->bidi_stats.io_count++;
2713 
2714 	vha->qla_stats.output_bytes += req_data_len;
2715 	vha->qla_stats.output_requests++;
2716 
2717 	/* Only one DSD is available in the bidirectional IOCB; the remaining
2718 	 * DSDs are bundled in continuation IOCBs.
2719 	 */
2720 	avail_dsds = 1;
2721 	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
2722 
2723 	index = 0;
2724 
2725 	for_each_sg(bsg_job->request_payload.sg_list, sg,
2726 				bsg_job->request_payload.sg_cnt, index) {
2727 		dma_addr_t sle_dma;
2728 		cont_a64_entry_t *cont_pkt;
2729 
2730 		/* Allocate additional continuation packets */
2731 		if (avail_dsds == 0) {
2732 			/* A Continuation Type 1 IOCB can accommodate
2733 			 * 5 DSDs.
2734 			 */
2735 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2736 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2737 			avail_dsds = 5;
2738 			entry_count++;
2739 		}
2740 		sle_dma = sg_dma_address(sg);
2741 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2742 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2743 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2744 		avail_dsds--;
2745 	}
2746 	/* For the read request the DSDs always go to a continuation IOCB
2747 	 * and follow the write DSDs.  If there is room on the current IOCB
2748 	 * they are added to it, otherwise a new continuation IOCB is
2749 	 * allocated.
2750 	 */
2751 	for_each_sg(bsg_job->reply_payload.sg_list, sg,
2752 				bsg_job->reply_payload.sg_cnt, index) {
2753 		dma_addr_t sle_dma;
2754 		cont_a64_entry_t *cont_pkt;
2755 
2756 		/* Allocate additional continuation packets */
2757 		if (avail_dsds == 0) {
2758 			/* A Continuation Type 1 IOCB can accommodate
2759 			 * 5 DSDs.
2760 			 */
2761 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
2762 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2763 			avail_dsds = 5;
2764 			entry_count++;
2765 		}
2766 		sle_dma = sg_dma_address(sg);
2767 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2768 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2769 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2770 		avail_dsds--;
2771 	}
2772 	/* This value should match the number of IOCBs required for this command. */
2773 	cmd_pkt->entry_count = entry_count;
2774 }
2775 
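/**
 * qla2x00_start_bidir() - Send a bidirectional command to the ISP.
 * @sp: bidirectional SRB
 * @vha: HA context
 * @tot_dsds: total number of data segment descriptors
 *
 * Returns EXT_STATUS_MAILBOX if a required marker could not be sent,
 * EXT_STATUS_BUSY if no handle or request-queue space is available, and
 * 0 on success.
 */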
2776 int
2777 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
2778 {
2779 
2780 	struct qla_hw_data *ha = vha->hw;
2781 	unsigned long flags;
2782 	uint32_t handle;
2783 	uint32_t index;
2784 	uint16_t req_cnt;
2785 	uint16_t cnt;
2786 	uint32_t *clr_ptr;
2787 	struct cmd_bidir *cmd_pkt = NULL;
2788 	struct rsp_que *rsp;
2789 	struct req_que *req;
2790 	int rval = EXT_STATUS_OK;
2791 
2792 	rval = QLA_SUCCESS;
2793 
2794 	rsp = ha->rsp_q_map[0];
2795 	req = vha->req;
2796 
2797 	/* Send marker if required */
2798 	if (vha->marker_needed != 0) {
2799 		if (qla2x00_marker(vha, req,
2800 			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
2801 			return EXT_STATUS_MAILBOX;
2802 		vha->marker_needed = 0;
2803 	}
2804 
2805 	/* Acquire ring specific lock */
2806 	spin_lock_irqsave(&ha->hardware_lock, flags);
2807 
2808 	/* Check for room in outstanding command list. */
2809 	handle = req->current_outstanding_cmd;
2810 	for (index = 1; index < req->num_outstanding_cmds; index++) {
2811 		handle++;
2812 	if (handle == req->num_outstanding_cmds)
2813 		if (handle == req->num_outstanding_cmds)
2814 			handle = 1;
2815 		if (!req->outstanding_cmds[handle])
2816 			break;
2817 
2818 	if (index == req->num_outstanding_cmds) {
2819 		rval = EXT_STATUS_BUSY;
2820 		goto queuing_error;
2821 	}
2822 
2823 	/* Calculate number of IOCB required */
2824 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2825 
2826 	/* Check for room on request queue. */
2827 	if (req->cnt < req_cnt + 2) {
2828 		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
2829 
2830 		if (req->ring_index < cnt)
2831 			req->cnt = cnt - req->ring_index;
2832 		else
2833 			req->cnt = req->length -
2834 				(req->ring_index - cnt);
2835 	}
2836 	if (req->cnt < req_cnt + 2) {
2837 		rval = EXT_STATUS_BUSY;
2838 		goto queuing_error;
2839 	}
2840 
2841 	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
2842 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2843 
2844 	/* Zero out remaining portion of packet. */
2845 	/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2846 	clr_ptr = (uint32_t *)cmd_pkt + 2;
2847 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2848 
2849 	/* Set NPORT-ID (of vha). */
2850 	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
2851 	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
2852 	cmd_pkt->port_id[1] = vha->d_id.b.area;
2853 	cmd_pkt->port_id[2] = vha->d_id.b.domain;
2854 
2855 	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
2856 	cmd_pkt->entry_status = (uint8_t) rsp->id;
2857 	/* Build command packet. */
2858 	req->current_outstanding_cmd = handle;
2859 	req->outstanding_cmds[handle] = sp;
2860 	sp->handle = handle;
2861 	req->cnt -= req_cnt;
2862 
2863 	/* Send the command to the firmware */
2864 	wmb();
2865 	qla2x00_start_iocbs(vha, req);
2866 queuing_error:
2867 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2868 	return rval;
2869 }
2870