xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_iocb.c (revision 4ed91d48259d9ddd378424d008f2e6559f7e78f8)
1 /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8 #include "qla_target.h"
9 
10 #include <linux/blkdev.h>
11 #include <linux/delay.h>
12 
13 #include <scsi/scsi_tcq.h>
14 
15 /**
16  * qla2x00_get_cmd_direction() - Determine control_flags data direction.
17  * @sp: SRB command to process
18  *
19  * Returns the proper CF_* direction based on CDB.
20  */
21 static inline uint16_t
22 qla2x00_get_cmd_direction(srb_t *sp)
23 {
24 	uint16_t cflags;
25 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
26 	struct scsi_qla_host *vha = sp->vha;
27 
28 	cflags = 0;
29 
30 	/* Set transfer direction */
31 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
32 		cflags = CF_WRITE;
33 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
34 		vha->qla_stats.output_requests++;
35 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
36 		cflags = CF_READ;
37 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
38 		vha->qla_stats.input_requests++;
39 	}
40 	return (cflags);
41 }
42 
43 /**
44  * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
45  * Continuation Type 0 IOCBs to allocate.
46  *
47  * @dsds: number of data segment descriptors needed
48  *
49  * Returns the number of IOCB entries needed to store @dsds.
50  */
51 uint16_t
52 qla2x00_calc_iocbs_32(uint16_t dsds)
53 {
54 	uint16_t iocbs;
55 
56 	iocbs = 1;
57 	if (dsds > 3) {
58 		iocbs += (dsds - 3) / 7;
59 		if ((dsds - 3) % 7)
60 			iocbs++;
61 	}
62 	return (iocbs);
63 }
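
/*
 * For example: qla2x00_calc_iocbs_32(10) == 2 -- the Command Type 2 IOCB
 * carries the first 3 DSDs and a single Continuation Type 0 IOCB carries the
 * remaining 7; an 11th DSD would need a second continuation entry, giving 3.
 */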
64 
65 /**
66  * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
67  * Continuation Type 1 IOCBs to allocate.
68  *
69  * @dsds: number of data segment descriptors needed
70  *
71  * Returns the number of IOCB entries needed to store @dsds.
72  */
73 uint16_t
74 qla2x00_calc_iocbs_64(uint16_t dsds)
75 {
76 	uint16_t iocbs;
77 
78 	iocbs = 1;
79 	if (dsds > 2) {
80 		iocbs += (dsds - 2) / 5;
81 		if ((dsds - 2) % 5)
82 			iocbs++;
83 	}
84 	return (iocbs);
85 }
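
/*
 * For example: qla2x00_calc_iocbs_64(12) == 3 -- the Command Type 3 IOCB
 * carries the first 2 DSDs and two Continuation Type 1 IOCBs carry 5 each;
 * a 13th DSD would need a third continuation entry, giving 4.
 */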
86 
87 /**
88  * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
89  * @vha: HA context
90  *
91  * Returns a pointer to the Continuation Type 0 IOCB packet.
92  */
93 static inline cont_entry_t *
94 qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
95 {
96 	cont_entry_t *cont_pkt;
97 	struct req_que *req = vha->req;
98 	/* Adjust ring index. */
99 	req->ring_index++;
100 	if (req->ring_index == req->length) {
101 		req->ring_index = 0;
102 		req->ring_ptr = req->ring;
103 	} else {
104 		req->ring_ptr++;
105 	}
106 
107 	cont_pkt = (cont_entry_t *)req->ring_ptr;
108 
109 	/* Load packet defaults. */
110 	*((uint32_t *)(&cont_pkt->entry_type)) = cpu_to_le32(CONTINUE_TYPE);
111 
112 	return (cont_pkt);
113 }
114 
115 /**
116  * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
117  * @vha: HA context
118  *
119  * Returns a pointer to the continuation type 1 IOCB packet.
120  */
121 static inline cont_a64_entry_t *
122 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
123 {
124 	cont_a64_entry_t *cont_pkt;
125 
126 	/* Adjust ring index. */
127 	req->ring_index++;
128 	if (req->ring_index == req->length) {
129 		req->ring_index = 0;
130 		req->ring_ptr = req->ring;
131 	} else {
132 		req->ring_ptr++;
133 	}
134 
135 	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
136 
137 	/* Load packet defaults. */
138 	*((uint32_t *)(&cont_pkt->entry_type)) = IS_QLAFX00(vha->hw) ?
139 	    cpu_to_le32(CONTINUE_A64_TYPE_FX00) :
140 	    cpu_to_le32(CONTINUE_A64_TYPE);
141 
142 	return (cont_pkt);
143 }
144 
145 inline int
146 qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
147 {
148 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
149 	uint8_t	guard = scsi_host_get_guard(cmd->device->host);
150 
151 	/* We always use DIF Bundling for best performance */
152 	*fw_prot_opts = 0;
153 
154 	/* Translate SCSI opcode to a protection opcode */
155 	switch (scsi_get_prot_op(cmd)) {
156 	case SCSI_PROT_READ_STRIP:
157 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
158 		break;
159 	case SCSI_PROT_WRITE_INSERT:
160 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
161 		break;
162 	case SCSI_PROT_READ_INSERT:
163 		*fw_prot_opts |= PO_MODE_DIF_INSERT;
164 		break;
165 	case SCSI_PROT_WRITE_STRIP:
166 		*fw_prot_opts |= PO_MODE_DIF_REMOVE;
167 		break;
168 	case SCSI_PROT_READ_PASS:
169 	case SCSI_PROT_WRITE_PASS:
170 		if (guard & SHOST_DIX_GUARD_IP)
171 			*fw_prot_opts |= PO_MODE_DIF_TCP_CKSUM;
172 		else
173 			*fw_prot_opts |= PO_MODE_DIF_PASS;
174 		break;
175 	default:	/* Normal Request */
176 		*fw_prot_opts |= PO_MODE_DIF_PASS;
177 		break;
178 	}
179 
180 	return scsi_prot_sg_count(cmd);
181 }
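
/*
 * Note: the value returned above is the protection scatter/gather entry
 * count; callers such as qla24xx_dif_start_scsi() treat a non-zero return as
 * "protection scatterlist needs DMA mapping", independent of the firmware
 * protection opcode written to *fw_prot_opts.
 */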
182 
183 /*
184  * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32bit
185  * capable IOCB types.
186  *
187  * @sp: SRB command to process
188  * @cmd_pkt: Command type 2 IOCB
189  * @tot_dsds: Total number of segments to transfer
190  */
191 void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
192     uint16_t tot_dsds)
193 {
194 	uint16_t	avail_dsds;
195 	uint32_t	*cur_dsd;
196 	scsi_qla_host_t	*vha;
197 	struct scsi_cmnd *cmd;
198 	struct scatterlist *sg;
199 	int i;
200 
201 	cmd = GET_CMD_SP(sp);
202 
203 	/* Update entry type to indicate Command Type 2 IOCB */
204 	*((uint32_t *)(&cmd_pkt->entry_type)) =
205 	    cpu_to_le32(COMMAND_TYPE);
206 
207 	/* No data transfer */
208 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
209 		cmd_pkt->byte_count = cpu_to_le32(0);
210 		return;
211 	}
212 
213 	vha = sp->vha;
214 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
215 
216 	/* Three DSDs are available in the Command Type 2 IOCB */
217 	avail_dsds = 3;
218 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
219 
220 	/* Load data segments */
221 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
222 		cont_entry_t *cont_pkt;
223 
224 		/* Allocate additional continuation packets? */
225 		if (avail_dsds == 0) {
226 			/*
227 			 * Seven DSDs are available in the Continuation
228 			 * Type 0 IOCB.
229 			 */
230 			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
231 			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
232 			avail_dsds = 7;
233 		}
234 
235 		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
236 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
237 		avail_dsds--;
238 	}
239 }
240 
241 /**
242  * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64bit
243  * capable IOCB types.
244  *
245  * @sp: SRB command to process
246  * @cmd_pkt: Command type 3 IOCB
247  * @tot_dsds: Total number of segments to transfer
248  */
249 void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
250     uint16_t tot_dsds)
251 {
252 	uint16_t	avail_dsds;
253 	uint32_t	*cur_dsd;
254 	scsi_qla_host_t	*vha;
255 	struct scsi_cmnd *cmd;
256 	struct scatterlist *sg;
257 	int i;
258 
259 	cmd = GET_CMD_SP(sp);
260 
261 	/* Update entry type to indicate Command Type 3 IOCB */
262 	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_A64_TYPE);
263 
264 	/* No data transfer */
265 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
266 		cmd_pkt->byte_count = cpu_to_le32(0);
267 		return;
268 	}
269 
270 	vha = sp->vha;
271 	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
272 
273 	/* Two DSDs are available in the Command Type 3 IOCB */
274 	avail_dsds = 2;
275 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
276 
277 	/* Load data segments */
278 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
279 		dma_addr_t	sle_dma;
280 		cont_a64_entry_t *cont_pkt;
281 
282 		/* Allocate additional continuation packets? */
283 		if (avail_dsds == 0) {
284 			/*
285 			 * Five DSDs are available in the Continuation
286 			 * Type 1 IOCB.
287 			 */
288 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
289 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
290 			avail_dsds = 5;
291 		}
292 
293 		sle_dma = sg_dma_address(sg);
294 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
295 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
296 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
297 		avail_dsds--;
298 	}
299 }
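
/*
 * Note: the 32-bit and 64-bit builders above differ only in DSD layout --
 * the 32-bit variant writes (address, length) pairs while the 64-bit variant
 * writes (address low, address high, length) triplets, which is why the
 * latter fits fewer DSDs per entry (2/5 versus 3/7 for command/continuation).
 */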
300 
301 /**
302  * qla2x00_start_scsi() - Send a SCSI command to the ISP
303  * @sp: command to send to the ISP
304  *
305  * Returns non-zero if a failure occurred, else zero.
306  */
307 int
308 qla2x00_start_scsi(srb_t *sp)
309 {
310 	int		nseg;
311 	unsigned long   flags;
312 	scsi_qla_host_t	*vha;
313 	struct scsi_cmnd *cmd;
314 	uint32_t	*clr_ptr;
315 	uint32_t        index;
316 	uint32_t	handle;
317 	cmd_entry_t	*cmd_pkt;
318 	uint16_t	cnt;
319 	uint16_t	req_cnt;
320 	uint16_t	tot_dsds;
321 	struct device_reg_2xxx __iomem *reg;
322 	struct qla_hw_data *ha;
323 	struct req_que *req;
324 	struct rsp_que *rsp;
325 
326 	/* Setup device pointers. */
327 	vha = sp->vha;
328 	ha = vha->hw;
329 	reg = &ha->iobase->isp;
330 	cmd = GET_CMD_SP(sp);
331 	req = ha->req_q_map[0];
332 	rsp = ha->rsp_q_map[0];
333 	/* So we know we haven't pci_map'ed anything yet */
334 	tot_dsds = 0;
335 
336 	/* Send marker if required */
337 	if (vha->marker_needed != 0) {
338 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
339 		    QLA_SUCCESS) {
340 			return (QLA_FUNCTION_FAILED);
341 		}
342 		vha->marker_needed = 0;
343 	}
344 
345 	/* Acquire ring specific lock */
346 	spin_lock_irqsave(&ha->hardware_lock, flags);
347 
348 	/* Check for room in outstanding command list. */
349 	handle = req->current_outstanding_cmd;
350 	for (index = 1; index < req->num_outstanding_cmds; index++) {
351 		handle++;
352 		if (handle == req->num_outstanding_cmds)
353 			handle = 1;
354 		if (!req->outstanding_cmds[handle])
355 			break;
356 	}
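	/*
	 * Note: handle 0 is never assigned -- the search starts one past
	 * current_outstanding_cmd and wraps back to 1, presumably so that a
	 * zero handle can serve as an "unused slot" sentinel.
	 */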
357 	if (index == req->num_outstanding_cmds)
358 		goto queuing_error;
359 
360 	/* Map the sg table so we have an accurate count of sg entries needed */
361 	if (scsi_sg_count(cmd)) {
362 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
363 		    scsi_sg_count(cmd), cmd->sc_data_direction);
364 		if (unlikely(!nseg))
365 			goto queuing_error;
366 	} else
367 		nseg = 0;
368 
369 	tot_dsds = nseg;
370 
371 	/* Calculate the number of request entries needed. */
372 	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
373 	if (req->cnt < (req_cnt + 2)) {
374 		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
375 		if (req->ring_index < cnt)
376 			req->cnt = cnt - req->ring_index;
377 		else
378 			req->cnt = req->length -
379 			    (req->ring_index - cnt);
380 		/* If still no head room then bail out */
381 		if (req->cnt < (req_cnt + 2))
382 			goto queuing_error;
383 	}
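
	/*
	 * Worked example of the free-space check above: with a ring of length
	 * 128, ring_index == 120 and a hardware out-pointer of 10, entries
	 * 10..119 are still queued, so req->cnt becomes
	 * req->length - (ring_index - cnt) = 128 - 110 = 18 free slots.
	 */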
384 
385 	/* Build command packet */
386 	req->current_outstanding_cmd = handle;
387 	req->outstanding_cmds[handle] = sp;
388 	sp->handle = handle;
389 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
390 	req->cnt -= req_cnt;
391 
392 	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
393 	cmd_pkt->handle = handle;
394 	/* Zero out remaining portion of packet. */
395 	clr_ptr = (uint32_t *)cmd_pkt + 2;
396 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
397 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
398 
399 	/* Set target ID and LUN number*/
400 	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
401 	cmd_pkt->lun = cpu_to_le16(cmd->device->lun);
402 	cmd_pkt->control_flags = cpu_to_le16(CF_SIMPLE_TAG);
403 
404 	/* Load SCSI command packet. */
405 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
406 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
407 
408 	/* Build IOCB segments */
409 	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);
410 
411 	/* Set total data segment count. */
412 	cmd_pkt->entry_count = (uint8_t)req_cnt;
413 	wmb();
414 
415 	/* Adjust ring index. */
416 	req->ring_index++;
417 	if (req->ring_index == req->length) {
418 		req->ring_index = 0;
419 		req->ring_ptr = req->ring;
420 	} else
421 		req->ring_ptr++;
422 
423 	sp->flags |= SRB_DMA_VALID;
424 
425 	/* Set chip new ring index. */
426 	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
427 	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */
428 
429 	/* Manage unprocessed RIO/ZIO commands in response queue. */
430 	if (vha->flags.process_response_queue &&
431 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
432 		qla2x00_process_response_queue(rsp);
433 
434 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
435 	return (QLA_SUCCESS);
436 
437 queuing_error:
438 	if (tot_dsds)
439 		scsi_dma_unmap(cmd);
440 
441 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
442 
443 	return (QLA_FUNCTION_FAILED);
444 }
445 
446 /**
447  * qla2x00_start_iocbs() - Execute the IOCB command
448  */
449 void
450 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
451 {
452 	struct qla_hw_data *ha = vha->hw;
453 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
454 
455 	if (IS_P3P_TYPE(ha)) {
456 		qla82xx_start_iocbs(vha);
457 	} else {
458 		/* Adjust ring index. */
459 		req->ring_index++;
460 		if (req->ring_index == req->length) {
461 			req->ring_index = 0;
462 			req->ring_ptr = req->ring;
463 		} else
464 			req->ring_ptr++;
465 
466 		/* Set chip new ring index. */
467 		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
468 			WRT_REG_DWORD(req->req_q_in, req->ring_index);
469 			RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
470 		} else if (IS_QLAFX00(ha)) {
471 			WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index);
472 			RD_REG_DWORD_RELAXED(&reg->ispfx00.req_q_in);
473 			QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
474 		} else if (IS_FWI2_CAPABLE(ha)) {
475 			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
476 			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
477 		} else {
478 			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
479 				req->ring_index);
480 			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
481 		}
482 	}
483 }
484 
485 /**
486  * qla2x00_marker() - Send a marker IOCB to the firmware.
487  * @vha: HA context
488  * @loop_id: loop ID
489  * @lun: LUN
490  * @type: marker modifier
491  *
492  * Can be called from both normal and interrupt context.
493  *
494  * Returns non-zero if a failure occurred, else zero.
495  */
496 static int
497 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
498 			struct rsp_que *rsp, uint16_t loop_id,
499 			uint64_t lun, uint8_t type)
500 {
501 	mrk_entry_t *mrk;
502 	struct mrk_entry_24xx *mrk24 = NULL;
503 
504 	struct qla_hw_data *ha = vha->hw;
505 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
506 
507 	req = ha->req_q_map[0];
508 	mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
509 	if (mrk == NULL) {
510 		ql_log(ql_log_warn, base_vha, 0x3026,
511 		    "Failed to allocate Marker IOCB.\n");
512 
513 		return (QLA_FUNCTION_FAILED);
514 	}
515 
516 	mrk->entry_type = MARKER_TYPE;
517 	mrk->modifier = type;
518 	if (type != MK_SYNC_ALL) {
519 		if (IS_FWI2_CAPABLE(ha)) {
520 			mrk24 = (struct mrk_entry_24xx *) mrk;
521 			mrk24->nport_handle = cpu_to_le16(loop_id);
522 			int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun);
523 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
524 			mrk24->vp_index = vha->vp_idx;
525 			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
526 		} else {
527 			SET_TARGET_ID(ha, mrk->target, loop_id);
528 			mrk->lun = cpu_to_le16((uint16_t)lun);
529 		}
530 	}
531 	wmb();
532 
533 	qla2x00_start_iocbs(vha, req);
534 
535 	return (QLA_SUCCESS);
536 }
537 
538 int
539 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
540 		struct rsp_que *rsp, uint16_t loop_id, uint64_t lun,
541 		uint8_t type)
542 {
543 	int ret;
544 	unsigned long flags = 0;
545 
546 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
547 	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
548 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
549 
550 	return (ret);
551 }
552 
553 /*
554  * qla2x00_issue_marker
555  *
556  * Issue marker
557  * Caller CAN have hardware lock held as specified by ha_locked parameter.
558  * Might release it, then reacquire.
559  */
560 int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
561 {
562 	if (ha_locked) {
563 		if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
564 					MK_SYNC_ALL) != QLA_SUCCESS)
565 			return QLA_FUNCTION_FAILED;
566 	} else {
567 		if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
568 					MK_SYNC_ALL) != QLA_SUCCESS)
569 			return QLA_FUNCTION_FAILED;
570 	}
571 	vha->marker_needed = 0;
572 
573 	return QLA_SUCCESS;
574 }
575 
576 static inline int
577 qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
578 	uint16_t tot_dsds)
579 {
580 	uint32_t *cur_dsd = NULL;
581 	scsi_qla_host_t	*vha;
582 	struct qla_hw_data *ha;
583 	struct scsi_cmnd *cmd;
584 	struct	scatterlist *cur_seg;
585 	uint32_t *dsd_seg;
586 	void *next_dsd;
587 	uint8_t avail_dsds;
588 	uint8_t first_iocb = 1;
589 	uint32_t dsd_list_len;
590 	struct dsd_dma *dsd_ptr;
591 	struct ct6_dsd *ctx;
592 
593 	cmd = GET_CMD_SP(sp);
594 
595 	/* Update entry type to indicate Command Type 6 IOCB */
596 	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_6);
597 
598 	/* No data transfer */
599 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
600 		cmd_pkt->byte_count = cpu_to_le32(0);
601 		return 0;
602 	}
603 
604 	vha = sp->vha;
605 	ha = vha->hw;
606 
607 	/* Set transfer direction */
608 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
609 		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
610 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
611 		vha->qla_stats.output_requests++;
612 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
613 		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
614 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
615 		vha->qla_stats.input_requests++;
616 	}
617 
618 	cur_seg = scsi_sglist(cmd);
619 	ctx = GET_CMD_CTX_SP(sp);
620 
621 	while (tot_dsds) {
622 		avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
623 		    QLA_DSDS_PER_IOCB : tot_dsds;
624 		tot_dsds -= avail_dsds;
625 		dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
626 
627 		dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
628 		    struct dsd_dma, list);
629 		next_dsd = dsd_ptr->dsd_addr;
630 		list_del(&dsd_ptr->list);
631 		ha->gbl_dsd_avail--;
632 		list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
633 		ctx->dsd_use_cnt++;
634 		ha->gbl_dsd_inuse++;
635 
636 		if (first_iocb) {
637 			first_iocb = 0;
638 			dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
639 			*dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
640 			*dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
641 			cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
642 		} else {
643 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
644 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
645 			*cur_dsd++ = cpu_to_le32(dsd_list_len);
646 		}
647 		cur_dsd = (uint32_t *)next_dsd;
648 		while (avail_dsds) {
649 			dma_addr_t	sle_dma;
650 
651 			sle_dma = sg_dma_address(cur_seg);
652 			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
653 			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
654 			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
655 			cur_seg = sg_next(cur_seg);
656 			avail_dsds--;
657 		}
658 	}
659 
660 	/* Null termination */
661 	*cur_dsd++ =  0;
662 	*cur_dsd++ = 0;
663 	*cur_dsd++ = 0;
664 	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
665 	return 0;
666 }
667 
668 /*
669  * qla24xx_calc_dsd_lists() - Determine number of DSD lists required
670  * for Command Type 6.
671  *
672  * @dsds: number of data segment descriptors needed
673  *
674  * Returns the number of DSD lists needed to store @dsds.
675  */
676 static inline uint16_t
677 qla24xx_calc_dsd_lists(uint16_t dsds)
678 {
679 	uint16_t dsd_lists = 0;
680 
681 	dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
682 	if (dsds % QLA_DSDS_PER_IOCB)
683 		dsd_lists++;
684 	return dsd_lists;
685 }
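
/*
 * For example: when dsds is exactly 2 * QLA_DSDS_PER_IOCB the result is 2
 * DSD lists, and one extra descriptor pushes it to 3 -- i.e. this is a
 * ceiling division by QLA_DSDS_PER_IOCB.
 */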
686 
687 
688 /**
689  * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
690  * IOCB types.
691  *
692  * @sp: SRB command to process
693  * @cmd_pkt: Command Type 7 IOCB
694  * @tot_dsds: Total number of segments to transfer
695  * @req: pointer to request queue
696  */
697 inline void
698 qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
699 	uint16_t tot_dsds, struct req_que *req)
700 {
701 	uint16_t	avail_dsds;
702 	uint32_t	*cur_dsd;
703 	scsi_qla_host_t	*vha;
704 	struct scsi_cmnd *cmd;
705 	struct scatterlist *sg;
706 	int i;
707 
708 	cmd = GET_CMD_SP(sp);
709 
710 	/* Update entry type to indicate Command Type 7 IOCB */
711 	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_7);
712 
713 	/* No data transfer */
714 	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
715 		cmd_pkt->byte_count = cpu_to_le32(0);
716 		return;
717 	}
718 
719 	vha = sp->vha;
720 
721 	/* Set transfer direction */
722 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
723 		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
724 		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
725 		vha->qla_stats.output_requests++;
726 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
727 		cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
728 		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
729 		vha->qla_stats.input_requests++;
730 	}
731 
732 	/* One DSD is available in the Command Type 7 IOCB */
733 	avail_dsds = 1;
734 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
735 
736 	/* Load data segments */
737 
738 	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
739 		dma_addr_t	sle_dma;
740 		cont_a64_entry_t *cont_pkt;
741 
742 		/* Allocate additional continuation packets? */
743 		if (avail_dsds == 0) {
744 			/*
745 			 * Five DSDs are available in the Continuation
746 			 * Type 1 IOCB.
747 			 */
748 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
749 			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
750 			avail_dsds = 5;
751 		}
752 
753 		sle_dma = sg_dma_address(sg);
754 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
755 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
756 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
757 		avail_dsds--;
758 	}
759 }
760 
761 struct fw_dif_context {
762 	uint32_t ref_tag;
763 	uint16_t app_tag;
764 	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask*/
765 	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask*/
766 };
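
/*
 * Note: in the mask arrays above, 0xff appears to enable firmware
 * validation/replacement of the corresponding tag byte and 0x00 to disable
 * it, as the per-DIF-type assignments in qla24xx_set_t10dif_tags() below
 * illustrate.
 */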
767 
768 /*
769  * qla24xx_set_t10dif_tags() - Extract Ref and App tags from the SCSI command
770  *
771  */
772 static inline void
773 qla24xx_set_t10dif_tags(srb_t *sp, struct fw_dif_context *pkt,
774     unsigned int protcnt)
775 {
776 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
777 
778 	switch (scsi_get_prot_type(cmd)) {
779 	case SCSI_PROT_DIF_TYPE0:
780 		/*
781 		 * No check of ql2xenablehba_err_chk here: skipping HBA tag
782 		 * generation would simply result in an I/O error.
783 		 */
784 		pkt->ref_tag = cpu_to_le32((uint32_t)
785 		    (0xffffffff & scsi_get_lba(cmd)));
786 
787 		if (!qla2x00_hba_err_chk_enabled(sp))
788 			break;
789 
790 		pkt->ref_tag_mask[0] = 0xff;
791 		pkt->ref_tag_mask[1] = 0xff;
792 		pkt->ref_tag_mask[2] = 0xff;
793 		pkt->ref_tag_mask[3] = 0xff;
794 		break;
795 
796 	/*
797 	 * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
798 	 * match LBA in CDB + N
799 	 */
800 	case SCSI_PROT_DIF_TYPE2:
801 		pkt->app_tag = cpu_to_le16(0);
802 		pkt->app_tag_mask[0] = 0x0;
803 		pkt->app_tag_mask[1] = 0x0;
804 
805 		pkt->ref_tag = cpu_to_le32((uint32_t)
806 		    (0xffffffff & scsi_get_lba(cmd)));
807 
808 		if (!qla2x00_hba_err_chk_enabled(sp))
809 			break;
810 
811 		/* enable ALL bytes of the ref tag */
812 		pkt->ref_tag_mask[0] = 0xff;
813 		pkt->ref_tag_mask[1] = 0xff;
814 		pkt->ref_tag_mask[2] = 0xff;
815 		pkt->ref_tag_mask[3] = 0xff;
816 		break;
817 
818 	/* For Type 3 protection: 16 bit GUARD only */
819 	case SCSI_PROT_DIF_TYPE3:
820 		pkt->ref_tag_mask[0] = pkt->ref_tag_mask[1] =
821 			pkt->ref_tag_mask[2] = pkt->ref_tag_mask[3] =
822 								0x00;
823 		break;
824 
825 	/*
826 	 * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
827 	 * 16 bit app tag.
828 	 */
829 	case SCSI_PROT_DIF_TYPE1:
830 		pkt->ref_tag = cpu_to_le32((uint32_t)
831 		    (0xffffffff & scsi_get_lba(cmd)));
832 		pkt->app_tag = cpu_to_le16(0);
833 		pkt->app_tag_mask[0] = 0x0;
834 		pkt->app_tag_mask[1] = 0x0;
835 
836 		if (!qla2x00_hba_err_chk_enabled(sp))
837 			break;
838 
839 		/* enable ALL bytes of the ref tag */
840 		pkt->ref_tag_mask[0] = 0xff;
841 		pkt->ref_tag_mask[1] = 0xff;
842 		pkt->ref_tag_mask[2] = 0xff;
843 		pkt->ref_tag_mask[3] = 0xff;
844 		break;
845 	}
846 }
847 
848 int
849 qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
850 	uint32_t *partial)
851 {
852 	struct scatterlist *sg;
853 	uint32_t cumulative_partial, sg_len;
854 	dma_addr_t sg_dma_addr;
855 
856 	if (sgx->num_bytes == sgx->tot_bytes)
857 		return 0;
858 
859 	sg = sgx->cur_sg;
860 	cumulative_partial = sgx->tot_partial;
861 
862 	sg_dma_addr = sg_dma_address(sg);
863 	sg_len = sg_dma_len(sg);
864 
865 	sgx->dma_addr = sg_dma_addr + sgx->bytes_consumed;
866 
867 	if ((cumulative_partial + (sg_len - sgx->bytes_consumed)) >= blk_sz) {
868 		sgx->dma_len = (blk_sz - cumulative_partial);
869 		sgx->tot_partial = 0;
870 		sgx->num_bytes += blk_sz;
871 		*partial = 0;
872 	} else {
873 		sgx->dma_len = sg_len - sgx->bytes_consumed;
874 		sgx->tot_partial += sgx->dma_len;
875 		*partial = 1;
876 	}
877 
878 	sgx->bytes_consumed += sgx->dma_len;
879 
880 	if (sg_len == sgx->bytes_consumed) {
881 		sg = sg_next(sg);
882 		sgx->num_sg++;
883 		sgx->cur_sg = sg;
884 		sgx->bytes_consumed = 0;
885 	}
886 
887 	return 1;
888 }
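
/*
 * Note: qla24xx_get_one_block_sg() is an iterator -- each call returns at
 * most one protection-interval-sized chunk in sgx->dma_addr/sgx->dma_len,
 * sets *partial when the current scatterlist element ends mid-interval, and
 * returns 0 once sgx->num_bytes reaches sgx->tot_bytes.
 * qla24xx_walk_and_build_sglist_no_difb() below loops on it to split the
 * data scatterlist on protection-interval boundaries.
 */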
889 
890 int
891 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
892 	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
893 {
894 	void *next_dsd;
895 	uint8_t avail_dsds = 0;
896 	uint32_t dsd_list_len;
897 	struct dsd_dma *dsd_ptr;
898 	struct scatterlist *sg_prot;
899 	uint32_t *cur_dsd = dsd;
900 	uint16_t	used_dsds = tot_dsds;
901 
902 	uint32_t	prot_int; /* protection interval */
903 	uint32_t	partial;
904 	struct qla2_sgx sgx;
905 	dma_addr_t	sle_dma;
906 	uint32_t	sle_dma_len, tot_prot_dma_len = 0;
907 	struct scsi_cmnd *cmd;
908 
909 	memset(&sgx, 0, sizeof(struct qla2_sgx));
910 	if (sp) {
911 		cmd = GET_CMD_SP(sp);
912 		prot_int = cmd->device->sector_size;
913 
914 		sgx.tot_bytes = scsi_bufflen(cmd);
915 		sgx.cur_sg = scsi_sglist(cmd);
916 		sgx.sp = sp;
917 
918 		sg_prot = scsi_prot_sglist(cmd);
919 	} else if (tc) {
920 		prot_int      = tc->blk_sz;
921 		sgx.tot_bytes = tc->bufflen;
922 		sgx.cur_sg    = tc->sg;
923 		sg_prot	      = tc->prot_sg;
924 	} else {
925 		BUG();
926 		return 1;
927 	}
928 
929 	while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
930 
931 		sle_dma = sgx.dma_addr;
932 		sle_dma_len = sgx.dma_len;
933 alloc_and_fill:
934 		/* Allocate additional continuation packets? */
935 		if (avail_dsds == 0) {
936 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
937 					QLA_DSDS_PER_IOCB : used_dsds;
938 			dsd_list_len = (avail_dsds + 1) * 12;
939 			used_dsds -= avail_dsds;
940 
941 			/* allocate tracking DS */
942 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
943 			if (!dsd_ptr)
944 				return 1;
945 
946 			/* allocate new list */
947 			dsd_ptr->dsd_addr = next_dsd =
948 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
949 				&dsd_ptr->dsd_list_dma);
950 
951 			if (!next_dsd) {
952 				/*
953 				 * Need to cleanup only this dsd_ptr, rest
954 				 * will be done by sp_free_dma()
955 				 */
956 				kfree(dsd_ptr);
957 				return 1;
958 			}
959 
960 			if (sp) {
961 				list_add_tail(&dsd_ptr->list,
962 				    &((struct crc_context *)
963 					    sp->u.scmd.ctx)->dsd_list);
964 
965 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
966 			} else {
967 				list_add_tail(&dsd_ptr->list,
968 				    &(tc->ctx->dsd_list));
969 				tc->ctx_dsd_alloced = 1;
970 			}
971 
972 
973 			/* add new list to cmd iocb or last list */
974 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
975 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
976 			*cur_dsd++ = dsd_list_len;
977 			cur_dsd = (uint32_t *)next_dsd;
978 		}
979 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
980 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
981 		*cur_dsd++ = cpu_to_le32(sle_dma_len);
982 		avail_dsds--;
983 
984 		if (partial == 0) {
985 			/* Got a full protection interval */
986 			sle_dma = sg_dma_address(sg_prot) + tot_prot_dma_len;
987 			sle_dma_len = 8;
988 
989 			tot_prot_dma_len += sle_dma_len;
990 			if (tot_prot_dma_len == sg_dma_len(sg_prot)) {
991 				tot_prot_dma_len = 0;
992 				sg_prot = sg_next(sg_prot);
993 			}
994 
995 			partial = 1; /* So as to not re-enter this block */
996 			goto alloc_and_fill;
997 		}
998 	}
999 	/* Null termination */
1000 	*cur_dsd++ = 0;
1001 	*cur_dsd++ = 0;
1002 	*cur_dsd++ = 0;
1003 	return 0;
1004 }
1005 
1006 int
1007 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
1008 	uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1009 {
1010 	void *next_dsd;
1011 	uint8_t avail_dsds = 0;
1012 	uint32_t dsd_list_len;
1013 	struct dsd_dma *dsd_ptr;
1014 	struct scatterlist *sg, *sgl;
1015 	uint32_t *cur_dsd = dsd;
1016 	int	i;
1017 	uint16_t	used_dsds = tot_dsds;
1018 	struct scsi_cmnd *cmd;
1019 
1020 	if (sp) {
1021 		cmd = GET_CMD_SP(sp);
1022 		sgl = scsi_sglist(cmd);
1023 	} else if (tc) {
1024 		sgl = tc->sg;
1025 	} else {
1026 		BUG();
1027 		return 1;
1028 	}
1029 
1030 
1031 	for_each_sg(sgl, sg, tot_dsds, i) {
1032 		dma_addr_t	sle_dma;
1033 
1034 		/* Allocate additional continuation packets? */
1035 		if (avail_dsds == 0) {
1036 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1037 					QLA_DSDS_PER_IOCB : used_dsds;
1038 			dsd_list_len = (avail_dsds + 1) * 12;
1039 			used_dsds -= avail_dsds;
1040 
1041 			/* allocate tracking DS */
1042 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1043 			if (!dsd_ptr)
1044 				return 1;
1045 
1046 			/* allocate new list */
1047 			dsd_ptr->dsd_addr = next_dsd =
1048 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1049 				&dsd_ptr->dsd_list_dma);
1050 
1051 			if (!next_dsd) {
1052 				/*
1053 				 * Need to cleanup only this dsd_ptr, rest
1054 				 * will be done by sp_free_dma()
1055 				 */
1056 				kfree(dsd_ptr);
1057 				return 1;
1058 			}
1059 
1060 			if (sp) {
1061 				list_add_tail(&dsd_ptr->list,
1062 				    &((struct crc_context *)
1063 					    sp->u.scmd.ctx)->dsd_list);
1064 
1065 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
1066 			} else {
1067 				list_add_tail(&dsd_ptr->list,
1068 				    &(tc->ctx->dsd_list));
1069 				tc->ctx_dsd_alloced = 1;
1070 			}
1071 
1072 			/* add new list to cmd iocb or last list */
1073 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1074 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1075 			*cur_dsd++ = dsd_list_len;
1076 			cur_dsd = (uint32_t *)next_dsd;
1077 		}
1078 		sle_dma = sg_dma_address(sg);
1079 
1080 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1081 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1082 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1083 		avail_dsds--;
1084 
1085 	}
1086 	/* Null termination */
1087 	*cur_dsd++ = 0;
1088 	*cur_dsd++ = 0;
1089 	*cur_dsd++ = 0;
1090 	return 0;
1091 }
1092 
1093 int
1094 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1095 	uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
1096 {
1097 	void *next_dsd;
1098 	uint8_t avail_dsds = 0;
1099 	uint32_t dsd_list_len;
1100 	struct dsd_dma *dsd_ptr;
1101 	struct scatterlist *sg, *sgl;
1102 	int	i;
1103 	struct scsi_cmnd *cmd;
1104 	uint32_t *cur_dsd = dsd;
1105 	uint16_t used_dsds = tot_dsds;
1106 	struct scsi_qla_host *vha;
1107 
1108 	if (sp) {
1109 		cmd = GET_CMD_SP(sp);
1110 		sgl = scsi_prot_sglist(cmd);
1111 		vha = sp->vha;
1112 	} else if (tc) {
1113 		vha = tc->vha;
1114 		sgl = tc->prot_sg;
1115 	} else {
1116 		BUG();
1117 		return 1;
1118 	}
1119 
1120 	ql_dbg(ql_dbg_tgt, vha, 0xe021,
1121 		"%s: enter\n", __func__);
1122 
1123 	for_each_sg(sgl, sg, tot_dsds, i) {
1124 		dma_addr_t	sle_dma;
1125 
1126 		/* Allocate additional continuation packets? */
1127 		if (avail_dsds == 0) {
1128 			avail_dsds = (used_dsds > QLA_DSDS_PER_IOCB) ?
1129 						QLA_DSDS_PER_IOCB : used_dsds;
1130 			dsd_list_len = (avail_dsds + 1) * 12;
1131 			used_dsds -= avail_dsds;
1132 
1133 			/* allocate tracking DS */
1134 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
1135 			if (!dsd_ptr)
1136 				return 1;
1137 
1138 			/* allocate new list */
1139 			dsd_ptr->dsd_addr = next_dsd =
1140 			    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC,
1141 				&dsd_ptr->dsd_list_dma);
1142 
1143 			if (!next_dsd) {
1144 				/*
1145 				 * Need to cleanup only this dsd_ptr, rest
1146 				 * will be done by sp_free_dma()
1147 				 */
1148 				kfree(dsd_ptr);
1149 				return 1;
1150 			}
1151 
1152 			if (sp) {
1153 				list_add_tail(&dsd_ptr->list,
1154 				    &((struct crc_context *)
1155 					    sp->u.scmd.ctx)->dsd_list);
1156 
1157 				sp->flags |= SRB_CRC_CTX_DSD_VALID;
1158 			} else {
1159 				list_add_tail(&dsd_ptr->list,
1160 				    &(tc->ctx->dsd_list));
1161 				tc->ctx_dsd_alloced = 1;
1162 			}
1163 
1164 			/* add new list to cmd iocb or last list */
1165 			*cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
1166 			*cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
1167 			*cur_dsd++ = dsd_list_len;
1168 			cur_dsd = (uint32_t *)next_dsd;
1169 		}
1170 		sle_dma = sg_dma_address(sg);
1171 
1172 		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
1173 		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
1174 		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
1175 
1176 		avail_dsds--;
1177 	}
1178 	/* Null termination */
1179 	*cur_dsd++ = 0;
1180 	*cur_dsd++ = 0;
1181 	*cur_dsd++ = 0;
1182 	return 0;
1183 }
1184 
1185 /**
1186  * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
1187  *							Type CRC_2 IOCB types.
1188  *
1189  * @sp: SRB command to process
1190  * @cmd_pkt: Command Type CRC_2 IOCB
1191  * @tot_dsds: Total number of segments to transfer
1192  */
1193 inline int
1194 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1195     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
1196 {
1197 	uint32_t		*cur_dsd, *fcp_dl;
1198 	scsi_qla_host_t		*vha;
1199 	struct scsi_cmnd	*cmd;
1200 	uint32_t		total_bytes = 0;
1201 	uint32_t		data_bytes;
1202 	uint32_t		dif_bytes;
1203 	uint8_t			bundling = 1;
1204 	uint16_t		blk_size;
1205 	uint8_t			*clr_ptr;
1206 	struct crc_context	*crc_ctx_pkt = NULL;
1207 	struct qla_hw_data	*ha;
1208 	uint8_t			additional_fcpcdb_len;
1209 	uint16_t		fcp_cmnd_len;
1210 	struct fcp_cmnd		*fcp_cmnd;
1211 	dma_addr_t		crc_ctx_dma;
1212 
1213 	cmd = GET_CMD_SP(sp);
1214 
1215 	/* Update entry type to indicate Command Type CRC_2 IOCB */
1216 	*((uint32_t *)(&cmd_pkt->entry_type)) = cpu_to_le32(COMMAND_TYPE_CRC_2);
1217 
1218 	vha = sp->vha;
1219 	ha = vha->hw;
1220 
1221 	/* No data transfer */
1222 	data_bytes = scsi_bufflen(cmd);
1223 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1224 		cmd_pkt->byte_count = cpu_to_le32(0);
1225 		return QLA_SUCCESS;
1226 	}
1227 
1228 	cmd_pkt->vp_index = sp->vha->vp_idx;
1229 
1230 	/* Set transfer direction */
1231 	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
1232 		cmd_pkt->control_flags =
1233 		    cpu_to_le16(CF_WRITE_DATA);
1234 	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
1235 		cmd_pkt->control_flags =
1236 		    cpu_to_le16(CF_READ_DATA);
1237 	}
1238 
1239 	if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1240 	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP) ||
1241 	    (scsi_get_prot_op(cmd) == SCSI_PROT_READ_STRIP) ||
1242 	    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_INSERT))
1243 		bundling = 0;
1244 
1245 	/* Allocate CRC context from global pool */
1246 	crc_ctx_pkt = sp->u.scmd.ctx =
1247 	    dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
1248 
1249 	if (!crc_ctx_pkt)
1250 		goto crc_queuing_error;
1251 
1252 	/* Zero out CTX area. */
1253 	clr_ptr = (uint8_t *)crc_ctx_pkt;
1254 	memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
1255 
1256 	crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
1257 
1258 	sp->flags |= SRB_CRC_CTX_DMA_VALID;
1259 
1260 	/* Set handle */
1261 	crc_ctx_pkt->handle = cmd_pkt->handle;
1262 
1263 	INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
1264 
1265 	qla24xx_set_t10dif_tags(sp, (struct fw_dif_context *)
1266 	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
1267 
1268 	cmd_pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
1269 	cmd_pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
1270 	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
1271 
1272 	/* Determine SCSI command length -- align to 4 byte boundary */
1273 	if (cmd->cmd_len > 16) {
1274 		additional_fcpcdb_len = cmd->cmd_len - 16;
1275 		if ((cmd->cmd_len % 4) != 0) {
1276 			/* SCSI cmd > 16 bytes must be multiple of 4 */
1277 			goto crc_queuing_error;
1278 		}
1279 		fcp_cmnd_len = 12 + cmd->cmd_len + 4;
1280 	} else {
1281 		additional_fcpcdb_len = 0;
1282 		fcp_cmnd_len = 12 + 16 + 4;
1283 	}
1284 
1285 	fcp_cmnd = &crc_ctx_pkt->fcp_cmnd;
1286 
1287 	fcp_cmnd->additional_cdb_len = additional_fcpcdb_len;
1288 	if (cmd->sc_data_direction == DMA_TO_DEVICE)
1289 		fcp_cmnd->additional_cdb_len |= 1;
1290 	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
1291 		fcp_cmnd->additional_cdb_len |= 2;
1292 
1293 	int_to_scsilun(cmd->device->lun, &fcp_cmnd->lun);
1294 	memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
1295 	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len);
1296 	cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32(
1297 	    LSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1298 	cmd_pkt->fcp_cmnd_dseg_address[1] = cpu_to_le32(
1299 	    MSD(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF));
1300 	fcp_cmnd->task_management = 0;
1301 	fcp_cmnd->task_attribute = TSK_SIMPLE;
1302 
1303 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
1304 
1305 	/* Compute dif len and adjust data len to include protection */
1306 	dif_bytes = 0;
1307 	blk_size = cmd->device->sector_size;
1308 	dif_bytes = (data_bytes / blk_size) * 8;
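	/*
	 * Each protection interval carries an 8-byte DIF tuple.  For example,
	 * a 4096-byte transfer with 512-byte sectors has 8 intervals, so
	 * dif_bytes = 64 and, for the *_PASS, READ_STRIP and WRITE_INSERT
	 * cases below, total_bytes (the value written to fcp_dl) becomes
	 * 4096 + 64 = 4160.
	 */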
1309 
1310 	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
1311 	case SCSI_PROT_READ_INSERT:
1312 	case SCSI_PROT_WRITE_STRIP:
1313 	    total_bytes = data_bytes;
1314 	    data_bytes += dif_bytes;
1315 	    break;
1316 
1317 	case SCSI_PROT_READ_STRIP:
1318 	case SCSI_PROT_WRITE_INSERT:
1319 	case SCSI_PROT_READ_PASS:
1320 	case SCSI_PROT_WRITE_PASS:
1321 	    total_bytes = data_bytes + dif_bytes;
1322 	    break;
1323 	default:
1324 	    BUG();
1325 	}
1326 
1327 	if (!qla2x00_hba_err_chk_enabled(sp))
1328 		fw_prot_opts |= 0x10; /* Disable Guard tag checking */
1329 	/* HBA error checking enabled */
1330 	else if (IS_PI_UNINIT_CAPABLE(ha)) {
1331 		if ((scsi_get_prot_type(GET_CMD_SP(sp)) == SCSI_PROT_DIF_TYPE1)
1332 		    || (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1333 			SCSI_PROT_DIF_TYPE2))
1334 			fw_prot_opts |= BIT_10;
1335 		else if (scsi_get_prot_type(GET_CMD_SP(sp)) ==
1336 		    SCSI_PROT_DIF_TYPE3)
1337 			fw_prot_opts |= BIT_11;
1338 	}
1339 
1340 	if (!bundling) {
1341 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
1342 	} else {
1343 		/*
1344 		 * Configure bundling if we need to fetch interleaving
1345 		 * protection PCI accesses.
1346 		 */
1347 		fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
1348 		crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
1349 		crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
1350 							tot_prot_dsds);
1351 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
1352 	}
1353 
1354 	/* Finish the common fields of CRC pkt */
1355 	crc_ctx_pkt->blk_size = cpu_to_le16(blk_size);
1356 	crc_ctx_pkt->prot_opts = cpu_to_le16(fw_prot_opts);
1357 	crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
1358 	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
1359 	/* Fibre channel byte count */
1360 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
1361 	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
1362 	    additional_fcpcdb_len);
1363 	*fcp_dl = htonl(total_bytes);
1364 
1365 	if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
1366 		cmd_pkt->byte_count = cpu_to_le32(0);
1367 		return QLA_SUCCESS;
1368 	}
1369 	/* Walks data segments */
1370 
1371 	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
1372 
1373 	if (!bundling && tot_prot_dsds) {
1374 		if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
1375 			cur_dsd, tot_dsds, NULL))
1376 			goto crc_queuing_error;
1377 	} else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
1378 			(tot_dsds - tot_prot_dsds), NULL))
1379 		goto crc_queuing_error;
1380 
1381 	if (bundling && tot_prot_dsds) {
1382 		/* Walks dif segments */
1383 		cmd_pkt->control_flags |= cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
1384 		cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
1385 		if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
1386 				tot_prot_dsds, NULL))
1387 			goto crc_queuing_error;
1388 	}
1389 	return QLA_SUCCESS;
1390 
1391 crc_queuing_error:
1392 	/* Cleanup will be performed by the caller */
1393 
1394 	return QLA_FUNCTION_FAILED;
1395 }
1396 
1397 /**
1398  * qla24xx_start_scsi() - Send a SCSI command to the ISP
1399  * @sp: command to send to the ISP
1400  *
1401  * Returns non-zero if a failure occurred, else zero.
1402  */
1403 int
1404 qla24xx_start_scsi(srb_t *sp)
1405 {
1406 	int		nseg;
1407 	unsigned long   flags;
1408 	uint32_t	*clr_ptr;
1409 	uint32_t        index;
1410 	uint32_t	handle;
1411 	struct cmd_type_7 *cmd_pkt;
1412 	uint16_t	cnt;
1413 	uint16_t	req_cnt;
1414 	uint16_t	tot_dsds;
1415 	struct req_que *req = NULL;
1416 	struct rsp_que *rsp = NULL;
1417 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1418 	struct scsi_qla_host *vha = sp->vha;
1419 	struct qla_hw_data *ha = vha->hw;
1420 
1421 	/* Setup device pointers. */
1422 	req = vha->req;
1423 	rsp = req->rsp;
1424 
1425 	/* So we know we haven't pci_map'ed anything yet */
1426 	tot_dsds = 0;
1427 
1428 	/* Send marker if required */
1429 	if (vha->marker_needed != 0) {
1430 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1431 		    QLA_SUCCESS)
1432 			return QLA_FUNCTION_FAILED;
1433 		vha->marker_needed = 0;
1434 	}
1435 
1436 	/* Acquire ring specific lock */
1437 	spin_lock_irqsave(&ha->hardware_lock, flags);
1438 
1439 	/* Check for room in outstanding command list. */
1440 	handle = req->current_outstanding_cmd;
1441 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1442 		handle++;
1443 		if (handle == req->num_outstanding_cmds)
1444 			handle = 1;
1445 		if (!req->outstanding_cmds[handle])
1446 			break;
1447 	}
1448 	if (index == req->num_outstanding_cmds)
1449 		goto queuing_error;
1450 
1451 	/* Map the sg table so we have an accurate count of sg entries needed */
1452 	if (scsi_sg_count(cmd)) {
1453 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1454 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1455 		if (unlikely(!nseg))
1456 			goto queuing_error;
1457 	} else
1458 		nseg = 0;
1459 
1460 	tot_dsds = nseg;
1461 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1462 	if (req->cnt < (req_cnt + 2)) {
1463 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1464 		    RD_REG_DWORD_RELAXED(req->req_q_out);
1465 		if (req->ring_index < cnt)
1466 			req->cnt = cnt - req->ring_index;
1467 		else
1468 			req->cnt = req->length -
1469 				(req->ring_index - cnt);
1470 		if (req->cnt < (req_cnt + 2))
1471 			goto queuing_error;
1472 	}
1473 
1474 	/* Build command packet. */
1475 	req->current_outstanding_cmd = handle;
1476 	req->outstanding_cmds[handle] = sp;
1477 	sp->handle = handle;
1478 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1479 	req->cnt -= req_cnt;
1480 
1481 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1482 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1483 
1484 	/* Zero out remaining portion of packet. */
1485 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1486 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1487 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1488 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1489 
1490 	/* Set NPORT-ID and LUN number*/
1491 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1492 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1493 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1494 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1495 	cmd_pkt->vp_index = sp->vha->vp_idx;
1496 
1497 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1498 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1499 
1500 	cmd_pkt->task = TSK_SIMPLE;
1501 
1502 	/* Load SCSI command packet. */
1503 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1504 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1505 
1506 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1507 
1508 	/* Build IOCB segments */
1509 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1510 
1511 	/* Set total data segment count. */
1512 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1513 	wmb();
1514 	/* Adjust ring index. */
1515 	req->ring_index++;
1516 	if (req->ring_index == req->length) {
1517 		req->ring_index = 0;
1518 		req->ring_ptr = req->ring;
1519 	} else
1520 		req->ring_ptr++;
1521 
1522 	sp->flags |= SRB_DMA_VALID;
1523 
1524 	/* Set chip new ring index. */
1525 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1526 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1527 
1528 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1529 	if (vha->flags.process_response_queue &&
1530 		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1531 		qla24xx_process_response_queue(vha, rsp);
1532 
1533 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1534 	return QLA_SUCCESS;
1535 
1536 queuing_error:
1537 	if (tot_dsds)
1538 		scsi_dma_unmap(cmd);
1539 
1540 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1541 
1542 	return QLA_FUNCTION_FAILED;
1543 }
1544 
1545 /**
1546  * qla24xx_dif_start_scsi() - Send a SCSI command to the ISP
1547  * @sp: command to send to the ISP
1548  *
1549  * Returns non-zero if a failure occurred, else zero.
1550  */
1551 int
1552 qla24xx_dif_start_scsi(srb_t *sp)
1553 {
1554 	int			nseg;
1555 	unsigned long		flags;
1556 	uint32_t		*clr_ptr;
1557 	uint32_t		index;
1558 	uint32_t		handle;
1559 	uint16_t		cnt;
1560 	uint16_t		req_cnt = 0;
1561 	uint16_t		tot_dsds;
1562 	uint16_t		tot_prot_dsds;
1563 	uint16_t		fw_prot_opts = 0;
1564 	struct req_que		*req = NULL;
1565 	struct rsp_que		*rsp = NULL;
1566 	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
1567 	struct scsi_qla_host	*vha = sp->vha;
1568 	struct qla_hw_data	*ha = vha->hw;
1569 	struct cmd_type_crc_2	*cmd_pkt;
1570 	uint32_t		status = 0;
1571 
1572 #define QDSS_GOT_Q_SPACE	BIT_0
1573 
1574 	/* Only process protection commands or CDBs longer than 16 bytes in this routine */
1575 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1576 		if (cmd->cmd_len <= 16)
1577 			return qla24xx_start_scsi(sp);
1578 	}
1579 
1580 	/* Setup device pointers. */
1581 	req = vha->req;
1582 	rsp = req->rsp;
1583 
1584 	/* So we know we haven't pci_map'ed anything yet */
1585 	tot_dsds = 0;
1586 
1587 	/* Send marker if required */
1588 	if (vha->marker_needed != 0) {
1589 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1590 		    QLA_SUCCESS)
1591 			return QLA_FUNCTION_FAILED;
1592 		vha->marker_needed = 0;
1593 	}
1594 
1595 	/* Acquire ring specific lock */
1596 	spin_lock_irqsave(&ha->hardware_lock, flags);
1597 
1598 	/* Check for room in outstanding command list. */
1599 	handle = req->current_outstanding_cmd;
1600 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1601 		handle++;
1602 		if (handle == req->num_outstanding_cmds)
1603 			handle = 1;
1604 		if (!req->outstanding_cmds[handle])
1605 			break;
1606 	}
1607 
1608 	if (index == req->num_outstanding_cmds)
1609 		goto queuing_error;
1610 
1611 	/* Compute number of required data segments */
1612 	/* Map the sg table so we have an accurate count of sg entries needed */
1613 	if (scsi_sg_count(cmd)) {
1614 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1615 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1616 		if (unlikely(!nseg))
1617 			goto queuing_error;
1618 		else
1619 			sp->flags |= SRB_DMA_VALID;
1620 
1621 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1622 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1623 			struct qla2_sgx sgx;
1624 			uint32_t	partial;
1625 
1626 			memset(&sgx, 0, sizeof(struct qla2_sgx));
1627 			sgx.tot_bytes = scsi_bufflen(cmd);
1628 			sgx.cur_sg = scsi_sglist(cmd);
1629 			sgx.sp = sp;
1630 
1631 			nseg = 0;
1632 			while (qla24xx_get_one_block_sg(
1633 			    cmd->device->sector_size, &sgx, &partial))
1634 				nseg++;
1635 		}
1636 	} else
1637 		nseg = 0;
1638 
1639 	/* number of required data segments */
1640 	tot_dsds = nseg;
1641 
1642 	/* Compute number of required protection segments */
1643 	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
1644 		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
1645 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
1646 		if (unlikely(!nseg))
1647 			goto queuing_error;
1648 		else
1649 			sp->flags |= SRB_CRC_PROT_DMA_VALID;
1650 
1651 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1652 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1653 			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
1654 		}
1655 	} else {
1656 		nseg = 0;
1657 	}
1658 
1659 	req_cnt = 1;
1660 	/* Total Data and protection sg segment(s) */
1661 	tot_prot_dsds = nseg;
1662 	tot_dsds += nseg;
1663 	if (req->cnt < (req_cnt + 2)) {
1664 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1665 		    RD_REG_DWORD_RELAXED(req->req_q_out);
1666 		if (req->ring_index < cnt)
1667 			req->cnt = cnt - req->ring_index;
1668 		else
1669 			req->cnt = req->length -
1670 				(req->ring_index - cnt);
1671 		if (req->cnt < (req_cnt + 2))
1672 			goto queuing_error;
1673 	}
1674 
1675 	status |= QDSS_GOT_Q_SPACE;
1676 
1677 	/* Build header part of command packet (excluding the OPCODE). */
1678 	req->current_outstanding_cmd = handle;
1679 	req->outstanding_cmds[handle] = sp;
1680 	sp->handle = handle;
1681 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1682 	req->cnt -= req_cnt;
1683 
1684 	/* Fill-in common area */
1685 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
1686 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1687 
1688 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1689 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1690 
1691 	/* Set NPORT-ID and LUN number*/
1692 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1693 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1694 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1695 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1696 
1697 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1698 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1699 
1700 	/* Total Data and protection segment(s) */
1701 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1702 
1703 	/* Build IOCB segments and adjust for data protection segments */
1704 	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
1705 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
1706 		QLA_SUCCESS)
1707 		goto queuing_error;
1708 
1709 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1710 	/* Specify response queue number where completion should happen */
1711 	cmd_pkt->entry_status = (uint8_t) rsp->id;
1712 	cmd_pkt->timeout = cpu_to_le16(0);
1713 	wmb();
1714 
1715 	/* Adjust ring index. */
1716 	req->ring_index++;
1717 	if (req->ring_index == req->length) {
1718 		req->ring_index = 0;
1719 		req->ring_ptr = req->ring;
1720 	} else
1721 		req->ring_ptr++;
1722 
1723 	/* Set chip new ring index. */
1724 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1725 	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
1726 
1727 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1728 	if (vha->flags.process_response_queue &&
1729 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1730 		qla24xx_process_response_queue(vha, rsp);
1731 
1732 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1733 
1734 	return QLA_SUCCESS;
1735 
1736 queuing_error:
1737 	if (status & QDSS_GOT_Q_SPACE) {
1738 		req->outstanding_cmds[handle] = NULL;
1739 		req->cnt += req_cnt;
1740 	}
1741 	/* Cleanup will be performed by the caller (queuecommand) */
1742 
1743 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
1744 	return QLA_FUNCTION_FAILED;
1745 }
1746 
1747 /**
1748  * qla2xxx_start_scsi_mq() - Send a SCSI command to the ISP
1749  * @sp: command to send to the ISP
1750  *
1751  * Returns non-zero if a failure occurred, else zero.
1752  */
1753 static int
1754 qla2xxx_start_scsi_mq(srb_t *sp)
1755 {
1756 	int		nseg;
1757 	unsigned long   flags;
1758 	uint32_t	*clr_ptr;
1759 	uint32_t        index;
1760 	uint32_t	handle;
1761 	struct cmd_type_7 *cmd_pkt;
1762 	uint16_t	cnt;
1763 	uint16_t	req_cnt;
1764 	uint16_t	tot_dsds;
1765 	struct req_que *req = NULL;
1766 	struct rsp_que *rsp = NULL;
1767 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
1768 	struct scsi_qla_host *vha = sp->fcport->vha;
1769 	struct qla_hw_data *ha = vha->hw;
1770 	struct qla_qpair *qpair = sp->qpair;
1771 
1772 	/* Setup qpair pointers */
1773 	rsp = qpair->rsp;
1774 	req = qpair->req;
1775 
1776 	/* So we know we haven't pci_map'ed anything yet */
1777 	tot_dsds = 0;
1778 
1779 	/* Send marker if required */
1780 	if (vha->marker_needed != 0) {
1781 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1782 		    QLA_SUCCESS)
1783 			return QLA_FUNCTION_FAILED;
1784 		vha->marker_needed = 0;
1785 	}
1786 
1787 	/* Acquire qpair specific lock */
1788 	spin_lock_irqsave(&qpair->qp_lock, flags);
1789 
1790 	/* Check for room in outstanding command list. */
1791 	handle = req->current_outstanding_cmd;
1792 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1793 		handle++;
1794 		if (handle == req->num_outstanding_cmds)
1795 			handle = 1;
1796 		if (!req->outstanding_cmds[handle])
1797 			break;
1798 	}
1799 	if (index == req->num_outstanding_cmds)
1800 		goto queuing_error;
1801 
1802 	/* Map the sg table so we have an accurate count of sg entries needed */
1803 	if (scsi_sg_count(cmd)) {
1804 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1805 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1806 		if (unlikely(!nseg))
1807 			goto queuing_error;
1808 	} else
1809 		nseg = 0;
1810 
1811 	tot_dsds = nseg;
1812 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
1813 	if (req->cnt < (req_cnt + 2)) {
1814 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
1815 		    RD_REG_DWORD_RELAXED(req->req_q_out);
1816 		if (req->ring_index < cnt)
1817 			req->cnt = cnt - req->ring_index;
1818 		else
1819 			req->cnt = req->length -
1820 				(req->ring_index - cnt);
1821 		if (req->cnt < (req_cnt + 2))
1822 			goto queuing_error;
1823 	}
1824 
1825 	/* Build command packet. */
1826 	req->current_outstanding_cmd = handle;
1827 	req->outstanding_cmds[handle] = sp;
1828 	sp->handle = handle;
1829 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
1830 	req->cnt -= req_cnt;
1831 
1832 	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
1833 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
1834 
1835 	/* Zero out remaining portion of packet. */
1836 	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
1837 	clr_ptr = (uint32_t *)cmd_pkt + 2;
1838 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
1839 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
1840 
1841 	/* Set NPORT-ID and LUN number*/
1842 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1843 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1844 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1845 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1846 	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1847 
1848 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1849 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
1850 
1851 	cmd_pkt->task = TSK_SIMPLE;
1852 
1853 	/* Load SCSI command packet. */
1854 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
1855 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
1856 
1857 	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
1858 
1859 	/* Build IOCB segments */
1860 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
1861 
1862 	/* Set total data segment count. */
1863 	cmd_pkt->entry_count = (uint8_t)req_cnt;
1864 	wmb();
1865 	/* Adjust ring index. */
1866 	req->ring_index++;
1867 	if (req->ring_index == req->length) {
1868 		req->ring_index = 0;
1869 		req->ring_ptr = req->ring;
1870 	} else
1871 		req->ring_ptr++;
1872 
1873 	sp->flags |= SRB_DMA_VALID;
1874 
1875 	/* Set chip new ring index. */
1876 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
1877 
1878 	/* Manage unprocessed RIO/ZIO commands in response queue. */
1879 	if (vha->flags.process_response_queue &&
1880 		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
1881 		qla24xx_process_response_queue(vha, rsp);
1882 
1883 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
1884 	return QLA_SUCCESS;
1885 
1886 queuing_error:
1887 	if (tot_dsds)
1888 		scsi_dma_unmap(cmd);
1889 
1890 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
1891 
1892 	return QLA_FUNCTION_FAILED;
1893 }
1894 
1895 
1896 /**
1897  * qla2xxx_dif_start_scsi_mq() - Send a SCSI command to the ISP
1898  * @sp: command to send to the ISP
1899  *
1900  * Returns non-zero if a failure occurred, else zero.
1901  */
1902 int
1903 qla2xxx_dif_start_scsi_mq(srb_t *sp)
1904 {
1905 	int			nseg;
1906 	unsigned long		flags;
1907 	uint32_t		*clr_ptr;
1908 	uint32_t		index;
1909 	uint32_t		handle;
1910 	uint16_t		cnt;
1911 	uint16_t		req_cnt = 0;
1912 	uint16_t		tot_dsds;
1913 	uint16_t		tot_prot_dsds;
1914 	uint16_t		fw_prot_opts = 0;
1915 	struct req_que		*req = NULL;
1916 	struct rsp_que		*rsp = NULL;
1917 	struct scsi_cmnd	*cmd = GET_CMD_SP(sp);
1918 	struct scsi_qla_host	*vha = sp->fcport->vha;
1919 	struct qla_hw_data	*ha = vha->hw;
1920 	struct cmd_type_crc_2	*cmd_pkt;
1921 	uint32_t		status = 0;
1922 	struct qla_qpair	*qpair = sp->qpair;
1923 
1924 #define QDSS_GOT_Q_SPACE	BIT_0
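	/*
	 * QDSS_GOT_Q_SPACE is set in 'status' once a handle and ring space
	 * have been claimed, so the error path can release them again.
	 */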
1925 
1926 	/* Check for host side state */
1927 	if (!qpair->online) {
1928 		cmd->result = DID_NO_CONNECT << 16;
1929 		return QLA_INTERFACE_ERROR;
1930 	}
1931 
1932 	if (!qpair->difdix_supported &&
1933 		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1934 		cmd->result = DID_NO_CONNECT << 16;
1935 		return QLA_INTERFACE_ERROR;
1936 	}
1937 
1938 	/* Only process protection operations or >16 byte CDBs in this routine */
1939 	if (scsi_get_prot_op(cmd) == SCSI_PROT_NORMAL) {
1940 		if (cmd->cmd_len <= 16)
1941 			return qla2xxx_start_scsi_mq(sp);
1942 	}
1943 
1944 	/* Setup qpair pointers */
1945 	rsp = qpair->rsp;
1946 	req = qpair->req;
1947 
1948 	/* So we know we haven't pci_map'ed anything yet */
1949 	tot_dsds = 0;
1950 
1951 	/* Send marker if required */
1952 	if (vha->marker_needed != 0) {
1953 		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
1954 		    QLA_SUCCESS)
1955 			return QLA_FUNCTION_FAILED;
1956 		vha->marker_needed = 0;
1957 	}
1958 
1959 	/* Acquire qpair specific lock */
1960 	spin_lock_irqsave(&qpair->qp_lock, flags);
1961 
1962 	/* Check for room in outstanding command list. */
1963 	handle = req->current_outstanding_cmd;
1964 	for (index = 1; index < req->num_outstanding_cmds; index++) {
1965 		handle++;
1966 		if (handle == req->num_outstanding_cmds)
1967 			handle = 1;
1968 		if (!req->outstanding_cmds[handle])
1969 			break;
1970 	}
1971 
1972 	if (index == req->num_outstanding_cmds)
1973 		goto queuing_error;
1974 
1975 	/* Compute number of required data segments */
1976 	/* Map the sg table so we have an accurate count of sg entries needed */
1977 	if (scsi_sg_count(cmd)) {
1978 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
1979 		    scsi_sg_count(cmd), cmd->sc_data_direction);
1980 		if (unlikely(!nseg))
1981 			goto queuing_error;
1982 		else
1983 			sp->flags |= SRB_DMA_VALID;
1984 
1985 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
1986 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
1987 			struct qla2_sgx sgx;
1988 			uint32_t	partial;
1989 
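			/*
			 * Re-count the segments for protection insert/strip
			 * operations: the S/G list is walked in logical
			 * block sized pieces so that no data segment spans a
			 * block boundary.
			 */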
1990 			memset(&sgx, 0, sizeof(struct qla2_sgx));
1991 			sgx.tot_bytes = scsi_bufflen(cmd);
1992 			sgx.cur_sg = scsi_sglist(cmd);
1993 			sgx.sp = sp;
1994 
1995 			nseg = 0;
1996 			while (qla24xx_get_one_block_sg(
1997 			    cmd->device->sector_size, &sgx, &partial))
1998 				nseg++;
1999 		}
2000 	} else
2001 		nseg = 0;
2002 
2003 	/* number of required data segments */
2004 	tot_dsds = nseg;
2005 
2006 	/* Compute number of required protection segments */
2007 	if (qla24xx_configure_prot_mode(sp, &fw_prot_opts)) {
2008 		nseg = dma_map_sg(&ha->pdev->dev, scsi_prot_sglist(cmd),
2009 		    scsi_prot_sg_count(cmd), cmd->sc_data_direction);
2010 		if (unlikely(!nseg))
2011 			goto queuing_error;
2012 		else
2013 			sp->flags |= SRB_CRC_PROT_DMA_VALID;
2014 
2015 		if ((scsi_get_prot_op(cmd) == SCSI_PROT_READ_INSERT) ||
2016 		    (scsi_get_prot_op(cmd) == SCSI_PROT_WRITE_STRIP)) {
2017 			nseg = scsi_bufflen(cmd) / cmd->device->sector_size;
2018 		}
2019 	} else {
2020 		nseg = 0;
2021 	}
2022 
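	/*
	 * However many segments there are, the CRC command itself occupies
	 * a single request-queue entry; the data and protection descriptors
	 * are laid out by qla24xx_build_scsi_crc_2_iocbs() below.
	 */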
2023 	req_cnt = 1;
2024 	/* Total Data and protection sg segment(s) */
2025 	tot_prot_dsds = nseg;
2026 	tot_dsds += nseg;
2027 	if (req->cnt < (req_cnt + 2)) {
2028 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
2029 		    RD_REG_DWORD_RELAXED(req->req_q_out);
2030 		if (req->ring_index < cnt)
2031 			req->cnt = cnt - req->ring_index;
2032 		else
2033 			req->cnt = req->length -
2034 				(req->ring_index - cnt);
2035 		if (req->cnt < (req_cnt + 2))
2036 			goto queuing_error;
2037 	}
2038 
2039 	status |= QDSS_GOT_Q_SPACE;
2040 
2041 	/* Build header part of command packet (excluding the OPCODE). */
2042 	req->current_outstanding_cmd = handle;
2043 	req->outstanding_cmds[handle] = sp;
2044 	sp->handle = handle;
2045 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
2046 	req->cnt -= req_cnt;
2047 
2048 	/* Fill-in common area */
2049 	cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr;
2050 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2051 
2052 	clr_ptr = (uint32_t *)cmd_pkt + 2;
2053 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2054 
2055 	/* Set NPORT-ID and LUN number*/
2056 	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2057 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2058 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2059 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2060 
2061 	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2062 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2063 
2064 	/* Total Data and protection segment(s) */
2065 	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2066 
2067 	/* Build IOCB segments and adjust for data protection segments */
2068 	if (qla24xx_build_scsi_crc_2_iocbs(sp, (struct cmd_type_crc_2 *)
2069 	    req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) !=
2070 		QLA_SUCCESS)
2071 		goto queuing_error;
2072 
2073 	cmd_pkt->entry_count = (uint8_t)req_cnt;
2074 	cmd_pkt->timeout = cpu_to_le16(0);
2075 	wmb();
2076 
2077 	/* Adjust ring index. */
2078 	req->ring_index++;
2079 	if (req->ring_index == req->length) {
2080 		req->ring_index = 0;
2081 		req->ring_ptr = req->ring;
2082 	} else
2083 		req->ring_ptr++;
2084 
2085 	/* Set chip new ring index. */
2086 	WRT_REG_DWORD(req->req_q_in, req->ring_index);
2087 
2088 	/* Manage unprocessed RIO/ZIO commands in response queue. */
2089 	if (vha->flags.process_response_queue &&
2090 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
2091 		qla24xx_process_response_queue(vha, rsp);
2092 
2093 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2094 
2095 	return QLA_SUCCESS;
2096 
2097 queuing_error:
2098 	if (status & QDSS_GOT_Q_SPACE) {
2099 		req->outstanding_cmds[handle] = NULL;
2100 		req->cnt += req_cnt;
2101 	}
2102 	/* Cleanup will be performed by the caller (queuecommand) */
2103 
2104 	spin_unlock_irqrestore(&qpair->qp_lock, flags);
2105 	return QLA_FUNCTION_FAILED;
2106 }
2107 
2108 /* Generic Control-SRB manipulation functions. */
2109 
2110 /* hardware_lock assumed to be held. */
2111 void *
2112 qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
2113 {
2114 	if (qla2x00_reset_active(vha))
2115 		return NULL;
2116 
2117 	return qla2x00_alloc_iocbs(vha, sp);
2118 }
2119 
2120 void *
2121 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
2122 {
2123 	struct qla_hw_data *ha = vha->hw;
2124 	struct req_que *req = ha->req_q_map[0];
2125 	device_reg_t *reg = ISP_QUE_REG(ha, req->id);
2126 	uint32_t index, handle;
2127 	request_t *pkt;
2128 	uint16_t cnt, req_cnt;
2129 
2130 	pkt = NULL;
2131 	req_cnt = 1;
2132 	handle = 0;
2133 
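	/*
	 * Without an SRB there is nothing to track in the outstanding
	 * command array; only a raw request-queue entry is needed.
	 */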
2134 	if (!sp)
2135 		goto skip_cmd_array;
2136 
2137 	/* Check for room in outstanding command list. */
2138 	handle = req->current_outstanding_cmd;
2139 	for (index = 1; index < req->num_outstanding_cmds; index++) {
2140 		handle++;
2141 		if (handle == req->num_outstanding_cmds)
2142 			handle = 1;
2143 		if (!req->outstanding_cmds[handle])
2144 			break;
2145 	}
2146 	if (index == req->num_outstanding_cmds) {
2147 		ql_log(ql_log_warn, vha, 0x700b,
2148 		    "No room on outstanding cmd array.\n");
2149 		goto queuing_error;
2150 	}
2151 
2152 	/* Prep command array. */
2153 	req->current_outstanding_cmd = handle;
2154 	req->outstanding_cmds[handle] = sp;
2155 	sp->handle = handle;
2156 
2157 	/*
	 * Adjust entry-counts as needed: non-SCSI SRBs pre-compute the
	 * number of request entries they need in sp->iocbs.
	 */
2158 	if (sp->type != SRB_SCSI_CMD)
2159 		req_cnt = sp->iocbs;
2160 
2161 skip_cmd_array:
2162 	/* Check for room on request queue. */
2163 	if (req->cnt < req_cnt + 2) {
2164 		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
2165 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
2166 		else if (IS_P3P_TYPE(ha))
2167 			cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
2168 		else if (IS_FWI2_CAPABLE(ha))
2169 			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
2170 		else if (IS_QLAFX00(ha))
2171 			cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out);
2172 		else
2173 			cnt = qla2x00_debounce_register(
2174 			    ISP_REQ_Q_OUT(ha, &reg->isp));
2175 
2176 		if  (req->ring_index < cnt)
2177 			req->cnt = cnt - req->ring_index;
2178 		else
2179 			req->cnt = req->length -
2180 			    (req->ring_index - cnt);
2181 	}
2182 	if (req->cnt < req_cnt + 2)
2183 		goto queuing_error;
2184 
2185 	/* Prep packet */
2186 	req->cnt -= req_cnt;
2187 	pkt = req->ring_ptr;
2188 	memset(pkt, 0, REQUEST_ENTRY_SIZE);
2189 	if (IS_QLAFX00(ha)) {
2190 		WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt);
2191 		WRT_REG_WORD((void __iomem *)&pkt->handle, handle);
2192 	} else {
2193 		pkt->entry_count = req_cnt;
2194 		pkt->handle = handle;
2195 	}
	return pkt;
2196 
2197 queuing_error:
2198 	vha->tgt_counters.num_alloc_iocb_failed++;
2199 	return pkt;
2200 }
2201 
2202 static void
2203 qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2204 {
2205 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2206 
2207 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2208 	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
2209 	if (lio->u.logio.flags & SRB_LOGIN_COND_PLOGI)
2210 		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
2211 	if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI)
2212 		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
2213 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2214 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2215 	logio->port_id[1] = sp->fcport->d_id.b.area;
2216 	logio->port_id[2] = sp->fcport->d_id.b.domain;
2217 	logio->vp_index = sp->vha->vp_idx;
2218 }
2219 
2220 static void
2221 qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
2222 {
2223 	struct qla_hw_data *ha = sp->vha->hw;
2224 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2225 	uint16_t opts;
2226 
2227 	mbx->entry_type = MBX_IOCB_TYPE;
2228 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2229 	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
2230 	opts = lio->u.logio.flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
2231 	opts |= lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
2232 	if (HAS_EXTENDED_IDS(ha)) {
2233 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2234 		mbx->mb10 = cpu_to_le16(opts);
2235 	} else {
2236 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
2237 	}
2238 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2239 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2240 	    sp->fcport->d_id.b.al_pa);
2241 	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2242 }
2243 
2244 static void
2245 qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2246 {
2247 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2248 	logio->control_flags =
2249 	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
2250 	if (!sp->fcport->se_sess ||
2251 	    !sp->fcport->keep_nport_handle)
2252 		logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
2253 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2254 	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
2255 	logio->port_id[1] = sp->fcport->d_id.b.area;
2256 	logio->port_id[2] = sp->fcport->d_id.b.domain;
2257 	logio->vp_index = sp->vha->vp_idx;
2258 }
2259 
2260 static void
2261 qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
2262 {
2263 	struct qla_hw_data *ha = sp->vha->hw;
2264 
2265 	mbx->entry_type = MBX_IOCB_TYPE;
2266 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2267 	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
2268 	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
2269 	    cpu_to_le16(sp->fcport->loop_id):
2270 	    cpu_to_le16(sp->fcport->loop_id << 8);
2271 	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
2272 	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
2273 	    sp->fcport->d_id.b.al_pa);
2274 	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2275 	/* Implicit: mbx->mb10 = 0. */
2276 }
2277 
2278 static void
2279 qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
2280 {
2281 	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
2282 	logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
2283 	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2284 	logio->vp_index = sp->vha->vp_idx;
2285 }
2286 
2287 static void
2288 qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
2289 {
2290 	struct qla_hw_data *ha = sp->vha->hw;
2291 
2292 	mbx->entry_type = MBX_IOCB_TYPE;
2293 	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
2294 	mbx->mb0 = cpu_to_le16(MBC_GET_PORT_DATABASE);
2295 	if (HAS_EXTENDED_IDS(ha)) {
2296 		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
2297 		mbx->mb10 = cpu_to_le16(BIT_0);
2298 	} else {
2299 		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | BIT_0);
2300 	}
2301 	mbx->mb2 = cpu_to_le16(MSW(ha->async_pd_dma));
2302 	mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
2303 	mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
2304 	mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
2305 	mbx->mb9 = cpu_to_le16(sp->vha->vp_idx);
2306 }
2307 
2308 static void
2309 qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2310 {
2311 	uint32_t flags;
2312 	uint64_t lun;
2313 	struct fc_port *fcport = sp->fcport;
2314 	scsi_qla_host_t *vha = fcport->vha;
2315 	struct qla_hw_data *ha = vha->hw;
2316 	struct srb_iocb *iocb = &sp->u.iocb_cmd;
2317 	struct req_que *req = vha->req;
2318 
2319 	flags = iocb->u.tmf.flags;
2320 	lun = iocb->u.tmf.lun;
2321 
2322 	tsk->entry_type = TSK_MGMT_IOCB_TYPE;
2323 	tsk->entry_count = 1;
2324 	tsk->handle = MAKE_HANDLE(req->id, tsk->handle);
2325 	tsk->nport_handle = cpu_to_le16(fcport->loop_id);
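	/* Firmware timeout for the task management request: 2 * R_A_TOV. */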
2326 	tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
2327 	tsk->control_flags = cpu_to_le32(flags);
2328 	tsk->port_id[0] = fcport->d_id.b.al_pa;
2329 	tsk->port_id[1] = fcport->d_id.b.area;
2330 	tsk->port_id[2] = fcport->d_id.b.domain;
2331 	tsk->vp_index = fcport->vha->vp_idx;
2332 
2333 	if (flags == TCF_LUN_RESET) {
2334 		int_to_scsilun(lun, &tsk->lun);
2335 		host_to_fcp_swap((uint8_t *)&tsk->lun,
2336 			sizeof(tsk->lun));
2337 	}
2338 }
2339 
2340 static void
2341 qla2x00_els_dcmd_sp_free(void *data)
2342 {
2343 	srb_t *sp = data;
2344 	struct srb_iocb *elsio = &sp->u.iocb_cmd;
2345 
2346 	kfree(sp->fcport);
2347 
2348 	if (elsio->u.els_logo.els_logo_pyld)
2349 		dma_free_coherent(&sp->vha->hw->pdev->dev, DMA_POOL_SIZE,
2350 		    elsio->u.els_logo.els_logo_pyld,
2351 		    elsio->u.els_logo.els_logo_pyld_dma);
2352 
2353 	del_timer(&elsio->timer);
2354 	qla2x00_rel_sp(sp);
2355 }
2356 
2357 static void
2358 qla2x00_els_dcmd_iocb_timeout(void *data)
2359 {
2360 	srb_t *sp = data;
2361 	fc_port_t *fcport = sp->fcport;
2362 	struct scsi_qla_host *vha = sp->vha;
2363 	struct qla_hw_data *ha = vha->hw;
2364 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2365 	unsigned long flags = 0;
2366 
2367 	ql_dbg(ql_dbg_io, vha, 0x3069,
2368 	    "%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
2369 	    sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
2370 	    fcport->d_id.b.al_pa);
2371 
2372 	/* Abort the exchange */
2373 	spin_lock_irqsave(&ha->hardware_lock, flags);
2374 	if (ha->isp_ops->abort_command(sp)) {
2375 		ql_dbg(ql_dbg_io, vha, 0x3070,
2376 		    "mbx abort_command failed.\n");
2377 	} else {
2378 		ql_dbg(ql_dbg_io, vha, 0x3071,
2379 		    "mbx abort_command success.\n");
2380 	}
2381 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
2382 
2383 	complete(&lio->u.els_logo.comp);
2384 }
2385 
2386 static void
2387 qla2x00_els_dcmd_sp_done(void *ptr, int res)
2388 {
2389 	srb_t *sp = ptr;
2390 	fc_port_t *fcport = sp->fcport;
2391 	struct srb_iocb *lio = &sp->u.iocb_cmd;
2392 	struct scsi_qla_host *vha = sp->vha;
2393 
2394 	ql_dbg(ql_dbg_io, vha, 0x3072,
2395 	    "%s hdl=%x, portid=%02x%02x%02x done\n",
2396 	    sp->name, sp->handle, fcport->d_id.b.domain,
2397 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2398 
2399 	complete(&lio->u.els_logo.comp);
2400 }
2401 
2402 int
2403 qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
2404     port_id_t remote_did)
2405 {
2406 	srb_t *sp;
2407 	fc_port_t *fcport = NULL;
2408 	struct srb_iocb *elsio = NULL;
2409 	struct qla_hw_data *ha = vha->hw;
2410 	struct els_logo_payload logo_pyld;
2411 	int rval = QLA_SUCCESS;
2412 
2413 	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2414 	if (!fcport) {
2415 		ql_log(ql_log_info, vha, 0x70e5, "fcport allocation failed\n");
2416 		return -ENOMEM;
2417 	}
2418 
2419 	/* Alloc SRB structure */
2420 	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2421 	if (!sp) {
2422 		kfree(fcport);
2423 		ql_log(ql_log_info, vha, 0x70e6,
2424 		    "SRB allocation failed\n");
2425 		return -ENOMEM;
2426 	}
2427 
2428 	elsio = &sp->u.iocb_cmd;
2429 	fcport->loop_id = 0xFFFF;
2430 	fcport->d_id.b.domain = remote_did.b.domain;
2431 	fcport->d_id.b.area = remote_did.b.area;
2432 	fcport->d_id.b.al_pa = remote_did.b.al_pa;
2433 
2434 	ql_dbg(ql_dbg_io, vha, 0x3073, "portid=%02x%02x%02x done\n",
2435 	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
2436 
2437 	sp->type = SRB_ELS_DCMD;
2438 	sp->name = "ELS_DCMD";
2439 	sp->fcport = fcport;
2440 	qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT);
2441 	elsio->timeout = qla2x00_els_dcmd_iocb_timeout;
2442 	sp->done = qla2x00_els_dcmd_sp_done;
2443 	sp->free = qla2x00_els_dcmd_sp_free;
2444 
2445 	elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev,
2446 			    DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma,
2447 			    GFP_KERNEL);
2448 
2449 	if (!elsio->u.els_logo.els_logo_pyld) {
2450 		sp->free(sp);
2451 		return QLA_FUNCTION_FAILED;
2452 	}
2453 
2454 	memset(&logo_pyld, 0, sizeof(struct els_logo_payload));
2455 
2456 	elsio->u.els_logo.els_cmd = els_opcode;
2457 	logo_pyld.opcode = els_opcode;
2458 	logo_pyld.s_id[0] = vha->d_id.b.al_pa;
2459 	logo_pyld.s_id[1] = vha->d_id.b.area;
2460 	logo_pyld.s_id[2] = vha->d_id.b.domain;
2461 	host_to_fcp_swap(logo_pyld.s_id, sizeof(uint32_t));
2462 	memcpy(&logo_pyld.wwpn, vha->port_name, WWN_SIZE);
2463 
2464 	memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
2465 	    sizeof(struct els_logo_payload));
2466 
2467 	rval = qla2x00_start_sp(sp);
2468 	if (rval != QLA_SUCCESS) {
2469 		sp->free(sp);
2470 		return QLA_FUNCTION_FAILED;
2471 	}
2472 
2473 	ql_dbg(ql_dbg_io, vha, 0x3074,
2474 	    "%s LOGO sent, hdl=%x, loopid=%x, portid=%02x%02x%02x.\n",
2475 	    sp->name, sp->handle, fcport->loop_id, fcport->d_id.b.domain,
2476 	    fcport->d_id.b.area, fcport->d_id.b.al_pa);
2477 
2478 	wait_for_completion(&elsio->u.els_logo.comp);
2479 
2480 	sp->free(sp);
2481 	return rval;
2482 }
2483 
2484 static void
2485 qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2486 {
2487 	scsi_qla_host_t *vha = sp->vha;
2488 	struct srb_iocb *elsio = &sp->u.iocb_cmd;
2489 
2490 	els_iocb->entry_type = ELS_IOCB_TYPE;
2491 	els_iocb->entry_count = 1;
2492 	els_iocb->sys_define = 0;
2493 	els_iocb->entry_status = 0;
2494 	els_iocb->handle = sp->handle;
2495 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2496 	els_iocb->tx_dsd_count = cpu_to_le16(1);
2497 	els_iocb->vp_index = vha->vp_idx;
2498 	els_iocb->sof_type = EST_SOFI3;
2499 	els_iocb->rx_dsd_count = 0;
2500 	els_iocb->opcode = elsio->u.els_logo.els_cmd;
2501 
2502 	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2503 	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2504 	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2505 	els_iocb->control_flags = 0;
2506 
2507 	els_iocb->tx_byte_count = cpu_to_le32(sizeof(struct els_logo_payload));
2508 	els_iocb->tx_address[0] =
2509 	    cpu_to_le32(LSD(elsio->u.els_logo.els_logo_pyld_dma));
2510 	els_iocb->tx_address[1] =
2511 	    cpu_to_le32(MSD(elsio->u.els_logo.els_logo_pyld_dma));
2512 	els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
2513 
2514 	els_iocb->rx_byte_count = 0;
2515 	els_iocb->rx_address[0] = 0;
2516 	els_iocb->rx_address[1] = 0;
2517 	els_iocb->rx_len = 0;
2518 
2519 	sp->vha->qla_stats.control_requests++;
2520 }
2521 
2522 static void
2523 qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2524 {
2525 	struct bsg_job *bsg_job = sp->u.bsg_job;
2526 	struct fc_bsg_request *bsg_request = bsg_job->request;
2527 
2528 	els_iocb->entry_type = ELS_IOCB_TYPE;
2529 	els_iocb->entry_count = 1;
2530 	els_iocb->sys_define = 0;
2531 	els_iocb->entry_status = 0;
2532 	els_iocb->handle = sp->handle;
2533 	els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2534 	els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
2535 	els_iocb->vp_index = sp->vha->vp_idx;
2536 	els_iocb->sof_type = EST_SOFI3;
2537 	els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2538 
2539 	els_iocb->opcode =
2540 	    sp->type == SRB_ELS_CMD_RPT ?
2541 	    bsg_request->rqst_data.r_els.els_code :
2542 	    bsg_request->rqst_data.h_els.command_code;
2543 	els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
2544 	els_iocb->port_id[1] = sp->fcport->d_id.b.area;
2545 	els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
2546 	els_iocb->control_flags = 0;
2547 	els_iocb->rx_byte_count =
2548 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2549 	els_iocb->tx_byte_count =
2550 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2551 
2552 	els_iocb->tx_address[0] = cpu_to_le32(LSD(sg_dma_address
2553 	    (bsg_job->request_payload.sg_list)));
2554 	els_iocb->tx_address[1] = cpu_to_le32(MSD(sg_dma_address
2555 	    (bsg_job->request_payload.sg_list)));
2556 	els_iocb->tx_len = cpu_to_le32(sg_dma_len
2557 	    (bsg_job->request_payload.sg_list));
2558 
2559 	els_iocb->rx_address[0] = cpu_to_le32(LSD(sg_dma_address
2560 	    (bsg_job->reply_payload.sg_list)));
2561 	els_iocb->rx_address[1] = cpu_to_le32(MSD(sg_dma_address
2562 	    (bsg_job->reply_payload.sg_list)));
2563 	els_iocb->rx_len = cpu_to_le32(sg_dma_len
2564 	    (bsg_job->reply_payload.sg_list));
2565 
2566 	sp->vha->qla_stats.control_requests++;
2567 }
2568 
2569 static void
2570 qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
2571 {
2572 	uint16_t        avail_dsds;
2573 	uint32_t        *cur_dsd;
2574 	struct scatterlist *sg;
2575 	int index;
2576 	uint16_t tot_dsds;
2577 	scsi_qla_host_t *vha = sp->vha;
2578 	struct qla_hw_data *ha = vha->hw;
2579 	struct bsg_job *bsg_job = sp->u.bsg_job;
2580 	int loop_iterartion = 0;
2581 	int entry_count = 1;
2582 
2583 	memset(ct_iocb, 0, sizeof(ms_iocb_entry_t));
2584 	ct_iocb->entry_type = CT_IOCB_TYPE;
2585 	ct_iocb->entry_status = 0;
2586 	ct_iocb->handle1 = sp->handle;
2587 	SET_TARGET_ID(ha, ct_iocb->loop_id, sp->fcport->loop_id);
2588 	ct_iocb->status = cpu_to_le16(0);
2589 	ct_iocb->control_flags = cpu_to_le16(0);
2590 	ct_iocb->timeout = 0;
2591 	ct_iocb->cmd_dsd_count =
2592 	    cpu_to_le16(bsg_job->request_payload.sg_cnt);
2593 	ct_iocb->total_dsd_count =
2594 	    cpu_to_le16(bsg_job->request_payload.sg_cnt + 1);
2595 	ct_iocb->req_bytecount =
2596 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2597 	ct_iocb->rsp_bytecount =
2598 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2599 
2600 	ct_iocb->dseg_req_address[0] = cpu_to_le32(LSD(sg_dma_address
2601 	    (bsg_job->request_payload.sg_list)));
2602 	ct_iocb->dseg_req_address[1] = cpu_to_le32(MSD(sg_dma_address
2603 	    (bsg_job->request_payload.sg_list)));
2604 	ct_iocb->dseg_req_length = ct_iocb->req_bytecount;
2605 
2606 	ct_iocb->dseg_rsp_address[0] = cpu_to_le32(LSD(sg_dma_address
2607 	    (bsg_job->reply_payload.sg_list)));
2608 	ct_iocb->dseg_rsp_address[1] = cpu_to_le32(MSD(sg_dma_address
2609 	    (bsg_job->reply_payload.sg_list)));
2610 	ct_iocb->dseg_rsp_length = ct_iocb->rsp_bytecount;
2611 
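	/*
	 * The command IOCB itself holds a single response DSD; any further
	 * reply segments spill over into Continuation Type 1 IOCBs in the
	 * loop below.
	 */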
2612 	avail_dsds = 1;
2613 	cur_dsd = (uint32_t *)ct_iocb->dseg_rsp_address;
2614 	index = 0;
2615 	tot_dsds = bsg_job->reply_payload.sg_cnt;
2616 
2617 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2618 		dma_addr_t       sle_dma;
2619 		cont_a64_entry_t *cont_pkt;
2620 
2621 		/* Allocate additional continuation packets? */
2622 		if (avail_dsds == 0) {
2623 			/*
2624 			 * Five DSDs are available in the Cont.
2625 			 * Type 1 IOCB.
2626 			 */
2627 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2628 			    vha->hw->req_q_map[0]);
2629 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2630 			avail_dsds = 5;
2631 			entry_count++;
2632 		}
2633 
2634 		sle_dma = sg_dma_address(sg);
2635 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2636 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2637 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2638 		loop_iterartion++;
2639 		avail_dsds--;
2640 	}
2641 	ct_iocb->entry_count = entry_count;
2642 
2643 	sp->vha->qla_stats.control_requests++;
2644 }
2645 
2646 static void
2647 qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2648 {
2649 	uint16_t        avail_dsds;
2650 	uint32_t        *cur_dsd;
2651 	struct scatterlist *sg;
2652 	int index;
2653 	uint16_t tot_dsds;
2654 	scsi_qla_host_t *vha = sp->vha;
2655 	struct qla_hw_data *ha = vha->hw;
2656 	struct bsg_job *bsg_job = sp->u.bsg_job;
2657 	int loop_iterartion = 0;
2658 	int entry_count = 1;
2659 
2660 	ct_iocb->entry_type = CT_IOCB_TYPE;
2661 	ct_iocb->entry_status = 0;
2662 	ct_iocb->sys_define = 0;
2663 	ct_iocb->handle = sp->handle;
2664 
2665 	ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2666 	ct_iocb->vp_index = sp->vha->vp_idx;
2667 	ct_iocb->comp_status = cpu_to_le16(0);
2668 
2669 	ct_iocb->cmd_dsd_count =
2670 		cpu_to_le16(bsg_job->request_payload.sg_cnt);
2671 	ct_iocb->timeout = 0;
2672 	ct_iocb->rsp_dsd_count =
2673 	    cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2674 	ct_iocb->rsp_byte_count =
2675 	    cpu_to_le32(bsg_job->reply_payload.payload_len);
2676 	ct_iocb->cmd_byte_count =
2677 	    cpu_to_le32(bsg_job->request_payload.payload_len);
2678 	ct_iocb->dseg_0_address[0] = cpu_to_le32(LSD(sg_dma_address
2679 	    (bsg_job->request_payload.sg_list)));
2680 	ct_iocb->dseg_0_address[1] = cpu_to_le32(MSD(sg_dma_address
2681 	    (bsg_job->request_payload.sg_list)));
2682 	ct_iocb->dseg_0_len = cpu_to_le32(sg_dma_len
2683 	    (bsg_job->request_payload.sg_list));
2684 
2685 	avail_dsds = 1;
2686 	cur_dsd = (uint32_t *)ct_iocb->dseg_1_address;
2687 	index = 0;
2688 	tot_dsds = bsg_job->reply_payload.sg_cnt;
2689 
2690 	for_each_sg(bsg_job->reply_payload.sg_list, sg, tot_dsds, index) {
2691 		dma_addr_t       sle_dma;
2692 		cont_a64_entry_t *cont_pkt;
2693 
2694 		/* Allocate additional continuation packets? */
2695 		if (avail_dsds == 0) {
2696 			/*
2697 			 * Five DSDs are available in the Cont.
2698 			 * Type 1 IOCB.
2699 			 */
2700 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
2701 			    ha->req_q_map[0]);
2702 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
2703 			avail_dsds = 5;
2704 			entry_count++;
2705 		}
2706 
2707 		sle_dma = sg_dma_address(sg);
2708 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
2709 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
2710 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
2711 		loop_iterartion++;
2712 		avail_dsds--;
2713 	}
2714 	ct_iocb->entry_count = entry_count;
2715 }
2716 
2717 /**
2718  * qla82xx_start_scsi() - Send a SCSI command to the ISP
2719  * @sp: command to send to the ISP
2720  *
2721  * Returns non-zero if a failure occurred, else zero.
2722  */
2723 int
2724 qla82xx_start_scsi(srb_t *sp)
2725 {
2726 	int		nseg;
2727 	unsigned long   flags;
2728 	struct scsi_cmnd *cmd;
2729 	uint32_t	*clr_ptr;
2730 	uint32_t        index;
2731 	uint32_t	handle;
2732 	uint16_t	cnt;
2733 	uint16_t	req_cnt;
2734 	uint16_t	tot_dsds;
2735 	struct device_reg_82xx __iomem *reg;
2736 	uint32_t dbval;
2737 	uint32_t *fcp_dl;
2738 	uint8_t additional_cdb_len;
2739 	struct ct6_dsd *ctx;
2740 	struct scsi_qla_host *vha = sp->vha;
2741 	struct qla_hw_data *ha = vha->hw;
2742 	struct req_que *req = NULL;
2743 	struct rsp_que *rsp = NULL;
2744 
2745 	/* Setup device pointers. */
2746 	reg = &ha->iobase->isp82;
2747 	cmd = GET_CMD_SP(sp);
2748 	req = vha->req;
2749 	rsp = ha->rsp_q_map[0];
2750 
2751 	/* So we know we haven't pci_map'ed anything yet */
2752 	tot_dsds = 0;
2753 
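	/*
	 * Pre-compute the ISP82xx doorbell value; the request-queue id and
	 * the new ring index are OR'd in just before the doorbell is
	 * written at the end of this routine.
	 */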
2754 	dbval = 0x04 | (ha->portnum << 5);
2755 
2756 	/* Send marker if required */
2757 	if (vha->marker_needed != 0) {
2758 		if (qla2x00_marker(vha, req,
2759 			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
2760 			ql_log(ql_log_warn, vha, 0x300c,
2761 			    "qla2x00_marker failed for cmd=%p.\n", cmd);
2762 			return QLA_FUNCTION_FAILED;
2763 		}
2764 		vha->marker_needed = 0;
2765 	}
2766 
2767 	/* Acquire ring specific lock */
2768 	spin_lock_irqsave(&ha->hardware_lock, flags);
2769 
2770 	/* Check for room in outstanding command list. */
2771 	handle = req->current_outstanding_cmd;
2772 	for (index = 1; index < req->num_outstanding_cmds; index++) {
2773 		handle++;
2774 		if (handle == req->num_outstanding_cmds)
2775 			handle = 1;
2776 		if (!req->outstanding_cmds[handle])
2777 			break;
2778 	}
2779 	if (index == req->num_outstanding_cmds)
2780 		goto queuing_error;
2781 
2782 	/* Map the sg table so we have an accurate count of sg entries needed */
2783 	if (scsi_sg_count(cmd)) {
2784 		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
2785 		    scsi_sg_count(cmd), cmd->sc_data_direction);
2786 		if (unlikely(!nseg))
2787 			goto queuing_error;
2788 	} else
2789 		nseg = 0;
2790 
2791 	tot_dsds = nseg;
2792 
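	/*
	 * Transfers with more than ql2xshiftctondsd segments are sent as
	 * Command Type 6, with the FCP_CMND IU in a separate DMA buffer and
	 * the segments chained through DSD lists; smaller transfers use the
	 * plain Command Type 7 path below.
	 */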
2793 	if (tot_dsds > ql2xshiftctondsd) {
2794 		struct cmd_type_6 *cmd_pkt;
2795 		uint16_t more_dsd_lists = 0;
2796 		struct dsd_dma *dsd_ptr;
2797 		uint16_t i;
2798 
2799 		more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
2800 		if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
2801 			ql_dbg(ql_dbg_io, vha, 0x300d,
2802 			    "Num of DSD list %d is more than %d for cmd=%p.\n",
2803 			    more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
2804 			    cmd);
2805 			goto queuing_error;
2806 		}
2807 
2808 		if (more_dsd_lists <= ha->gbl_dsd_avail)
2809 			goto sufficient_dsds;
2810 		else
2811 			more_dsd_lists -= ha->gbl_dsd_avail;
2812 
2813 		for (i = 0; i < more_dsd_lists; i++) {
2814 			dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
2815 			if (!dsd_ptr) {
2816 				ql_log(ql_log_fatal, vha, 0x300e,
2817 				    "Failed to allocate memory for dsd_dma "
2818 				    "for cmd=%p.\n", cmd);
2819 				goto queuing_error;
2820 			}
2821 
2822 			dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
2823 				GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
2824 			if (!dsd_ptr->dsd_addr) {
2825 				kfree(dsd_ptr);
2826 				ql_log(ql_log_fatal, vha, 0x300f,
2827 				    "Failed to allocate memory for dsd_addr "
2828 				    "for cmd=%p.\n", cmd);
2829 				goto queuing_error;
2830 			}
2831 			list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
2832 			ha->gbl_dsd_avail++;
2833 		}
2834 
2835 sufficient_dsds:
2836 		req_cnt = 1;
2837 
2838 		if (req->cnt < (req_cnt + 2)) {
2839 			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2840 				&reg->req_q_out[0]);
2841 			if (req->ring_index < cnt)
2842 				req->cnt = cnt - req->ring_index;
2843 			else
2844 				req->cnt = req->length -
2845 					(req->ring_index - cnt);
2846 			if (req->cnt < (req_cnt + 2))
2847 				goto queuing_error;
2848 		}
2849 
2850 		ctx = sp->u.scmd.ctx =
2851 		    mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2852 		if (!ctx) {
2853 			ql_log(ql_log_fatal, vha, 0x3010,
2854 			    "Failed to allocate ctx for cmd=%p.\n", cmd);
2855 			goto queuing_error;
2856 		}
2857 
2858 		memset(ctx, 0, sizeof(struct ct6_dsd));
2859 		ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
2860 			GFP_ATOMIC, &ctx->fcp_cmnd_dma);
2861 		if (!ctx->fcp_cmnd) {
2862 			ql_log(ql_log_fatal, vha, 0x3011,
2863 			    "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2864 			goto queuing_error;
2865 		}
2866 
2867 		/* Initialize the DSD list and dma handle */
2868 		INIT_LIST_HEAD(&ctx->dsd_list);
2869 		ctx->dsd_use_cnt = 0;
2870 
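		/*
		 * FCP_CMND IU length: 12 bytes of fixed header (LUN, task
		 * attributes, flags), the CDB (padded to 16 bytes, or a
		 * multiple of 4 when longer), and the 4-byte FCP_DL field
		 * holding the expected data length.
		 */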
2871 		if (cmd->cmd_len > 16) {
2872 			additional_cdb_len = cmd->cmd_len - 16;
2873 			if ((cmd->cmd_len % 4) != 0) {
2874 				/* A SCSI command longer than 16 bytes must be
2875 				 * a multiple of 4 bytes in length.
2876 				 */
2877 				ql_log(ql_log_warn, vha, 0x3012,
2878 				    "scsi cmd len %d not multiple of 4 "
2879 				    "for cmd=%p.\n", cmd->cmd_len, cmd);
2880 				goto queuing_error_fcp_cmnd;
2881 			}
2882 			ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
2883 		} else {
2884 			additional_cdb_len = 0;
2885 			ctx->fcp_cmnd_len = 12 + 16 + 4;
2886 		}
2887 
2888 		cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
2889 		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2890 
2891 		/* Zero out remaining portion of packet. */
2892 		/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
2893 		clr_ptr = (uint32_t *)cmd_pkt + 2;
2894 		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2895 		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2896 
2897 		/* Set NPORT-ID and LUN number*/
2898 		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2899 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2900 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2901 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2902 		cmd_pkt->vp_index = sp->vha->vp_idx;
2903 
2904 		/* Build IOCB segments */
2905 		if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
2906 			goto queuing_error_fcp_cmnd;
2907 
2908 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2909 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
2910 
2911 		/* build FCP_CMND IU */
2912 		memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2913 		int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
2914 		ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
2915 
2916 		if (cmd->sc_data_direction == DMA_TO_DEVICE)
2917 			ctx->fcp_cmnd->additional_cdb_len |= 1;
2918 		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
2919 			ctx->fcp_cmnd->additional_cdb_len |= 2;
2920 
2921 		/* Populate the FCP_PRIO. */
2922 		if (ha->flags.fcp_prio_enabled)
2923 			ctx->fcp_cmnd->task_attribute |=
2924 			    sp->fcport->fcp_prio << 3;
2925 
2926 		memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
2927 
2928 		fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
2929 		    additional_cdb_len);
2930 		*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
2931 
2932 		cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
2933 		cmd_pkt->fcp_cmnd_dseg_address[0] =
2934 		    cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
2935 		cmd_pkt->fcp_cmnd_dseg_address[1] =
2936 		    cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
2937 
2938 		sp->flags |= SRB_FCP_CMND_DMA_VALID;
2939 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2940 		/* Set total data segment count. */
2941 		cmd_pkt->entry_count = (uint8_t)req_cnt;
2942 		/* Specify response queue number where
2943 		 * completion should happen
2944 		 */
2945 		cmd_pkt->entry_status = (uint8_t) rsp->id;
2946 	} else {
2947 		struct cmd_type_7 *cmd_pkt;
2948 		req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
2949 		if (req->cnt < (req_cnt + 2)) {
2950 			cnt = (uint16_t)RD_REG_DWORD_RELAXED(
2951 			    &reg->req_q_out[0]);
2952 			if (req->ring_index < cnt)
2953 				req->cnt = cnt - req->ring_index;
2954 			else
2955 				req->cnt = req->length -
2956 					(req->ring_index - cnt);
2957 		}
2958 		if (req->cnt < (req_cnt + 2))
2959 			goto queuing_error;
2960 
2961 		cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
2962 		cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
2963 
2964 		/* Zero out remaining portion of packet. */
2965 		/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
2966 		clr_ptr = (uint32_t *)cmd_pkt + 2;
2967 		memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
2968 		cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
2969 
2970 		/* Set NPORT-ID and LUN number*/
2971 		cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2972 		cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2973 		cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2974 		cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2975 		cmd_pkt->vp_index = sp->vha->vp_idx;
2976 
2977 		int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2978 		host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
2979 		    sizeof(cmd_pkt->lun));
2980 
2981 		/* Populate the FCP_PRIO. */
2982 		if (ha->flags.fcp_prio_enabled)
2983 			cmd_pkt->task |= sp->fcport->fcp_prio << 3;
2984 
2985 		/* Load SCSI command packet. */
2986 		memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
2987 		host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
2988 
2989 		cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
2990 
2991 		/* Build IOCB segments */
2992 		qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, req);
2993 
2994 		/* Set total data segment count. */
2995 		cmd_pkt->entry_count = (uint8_t)req_cnt;
2996 		/* Specify response queue number where
2997 		 * completion should happen.
2998 		 */
2999 		cmd_pkt->entry_status = (uint8_t) rsp->id;
3000 
3001 	}
3002 	/* Build command packet. */
3003 	req->current_outstanding_cmd = handle;
3004 	req->outstanding_cmds[handle] = sp;
3005 	sp->handle = handle;
3006 	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3007 	req->cnt -= req_cnt;
3008 	wmb();
3009 
3010 	/* Adjust ring index. */
3011 	req->ring_index++;
3012 	if (req->ring_index == req->length) {
3013 		req->ring_index = 0;
3014 		req->ring_ptr = req->ring;
3015 	} else
3016 		req->ring_ptr++;
3017 
3018 	sp->flags |= SRB_DMA_VALID;
3019 
3020 	/* Set chip new ring index. */
3021 	/* write, read and verify logic */
3022 	dbval = dbval | (req->id << 8) | (req->ring_index << 16);
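	/*
	 * Ring the doorbell: via qla82xx_wr_32() when ql2xdbwr is set,
	 * otherwise write the doorbell register directly and poll the
	 * read-back register until the chip reflects the new value.
	 */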
3023 	if (ql2xdbwr)
3024 		qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval);
3025 	else {
3026 		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3027 		wmb();
3028 		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
3029 			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
3030 			wmb();
3031 		}
3032 	}
3033 
3034 	/* Manage unprocessed RIO/ZIO commands in response queue. */
3035 	if (vha->flags.process_response_queue &&
3036 	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
3037 		qla24xx_process_response_queue(vha, rsp);
3038 
3039 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3040 	return QLA_SUCCESS;
3041 
3042 queuing_error_fcp_cmnd:
3043 	dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
3044 queuing_error:
3045 	if (tot_dsds)
3046 		scsi_dma_unmap(cmd);
3047 
3048 	if (sp->u.scmd.ctx) {
3049 		mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
3050 		sp->u.scmd.ctx = NULL;
3051 	}
3052 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3053 
3054 	return QLA_FUNCTION_FAILED;
3055 }
3056 
3057 static void
3058 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
3059 {
3060 	struct srb_iocb *aio = &sp->u.iocb_cmd;
3061 	scsi_qla_host_t *vha = sp->vha;
3062 	struct req_que *req = vha->req;
3063 
3064 	memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
3065 	abt_iocb->entry_type = ABORT_IOCB_TYPE;
3066 	abt_iocb->entry_count = 1;
3067 	abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3068 	abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
3069 	abt_iocb->handle_to_abort =
3070 	    cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
3071 	abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
3072 	abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
3073 	abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
3074 	abt_iocb->vp_index = vha->vp_idx;
3075 	abt_iocb->req_que_no = cpu_to_le16(req->id);
3076 	/* Send the command to the firmware */
3077 	wmb();
3078 }
3079 
3080 static void
3081 qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx)
3082 {
3083 	int i, sz;
3084 
3085 	mbx->entry_type = MBX_IOCB_TYPE;
3086 	mbx->handle = sp->handle;
3087 	sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb));
3088 
3089 	for (i = 0; i < sz; i++)
3090 		mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]);
3091 }
3092 
3093 static void
3094 qla2x00_ctpthru_cmd_iocb(srb_t *sp, struct ct_entry_24xx *ct_pkt)
3095 {
3096 	sp->u.iocb_cmd.u.ctarg.iocb = ct_pkt;
3097 	qla24xx_prep_ms_iocb(sp->vha, &sp->u.iocb_cmd.u.ctarg);
3098 	ct_pkt->handle = sp->handle;
3099 }
3100 
3101 static void qla2x00_send_notify_ack_iocb(srb_t *sp,
3102 	struct nack_to_isp *nack)
3103 {
3104 	struct imm_ntfy_from_isp *ntfy = sp->u.iocb_cmd.u.nack.ntfy;
3105 
3106 	nack->entry_type = NOTIFY_ACK_TYPE;
3107 	nack->entry_count = 1;
3108 	nack->ox_id = ntfy->ox_id;
3109 
3110 	nack->u.isp24.handle = sp->handle;
3111 	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
3112 	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
3113 		nack->u.isp24.flags = ntfy->u.isp24.flags &
3114 			cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
3115 	}
3116 	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
3117 	nack->u.isp24.status = ntfy->u.isp24.status;
3118 	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
3119 	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
3120 	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
3121 	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
3122 	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
3123 	nack->u.isp24.srr_flags = 0;
3124 	nack->u.isp24.srr_reject_code = 0;
3125 	nack->u.isp24.srr_reject_code_expl = 0;
3126 	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
3127 }
3128 
3129 int
3130 qla2x00_start_sp(srb_t *sp)
3131 {
3132 	int rval;
3133 	scsi_qla_host_t *vha = sp->vha;
3134 	struct qla_hw_data *ha = vha->hw;
3135 	void *pkt;
3136 	unsigned long flags;
3137 
3138 	rval = QLA_FUNCTION_FAILED;
3139 	spin_lock_irqsave(&ha->hardware_lock, flags);
3140 	pkt = qla2x00_alloc_iocbs(vha, sp);
3141 	if (!pkt) {
3142 		ql_log(ql_log_warn, vha, 0x700c,
3143 		    "qla2x00_alloc_iocbs failed.\n");
3144 		goto done;
3145 	}
3146 
3147 	rval = QLA_SUCCESS;
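	/*
	 * Translate the SRB type into the matching IOCB format; FWI-2
	 * capable and ISPFX00 adapters use their own builders, older ISPs
	 * fall back to the mailbox-IOCB variants.
	 */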
3148 	switch (sp->type) {
3149 	case SRB_LOGIN_CMD:
3150 		IS_FWI2_CAPABLE(ha) ?
3151 		    qla24xx_login_iocb(sp, pkt) :
3152 		    qla2x00_login_iocb(sp, pkt);
3153 		break;
3154 	case SRB_LOGOUT_CMD:
3155 		IS_FWI2_CAPABLE(ha) ?
3156 		    qla24xx_logout_iocb(sp, pkt) :
3157 		    qla2x00_logout_iocb(sp, pkt);
3158 		break;
3159 	case SRB_ELS_CMD_RPT:
3160 	case SRB_ELS_CMD_HST:
3161 		qla24xx_els_iocb(sp, pkt);
3162 		break;
3163 	case SRB_CT_CMD:
3164 		IS_FWI2_CAPABLE(ha) ?
3165 		    qla24xx_ct_iocb(sp, pkt) :
3166 		    qla2x00_ct_iocb(sp, pkt);
3167 		break;
3168 	case SRB_ADISC_CMD:
3169 		IS_FWI2_CAPABLE(ha) ?
3170 		    qla24xx_adisc_iocb(sp, pkt) :
3171 		    qla2x00_adisc_iocb(sp, pkt);
3172 		break;
3173 	case SRB_TM_CMD:
3174 		IS_QLAFX00(ha) ?
3175 		    qlafx00_tm_iocb(sp, pkt) :
3176 		    qla24xx_tm_iocb(sp, pkt);
3177 		break;
3178 	case SRB_FXIOCB_DCMD:
3179 	case SRB_FXIOCB_BCMD:
3180 		qlafx00_fxdisc_iocb(sp, pkt);
3181 		break;
3182 	case SRB_ABT_CMD:
3183 		IS_QLAFX00(ha) ?
3184 			qlafx00_abort_iocb(sp, pkt) :
3185 			qla24xx_abort_iocb(sp, pkt);
3186 		break;
3187 	case SRB_ELS_DCMD:
3188 		qla24xx_els_logo_iocb(sp, pkt);
3189 		break;
3190 	case SRB_CT_PTHRU_CMD:
3191 		qla2x00_ctpthru_cmd_iocb(sp, pkt);
3192 		break;
3193 	case SRB_MB_IOCB:
3194 		qla2x00_mb_iocb(sp, pkt);
3195 		break;
3196 	case SRB_NACK_PLOGI:
3197 	case SRB_NACK_PRLI:
3198 	case SRB_NACK_LOGO:
3199 		qla2x00_send_notify_ack_iocb(sp, pkt);
3200 		break;
3201 	default:
3202 		break;
3203 	}
3204 
3205 	wmb();
3206 	qla2x00_start_iocbs(vha, ha->req_q_map[0]);
3207 done:
3208 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3209 	return rval;
3210 }
3211 
3212 static void
3213 qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
3214 				struct cmd_bidir *cmd_pkt, uint32_t tot_dsds)
3215 {
3216 	uint16_t avail_dsds;
3217 	uint32_t *cur_dsd;
3218 	uint32_t req_data_len = 0;
3219 	uint32_t rsp_data_len = 0;
3220 	struct scatterlist *sg;
3221 	int index;
3222 	int entry_count = 1;
3223 	struct bsg_job *bsg_job = sp->u.bsg_job;
3224 
3225 	/* Update entry type to indicate bidir command */
3226 	*((uint32_t *)(&cmd_pkt->entry_type)) =
3227 		cpu_to_le32(COMMAND_BIDIRECTIONAL);
3228 
3229 	/* Set the transfer direction; for a bidirectional command both the
3230 	 * read and write flags are set.  Also set the BD_WRAP_BACK flag so
3231 	 * the firmware takes care of assigning DID=SID for outgoing pkts.
3232 	 */
3233 	cmd_pkt->wr_dseg_count = cpu_to_le16(bsg_job->request_payload.sg_cnt);
3234 	cmd_pkt->rd_dseg_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3235 	cmd_pkt->control_flags = cpu_to_le16(BD_WRITE_DATA | BD_READ_DATA |
3236 							BD_WRAP_BACK);
3237 
3238 	req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
3239 	cmd_pkt->wr_byte_count = cpu_to_le32(req_data_len);
3240 	cmd_pkt->rd_byte_count = cpu_to_le32(rsp_data_len);
3241 	cmd_pkt->timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3242 
3243 	vha->bidi_stats.transfer_bytes += req_data_len;
3244 	vha->bidi_stats.io_count++;
3245 
3246 	vha->qla_stats.output_bytes += req_data_len;
3247 	vha->qla_stats.output_requests++;
3248 
3249 	/* Only one DSD is available in the bidirectional IOCB; the remaining
3250 	 * DSDs are bundled into continuation IOCBs.
3251 	 */
3252 	avail_dsds = 1;
3253 	cur_dsd = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
3254 
3255 	index = 0;
3256 
3257 	for_each_sg(bsg_job->request_payload.sg_list, sg,
3258 				bsg_job->request_payload.sg_cnt, index) {
3259 		dma_addr_t sle_dma;
3260 		cont_a64_entry_t *cont_pkt;
3261 
3262 		/* Allocate additional continuation packets */
3263 		if (avail_dsds == 0) {
3264 			/* A Continuation Type 1 IOCB can accommodate
3265 			 * 5 DSDs.
3266 			 */
3267 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3268 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3269 			avail_dsds = 5;
3270 			entry_count++;
3271 		}
3272 		sle_dma = sg_dma_address(sg);
3273 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3274 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3275 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3276 		avail_dsds--;
3277 	}
3278 	/* DSDs for the read request always go to a continuation IOCB
3279 	 * and follow the write DSDs.  If there is room on the current IOCB
3280 	 * they are added to that IOCB; otherwise a new continuation IOCB is
3281 	 * allocated.
3282 	 */
3283 	for_each_sg(bsg_job->reply_payload.sg_list, sg,
3284 				bsg_job->reply_payload.sg_cnt, index) {
3285 		dma_addr_t sle_dma;
3286 		cont_a64_entry_t *cont_pkt;
3287 
3288 		/* Allocate additional continuation packets */
3289 		if (avail_dsds == 0) {
3290 			/* A Continuation Type 1 IOCB can accommodate
3291 			 * 5 DSDs.
3292 			 */
3293 			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
3294 			cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
3295 			avail_dsds = 5;
3296 			entry_count++;
3297 		}
3298 		sle_dma = sg_dma_address(sg);
3299 		*cur_dsd++   = cpu_to_le32(LSD(sle_dma));
3300 		*cur_dsd++   = cpu_to_le32(MSD(sle_dma));
3301 		*cur_dsd++   = cpu_to_le32(sg_dma_len(sg));
3302 		avail_dsds--;
3303 	}
3304 	/* This value should be the same as the number of IOCBs required
	 * for this command.
	 */
3305 	cmd_pkt->entry_count = entry_count;
3306 }
3307 
3308 int
3309 qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
3310 {
3311 
3312 	struct qla_hw_data *ha = vha->hw;
3313 	unsigned long flags;
3314 	uint32_t handle;
3315 	uint32_t index;
3316 	uint16_t req_cnt;
3317 	uint16_t cnt;
3318 	uint32_t *clr_ptr;
3319 	struct cmd_bidir *cmd_pkt = NULL;
3320 	struct rsp_que *rsp;
3321 	struct req_que *req;
3322 	int rval = EXT_STATUS_OK;
3323 
3324 	rval = QLA_SUCCESS;
3325 
3326 	rsp = ha->rsp_q_map[0];
3327 	req = vha->req;
3328 
3329 	/* Send marker if required */
3330 	if (vha->marker_needed != 0) {
3331 		if (qla2x00_marker(vha, req,
3332 			rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
3333 			return EXT_STATUS_MAILBOX;
3334 		vha->marker_needed = 0;
3335 	}
3336 
3337 	/* Acquire ring specific lock */
3338 	spin_lock_irqsave(&ha->hardware_lock, flags);
3339 
3340 	/* Check for room in outstanding command list. */
3341 	handle = req->current_outstanding_cmd;
3342 	for (index = 1; index < req->num_outstanding_cmds; index++) {
3343 		handle++;
3344 		if (handle == req->num_outstanding_cmds)
3345 			handle = 1;
3346 		if (!req->outstanding_cmds[handle])
3347 			break;
3348 	}
3349 
3350 	if (index == req->num_outstanding_cmds) {
3351 		rval = EXT_STATUS_BUSY;
3352 		goto queuing_error;
3353 	}
3354 
3355 	/* Calculate number of IOCB required */
3356 	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3357 
3358 	/* Check for room on request queue. */
3359 	if (req->cnt < req_cnt + 2) {
3360 		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
3361 		    RD_REG_DWORD_RELAXED(req->req_q_out);
3362 		if  (req->ring_index < cnt)
3363 			req->cnt = cnt - req->ring_index;
3364 		else
3365 			req->cnt = req->length -
3366 				(req->ring_index - cnt);
3367 	}
3368 	if (req->cnt < req_cnt + 2) {
3369 		rval = EXT_STATUS_BUSY;
3370 		goto queuing_error;
3371 	}
3372 
3373 	cmd_pkt = (struct cmd_bidir *)req->ring_ptr;
3374 	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
3375 
3376 	/* Zero out remaining portion of packet. */
3377 	/* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
3378 	clr_ptr = (uint32_t *)cmd_pkt + 2;
3379 	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
3380 
3381 	/* Set NPORT-ID  (of vha)*/
3382 	cmd_pkt->nport_handle = cpu_to_le16(vha->self_login_loop_id);
3383 	cmd_pkt->port_id[0] = vha->d_id.b.al_pa;
3384 	cmd_pkt->port_id[1] = vha->d_id.b.area;
3385 	cmd_pkt->port_id[2] = vha->d_id.b.domain;
3386 
3387 	qla25xx_build_bidir_iocb(sp, vha, cmd_pkt, tot_dsds);
3388 	cmd_pkt->entry_status = (uint8_t) rsp->id;
3389 	/* Build command packet. */
3390 	req->current_outstanding_cmd = handle;
3391 	req->outstanding_cmds[handle] = sp;
3392 	sp->handle = handle;
3393 	req->cnt -= req_cnt;
3394 
3395 	/* Send the command to the firmware */
3396 	wmb();
3397 	qla2x00_start_iocbs(vha, req);
3398 queuing_error:
3399 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
3400 	return rval;
3401 }
3402