xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_iocb.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
							struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
/**
 * qla2x00_get_cmd_direction() - Determine control_flags data direction.
 * @sp: SRB command to process
 *
 * Returns the proper CF_* direction based on the SCSI command's data
 * direction, and updates the driver's input/output byte statistics.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

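/*
 * Worked example of the arithmetic above: the Command Type 2 IOCB
 * carries 3 DSDs and each Continuation Type 0 IOCB carries 7 more,
 * so a 17-segment transfer needs 1 + (17 - 3) / 7 = 3 entries, while
 * an 18-segment transfer leaves a remainder and needs 4.
 */
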
/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

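/*
 * The 64-bit variant packs fewer DSDs per entry because each descriptor
 * is wider: the Command Type 3 IOCB holds 2 and each Continuation
 * Type 1 IOCB holds 5. For example, 12 segments need
 * 1 + (12 - 2) / 5 = 3 entries; 13 segments need 4.
 */
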
/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @req: request queue
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @req: request queue
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = sp->que;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

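/*
 * In the 32-bit format each DSD written above is a two-dword pair: a
 * 32-bit DMA address followed by a 32-bit length. When the command
 * IOCB's three slots are exhausted, cur_dsd is simply repointed at the
 * first DSD slot of a freshly prepared continuation entry on the same
 * request ring.
 */
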
/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = sp->que;

	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

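/*
 * The 64-bit DSD is three dwords: the low half of the DMA address
 * (LSD), the high half (MSD), and the length. That extra dword is why
 * the Command Type 3 IOCB only has room for two descriptors where the
 * Command Type 2 IOCB holds three.
 */
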
/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->que = req;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;
	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number. */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

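/*
 * Note on the handle bookkeeping above: the same handle is written into
 * cmd_pkt->handle and stashed in cmd->host_scribble, and the srb is
 * parked in req->outstanding_cmds[handle]. When the firmware completes
 * the IOCB, the completion path can use the handle echoed back in the
 * status entry to locate the original srb in that table.
 */
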
/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}

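/*
 * The split above follows the usual locked/unlocked kernel idiom:
 * __qla2x00_marker() assumes the caller already holds hardware_lock
 * (qla2x00_req_pkt() and qla2x00_isp_cmd() both require it), while
 * qla2x00_marker() is the convenience wrapper that takes and releases
 * the lock itself.
 */
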
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if the function failed, else a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
					RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
						&reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
						ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set system defined field. */
			pkt->sys_define = (uint8_t)req->ring_index;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && !vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else {
		if (IS_FWI2_CAPABLE(ha)) {
			WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
			RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
		} else {
			WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
				req->ring_index);
			RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
		}
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

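/*
 * Same shape of calculation as the qla2x00 variants, but the Command
 * Type 7 IOCB embeds a single DSD, so for example 11 segments need
 * 1 + (11 - 1) / 5 = 3 entries and 12 segments need 4.
 */
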
/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = sp->que;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t        index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id;

	/* Setup device pointers. */
	ret = 0;
	que_id = vha->req_ques[0];

	req = ha->req_q_map[que_id];
	sp->que = req;

	if (req->rsp)
		rsp = req->rsp;
	else
		rsp = ha->rsp_q_map[que_id];
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = ha->isp_ops->rd_req_reg(ha, req->id);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	/* tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	ha->isp_ops->wrt_req_reg(ha, req->id, req->ring_index);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

uint16_t
qla24xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
{
	device_reg_t __iomem *reg = (void *) ha->iobase;
	return RD_REG_DWORD_RELAXED(&reg->isp24.req_q_out);
}

uint16_t
qla25xx_rd_req_reg(struct qla_hw_data *ha, uint16_t id)
{
	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
	return RD_REG_DWORD_RELAXED(&reg->isp25mq.req_q_out);
}

void
qla24xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
{
	device_reg_t __iomem *reg = (void *) ha->iobase;
	WRT_REG_DWORD(&reg->isp24.req_q_in, index);
	RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
}

void
qla25xx_wrt_req_reg(struct qla_hw_data *ha, uint16_t id, uint16_t index)
{
	device_reg_t __iomem *reg = (void *) ha->mqiobase + QLA_QUE_PAGE * id;
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
	WRT_REG_DWORD(&reg->isp25mq.req_q_in, index);
	RD_REG_DWORD(&ioreg->hccr); /* PCI posting */
}

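/*
 * In the qla25xx multiqueue accessors above, each queue owns a
 * QLA_QUE_PAGE-sized window of register space, so the registers for
 * queue @id sit at ha->mqiobase + QLA_QUE_PAGE * id. Note that the
 * write side flushes PCI posting by reading back HCCR from the base
 * I/O region rather than from the per-queue window.
 */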