xref: /openbmc/linux/drivers/scsi/qla2xxx/qla_iocb.c (revision b6dcefde)
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/blkdev.h>
#include <linux/delay.h>

#include <scsi/scsi_tcq.h>

static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
							struct rsp_que *rsp);
static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);

static void qla25xx_set_que(srb_t *, struct rsp_que **);

/**
 * qla2x00_get_cmd_direction() - Determine control_flag data direction.
 * @sp: SCSI Request Block that wraps the SCSI command
 *
 * Returns the proper CF_* direction based on CDB.
 */
static inline uint16_t
qla2x00_get_cmd_direction(srb_t *sp)
{
	uint16_t cflags;

	cflags = 0;

	/* Set transfer direction */
	if (sp->cmd->sc_data_direction == DMA_TO_DEVICE) {
		cflags = CF_WRITE;
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (sp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cflags = CF_READ;
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}
	return (cflags);
}

/**
 * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and
 * Continuation Type 0 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
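 *
 * Worked example: for 10 DSDs, three fit in the Command Type 2 IOCB and
 * the remaining seven fill one Continuation Type 0 IOCB, so
 * 1 + (10 - 3) / 7 = 2 ring entries are needed.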
 */
uint16_t
qla2x00_calc_iocbs_32(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 3) {
		iocbs += (dsds - 3) / 7;
		if ((dsds - 3) % 7)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
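 *
 * Worked example: for 12 DSDs, two fit in the Command Type 3 IOCB and
 * the remaining ten fill two Continuation Type 1 IOCBs (five each), so
 * 1 + (12 - 2) / 5 = 3 ring entries are needed.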
 */
uint16_t
qla2x00_calc_iocbs_64(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 2) {
		iocbs += (dsds - 2) / 5;
		if ((dsds - 2) % 5)
			iocbs++;
	}
	return (iocbs);
}

/**
 * qla2x00_prep_cont_type0_iocb() - Initialize a Continuation Type 0 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 0 IOCB packet.
 */
static inline cont_entry_t *
qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
{
	cont_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_TYPE);

	return (cont_pkt);
}

/**
 * qla2x00_prep_cont_type1_iocb() - Initialize a Continuation Type 1 IOCB.
 * @vha: HA context
 *
 * Returns a pointer to the Continuation Type 1 IOCB packet.
 */
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
{
	cont_a64_entry_t *cont_pkt;
	struct req_que *req = vha->req;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	cont_pkt = (cont_a64_entry_t *)req->ring_ptr;

	/* Load packet defaults. */
	*((uint32_t *)(&cont_pkt->entry_type)) =
	    __constant_cpu_to_le32(CONTINUE_A64_TYPE);

	return (cont_pkt);
}
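
/*
 * Ring-wrap sketch (illustrative numbers): with req->length == 4, advancing
 * ring_index past entry 3 resets it to 0 and rewinds ring_ptr to the ring
 * base, so both prep_cont_type*_iocb() helpers treat the request ring as a
 * fixed-size circular buffer.
 */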

/**
 * qla2x00_build_scsi_iocbs_32() - Build IOCB command utilizing 32-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 2 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 2 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Three DSDs are available in the Command Type 2 IOCB */
	avail_dsds = 3;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		cont_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Seven DSDs are available in the Continuation
			 * Type 0 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
			avail_dsds = 7;
		}

		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}
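
/*
 * Layout example for the 32-bit path: with tot_dsds == 5, the first three
 * segments land in the Command Type 2 IOCB and the remaining two in a single
 * Continuation Type 0 IOCB, matching qla2x00_calc_iocbs_32(5) == 2 entries.
 */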

/**
 * qla2x00_build_scsi_iocbs_64() - Build IOCB command utilizing 64-bit
 * capable IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 3 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 3 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_A64_TYPE);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));

	/* Two DSDs are available in the Command Type 3 IOCB */
	avail_dsds = 2;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla2x00_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla2x00_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	cmd_entry_t	*cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_2xxx __iomem *reg;
	struct qla_hw_data *ha;
	struct req_que *req;
	struct rsp_que *rsp;

	/* Setup device pointers. */
	ret = 0;
	vha = sp->fcport->vha;
	ha = vha->hw;
	reg = &ha->iobase->isp;
	cmd = sp->cmd;
	req = ha->req_q_map[0];
	rsp = ha->rsp_q_map[0];

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return (QLA_FUNCTION_FAILED);
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	/* Calculate the number of request entries needed. */
	req_cnt = ha->isp_ops->calc_req_entries(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg));
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
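	/*
	 * Free-count arithmetic, with illustrative numbers: on a 128-entry
	 * ring with ring_index == 100 and a hardware out-pointer of 40, the
	 * free count is 128 - (100 - 40) = 68 entries.  The req_cnt + 2 test
	 * keeps a small cushion so the in-pointer never quite catches the
	 * out-pointer, which would be indistinguishable from an empty ring.
	 */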
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (cmd_entry_t *)req->ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set target ID and LUN number. */
	SET_TARGET_ID(ha, cmd_pkt->target, sp->fcport->loop_id);
	cmd_pkt->lun = cpu_to_le16(sp->cmd->device->lun);

	/* Update tagged queuing modifier */
	cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	ha->isp_ops->build_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index);
	RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg));	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
	    rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla2x00_process_response_queue(rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return (QLA_SUCCESS);

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (QLA_FUNCTION_FAILED);
}

/**
 * __qla2x00_marker() - Send a marker IOCB to the firmware.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
			struct rsp_que *rsp, uint16_t loop_id,
			uint16_t lun, uint8_t type)
{
	mrk_entry_t *mrk;
	struct mrk_entry_24xx *mrk24;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	mrk24 = NULL;
	mrk = (mrk_entry_t *)qla2x00_req_pkt(vha, req, rsp);
	if (mrk == NULL) {
		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
		    __func__, base_vha->host_no));

		return (QLA_FUNCTION_FAILED);
	}

	mrk->entry_type = MARKER_TYPE;
	mrk->modifier = type;
	if (type != MK_SYNC_ALL) {
		if (IS_FWI2_CAPABLE(ha)) {
			mrk24 = (struct mrk_entry_24xx *) mrk;
			mrk24->nport_handle = cpu_to_le16(loop_id);
			mrk24->lun[1] = LSB(lun);
			mrk24->lun[2] = MSB(lun);
			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
			mrk24->vp_index = vha->vp_idx;
			mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
		} else {
			SET_TARGET_ID(ha, mrk->target, loop_id);
			mrk->lun = cpu_to_le16(lun);
		}
	}
	wmb();

	qla2x00_isp_cmd(vha, req);

	return (QLA_SUCCESS);
}

int
qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp, uint16_t loop_id, uint16_t lun,
		uint8_t type)
{
	int ret;
	unsigned long flags = 0;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return (ret);
}
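
/*
 * Usage sketch: after an event that requires resynchronization the driver
 * sets vha->marker_needed, and the next qla2x00_start_scsi() or
 * qla24xx_start_scsi() call issues
 *
 *	qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 *
 * before queuing new I/O, as seen at the top of both start_scsi routines.
 */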

/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @vha: HA context
 * @req: request queue
 * @rsp: response queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL on failure, else a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
		struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	request_t	*pkt = NULL;
	uint16_t	cnt;
	uint32_t	*dword_ptr;
	uint32_t	timer;
	uint16_t	req_cnt = 1;

	/* Wait 1 second for slot. */
	for (timer = HZ; timer; timer--) {
		if ((req_cnt + 2) >= req->cnt) {
			/* Calculate number of free request entries. */
			if (ha->mqenable)
				cnt = (uint16_t)
					RD_REG_DWORD(&reg->isp25mq.req_q_out);
			else {
				if (IS_FWI2_CAPABLE(ha))
					cnt = (uint16_t)RD_REG_DWORD(
						&reg->isp24.req_q_out);
				else
					cnt = qla2x00_debounce_register(
						ISP_REQ_Q_OUT(ha, &reg->isp));
			}
			if (req->ring_index < cnt)
				req->cnt = cnt - req->ring_index;
			else
				req->cnt = req->length -
				    (req->ring_index - cnt);
		}
		/* If room for request in request ring. */
		if ((req_cnt + 2) < req->cnt) {
			req->cnt--;
			pkt = req->ring_ptr;

			/* Zero out packet. */
			dword_ptr = (uint32_t *)pkt;
			for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
				*dword_ptr++ = 0;

			/* Set entry count. */
			pkt->entry_count = 1;

			break;
		}

		/* Release ring specific lock */
		spin_unlock_irq(&ha->hardware_lock);

		udelay(2);   /* 2 us */

		/* Check for pending interrupts. */
		/* During init we issue marker directly */
		if (!vha->marker_needed && vha->flags.init_done)
			qla2x00_poll(rsp);
		spin_lock_irq(&ha->hardware_lock);
	}
	if (!pkt) {
		DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
	}

	return (pkt);
}
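
/*
 * Timing note: the retry loop above makes HZ passes with a 2 us delay per
 * pass, so the wait for a free slot is bounded by roughly HZ * 2 us (about
 * 2 ms at HZ=1000) plus whatever time qla2x00_poll() and the lock handoff
 * consume.
 */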

/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @vha: HA context
 * @req: request queue
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
static void
qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	DEBUG5(printk("%s(): IOCB data:\n", __func__));
	DEBUG5(qla2x00_dump_buffer(
	    (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
			req->ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 7 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
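 *
 * Worked example: for 11 DSDs, one fits in the Command Type 7 IOCB and
 * the remaining ten fill two Continuation Type 1 IOCBs (five each), so
 * 1 + (11 - 1) / 5 = 3 ring entries are needed.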
 */
static inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}

/**
 * qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
 * IOCB types.
 *
 * @sp: SRB command to process
 * @cmd_pkt: Command Type 7 IOCB
 * @tot_dsds: Total number of segments to transfer
 */
static inline void
qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
    uint16_t tot_dsds)
{
	uint16_t	avail_dsds;
	uint32_t	*cur_dsd;
	scsi_qla_host_t	*vha;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;
	struct req_que *req;

	cmd = sp->cmd;

	/* Update entry type to indicate Command Type 7 IOCB */
	*((uint32_t *)(&cmd_pkt->entry_type)) =
	    __constant_cpu_to_le32(COMMAND_TYPE_7);

	/* No data transfer */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
		return;
	}

	vha = sp->fcport->vha;
	req = vha->req;

	/* Set transfer direction */
	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_WRITE_DATA);
		sp->fcport->vha->hw->qla_stats.output_bytes +=
		    scsi_bufflen(sp->cmd);
	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
		cmd_pkt->task_mgmt_flags =
		    __constant_cpu_to_le16(TMF_READ_DATA);
		sp->fcport->vha->hw->qla_stats.input_bytes +=
		    scsi_bufflen(sp->cmd);
	}

	/* One DSD is available in the Command Type 7 IOCB */
	avail_dsds = 1;
	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;

	/* Load data segments */
	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t	sle_dma;
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */
			cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
			avail_dsds = 5;
		}

		sle_dma = sg_dma_address(sg);
		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;
	}
}

/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret, nseg;
	unsigned long   flags;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct scsi_cmnd *cmd = sp->cmd;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;

	/* Setup device pointers. */
	ret = 0;

	qla25xx_set_que(sp, &rsp);
	req = vha->req;

	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (vha->marker_needed != 0) {
		if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
							!= QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		vha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
		    scsi_sg_count(cmd), cmd->sc_data_direction);
		if (unlikely(!nseg))
			goto queuing_error;
	} else
		nseg = 0;

	tot_dsds = nseg;

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		cnt = RD_REG_DWORD_RELAXED(req->req_q_out);

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
				(req->ring_index - cnt);
	}
	if (req->cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);

	/* Zero out remaining portion of packet. */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number. */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vp_idx;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	/* Specify response queue number where completion should happen */
	cmd_pkt->entry_status = (uint8_t) rsp->id;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	sp->flags |= SRB_DMA_VALID;

	/* Set chip new ring index. */
	WRT_REG_DWORD(req->req_q_in, req->ring_index);
	RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (vha->flags.process_response_queue &&
		rsp->ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(vha, rsp);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}

static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
{
	struct scsi_cmnd *cmd = sp->cmd;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	int affinity = cmd->request->cpu;

	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
		affinity < ha->max_rsp_queues - 1)
		*rsp = ha->rsp_q_map[affinity + 1];
	else
		*rsp = ha->rsp_q_map[0];
}
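
/*
 * Mapping sketch (illustrative numbers): with cpu_affinity_enabled set and
 * ha->max_rsp_queues == 5, commands completing on CPUs 0-3 are steered to
 * rsp_q_map[1]..rsp_q_map[4]; any other CPU, or affinity disabled, falls
 * back to the default response queue rsp_q_map[0].
 */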

/* Generic Control-SRB manipulation functions. */

static void *
qla2x00_alloc_iocbs(srb_t *sp)
{
	scsi_qla_host_t	*vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	uint32_t index, handle;
	request_t *pkt;
	uint16_t cnt, req_cnt;

	pkt = NULL;
	req_cnt = 1;

	/* Check for room in outstanding command list. */
	handle = req->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Check for room on request queue. */
	if (req->cnt < req_cnt) {
		if (ha->mqenable)
			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
		else if (IS_FWI2_CAPABLE(ha))
			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
		else
			cnt = qla2x00_debounce_register(
			    ISP_REQ_Q_OUT(ha, &reg->isp));

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length -
			    (req->ring_index - cnt);
	}
	if (req->cnt < req_cnt)
		goto queuing_error;

	/* Prep packet */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	req->cnt -= req_cnt;

	pkt = req->ring_ptr;
	memset(pkt, 0, REQUEST_ENTRY_SIZE);
	pkt->entry_count = req_cnt;
	pkt->handle = handle;
	sp->handle = handle;

queuing_error:
	return pkt;
}

static void
qla2x00_start_iocbs(srb_t *sp)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct req_que *req = ha->req_q_map[0];
	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else
		req->ring_ptr++;

	/* Set chip new ring index. */
	if (ha->mqenable) {
		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
		RD_REG_DWORD(&ioreg->hccr);
	} else if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
	} else {
		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
	}
}

static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	struct srb_logio *lio = sp->ctx;

	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
	if (lio->flags & SRB_LOGIN_COND_PLOGI)
		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
	if (lio->flags & SRB_LOGIN_SKIP_PRLI)
		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	struct srb_logio *lio = sp->ctx;
	uint16_t opts;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
	opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0 : 0;
	opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1 : 0;
	if (HAS_EXTENDED_IDS(ha)) {
		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
		mbx->mb10 = cpu_to_le16(opts);
	} else {
		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
	}
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
}

static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
	logio->control_flags =
	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
	logio->port_id[1] = sp->fcport->d_id.b.area;
	logio->port_id[2] = sp->fcport->d_id.b.domain;
	logio->vp_index = sp->fcport->vp_idx;
}

static void
qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
{
	struct qla_hw_data *ha = sp->fcport->vha->hw;

	mbx->entry_type = MBX_IOCB_TYPE;
	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
	    cpu_to_le16(sp->fcport->loop_id) :
	    cpu_to_le16(sp->fcport->loop_id << 8);
	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
	    sp->fcport->d_id.b.al_pa);
	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
	/* Implicit: mbx->mbx10 = 0. */
}

int
qla2x00_start_sp(srb_t *sp)
{
	int rval;
	struct qla_hw_data *ha = sp->fcport->vha->hw;
	void *pkt;
	struct srb_ctx *ctx = sp->ctx;
	unsigned long flags;

	rval = QLA_FUNCTION_FAILED;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	pkt = qla2x00_alloc_iocbs(sp);
	if (!pkt)
		goto done;

	rval = QLA_SUCCESS;
	switch (ctx->type) {
	case SRB_LOGIN_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_login_iocb(sp, pkt) :
		    qla2x00_login_iocb(sp, pkt);
		break;
	case SRB_LOGOUT_CMD:
		IS_FWI2_CAPABLE(ha) ?
		    qla24xx_logout_iocb(sp, pkt) :
		    qla2x00_logout_iocb(sp, pkt);
		break;
	default:
		break;
	}

	wmb();
	qla2x00_start_iocbs(sp);
done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}
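
/*
 * Usage sketch (hypothetical caller; assumes an already-initialized srb):
 *
 *	struct srb_ctx *ctx = sp->ctx;
 *
 *	ctx->type = SRB_LOGIN_CMD;
 *	rval = qla2x00_start_sp(sp);
 *
 * qla2x00_start_sp() then allocates a ring entry under the hardware lock
 * and dispatches to the ISP-appropriate builder: qla24xx_login_iocb() on
 * FWI2-capable adapters, otherwise the mailbox-IOCB form above.
 */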
1065