xref: /openbmc/linux/drivers/scsi/qedf/qedf_io.c (revision ba61bb17)
1 /*
2  *  QLogic FCoE Offload Driver
3  *  Copyright (c) 2016-2018 Cavium Inc.
4  *
5  *  This software is available under the terms of the GNU General Public License
6  *  (GPL) Version 2, available from the file COPYING in the main directory of
7  *  this source tree.
8  */
9 #include <linux/spinlock.h>
10 #include <linux/vmalloc.h>
11 #include "qedf.h"
12 #include <scsi/scsi_tcq.h>
13 
14 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
15 	unsigned int timer_msec)
16 {
17 	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
18 	    msecs_to_jiffies(timer_msec));
19 }
20 
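/*
 * Delayed-work handler that fires when an outstanding ABTS, ELS or sequence
 * cleanup request exceeds its timer.  Each case issues a firmware cleanup for
 * the stalled task and then completes or releases the original request as
 * appropriate for its command type.
 */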
21 static void qedf_cmd_timeout(struct work_struct *work)
22 {
23 
24 	struct qedf_ioreq *io_req =
25 	    container_of(work, struct qedf_ioreq, timeout_work.work);
26 	struct qedf_ctx *qedf;
27 	struct qedf_rport *fcport;
28 	u8 op = 0;
29 
30 	if (io_req == NULL) {
31 		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
32 		return;
33 	}
34 
35 	fcport = io_req->fcport;
36 	if (io_req->fcport == NULL) {
37 		QEDF_INFO(NULL, QEDF_LOG_IO,  "fcport is NULL.\n");
38 		return;
39 	}
40 
41 	qedf = fcport->qedf;
42 
43 	switch (io_req->cmd_type) {
44 	case QEDF_ABTS:
45 		if (qedf == NULL) {
46 			QEDF_INFO(NULL, QEDF_LOG_IO, "qedf is NULL for xid=0x%x.\n",
47 			    io_req->xid);
48 			return;
49 		}
50 
51 		QEDF_ERR((&qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
52 		    io_req->xid);
53 		/* Cleanup timed out ABTS */
54 		qedf_initiate_cleanup(io_req, true);
55 		complete(&io_req->abts_done);
56 
57 		/*
58 		 * Need to call kref_put for reference taken when initiate_abts
59 		 * was called since abts_compl won't be called now that we've
60 		 * cleaned up the task.
61 		 */
62 		kref_put(&io_req->refcount, qedf_release_cmd);
63 
64 		/*
65 		 * Now that the original I/O and the ABTS are complete see
66 		 * if we need to reconnect to the target.
67 		 */
68 		qedf_restart_rport(fcport);
69 		break;
70 	case QEDF_ELS:
71 		kref_get(&io_req->refcount);
72 		/*
73 		 * Don't attempt to clean an ELS timeout as any subsequent
74 		 * ABTS or cleanup requests just hang.  For now just free
75 		 * the resources of the original I/O and the RRQ.
76 		 */
77 		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
78 			  io_req->xid);
79 		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
80 		/* Call callback function to complete command */
81 		if (io_req->cb_func && io_req->cb_arg) {
82 			op = io_req->cb_arg->op;
83 			io_req->cb_func(io_req->cb_arg);
84 			io_req->cb_arg = NULL;
85 		}
86 		qedf_initiate_cleanup(io_req, true);
87 		kref_put(&io_req->refcount, qedf_release_cmd);
88 		break;
89 	case QEDF_SEQ_CLEANUP:
90 		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
91 		    "xid=0x%x.\n", io_req->xid);
92 		qedf_initiate_cleanup(io_req, true);
93 		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
94 		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
95 		break;
96 	default:
97 		break;
98 	}
99 }
100 
101 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
102 {
103 	struct io_bdt *bdt_info;
104 	struct qedf_ctx *qedf = cmgr->qedf;
105 	size_t bd_tbl_sz;
106 	u16 min_xid = QEDF_MIN_XID;
107 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
108 	int num_ios;
109 	int i;
110 	struct qedf_ioreq *io_req;
111 
112 	num_ios = max_xid - min_xid + 1;
113 
114 	/* Free fcoe_bdt_ctx structures */
115 	if (!cmgr->io_bdt_pool)
116 		goto free_cmd_pool;
117 
118 	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
119 	for (i = 0; i < num_ios; i++) {
120 		bdt_info = cmgr->io_bdt_pool[i];
121 		if (bdt_info->bd_tbl) {
122 			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
123 			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
124 			bdt_info->bd_tbl = NULL;
125 		}
126 	}
127 
128 	/* Destroy io_bdt pool */
129 	for (i = 0; i < num_ios; i++) {
130 		kfree(cmgr->io_bdt_pool[i]);
131 		cmgr->io_bdt_pool[i] = NULL;
132 	}
133 
134 	kfree(cmgr->io_bdt_pool);
135 	cmgr->io_bdt_pool = NULL;
136 
137 free_cmd_pool:
138 
139 	for (i = 0; i < num_ios; i++) {
140 		io_req = &cmgr->cmds[i];
141 		kfree(io_req->sgl_task_params);
142 		kfree(io_req->task_params);
143 		/* Make sure we free per command sense buffer */
144 		if (io_req->sense_buffer)
145 			dma_free_coherent(&qedf->pdev->dev,
146 			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
147 			    io_req->sense_buffer_dma);
148 		cancel_delayed_work_sync(&io_req->rrq_work);
149 	}
150 
151 	/* Free command manager itself */
152 	vfree(cmgr);
153 }
154 
155 static void qedf_handle_rrq(struct work_struct *work)
156 {
157 	struct qedf_ioreq *io_req =
158 	    container_of(work, struct qedf_ioreq, rrq_work.work);
159 
160 	qedf_send_rrq(io_req);
161 
162 }
163 
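/*
 * Allocate the per-adapter command manager.  Every possible XID in the range
 * [QEDF_MIN_XID, FCOE_PARAMS_NUM_TASKS - 1] gets a pre-initialized qedf_ioreq
 * with its own DMA-coherent sense buffer, task parameter structures and
 * io_bdt descriptor table, so no allocations are needed on the I/O fast path.
 */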
164 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
165 {
166 	struct qedf_cmd_mgr *cmgr;
167 	struct io_bdt *bdt_info;
168 	struct qedf_ioreq *io_req;
169 	u16 xid;
170 	int i;
171 	int num_ios;
172 	u16 min_xid = QEDF_MIN_XID;
173 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
174 
175 	/* Make sure num_queues is already set before calling this function */
176 	if (!qedf->num_queues) {
177 		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
178 		return NULL;
179 	}
180 
181 	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
182 		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
183 			   "max_xid 0x%x.\n", min_xid, max_xid);
184 		return NULL;
185 	}
186 
187 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
188 		   "0x%x.\n", min_xid, max_xid);
189 
190 	num_ios = max_xid - min_xid + 1;
191 
192 	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
193 	if (!cmgr) {
194 		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
195 		return NULL;
196 	}
197 
198 	cmgr->qedf = qedf;
199 	spin_lock_init(&cmgr->lock);
200 
201 	/*
202 	 * Initialize I/O request fields.
203 	 */
204 	xid = QEDF_MIN_XID;
205 
206 	for (i = 0; i < num_ios; i++) {
207 		io_req = &cmgr->cmds[i];
208 		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
209 
210 		io_req->xid = xid++;
211 
212 		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
213 
214 		/* Allocate DMA memory to hold sense buffer */
215 		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
216 		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
217 		    GFP_KERNEL);
218 		if (!io_req->sense_buffer)
219 			goto mem_err;
220 
221 		/* Allocate task parameters to pass to f/w init functions */
222 		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
223 					      GFP_KERNEL);
224 		if (!io_req->task_params) {
225 			QEDF_ERR(&(qedf->dbg_ctx),
226 				 "Failed to allocate task_params for xid=0x%x\n",
227 				 i);
228 			goto mem_err;
229 		}
230 
231 		/*
232 		 * Allocate scatter/gather list info to pass to f/w init
233 		 * functions.
234 		 */
235 		io_req->sgl_task_params = kzalloc(
236 		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
237 		if (!io_req->sgl_task_params) {
238 			QEDF_ERR(&(qedf->dbg_ctx),
239 				 "Failed to allocate sgl_task_params for xid=0x%x\n",
240 				 i);
241 			goto mem_err;
242 		}
243 	}
244 
245 	/* Allocate pool of io_bdts - one for each qedf_ioreq */
246 	cmgr->io_bdt_pool = kmalloc_array(num_ios, sizeof(struct io_bdt *),
247 	    GFP_KERNEL);
248 
249 	if (!cmgr->io_bdt_pool) {
250 		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
251 		goto mem_err;
252 	}
253 
254 	for (i = 0; i < num_ios; i++) {
255 		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
256 		    GFP_KERNEL);
257 		if (!cmgr->io_bdt_pool[i]) {
258 			QEDF_WARN(&(qedf->dbg_ctx),
259 				  "Failed to alloc io_bdt_pool[%d].\n", i);
260 			goto mem_err;
261 		}
262 	}
263 
264 	for (i = 0; i < num_ios; i++) {
265 		bdt_info = cmgr->io_bdt_pool[i];
266 		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
267 		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
268 		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
269 		if (!bdt_info->bd_tbl) {
270 			QEDF_WARN(&(qedf->dbg_ctx),
271 				  "Failed to alloc bdt_tbl[%d].\n", i);
272 			goto mem_err;
273 		}
274 	}
275 	atomic_set(&cmgr->free_list_cnt, num_ios);
276 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
277 	    "cmgr->free_list_cnt=%d.\n",
278 	    atomic_read(&cmgr->free_list_cnt));
279 
280 	return cmgr;
281 
282 mem_err:
283 	qedf_cmd_mgr_free(cmgr);
284 	return NULL;
285 }
286 
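/*
 * Allocate a free qedf_ioreq for the given offloaded rport.  The allocator
 * enforces the per-connection SQE and active I/O limits, keeps GBL_RSVD_TASKS
 * entries in reserve globally, and then scans the command array round-robin
 * from cmd_mgr->idx (under cmd_mgr->lock) for an entry that is not flagged
 * QEDF_CMD_OUTSTANDING.
 */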
287 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
288 {
289 	struct qedf_ctx *qedf = fcport->qedf;
290 	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
291 	struct qedf_ioreq *io_req = NULL;
292 	struct io_bdt *bd_tbl;
293 	u16 xid;
294 	uint32_t free_sqes;
295 	int i;
296 	unsigned long flags;
297 
298 	free_sqes = atomic_read(&fcport->free_sqes);
299 
300 	if (!free_sqes) {
301 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
302 		    "Returning NULL, free_sqes=%d.\n ",
303 		    free_sqes);
304 		goto out_failed;
305 	}
306 
307 	/* Limit the number of outstanding R/W tasks */
308 	if ((atomic_read(&fcport->num_active_ios) >=
309 	    NUM_RW_TASKS_PER_CONNECTION)) {
310 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
311 		    "Returning NULL, num_active_ios=%d.\n",
312 		    atomic_read(&fcport->num_active_ios));
313 		goto out_failed;
314 	}
315 
316 	/* Limit global TIDs for certain tasks */
317 	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
318 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
319 		    "Returning NULL, free_list_cnt=%d.\n",
320 		    atomic_read(&cmd_mgr->free_list_cnt));
321 		goto out_failed;
322 	}
323 
324 	spin_lock_irqsave(&cmd_mgr->lock, flags);
325 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
326 		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
327 		cmd_mgr->idx++;
328 		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
329 			cmd_mgr->idx = 0;
330 
331 		/* Check to make sure command was previously freed */
332 		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags))
333 			break;
334 	}
335 
336 	if (i == FCOE_PARAMS_NUM_TASKS) {
337 		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
338 		goto out_failed;
339 	}
340 
341 	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
342 	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
343 
344 	atomic_inc(&fcport->num_active_ios);
345 	atomic_dec(&fcport->free_sqes);
346 	xid = io_req->xid;
347 	atomic_dec(&cmd_mgr->free_list_cnt);
348 
349 	io_req->cmd_mgr = cmd_mgr;
350 	io_req->fcport = fcport;
351 
352 	/* Hold the io_req against deletion */
353 	kref_init(&io_req->refcount);
354 
355 	/* Bind io_bdt for this io_req */
356 	/* Have a static link between io_req and io_bdt_pool */
357 	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
358 	if (bd_tbl == NULL) {
359 		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
360 		kref_put(&io_req->refcount, qedf_release_cmd);
361 		goto out_failed;
362 	}
363 	bd_tbl->io_req = io_req;
364 	io_req->cmd_type = cmd_type;
365 	io_req->tm_flags = 0;
366 
367 	/* Reset sequence offset data */
368 	io_req->rx_buf_off = 0;
369 	io_req->tx_buf_off = 0;
370 	io_req->rx_id = 0xffff; /* No OX_ID */
371 
372 	return io_req;
373 
374 out_failed:
375 	/* Record failure for stats and return NULL to caller */
376 	qedf->alloc_failures++;
377 	return NULL;
378 }
379 
380 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
381 {
382 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
383 	struct qedf_ctx *qedf = io_req->fcport->qedf;
384 	uint64_t sz = sizeof(struct scsi_sge);
385 
386 	/* Free any middle path DMA resources that were allocated */
387 	if (mp_req->mp_req_bd) {
388 		dma_free_coherent(&qedf->pdev->dev, sz,
389 		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
390 		mp_req->mp_req_bd = NULL;
391 	}
392 	if (mp_req->mp_resp_bd) {
393 		dma_free_coherent(&qedf->pdev->dev, sz,
394 		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
395 		mp_req->mp_resp_bd = NULL;
396 	}
397 	if (mp_req->req_buf) {
398 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
399 		    mp_req->req_buf, mp_req->req_buf_dma);
400 		mp_req->req_buf = NULL;
401 	}
402 	if (mp_req->resp_buf) {
403 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
404 		    mp_req->resp_buf, mp_req->resp_buf_dma);
405 		mp_req->resp_buf = NULL;
406 	}
407 }
408 
409 void qedf_release_cmd(struct kref *ref)
410 {
411 	struct qedf_ioreq *io_req =
412 	    container_of(ref, struct qedf_ioreq, refcount);
413 	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
414 	struct qedf_rport *fcport = io_req->fcport;
415 
416 	if (io_req->cmd_type == QEDF_ELS ||
417 	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
418 		qedf_free_mp_resc(io_req);
419 
420 	atomic_inc(&cmd_mgr->free_list_cnt);
421 	atomic_dec(&fcport->num_active_ios);
422 	if (atomic_read(&fcport->num_active_ios) < 0)
423 		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
424 
425 	/* Increment task retry identifier now that the request is released */
426 	io_req->task_retry_identifier++;
427 
428 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
429 }
430 
431 static int qedf_split_bd(struct qedf_ioreq *io_req, u64 addr, int sg_len,
432 	int bd_index)
433 {
434 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
435 	int frag_size, sg_frags;
436 
437 	sg_frags = 0;
438 	while (sg_len) {
439 		if (sg_len > QEDF_BD_SPLIT_SZ)
440 			frag_size = QEDF_BD_SPLIT_SZ;
441 		else
442 			frag_size = sg_len;
443 		bd[bd_index + sg_frags].sge_addr.lo = U64_LO(addr);
444 		bd[bd_index + sg_frags].sge_addr.hi = U64_HI(addr);
445 		bd[bd_index + sg_frags].sge_len = (uint16_t)frag_size;
446 
447 		addr += (u64)frag_size;
448 		sg_frags++;
449 		sg_len -= frag_size;
450 	}
451 	return sg_frags;
452 }
453 
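/*
 * DMA-map the SCSI scatterlist and translate it into the firmware BD table.
 * A single element shorter than QEDF_MAX_SGLEN_FOR_CACHESGL is sent as one
 * cached SGE, elements larger than QEDF_MAX_BD_LEN are split by
 * qedf_split_bd(), and any element that is not page aligned at the expected
 * boundary forces the request onto the slow SGE path.
 */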
454 static int qedf_map_sg(struct qedf_ioreq *io_req)
455 {
456 	struct scsi_cmnd *sc = io_req->sc_cmd;
457 	struct Scsi_Host *host = sc->device->host;
458 	struct fc_lport *lport = shost_priv(host);
459 	struct qedf_ctx *qedf = lport_priv(lport);
460 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
461 	struct scatterlist *sg;
462 	int byte_count = 0;
463 	int sg_count = 0;
464 	int bd_count = 0;
465 	int sg_frags;
466 	unsigned int sg_len;
467 	u64 addr, end_addr;
468 	int i;
469 
470 	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
471 	    scsi_sg_count(sc), sc->sc_data_direction);
472 
473 	sg = scsi_sglist(sc);
474 
475 	/*
476 	 * Use a single cached SGE when there is only one s/g element and
477 	 * its length is less than 64k.
478 	 */
479 	if ((sg_count == 1) && (sg_dma_len(sg) <=
480 	    QEDF_MAX_SGLEN_FOR_CACHESGL)) {
481 		sg_len = sg_dma_len(sg);
482 		addr = (u64)sg_dma_address(sg);
483 
484 		bd[bd_count].sge_addr.lo = (addr & 0xffffffff);
485 		bd[bd_count].sge_addr.hi = (addr >> 32);
486 		bd[bd_count].sge_len = (u16)sg_len;
487 
488 		return ++bd_count;
489 	}
490 
491 	scsi_for_each_sg(sc, sg, sg_count, i) {
492 		sg_len = sg_dma_len(sg);
493 		addr = (u64)sg_dma_address(sg);
494 		end_addr = (u64)(addr + sg_len);
495 
496 		/*
497 		 * First s/g element in the list so check if the end_addr
498 		 * is page aligned. Also check to make sure the length is
499 		 * at least page size.
500 		 */
501 		if ((i == 0) && (sg_count > 1) &&
502 		    ((end_addr % QEDF_PAGE_SIZE) ||
503 		    sg_len < QEDF_PAGE_SIZE))
504 			io_req->use_slowpath = true;
505 		/*
506 		 * Last s/g element so check if the start address is page
507 		 * aligned.
508 		 */
509 		else if ((i == (sg_count - 1)) && (sg_count > 1) &&
510 		    (addr % QEDF_PAGE_SIZE))
511 			io_req->use_slowpath = true;
512 		/*
513 		 * Intermediate s/g element so check if the start and end
514 		 * addresses are page aligned.
515 		 */
516 		else if ((i != 0) && (i != (sg_count - 1)) &&
517 		    ((addr % QEDF_PAGE_SIZE) || (end_addr % QEDF_PAGE_SIZE)))
518 			io_req->use_slowpath = true;
519 
520 		if (sg_len > QEDF_MAX_BD_LEN) {
521 			sg_frags = qedf_split_bd(io_req, addr, sg_len,
522 			    bd_count);
523 		} else {
524 			sg_frags = 1;
525 			bd[bd_count].sge_addr.lo = U64_LO(addr);
526 			bd[bd_count].sge_addr.hi  = U64_HI(addr);
527 			bd[bd_count].sge_len = (uint16_t)sg_len;
528 		}
529 
530 		bd_count += sg_frags;
531 		byte_count += sg_len;
532 	}
533 
534 	if (byte_count != scsi_bufflen(sc))
535 		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
536 			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
537 			   scsi_bufflen(sc), io_req->xid);
538 
539 	return bd_count;
540 }
541 
542 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
543 {
544 	struct scsi_cmnd *sc = io_req->sc_cmd;
545 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
546 	int bd_count;
547 
548 	if (scsi_sg_count(sc)) {
549 		bd_count = qedf_map_sg(io_req);
550 		if (bd_count == 0)
551 			return -ENOMEM;
552 	} else {
553 		bd_count = 0;
554 		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
555 		bd[0].sge_len = 0;
556 	}
557 	io_req->bd_tbl->bd_valid = bd_count;
558 
559 	return 0;
560 }
561 
562 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
563 				  struct fcp_cmnd *fcp_cmnd)
564 {
565 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
566 
567 	/* fcp_cmnd is 32 bytes */
568 	memset(fcp_cmnd, 0, FCP_CMND_LEN);
569 
570 	/* 8 bytes: SCSI LUN info */
571 	int_to_scsilun(sc_cmd->device->lun,
572 			(struct scsi_lun *)&fcp_cmnd->fc_lun);
573 
574 	/* 4 bytes: flag info */
575 	fcp_cmnd->fc_pri_ta = 0;
576 	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
577 	fcp_cmnd->fc_flags = io_req->io_req_flags;
578 	fcp_cmnd->fc_cmdref = 0;
579 
580 	/* Populate data direction */
581 	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
582 		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
583 	} else {
584 		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
585 			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
586 		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
587 			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
588 	}
589 
590 	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
591 
592 	/* 16 bytes: CDB information */
593 	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
594 		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
595 
596 	/* 4 bytes: FCP data length */
597 	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
598 }
599 
600 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
601 	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
602 	struct fcoe_wqe *sqe)
603 {
604 	enum fcoe_task_type task_type;
605 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
606 	struct io_bdt *bd_tbl = io_req->bd_tbl;
607 	u8 fcp_cmnd[32];
608 	u32 tmp_fcp_cmnd[8];
609 	int bd_count = 0;
610 	struct qedf_ctx *qedf = fcport->qedf;
611 	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
612 	struct regpair sense_data_buffer_phys_addr;
613 	u32 tx_io_size = 0;
614 	u32 rx_io_size = 0;
615 	int i, cnt;
616 
617 	/* Note init_initiator_rw_fcoe_task memsets the task context */
618 	io_req->task = task_ctx;
619 	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
620 	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
621 	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
622 
623 	/* Set task type based on the DMA direction of the command */
624 	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
625 		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
626 	} else {
627 		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
628 			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
629 			tx_io_size = io_req->data_xfer_len;
630 		} else {
631 			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
632 			rx_io_size = io_req->data_xfer_len;
633 		}
634 	}
635 
636 	/* Setup the fields for fcoe_task_params */
637 	io_req->task_params->context = task_ctx;
638 	io_req->task_params->sqe = sqe;
639 	io_req->task_params->task_type = task_type;
640 	io_req->task_params->tx_io_size = tx_io_size;
641 	io_req->task_params->rx_io_size = rx_io_size;
642 	io_req->task_params->conn_cid = fcport->fw_cid;
643 	io_req->task_params->itid = io_req->xid;
644 	io_req->task_params->cq_rss_number = cq_idx;
645 	io_req->task_params->is_tape_device = fcport->dev_type;
646 
647 	/* Fill in information for scatter/gather list */
648 	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
649 		bd_count = bd_tbl->bd_valid;
650 		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
651 		io_req->sgl_task_params->sgl_phys_addr.lo =
652 			U64_LO(bd_tbl->bd_tbl_dma);
653 		io_req->sgl_task_params->sgl_phys_addr.hi =
654 			U64_HI(bd_tbl->bd_tbl_dma);
655 		io_req->sgl_task_params->num_sges = bd_count;
656 		io_req->sgl_task_params->total_buffer_size =
657 		    scsi_bufflen(io_req->sc_cmd);
658 		io_req->sgl_task_params->small_mid_sge =
659 			io_req->use_slowpath;
660 	}
661 
662 	/* Fill in physical address of sense buffer */
663 	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
664 	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
665 
666 	/* fill FCP_CMND IU */
667 	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
668 
669 	/* Swap fcp_cmnd since FC is big endian */
670 	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
671 	for (i = 0; i < cnt; i++) {
672 		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
673 	}
674 	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
675 
676 	init_initiator_rw_fcoe_task(io_req->task_params,
677 				    io_req->sgl_task_params,
678 				    sense_data_buffer_phys_addr,
679 				    io_req->task_retry_identifier, fcp_cmnd);
680 
681 	/* Increment SGL type counters */
682 	if (bd_count == 1) {
683 		qedf->single_sge_ios++;
684 		io_req->sge_type = QEDF_IOREQ_SINGLE_SGE;
685 	} else if (io_req->use_slowpath) {
686 		qedf->slow_sge_ios++;
687 		io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
688 	} else {
689 		qedf->fast_sge_ios++;
690 		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
691 	}
692 }
693 
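/*
 * Initialize the task context for a middle path (ELS or task management)
 * request.  These use the pre-allocated single-SGE request/response buffers
 * from qedf_init_mp_req() and are always completed on CQ 0.
 */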
694 void qedf_init_mp_task(struct qedf_ioreq *io_req,
695 	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
696 {
697 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
698 	struct qedf_rport *fcport = io_req->fcport;
699 	struct qedf_ctx *qedf = io_req->fcport->qedf;
700 	struct fc_frame_header *fc_hdr;
701 	struct fcoe_tx_mid_path_params task_fc_hdr;
702 	struct scsi_sgl_task_params tx_sgl_task_params;
703 	struct scsi_sgl_task_params rx_sgl_task_params;
704 
705 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
706 		  "Initializing MP task for cmd_type=%d\n",
707 		  io_req->cmd_type);
708 
709 	qedf->control_requests++;
710 
711 	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
712 	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
713 	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
714 	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
715 
716 	/* Setup the task from io_req for easy reference */
717 	io_req->task = task_ctx;
718 
719 	/* Setup the fields for fcoe_task_params */
720 	io_req->task_params->context = task_ctx;
721 	io_req->task_params->sqe = sqe;
722 	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
723 	io_req->task_params->tx_io_size = io_req->data_xfer_len;
724 	/* rx_io_size tells the f/w how large a response buffer we have */
725 	io_req->task_params->rx_io_size = PAGE_SIZE;
726 	io_req->task_params->conn_cid = fcport->fw_cid;
727 	io_req->task_params->itid = io_req->xid;
728 	/* Return middle path commands on CQ 0 */
729 	io_req->task_params->cq_rss_number = 0;
730 	io_req->task_params->is_tape_device = fcport->dev_type;
731 
732 	fc_hdr = &(mp_req->req_fc_hdr);
733 	/* Set OX_ID and RX_ID based on driver task id */
734 	fc_hdr->fh_ox_id = io_req->xid;
735 	fc_hdr->fh_rx_id = htons(0xffff);
736 
737 	/* Set up FC header information */
738 	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
739 	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
740 	task_fc_hdr.type = fc_hdr->fh_type;
741 	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
742 	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
743 	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
744 	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
745 
746 	/* Set up s/g list parameters for request buffer */
747 	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
748 	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
749 	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
750 	tx_sgl_task_params.num_sges = 1;
751 	/* The request buffer size is the length of the data to transmit */
752 	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
753 	tx_sgl_task_params.small_mid_sge = 0;
754 
755 	/* Set up s/g list parameters for response buffer */
756 	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
757 	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
758 	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
759 	rx_sgl_task_params.num_sges = 1;
760 	/* The response buffer is a single page, so use PAGE_SIZE */
761 	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
762 	rx_sgl_task_params.small_mid_sge = 0;
763 
764 
765 	/*
766 	 * The last argument is 0 because the previous code did not request
767 	 * the FC header information.
768 	 */
769 	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
770 						     &task_fc_hdr,
771 						     &tx_sgl_task_params,
772 						     &rx_sgl_task_params, 0);
773 
774 	/* Midpath requests always consume 1 SGE */
775 	qedf->single_sge_ios++;
776 }
777 
778 /* Presumed that fcport->rport_lock is held */
779 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
780 {
781 	uint16_t total_sqe = (fcport->sq_mem_size)/(sizeof(struct fcoe_wqe));
782 	u16 rval;
783 
784 	rval = fcport->sq_prod_idx;
785 
786 	/* Adjust ring index */
787 	fcport->sq_prod_idx++;
788 	fcport->fw_sq_prod_idx++;
789 	if (fcport->sq_prod_idx == total_sqe)
790 		fcport->sq_prod_idx = 0;
791 
792 	return rval;
793 }
794 
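/*
 * Ring the connection doorbell to hand any newly built SQEs to the firmware.
 * The doorbell data carries the firmware SQ producer index; the barriers
 * below ensure the SQE memory writes are visible before the doorbell write
 * reaches the adapter.
 */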
795 void qedf_ring_doorbell(struct qedf_rport *fcport)
796 {
797 	struct fcoe_db_data dbell = { 0 };
798 
799 	dbell.agg_flags = 0;
800 
801 	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
802 	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
803 	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
804 	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
805 
806 	dbell.sq_prod = fcport->fw_sq_prod_idx;
807 	writel(*(u32 *)&dbell, fcport->p_doorbell);
808 	/* Make sure SQ index is updated so f/w processes requests in order */
809 	wmb();
810 	mmiowb();
811 }
812 
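/*
 * Record a request or response event in the driver's circular I/O trace
 * buffer (used when the qedf_io_tracing tunable is set).  Entries are indexed
 * by qedf->io_trace_idx, wrap at QEDF_IO_TRACE_SIZE and are protected by
 * io_trace_lock.
 */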
813 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
814 			  int8_t direction)
815 {
816 	struct qedf_ctx *qedf = fcport->qedf;
817 	struct qedf_io_log *io_log;
818 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
819 	unsigned long flags;
820 	uint8_t op;
821 
822 	spin_lock_irqsave(&qedf->io_trace_lock, flags);
823 
824 	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
825 	io_log->direction = direction;
826 	io_log->task_id = io_req->xid;
827 	io_log->port_id = fcport->rdata->ids.port_id;
828 	io_log->lun = sc_cmd->device->lun;
829 	io_log->op = op = sc_cmd->cmnd[0];
830 	io_log->lba[0] = sc_cmd->cmnd[2];
831 	io_log->lba[1] = sc_cmd->cmnd[3];
832 	io_log->lba[2] = sc_cmd->cmnd[4];
833 	io_log->lba[3] = sc_cmd->cmnd[5];
834 	io_log->bufflen = scsi_bufflen(sc_cmd);
835 	io_log->sg_count = scsi_sg_count(sc_cmd);
836 	io_log->result = sc_cmd->result;
837 	io_log->jiffies = jiffies;
838 	io_log->refcount = kref_read(&io_req->refcount);
839 
840 	if (direction == QEDF_IO_TRACE_REQ) {
841 		/* For requests we only care about the submission CPU */
842 		io_log->req_cpu = io_req->cpu;
843 		io_log->int_cpu = 0;
844 		io_log->rsp_cpu = 0;
845 	} else if (direction == QEDF_IO_TRACE_RSP) {
846 		io_log->req_cpu = io_req->cpu;
847 		io_log->int_cpu = io_req->int_cpu;
848 		io_log->rsp_cpu = smp_processor_id();
849 	}
850 
851 	io_log->sge_type = io_req->sge_type;
852 
853 	qedf->io_trace_idx++;
854 	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
855 		qedf->io_trace_idx = 0;
856 
857 	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
858 }
859 
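/*
 * Post a SCSI command to the firmware.  Called from qedf_queuecommand() with
 * fcport->rport_lock held: builds the BD list from the scatterlist, fills a
 * work-queue entry, initializes the task context and rings the SQ doorbell.
 */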
860 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
861 {
862 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
863 	struct Scsi_Host *host = sc_cmd->device->host;
864 	struct fc_lport *lport = shost_priv(host);
865 	struct qedf_ctx *qedf = lport_priv(lport);
866 	struct e4_fcoe_task_context *task_ctx;
867 	u16 xid;
868 	enum fcoe_task_type req_type = 0;
869 	struct fcoe_wqe *sqe;
870 	u16 sqe_idx;
871 
872 	/* Initialize the rest of the io_req fields */
873 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
874 	sc_cmd->SCp.ptr = (char *)io_req;
875 	io_req->use_slowpath = false; /* Assume fast SGL by default */
876 
877 	/* Record which cpu this request is associated with */
878 	io_req->cpu = smp_processor_id();
879 
880 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
881 		req_type = FCOE_TASK_TYPE_READ_INITIATOR;
882 		io_req->io_req_flags = QEDF_READ;
883 		qedf->input_requests++;
884 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
885 		req_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
886 		io_req->io_req_flags = QEDF_WRITE;
887 		qedf->output_requests++;
888 	} else {
889 		io_req->io_req_flags = 0;
890 		qedf->control_requests++;
891 	}
892 
893 	xid = io_req->xid;
894 
895 	/* Build buffer descriptor list for firmware from sg list */
896 	if (qedf_build_bd_list_from_sg(io_req)) {
897 		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
898 		kref_put(&io_req->refcount, qedf_release_cmd);
899 		return -EAGAIN;
900 	}
901 
902 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
903 		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
904 		kref_put(&io_req->refcount, qedf_release_cmd);
905 	}
906 
907 	/* Obtain free SQE */
908 	sqe_idx = qedf_get_sqe_idx(fcport);
909 	sqe = &fcport->sq[sqe_idx];
910 	memset(sqe, 0, sizeof(struct fcoe_wqe));
911 
912 	/* Get the task context */
913 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
914 	if (!task_ctx) {
915 		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
916 			   xid);
917 		kref_put(&io_req->refcount, qedf_release_cmd);
918 		return -EINVAL;
919 	}
920 
921 	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
922 
923 	/* Ring doorbell */
924 	qedf_ring_doorbell(fcport);
925 
926 	if (qedf_io_tracing && io_req->sc_cmd)
927 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
928 
929 	return false;
930 }
931 
932 int
933 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
934 {
935 	struct fc_lport *lport = shost_priv(host);
936 	struct qedf_ctx *qedf = lport_priv(lport);
937 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
938 	struct fc_rport_libfc_priv *rp = rport->dd_data;
939 	struct qedf_rport *fcport;
940 	struct qedf_ioreq *io_req;
941 	int rc = 0;
942 	int rval;
943 	unsigned long flags = 0;
944 
945 
946 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
947 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
948 		sc_cmd->result = DID_NO_CONNECT << 16;
949 		sc_cmd->scsi_done(sc_cmd);
950 		return 0;
951 	}
952 
953 	if (!qedf->pdev->msix_enabled) {
954 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
955 		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
956 		    sc_cmd);
957 		sc_cmd->result = DID_NO_CONNECT << 16;
958 		sc_cmd->scsi_done(sc_cmd);
959 		return 0;
960 	}
961 
962 	rval = fc_remote_port_chkready(rport);
963 	if (rval) {
964 		sc_cmd->result = rval;
965 		sc_cmd->scsi_done(sc_cmd);
966 		return 0;
967 	}
968 
969 	/* Retry command if we are doing a qed drain operation */
970 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
971 		rc = SCSI_MLQUEUE_HOST_BUSY;
972 		goto exit_qcmd;
973 	}
974 
975 	if (lport->state != LPORT_ST_READY ||
976 	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
977 		rc = SCSI_MLQUEUE_HOST_BUSY;
978 		goto exit_qcmd;
979 	}
980 
981 	/* rport and tgt are allocated together, so tgt should be non-NULL */
982 	fcport = (struct qedf_rport *)&rp[1];
983 
984 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
985 		/*
986 		 * Session is not offloaded yet. Let SCSI-ml retry
987 		 * the command.
988 		 */
989 		rc = SCSI_MLQUEUE_TARGET_BUSY;
990 		goto exit_qcmd;
991 	}
992 	if (fcport->retry_delay_timestamp) {
993 		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
994 			fcport->retry_delay_timestamp = 0;
995 		} else {
996 			/* If retry_delay timer is active, flow off the ML */
997 			rc = SCSI_MLQUEUE_TARGET_BUSY;
998 			goto exit_qcmd;
999 		}
1000 	}
1001 
1002 	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1003 	if (!io_req) {
1004 		rc = SCSI_MLQUEUE_HOST_BUSY;
1005 		goto exit_qcmd;
1006 	}
1007 
1008 	io_req->sc_cmd = sc_cmd;
1009 
1010 	/* Take fcport->rport_lock for posting to fcport send queue */
1011 	spin_lock_irqsave(&fcport->rport_lock, flags);
1012 	if (qedf_post_io_req(fcport, io_req)) {
1013 		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1014 		/* Return SQE to pool */
1015 		atomic_inc(&fcport->free_sqes);
1016 		rc = SCSI_MLQUEUE_HOST_BUSY;
1017 	}
1018 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1019 
1020 exit_qcmd:
1021 	return rc;
1022 }
1023 
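/*
 * Parse the FCP_RSP IU from the completion CQE: record the residual and SCSI
 * status, pick up the FCP response code (only meaningful for task management
 * functions) and copy any sense data, truncated to SCSI_SENSE_BUFFERSIZE,
 * into the midlayer sense buffer.
 */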
1024 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1025 				 struct fcoe_cqe_rsp_info *fcp_rsp)
1026 {
1027 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1028 	struct qedf_ctx *qedf = io_req->fcport->qedf;
1029 	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1030 	int fcp_sns_len = 0;
1031 	int fcp_rsp_len = 0;
1032 	uint8_t *rsp_info, *sense_data;
1033 
1034 	io_req->fcp_status = FC_GOOD;
1035 	io_req->fcp_resid = 0;
1036 	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1037 	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1038 		io_req->fcp_resid = fcp_rsp->fcp_resid;
1039 
1040 	io_req->scsi_comp_flags = rsp_flags;
1041 	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1042 	    fcp_rsp->scsi_status_code;
1043 
1044 	if (rsp_flags &
1045 	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1046 		fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1047 
1048 	if (rsp_flags &
1049 	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1050 		fcp_sns_len = fcp_rsp->fcp_sns_len;
1051 
1052 	io_req->fcp_rsp_len = fcp_rsp_len;
1053 	io_req->fcp_sns_len = fcp_sns_len;
1054 	rsp_info = sense_data = io_req->sense_buffer;
1055 
1056 	/* fetch fcp_rsp_code */
1057 	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1058 		/* Only for task management function */
1059 		io_req->fcp_rsp_code = rsp_info[3];
1060 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1061 		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1062 		/* Adjust sense-data location. */
1063 		sense_data += fcp_rsp_len;
1064 	}
1065 
1066 	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1067 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1068 		    "Truncating sense buffer\n");
1069 		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1070 	}
1071 
1072 	/* The sense buffer can be NULL for TMF commands */
1073 	if (sc_cmd->sense_buffer) {
1074 		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1075 		if (fcp_sns_len)
1076 			memcpy(sc_cmd->sense_buffer, sense_data,
1077 			    fcp_sns_len);
1078 	}
1079 }
1080 
1081 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1082 {
1083 	struct scsi_cmnd *sc = io_req->sc_cmd;
1084 
1085 	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1086 		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1087 		    scsi_sg_count(sc), sc->sc_data_direction);
1088 		io_req->bd_tbl->bd_valid = 0;
1089 	}
1090 }
1091 
1092 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1093 	struct qedf_ioreq *io_req)
1094 {
1095 	u16 xid, rval;
1096 	struct e4_fcoe_task_context *task_ctx;
1097 	struct scsi_cmnd *sc_cmd;
1098 	struct fcoe_cqe_rsp_info *fcp_rsp;
1099 	struct qedf_rport *fcport;
1100 	int refcount;
1101 	u16 scope, qualifier = 0;
1102 	u8 fw_residual_flag = 0;
1103 
1104 	if (!io_req)
1105 		return;
1106 	if (!cqe)
1107 		return;
1108 
1109 	xid = io_req->xid;
1110 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
1111 	sc_cmd = io_req->sc_cmd;
1112 	fcp_rsp = &cqe->cqe_info.rsp_info;
1113 
1114 	if (!sc_cmd) {
1115 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1116 		return;
1117 	}
1118 
1119 	if (!sc_cmd->SCp.ptr) {
1120 		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1121 		    "another context.\n");
1122 		return;
1123 	}
1124 
1125 	if (!sc_cmd->request) {
1126 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1127 		    "sc_cmd=%p.\n", sc_cmd);
1128 		return;
1129 	}
1130 
1131 	if (!sc_cmd->request->special) {
1132 		QEDF_WARN(&(qedf->dbg_ctx), "request->special is NULL so "
1133 		    "request not valid, sc_cmd=%p.\n", sc_cmd);
1134 		return;
1135 	}
1136 
1137 	if (!sc_cmd->request->q) {
1138 		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1139 		   "is not valid, sc_cmd=%p.\n", sc_cmd);
1140 		return;
1141 	}
1142 
1143 	fcport = io_req->fcport;
1144 
1145 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
1146 
1147 	qedf_unmap_sg_list(qedf, io_req);
1148 
1149 	/* Check for FCP transport error */
1150 	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1151 		QEDF_ERR(&(qedf->dbg_ctx),
1152 		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1153 		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1154 		    io_req->fcp_rsp_code);
1155 		sc_cmd->result = DID_BUS_BUSY << 16;
1156 		goto out;
1157 	}
1158 
1159 	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1160 	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1161 	if (fw_residual_flag) {
1162 		QEDF_ERR(&(qedf->dbg_ctx),
1163 		    "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x "
1164 		    "fcp_resid=%d fw_residual=0x%x.\n", io_req->xid,
1165 		    fcp_rsp->rsp_flags.flags, io_req->fcp_resid,
1166 		    cqe->cqe_info.rsp_info.fw_residual);
1167 
1168 		if (io_req->cdb_status == 0)
1169 			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1170 		else
1171 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1172 
1173 		/* Abort the command since we did not get all the data */
1174 		init_completion(&io_req->abts_done);
1175 		rval = qedf_initiate_abts(io_req, true);
1176 		if (rval) {
1177 			QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1178 			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1179 		}
1180 
1181 		/*
1182 		 * Set resid to the whole buffer length so we won't try to reuse
1183 		 * any previously read data.
1184 		 */
1185 		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1186 		goto out;
1187 	}
1188 
1189 	switch (io_req->fcp_status) {
1190 	case FC_GOOD:
1191 		if (io_req->cdb_status == 0) {
1192 			/* Good I/O completion */
1193 			sc_cmd->result = DID_OK << 16;
1194 		} else {
1195 			refcount = kref_read(&io_req->refcount);
1196 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1197 			    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
1198 			    "lba=%02x%02x%02x%02x cdb_status=%d "
1199 			    "fcp_resid=0x%x refcount=%d.\n",
1200 			    qedf->lport->host->host_no, sc_cmd->device->id,
1201 			    sc_cmd->device->lun, io_req->xid,
1202 			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1203 			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1204 			    io_req->cdb_status, io_req->fcp_resid,
1205 			    refcount);
1206 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1207 
1208 			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1209 			    io_req->cdb_status == SAM_STAT_BUSY) {
1210 				/*
1211 				 * Check whether we need to set retry_delay at
1212 				 * all based on retry_delay module parameter
1213 				 * and the status qualifier.
1214 				 */
1215 
1216 				/* Upper 2 bits */
1217 				scope = fcp_rsp->retry_delay_timer & 0xC000;
1218 				/* Lower 14 bits */
1219 				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1220 
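				/*
				 * The retry delay qualifier is expressed in
				 * 100 ms increments, so qualifier * HZ / 10
				 * converts it to jiffies below.
				 */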
1221 				if (qedf_retry_delay &&
1222 				    scope > 0 && qualifier > 0 &&
1223 				    qualifier <= 0x3FEF) {
1224 					/* Check we don't go over the max */
1225 					if (qualifier > QEDF_RETRY_DELAY_MAX)
1226 						qualifier =
1227 						    QEDF_RETRY_DELAY_MAX;
1228 					fcport->retry_delay_timestamp =
1229 					    jiffies + (qualifier * HZ / 10);
1230 				}
1231 				/* Record stats */
1232 				if (io_req->cdb_status ==
1233 				    SAM_STAT_TASK_SET_FULL)
1234 					qedf->task_set_fulls++;
1235 				else
1236 					qedf->busy++;
1237 			}
1238 		}
1239 		if (io_req->fcp_resid)
1240 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
1241 		break;
1242 	default:
1243 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1244 			   io_req->fcp_status);
1245 		break;
1246 	}
1247 
1248 out:
1249 	if (qedf_io_tracing)
1250 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1251 
1252 	io_req->sc_cmd = NULL;
1253 	sc_cmd->SCp.ptr =  NULL;
1254 	sc_cmd->scsi_done(sc_cmd);
1255 	kref_put(&io_req->refcount, qedf_release_cmd);
1256 }
1257 
1258 /* Return a SCSI command in some other context besides a normal completion */
1259 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1260 	int result)
1261 {
1262 	u16 xid;
1263 	struct scsi_cmnd *sc_cmd;
1264 	int refcount;
1265 
1266 	if (!io_req)
1267 		return;
1268 
1269 	xid = io_req->xid;
1270 	sc_cmd = io_req->sc_cmd;
1271 
1272 	if (!sc_cmd) {
1273 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1274 		return;
1275 	}
1276 
1277 	if (!sc_cmd->SCp.ptr) {
1278 		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1279 		    "another context.\n");
1280 		return;
1281 	}
1282 
1283 	qedf_unmap_sg_list(qedf, io_req);
1284 
1285 	sc_cmd->result = result << 16;
1286 	refcount = kref_read(&io_req->refcount);
1287 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1288 	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1289 	    "allowed=%d retries=%d refcount=%d.\n",
1290 	    qedf->lport->host->host_no, sc_cmd->device->id,
1291 	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1292 	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1293 	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1294 	    refcount);
1295 
1296 	/*
1297 	 * Set resid to the whole buffer length so we won't try to reuse any
1298 	 * previously read data.
1299 	 */
1300 	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1301 
1302 	if (qedf_io_tracing)
1303 		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1304 
1305 	io_req->sc_cmd = NULL;
1306 	sc_cmd->SCp.ptr = NULL;
1307 	sc_cmd->scsi_done(sc_cmd);
1308 	kref_put(&io_req->refcount, qedf_release_cmd);
1309 }
1310 
1311 /*
1312  * Handle warning type CQE completions. This is mainly used for REC timer
1313  * popping.
1314  */
1315 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1316 	struct qedf_ioreq *io_req)
1317 {
1318 	int rval, i;
1319 	struct qedf_rport *fcport = io_req->fcport;
1320 	u64 err_warn_bit_map;
1321 	u8 err_warn = 0xff;
1322 
1323 	if (!cqe)
1324 		return;
1325 
1326 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1327 		  "xid=0x%x\n", io_req->xid);
1328 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1329 		  "err_warn_bitmap=%08x:%08x\n",
1330 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1331 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1332 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1333 		  "rx_buff_off=%08x, rx_id=%04x\n",
1334 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1335 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1336 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1337 
1338 	/* Combine the hi/lo words and find the first error/warning bit set */
1339 	err_warn_bit_map = (u64)
1340 	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1341 	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1342 	for (i = 0; i < 64; i++) {
1343 		if (err_warn_bit_map & (u64)((u64)1 << i)) {
1344 			err_warn = i;
1345 			break;
1346 		}
1347 	}
1348 
1349 	/* Check if REC TOV expired if this is a tape device */
1350 	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1351 		if (err_warn ==
1352 		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1353 			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1354 			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1355 				io_req->rx_buf_off =
1356 				    cqe->cqe_info.err_info.rx_buf_off;
1357 				io_req->tx_buf_off =
1358 				    cqe->cqe_info.err_info.tx_buf_off;
1359 				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1360 				rval = qedf_send_rec(io_req);
1361 				/*
1362 				 * We only want to abort the io_req if we
1363 				 * can't queue the REC command as we want to
1364 				 * keep the exchange open for recovery.
1365 				 */
1366 				if (rval)
1367 					goto send_abort;
1368 			}
1369 			return;
1370 		}
1371 	}
1372 
1373 send_abort:
1374 	init_completion(&io_req->abts_done);
1375 	rval = qedf_initiate_abts(io_req, true);
1376 	if (rval)
1377 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1378 }
1379 
1380 /* Cleanup a command when we receive an error detection completion */
1381 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1382 	struct qedf_ioreq *io_req)
1383 {
1384 	int rval;
1385 
1386 	if (!cqe)
1387 		return;
1388 
1389 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1390 		  "xid=0x%x\n", io_req->xid);
1391 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1392 		  "err_warn_bitmap=%08x:%08x\n",
1393 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1394 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1395 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1396 		  "rx_buff_off=%08x, rx_id=%04x\n",
1397 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1398 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1399 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1400 
1401 	if (qedf->stop_io_on_error) {
1402 		qedf_stop_all_io(qedf);
1403 		return;
1404 	}
1405 
1406 	init_completion(&io_req->abts_done);
1407 	rval = qedf_initiate_abts(io_req, true);
1408 	if (rval)
1409 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1410 }
1411 
1412 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1413 	struct qedf_ioreq *els_req)
1414 {
1415 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1416 	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1417 	    kref_read(&els_req->refcount));
1418 
1419 	/*
1420 	 * Need to distinguish this from a timeout when calling the
1421 	 * els_req->cb_func.
1422 	 */
1423 	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1424 
1425 	/* Cancel the timer */
1426 	cancel_delayed_work_sync(&els_req->timeout_work);
1427 
1428 	/* Call callback function to complete command */
1429 	if (els_req->cb_func && els_req->cb_arg) {
1430 		els_req->cb_func(els_req->cb_arg);
1431 		els_req->cb_arg = NULL;
1432 	}
1433 
1434 	/* Release kref for original initiate_els */
1435 	kref_put(&els_req->refcount, qedf_release_cmd);
1436 }
1437 
1438 /* A value of -1 for lun is a wild card that means flush all
1439  * active SCSI I/Os for the target.
1440  */
1441 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1442 {
1443 	struct qedf_ioreq *io_req;
1444 	struct qedf_ctx *qedf;
1445 	struct qedf_cmd_mgr *cmd_mgr;
1446 	int i, rc;
1447 
1448 	if (!fcport)
1449 		return;
1450 
1451 	/* Check that fcport is still offloaded */
1452 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1453 		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1454 		return;
1455 	}
1456 
1457 	qedf = fcport->qedf;
1458 	cmd_mgr = qedf->cmd_mgr;
1459 
1460 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Flush active i/o's.\n");
1461 
1462 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1463 		io_req = &cmd_mgr->cmds[i];
1464 
1465 		if (!io_req)
1466 			continue;
1467 		if (io_req->fcport != fcport)
1468 			continue;
1469 		if (io_req->cmd_type == QEDF_ELS) {
1470 			rc = kref_get_unless_zero(&io_req->refcount);
1471 			if (!rc) {
1472 				QEDF_ERR(&(qedf->dbg_ctx),
1473 				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1474 				    io_req, io_req->xid);
1475 				continue;
1476 			}
1477 			qedf_flush_els_req(qedf, io_req);
1478 			/*
1479 			 * Release the kref and go back to the top of the
1480 			 * loop.
1481 			 */
1482 			goto free_cmd;
1483 		}
1484 
1485 		if (io_req->cmd_type == QEDF_ABTS) {
1486 			rc = kref_get_unless_zero(&io_req->refcount);
1487 			if (!rc) {
1488 				QEDF_ERR(&(qedf->dbg_ctx),
1489 				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1490 				    io_req, io_req->xid);
1491 				continue;
1492 			}
1493 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1494 			    "Flushing abort xid=0x%x.\n", io_req->xid);
1495 
1496 			clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1497 
1498 			if (io_req->sc_cmd) {
1499 				if (io_req->return_scsi_cmd_on_abts)
1500 					qedf_scsi_done(qedf, io_req, DID_ERROR);
1501 			}
1502 
1503 			/* Notify eh_abort handler that ABTS is complete */
1504 			complete(&io_req->abts_done);
1505 			kref_put(&io_req->refcount, qedf_release_cmd);
1506 
1507 			goto free_cmd;
1508 		}
1509 
1510 		if (!io_req->sc_cmd)
1511 			continue;
1512 		if (lun > 0) {
1513 			if (io_req->sc_cmd->device->lun !=
1514 			    (u64)lun)
1515 				continue;
1516 		}
1517 
1518 		/*
1519 		 * Use kref_get_unless_zero in the unlikely case the command
1520 		 * we're about to flush was completed in the normal SCSI path
1521 		 */
1522 		rc = kref_get_unless_zero(&io_req->refcount);
1523 		if (!rc) {
1524 			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1525 			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1526 			continue;
1527 		}
1528 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1529 		    "Cleanup xid=0x%x.\n", io_req->xid);
1530 
1531 		/* Cleanup task and return I/O mid-layer */
1532 		qedf_initiate_cleanup(io_req, true);
1533 
1534 free_cmd:
1535 		kref_put(&io_req->refcount, qedf_release_cmd);
1536 	}
1537 }
1538 
1539 /*
1540  * Initiate an ABTS middle path command. Note that we don't have to initialize
1541  * the task context for an ABTS task.
1542  */
1543 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1544 {
1545 	struct fc_lport *lport;
1546 	struct qedf_rport *fcport = io_req->fcport;
1547 	struct fc_rport_priv *rdata;
1548 	struct qedf_ctx *qedf;
1549 	u16 xid;
1550 	u32 r_a_tov = 0;
1551 	int rc = 0;
1552 	unsigned long flags;
1553 	struct fcoe_wqe *sqe;
1554 	u16 sqe_idx;
1555 
1556 	/* Sanity check qedf_rport before dereferencing any pointers */
1557 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1558 		QEDF_ERR(NULL, "tgt not offloaded\n");
1559 		rc = 1;
1560 		goto abts_err;
1561 	}
1562 
1563 	rdata = fcport->rdata;
1564 	r_a_tov = rdata->r_a_tov;
1565 	qedf = fcport->qedf;
1566 	lport = qedf->lport;
1567 
1568 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1569 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1570 		rc = 1;
1571 		goto abts_err;
1572 	}
1573 
1574 	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1575 		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1576 		rc = 1;
1577 		goto abts_err;
1578 	}
1579 
1580 	/* Ensure room on SQ */
1581 	if (!atomic_read(&fcport->free_sqes)) {
1582 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1583 		rc = 1;
1584 		goto abts_err;
1585 	}
1586 
1587 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1588 		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1589 		rc = 1;
1590 		goto out;
1591 	}
1592 
1593 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1594 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1595 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1596 		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
1597 			  "cleanup or abort processing or already "
1598 			  "completed.\n", io_req->xid);
1599 		rc = 1;
1600 		goto out;
1601 	}
1602 
1603 	kref_get(&io_req->refcount);
1604 
1605 	xid = io_req->xid;
1606 	qedf->control_requests++;
1607 	qedf->packet_aborts++;
1608 
1609 	/* Set the return CPU to be the same as the request one */
1610 	io_req->cpu = smp_processor_id();
1611 
1612 	/* Set the command type to abort */
1613 	io_req->cmd_type = QEDF_ABTS;
1614 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1615 
1616 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1617 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "ABTS io_req xid = "
1618 		   "0x%x\n", xid);
1619 
1620 	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT * HZ);
1621 
1622 	spin_lock_irqsave(&fcport->rport_lock, flags);
1623 
1624 	sqe_idx = qedf_get_sqe_idx(fcport);
1625 	sqe = &fcport->sq[sqe_idx];
1626 	memset(sqe, 0, sizeof(struct fcoe_wqe));
1627 	io_req->task_params->sqe = sqe;
1628 
1629 	init_initiator_abort_fcoe_task(io_req->task_params);
1630 	qedf_ring_doorbell(fcport);
1631 
1632 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1633 
1634 	return rc;
1635 abts_err:
1636 	/*
1637 	 * If the ABTS task fails to queue then we need to cleanup the
1638 	 * task at the firmware.
1639 	 */
1640 	qedf_initiate_cleanup(io_req, return_scsi_cmd_on_abts);
1641 out:
1642 	return rc;
1643 }
1644 
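/*
 * Handle the firmware completion of an ABTS.  A BA_ACC response takes an
 * extra reference and schedules an RRQ after R_A_TOV before the exchange is
 * finally released; a BA_RJT (or unknown response) simply marks the abort as
 * failed.  In either case the original SCSI command is returned to the
 * midlayer here if requested, and the eh_abort waiter is woken.
 */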
1645 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1646 	struct qedf_ioreq *io_req)
1647 {
1648 	uint32_t r_ctl;
1649 	uint16_t xid;
1650 
1651 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1652 		   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1653 
1654 	cancel_delayed_work(&io_req->timeout_work);
1655 
1656 	xid = io_req->xid;
1657 	r_ctl = cqe->cqe_info.abts_info.r_ctl;
1658 
1659 	switch (r_ctl) {
1660 	case FC_RCTL_BA_ACC:
1661 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1662 		    "ABTS response - ACC Send RRQ after R_A_TOV\n");
1663 		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
1664 		/*
1665 		 * Don't release this cmd yet. It will be released
1666 		 * after we get the RRQ response.
1667 		 */
1668 		kref_get(&io_req->refcount);
1669 		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
1670 		    msecs_to_jiffies(qedf->lport->r_a_tov));
1671 		break;
1672 	/* For error cases let the cleanup return the command */
1673 	case FC_RCTL_BA_RJT:
1674 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1675 		   "ABTS response - RJT\n");
1676 		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
1677 		break;
1678 	default:
1679 		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
1680 		break;
1681 	}
1682 
1683 	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1684 
1685 	if (io_req->sc_cmd) {
1686 		if (io_req->return_scsi_cmd_on_abts)
1687 			qedf_scsi_done(qedf, io_req, DID_ERROR);
1688 	}
1689 
1690 	/* Notify eh_abort handler that ABTS is complete */
1691 	complete(&io_req->abts_done);
1692 
1693 	kref_put(&io_req->refcount, qedf_release_cmd);
1694 }
1695 
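/*
 * Allocate the DMA resources for a middle path request: one page each for
 * the request and response payloads plus a single scsi_sge BD for each,
 * all freed again by qedf_free_mp_resc().
 */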
1696 int qedf_init_mp_req(struct qedf_ioreq *io_req)
1697 {
1698 	struct qedf_mp_req *mp_req;
1699 	struct scsi_sge *mp_req_bd;
1700 	struct scsi_sge *mp_resp_bd;
1701 	struct qedf_ctx *qedf = io_req->fcport->qedf;
1702 	dma_addr_t addr;
1703 	uint64_t sz;
1704 
1705 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
1706 
1707 	mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
1708 	memset(mp_req, 0, sizeof(struct qedf_mp_req));
1709 
1710 	if (io_req->cmd_type != QEDF_ELS) {
1711 		mp_req->req_len = sizeof(struct fcp_cmnd);
1712 		io_req->data_xfer_len = mp_req->req_len;
1713 	} else
1714 		mp_req->req_len = io_req->data_xfer_len;
1715 
1716 	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
1717 	    &mp_req->req_buf_dma, GFP_KERNEL);
1718 	if (!mp_req->req_buf) {
1719 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
1720 		qedf_free_mp_resc(io_req);
1721 		return -ENOMEM;
1722 	}
1723 
1724 	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
1725 	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
1726 	if (!mp_req->resp_buf) {
1727 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
1728 			  "buffer\n");
1729 		qedf_free_mp_resc(io_req);
1730 		return -ENOMEM;
1731 	}
1732 
1733 	/* Allocate and map mp_req_bd and mp_resp_bd */
1734 	sz = sizeof(struct scsi_sge);
1735 	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1736 	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
1737 	if (!mp_req->mp_req_bd) {
1738 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
1739 		qedf_free_mp_resc(io_req);
1740 		return -ENOMEM;
1741 	}
1742 
1743 	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
1744 	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
1745 	if (!mp_req->mp_resp_bd) {
1746 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
1747 		qedf_free_mp_resc(io_req);
1748 		return -ENOMEM;
1749 	}
1750 
1751 	/* Fill bd table */
1752 	addr = mp_req->req_buf_dma;
1753 	mp_req_bd = mp_req->mp_req_bd;
1754 	mp_req_bd->sge_addr.lo = U64_LO(addr);
1755 	mp_req_bd->sge_addr.hi = U64_HI(addr);
1756 	mp_req_bd->sge_len = QEDF_PAGE_SIZE;
1757 
1758 	/*
1759 	 * MP buffer is either a task mgmt command or an ELS.
1760 	 * So the assumption is that it consumes a single bd
1761 	 * entry in the bd table
1762 	 */
1763 	mp_resp_bd = mp_req->mp_resp_bd;
1764 	addr = mp_req->resp_buf_dma;
1765 	mp_resp_bd->sge_addr.lo = U64_LO(addr);
1766 	mp_resp_bd->sge_addr.hi = U64_HI(addr);
1767 	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
1768 
1769 	return 0;
1770 }
1771 
1772 /*
1773  * Last-ditch effort to clear the port if it's stuck. Used only after a
1774  * cleanup task times out.
1775  */
1776 static void qedf_drain_request(struct qedf_ctx *qedf)
1777 {
1778 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
1779 		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
1780 		return;
1781 	}
1782 
1783 	/* Set bit to return all queuecommand requests as busy */
1784 	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1785 
1786 	/* Issue the qed drain request for this function; it should complete synchronously */
1787 	qed_ops->common->drain(qedf->cdev);
1788 
1789 	/* Settle time for CQEs to be returned */
1790 	msleep(100);
1791 
1792 	/* Unplug and continue */
1793 	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
1794 }
1795 
1796 /*
1797  * Returns SUCCESS if the cleanup task does not time out, otherwise returns
1798  * FAILED.
1799  */
1800 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
1801 	bool return_scsi_cmd_on_abts)
1802 {
1803 	struct qedf_rport *fcport;
1804 	struct qedf_ctx *qedf;
1805 	uint16_t xid;
1806 	struct e4_fcoe_task_context *task;
1807 	int tmo = 0;
1808 	int rc = SUCCESS;
1809 	unsigned long flags;
1810 	struct fcoe_wqe *sqe;
1811 	u16 sqe_idx;
1812 
1813 	fcport = io_req->fcport;
1814 	if (!fcport) {
1815 		QEDF_ERR(NULL, "fcport is NULL.\n");
1816 		return SUCCESS;
1817 	}
1818 
1819 	/* Sanity check qedf_rport before dereferencing any pointers */
1820 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1821 		QEDF_ERR(NULL, "tgt not offloaded\n");
1823 		return SUCCESS;
1824 	}
1825 
1826 	qedf = fcport->qedf;
1827 	if (!qedf) {
1828 		QEDF_ERR(NULL, "qedf is NULL.\n");
1829 		return SUCCESS;
1830 	}
1831 
1832 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1833 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
1834 		QEDF_ERR(&(qedf->dbg_ctx),
1835 			 "io_req xid=0x%x already in cleanup processing or already completed.\n",
1836 			 io_req->xid);
1837 		return SUCCESS;
1838 	}
1839 
1840 	/* Ensure room on SQ */
1841 	if (!atomic_read(&fcport->free_sqes)) {
1842 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1843 		return FAILED;
1844 	}
1845 
1847 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid=0x%x\n",
1848 	    io_req->xid);
1849 
1850 	/* Cleanup cmds re-use the same TID as the original I/O */
1851 	xid = io_req->xid;
1852 	io_req->cmd_type = QEDF_CLEANUP;
1853 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1854 
1855 	/* Set the return CPU to be the same as the request one */
1856 	io_req->cpu = smp_processor_id();
1857 
1858 	set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1859 
1860 	task = qedf_get_task_mem(&qedf->tasks, xid);
1861 
1862 	init_completion(&io_req->tm_done);
1863 
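	/* Post a cleanup WQE for this task on the connection's send queue */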
1864 	spin_lock_irqsave(&fcport->rport_lock, flags);
1865 
1866 	sqe_idx = qedf_get_sqe_idx(fcport);
1867 	sqe = &fcport->sq[sqe_idx];
1868 	memset(sqe, 0, sizeof(struct fcoe_wqe));
1869 	io_req->task_params->sqe = sqe;
1870 
1871 	init_initiator_cleanup_fcoe_task(io_req->task_params);
1872 	qedf_ring_doorbell(fcport);
1873 
1874 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1875 
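	/*
	 * Wait for the firmware to post the cleanup completion CQE;
	 * qedf_process_cleanup_compl() will complete tm_done.
	 */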
1876 	tmo = wait_for_completion_timeout(&io_req->tm_done,
1877 	    QEDF_CLEANUP_TIMEOUT * HZ);
1878 
1879 	if (!tmo) {
1880 		rc = FAILED;
1881 		/* Timeout case */
1882 		QEDF_ERR(&(qedf->dbg_ctx),
1883 			 "Cleanup command timeout, xid=0x%x.\n", io_req->xid);
1884 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1885 		/* Issue a drain request if cleanup task times out */
1886 		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
1887 		qedf_drain_request(qedf);
1888 	}
1889 
1890 	if (io_req->sc_cmd) {
1891 		if (io_req->return_scsi_cmd_on_abts)
1892 			qedf_scsi_done(qedf, io_req, DID_ERROR);
1893 	}
1894 
1895 	if (rc == SUCCESS)
1896 		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
1897 	else
1898 		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
1899 
1900 	return rc;
1901 }
1902 
1903 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1904 	struct qedf_ioreq *io_req)
1905 {
1906 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
1907 		   io_req->xid);
1908 
1909 	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
1910 
1911 	/* Complete so we can finish cleaning up the I/O */
1912 	complete(&io_req->tm_done);
1913 }
1914 
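/*
 * Build and send a task management request (e.g. LUN or target reset) on the
 * offloaded session, wait up to QEDF_TM_TIMEOUT seconds for the FCP response,
 * then flush the I/Os affected by the reset.
 */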
1915 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
1916 	uint8_t tm_flags)
1917 {
1918 	struct qedf_ioreq *io_req;
1919 	struct e4_fcoe_task_context *task;
1920 	struct qedf_ctx *qedf = fcport->qedf;
1921 	struct fc_lport *lport = qedf->lport;
1922 	int rc = 0;
1923 	uint16_t xid;
1924 	int tmo = 0;
1925 	unsigned long flags;
1926 	struct fcoe_wqe *sqe;
1927 	u16 sqe_idx;
1928 
1929 	if (!sc_cmd) {
1930 		QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n");
1931 		return FAILED;
1932 	}
1933 
1934 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1935 		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
1937 		return FAILED;
1938 	}
1939 
1940 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1941 		  "portid = 0x%x tm_flags = %d\n", fcport->rdata->ids.port_id, tm_flags);
1942 
1943 	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
1944 	if (!io_req) {
1945 		QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF io_req allocation.\n");
1946 		rc = -EAGAIN;
1947 		goto reset_tmf_err;
1948 	}
1949 
1950 	if (tm_flags == FCP_TMF_LUN_RESET)
1951 		qedf->lun_resets++;
1952 	else if (tm_flags == FCP_TMF_TGT_RESET)
1953 		qedf->target_resets++;
1954 
1955 	/* Initialize rest of io_req fields */
1956 	io_req->sc_cmd = sc_cmd;
1957 	io_req->fcport = fcport;
1958 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
1959 
1960 	/* Set the return CPU to be the same as the request one */
1961 	io_req->cpu = smp_processor_id();
1962 
1963 	/* Set TM flags */
1964 	io_req->io_req_flags = QEDF_READ;
1965 	io_req->data_xfer_len = 0;
1966 	io_req->tm_flags = tm_flags;
1967 
1968 	/* Default is to return a SCSI command when an error occurs */
1969 	io_req->return_scsi_cmd_on_abts = true;
1970 
1971 	/* Obtain exchange id */
1972 	xid = io_req->xid;
1973 
1974 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
1975 		  "TMF io_req xid = 0x%x\n", xid);
1976 
1977 	/* Initialize task context for this IO request */
1978 	task = qedf_get_task_mem(&qedf->tasks, xid);
1979 
1980 	init_completion(&io_req->tm_done);
1981 
1982 	spin_lock_irqsave(&fcport->rport_lock, flags);
1983 
1984 	sqe_idx = qedf_get_sqe_idx(fcport);
1985 	sqe = &fcport->sq[sqe_idx];
1986 	memset(sqe, 0, sizeof(struct fcoe_wqe));
1987 
1988 	qedf_init_task(fcport, lport, io_req, task, sqe);
1989 	qedf_ring_doorbell(fcport);
1990 
1991 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1992 
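	/*
	 * Wait for the TMF response; qedf_process_tmf_compl() parses the
	 * FCP_RSP and completes tm_done.
	 */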
1993 	tmo = wait_for_completion_timeout(&io_req->tm_done,
1994 	    QEDF_TM_TIMEOUT * HZ);
1995 
1996 	if (!tmo) {
1997 		rc = FAILED;
1998 		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
1999 	} else {
2000 		/* Check TMF response code */
2001 		if (io_req->fcp_rsp_code == 0)
2002 			rc = SUCCESS;
2003 		else
2004 			rc = FAILED;
2005 	}
2006 
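	/* Flush outstanding I/Os for the reset LUN, or for all LUNs on a target reset */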
2007 	if (tm_flags == FCP_TMF_LUN_RESET)
2008 		qedf_flush_active_ios(fcport, (int)sc_cmd->device->lun);
2009 	else
2010 		qedf_flush_active_ios(fcport, -1);
2011 
2012 	kref_put(&io_req->refcount, qedf_release_cmd);
2013 
2014 	if (rc != SUCCESS) {
2015 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2016 		rc = FAILED;
2017 	} else {
2018 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2019 		rc = SUCCESS;
2020 	}
2021 reset_tmf_err:
2022 	return rc;
2023 }
2024 
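/*
 * Entry point for SCSI error-handler resets: validate the rport, link and
 * unload state, then hand the request off to qedf_execute_tmf().
 */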
2025 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2026 {
2027 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2028 	struct fc_rport_libfc_priv *rp = rport->dd_data;
2029 	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2030 	struct qedf_ctx *qedf;
2031 	struct fc_lport *lport;
2032 	int rc = SUCCESS;
2033 	int rval;
2034 
2035 	rval = fc_remote_port_chkready(rport);
2036 
2037 	if (rval) {
2038 		QEDF_ERR(NULL, "device_reset rport not ready\n");
2039 		rc = FAILED;
2040 		goto tmf_err;
2041 	}
2042 
2043 	if (fcport == NULL) {
2044 		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2045 		rc = FAILED;
2046 		goto tmf_err;
2047 	}
2048 
2049 	qedf = fcport->qedf;
2050 	lport = qedf->lport;
2051 
2052 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2053 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2054 		rc = SUCCESS;
2055 		goto tmf_err;
2056 	}
2057 
2058 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2059 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2060 		rc = FAILED;
2061 		goto tmf_err;
2062 	}
2063 
2064 	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2065 
2066 tmf_err:
2067 	return rc;
2068 }
2069 
2070 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2071 	struct qedf_ioreq *io_req)
2072 {
2073 	struct fcoe_cqe_rsp_info *fcp_rsp;
2074 
2075 	fcp_rsp = &cqe->cqe_info.rsp_info;
2076 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
2077 
2078 	io_req->sc_cmd = NULL;
2079 	complete(&io_req->tm_done);
2080 }
2081 
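/*
 * Handle an unsolicited frame delivered via the BDQ: copy it into an
 * fc_frame, defer it to qedf_io_wq so libfc can process it in a non-atomic
 * context, then advance the BDQ producer index.
 */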
2082 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2083 	struct fcoe_cqe *cqe)
2084 {
2085 	unsigned long flags;
2086 	uint16_t tmp;
2087 	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2088 	u32 payload_len, crc;
2089 	struct fc_frame_header *fh;
2090 	struct fc_frame *fp;
2091 	struct qedf_io_work *io_work;
2092 	u32 bdq_idx;
2093 	void *bdq_addr;
2094 	struct scsi_bd *p_bd_info;
2095 
2096 	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2097 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2098 		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2099 		  le32_to_cpu(p_bd_info->address.hi),
2100 		  le32_to_cpu(p_bd_info->address.lo),
2101 		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2102 		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2103 		  qedf->bdq_prod_idx, pktlen);
2104 
2105 	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2106 	if (bdq_idx >= QEDF_BDQ_SIZE) {
2107 		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2108 		    bdq_idx);
2109 		goto increment_prod;
2110 	}
2111 
2112 	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2113 	if (!bdq_addr) {
2114 		QEDF_ERR(&(qedf->dbg_ctx),
2115 			 "bdq_addr is NULL, dropping unsolicited packet.\n");
2116 		goto increment_prod;
2117 	}
2118 
2119 	if (qedf_dump_frames) {
2120 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2121 		    "BDQ frame is at addr=%p.\n", bdq_addr);
2122 		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2123 		    (void *)bdq_addr, pktlen, false);
2124 	}
2125 
2126 	/* Allocate frame */
2127 	payload_len = pktlen - sizeof(struct fc_frame_header);
2128 	fp = fc_frame_alloc(qedf->lport, payload_len);
2129 	if (!fp) {
2130 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2131 		goto increment_prod;
2132 	}
2133 
2134 	/* Copy data from BDQ buffer into fc_frame struct */
2135 	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2136 	memcpy(fh, (void *)bdq_addr, pktlen);
2137 
2138 	/* Initialize the frame so libfc sees it as a valid frame */
2139 	crc = fcoe_fc_crc(fp);
2140 	fc_frame_init(fp);
2141 	fr_dev(fp) = qedf->lport;
2142 	fr_sof(fp) = FC_SOF_I3;
2143 	fr_eof(fp) = FC_EOF_T;
2144 	fr_crc(fp) = cpu_to_le32(~crc);
2145 
2146 	/*
2147 	 * We need to return the frame back up to libfc in a non-atomic
2148 	 * context
2149 	 */
2150 	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2151 	if (!io_work) {
2152 		QEDF_WARN(&(qedf->dbg_ctx),
2153 			  "Could not allocate work for I/O completion.\n");
2154 		fc_frame_free(fp);
2155 		goto increment_prod;
2156 	}
2157 	memset(io_work, 0, sizeof(struct qedf_io_work));
2158 
2159 	INIT_WORK(&io_work->work, qedf_fp_io_handler);
2160 
2161 	/* Copy contents of CQE for deferred processing */
2162 	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2163 
2164 	io_work->qedf = qedf;
2165 	io_work->fp = fp;
2166 
2167 	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2168 increment_prod:
2169 	spin_lock_irqsave(&qedf->hba_lock, flags);
2170 
2171 	/* Increment producer to let f/w know we've handled the frame */
2172 	qedf->bdq_prod_idx++;
2173 
2174 	/* Producer index wraps at uint16_t boundary */
2175 	if (qedf->bdq_prod_idx == 0xffff)
2176 		qedf->bdq_prod_idx = 0;
2177 
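	/* Update both BDQ producer registers; the read-backs help ensure the posted writes reach the device */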
2178 	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2179 	tmp = readw(qedf->bdq_primary_prod);
2180 	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2181 	tmp = readw(qedf->bdq_secondary_prod);
2182 
2183 	spin_unlock_irqrestore(&qedf->hba_lock, flags);
2184 }
2185