xref: /openbmc/linux/drivers/scsi/qedf/qedf_io.c (revision e825b29a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  QLogic FCoE Offload Driver
4  *  Copyright (c) 2016-2018 Cavium Inc.
5  */
6 #include <linux/spinlock.h>
7 #include <linux/vmalloc.h>
8 #include "qedf.h"
9 #include <scsi/scsi_tcq.h>
10 
11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
12 	unsigned int timer_msec)
13 {
14 	queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
15 	    msecs_to_jiffies(timer_msec));
16 }
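/*
 * A minimal usage sketch (hedged): callers typically arm this timer right
 * after posting a request that needs a driver-side deadline, e.g.
 *
 *	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
 *
 * where QEDF_ABORT_TIMEOUT is assumed to be a millisecond constant from
 * qedf.h.  When the delay expires, qedf_cmd_timeout() below runs from
 * qedf->timer_work_queue context.
 */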
17 
18 static void qedf_cmd_timeout(struct work_struct *work)
19 {
20 
21 	struct qedf_ioreq *io_req =
22 	    container_of(work, struct qedf_ioreq, timeout_work.work);
23 	struct qedf_ctx *qedf;
24 	struct qedf_rport *fcport;
25 
26 	fcport = io_req->fcport;
27 	if (!fcport) {
28 		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
29 		return;
30 	}
31 
32 	qedf = fcport->qedf;
33 
34 	switch (io_req->cmd_type) {
35 	case QEDF_ABTS:
36 		if (qedf == NULL) {
37 			QEDF_INFO(NULL, QEDF_LOG_IO,
38 				  "qedf is NULL for ABTS xid=0x%x.\n",
39 				  io_req->xid);
40 			return;
41 		}
42 
43 		QEDF_ERR(&(qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
44 		    io_req->xid);
45 		/* Cleanup timed out ABTS */
46 		qedf_initiate_cleanup(io_req, true);
47 		complete(&io_req->abts_done);
48 
49 		/*
50 		 * Need to call kref_put for reference taken when initiate_abts
51 		 * was called since abts_compl won't be called now that we've
52 		 * cleaned up the task.
53 		 */
54 		kref_put(&io_req->refcount, qedf_release_cmd);
55 
56 		/* Clear the in-abort bit now that we're done with the command */
57 		clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
58 
59 		/*
60 		 * Now that the original I/O and the ABTS are complete see
61 		 * if we need to reconnect to the target.
62 		 */
63 		qedf_restart_rport(fcport);
64 		break;
65 	case QEDF_ELS:
66 		if (!qedf) {
67 			QEDF_INFO(NULL, QEDF_LOG_IO,
68 				  "qedf is NULL for ELS xid=0x%x.\n",
69 				  io_req->xid);
70 			return;
71 		}
72 		/* ELS request no longer outstanding since it timed out */
73 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
74 
75 		kref_get(&io_req->refcount);
76 		/*
77 		 * Don't attempt to clean an ELS timeout as any subsequent
78 		 * ABTS or cleanup requests just hang.  For now just free
79 		 * the resources of the original I/O and the RRQ.
80 		 */
81 		QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
82 			  io_req->xid);
83 		qedf_initiate_cleanup(io_req, true);
84 		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
85 		/* Call callback function to complete command */
86 		if (io_req->cb_func && io_req->cb_arg) {
87 			io_req->cb_func(io_req->cb_arg);
88 			io_req->cb_arg = NULL;
89 		}
90 		kref_put(&io_req->refcount, qedf_release_cmd);
91 		break;
92 	case QEDF_SEQ_CLEANUP:
93 		QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
94 		    "xid=0x%x.\n", io_req->xid);
95 		qedf_initiate_cleanup(io_req, true);
96 		io_req->event = QEDF_IOREQ_EV_ELS_TMO;
97 		qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
98 		break;
99 	default:
100 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
101 			  "Hit default case, xid=0x%x.\n", io_req->xid);
102 		break;
103 	}
104 }
105 
106 void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr)
107 {
108 	struct io_bdt *bdt_info;
109 	struct qedf_ctx *qedf = cmgr->qedf;
110 	size_t bd_tbl_sz;
111 	u16 min_xid = 0;
112 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
113 	int num_ios;
114 	int i;
115 	struct qedf_ioreq *io_req;
116 
117 	num_ios = max_xid - min_xid + 1;
118 
119 	/* Free fcoe_bdt_ctx structures */
120 	if (!cmgr->io_bdt_pool) {
121 		QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
122 		goto free_cmd_pool;
123 	}
124 
125 	bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge);
126 	for (i = 0; i < num_ios; i++) {
127 		bdt_info = cmgr->io_bdt_pool[i];
128 		if (bdt_info && bdt_info->bd_tbl) {
129 			dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
130 			    bdt_info->bd_tbl, bdt_info->bd_tbl_dma);
131 			bdt_info->bd_tbl = NULL;
132 		}
133 	}
134 
135 	/* Destroy io_bdt pool */
136 	for (i = 0; i < num_ios; i++) {
137 		kfree(cmgr->io_bdt_pool[i]);
138 		cmgr->io_bdt_pool[i] = NULL;
139 	}
140 
141 	kfree(cmgr->io_bdt_pool);
142 	cmgr->io_bdt_pool = NULL;
143 
144 free_cmd_pool:
145 
146 	for (i = 0; i < num_ios; i++) {
147 		io_req = &cmgr->cmds[i];
148 		kfree(io_req->sgl_task_params);
149 		kfree(io_req->task_params);
150 		/* Make sure we free per command sense buffer */
151 		if (io_req->sense_buffer)
152 			dma_free_coherent(&qedf->pdev->dev,
153 			    QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
154 			    io_req->sense_buffer_dma);
155 		cancel_delayed_work_sync(&io_req->rrq_work);
156 	}
157 
158 	/* Free command manager itself */
159 	vfree(cmgr);
160 }
161 
162 static void qedf_handle_rrq(struct work_struct *work)
163 {
164 	struct qedf_ioreq *io_req =
165 	    container_of(work, struct qedf_ioreq, rrq_work.work);
166 
167 	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
168 	qedf_send_rrq(io_req);
169 
170 }
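/*
 * Note: RRQ (Reinstate Recovery Qualifier) is the ELS sent after an ABTS
 * completes so the exchange and its recovery qualifier can be safely
 * reused.  The delayed work above lets the driver hold the xid for a quiet
 * period (nominally R_A_TOV) before qedf_send_rrq() releases it.
 */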
171 
172 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
173 {
174 	struct qedf_cmd_mgr *cmgr;
175 	struct io_bdt *bdt_info;
176 	struct qedf_ioreq *io_req;
177 	u16 xid;
178 	int i;
179 	int num_ios;
180 	u16 min_xid = 0;
181 	u16 max_xid = (FCOE_PARAMS_NUM_TASKS - 1);
182 
183 	/* Make sure num_queues is already set before calling this function */
184 	if (!qedf->num_queues) {
185 		QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
186 		return NULL;
187 	}
188 
189 	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
190 		QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
191 			   "max_xid 0x%x.\n", min_xid, max_xid);
192 		return NULL;
193 	}
194 
195 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
196 		   "0x%x.\n", min_xid, max_xid);
197 
198 	num_ios = max_xid - min_xid + 1;
199 
200 	cmgr = vzalloc(sizeof(struct qedf_cmd_mgr));
201 	if (!cmgr) {
202 		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
203 		return NULL;
204 	}
205 
206 	cmgr->qedf = qedf;
207 	spin_lock_init(&cmgr->lock);
208 
209 	/*
210 	 * Initialize I/O request fields.
211 	 */
212 	xid = 0;
213 
214 	for (i = 0; i < num_ios; i++) {
215 		io_req = &cmgr->cmds[i];
216 		INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
217 
218 		io_req->xid = xid++;
219 
220 		INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
221 
222 		/* Allocate DMA memory to hold sense buffer */
223 		io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
224 		    QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
225 		    GFP_KERNEL);
226 		if (!io_req->sense_buffer) {
227 			QEDF_ERR(&qedf->dbg_ctx,
228 				 "Failed to alloc sense buffer.\n");
229 			goto mem_err;
230 		}
231 
232 		/* Allocate task parameters to pass to f/w init functions */
233 		io_req->task_params = kzalloc(sizeof(*io_req->task_params),
234 					      GFP_KERNEL);
235 		if (!io_req->task_params) {
236 			QEDF_ERR(&(qedf->dbg_ctx),
237 				 "Failed to allocate task_params for xid=0x%x\n",
238 				 i);
239 			goto mem_err;
240 		}
241 
242 		/*
243 		 * Allocate scatter/gather list info to pass to f/w init
244 		 * functions.
245 		 */
246 		io_req->sgl_task_params = kzalloc(
247 		    sizeof(struct scsi_sgl_task_params), GFP_KERNEL);
248 		if (!io_req->sgl_task_params) {
249 			QEDF_ERR(&(qedf->dbg_ctx),
250 				 "Failed to allocate sgl_task_params for xid=0x%x\n",
251 				 i);
252 			goto mem_err;
253 		}
254 	}
255 
256 	/* Allocate pool of io_bdts - one for each qedf_ioreq */
257 	cmgr->io_bdt_pool = kcalloc(num_ios, sizeof(struct io_bdt *),
258 	    GFP_KERNEL);
259 
260 	if (!cmgr->io_bdt_pool) {
261 		QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
262 		goto mem_err;
263 	}
264 
265 	for (i = 0; i < num_ios; i++) {
266 		cmgr->io_bdt_pool[i] = kmalloc(sizeof(struct io_bdt),
267 		    GFP_KERNEL);
268 		if (!cmgr->io_bdt_pool[i]) {
269 			QEDF_WARN(&(qedf->dbg_ctx),
270 				  "Failed to alloc io_bdt_pool[%d].\n", i);
271 			goto mem_err;
272 		}
273 	}
274 
275 	for (i = 0; i < num_ios; i++) {
276 		bdt_info = cmgr->io_bdt_pool[i];
277 		bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
278 		    QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge),
279 		    &bdt_info->bd_tbl_dma, GFP_KERNEL);
280 		if (!bdt_info->bd_tbl) {
281 			QEDF_WARN(&(qedf->dbg_ctx),
282 				  "Failed to alloc bdt_tbl[%d].\n", i);
283 			goto mem_err;
284 		}
285 	}
286 	atomic_set(&cmgr->free_list_cnt, num_ios);
287 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
288 	    "cmgr->free_list_cnt=%d.\n",
289 	    atomic_read(&cmgr->free_list_cnt));
290 
291 	return cmgr;
292 
293 mem_err:
294 	qedf_cmd_mgr_free(cmgr);
295 	return NULL;
296 }
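/*
 * Illustrative layout (a sketch of the structures above, not new state):
 * the command manager is one flat array indexed by xid, with a parallel
 * io_bdt pool:
 *
 *	cmgr->cmds[xid]        -> qedf_ioreq (xid == array index)
 *	cmgr->io_bdt_pool[xid] -> io_bdt -> DMA-coherent scsi_sge table
 *
 * so a completion can use the xid from the CQE to find both the request
 * and its buffer descriptor table.
 */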
297 
298 struct qedf_ioreq *qedf_alloc_cmd(struct qedf_rport *fcport, u8 cmd_type)
299 {
300 	struct qedf_ctx *qedf = fcport->qedf;
301 	struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
302 	struct qedf_ioreq *io_req = NULL;
303 	struct io_bdt *bd_tbl;
304 	u16 xid;
305 	uint32_t free_sqes;
306 	int i;
307 	unsigned long flags;
308 
309 	free_sqes = atomic_read(&fcport->free_sqes);
310 
311 	if (!free_sqes) {
312 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
313 		    "Returning NULL, free_sqes=%d.\n",
314 		    free_sqes);
315 		goto out_failed;
316 	}
317 
318 	/* Limit the number of outstanding R/W tasks */
319 	if ((atomic_read(&fcport->num_active_ios) >=
320 	    NUM_RW_TASKS_PER_CONNECTION)) {
321 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
322 		    "Returning NULL, num_active_ios=%d.\n",
323 		    atomic_read(&fcport->num_active_ios));
324 		goto out_failed;
325 	}
326 
327 	/* Reserve some global TIDs for certain tasks */
328 	if (atomic_read(&cmd_mgr->free_list_cnt) <= GBL_RSVD_TASKS) {
329 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
330 		    "Returning NULL, free_list_cnt=%d.\n",
331 		    atomic_read(&cmd_mgr->free_list_cnt));
332 		goto out_failed;
333 	}
334 
335 	spin_lock_irqsave(&cmd_mgr->lock, flags);
336 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
337 		io_req = &cmd_mgr->cmds[cmd_mgr->idx];
338 		cmd_mgr->idx++;
339 		if (cmd_mgr->idx == FCOE_PARAMS_NUM_TASKS)
340 			cmd_mgr->idx = 0;
341 
342 		/* Check to make sure command was previously freed */
343 		if (!io_req->alloc)
344 			break;
345 	}
346 
347 	if (i == FCOE_PARAMS_NUM_TASKS) {
348 		spin_unlock_irqrestore(&cmd_mgr->lock, flags);
349 		goto out_failed;
350 	}
351 
352 	if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
353 		QEDF_ERR(&qedf->dbg_ctx,
354 			 "io_req found to be dirty ox_id = 0x%x.\n",
355 			 io_req->xid);
356 
357 	/* Clear any flags now that we've reallocated the xid */
358 	io_req->flags = 0;
359 	io_req->alloc = 1;
360 	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
361 
362 	atomic_inc(&fcport->num_active_ios);
363 	atomic_dec(&fcport->free_sqes);
364 	xid = io_req->xid;
365 	atomic_dec(&cmd_mgr->free_list_cnt);
366 
367 	io_req->cmd_mgr = cmd_mgr;
368 	io_req->fcport = fcport;
369 
370 	/* Clear any stale sc_cmd back pointer */
371 	io_req->sc_cmd = NULL;
372 	io_req->lun = -1;
373 
374 	/* Hold the io_req against deletion */
375 	kref_init(&io_req->refcount);	/* ID: 001 */
376 	atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
377 
378 	/* Bind io_bdt for this io_req */
379 	/* Have a static link between io_req and io_bdt_pool */
380 	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
381 	if (bd_tbl == NULL) {
382 		QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
383 		kref_put(&io_req->refcount, qedf_release_cmd);
384 		goto out_failed;
385 	}
386 	bd_tbl->io_req = io_req;
387 	io_req->cmd_type = cmd_type;
388 	io_req->tm_flags = 0;
389 
390 	/* Reset sequence offset data */
391 	io_req->rx_buf_off = 0;
392 	io_req->tx_buf_off = 0;
393 	io_req->rx_id = 0xffff; /* No RX_ID */
394 
395 	return io_req;
396 
397 out_failed:
398 	/* Record failure for stats and return NULL to caller */
399 	qedf->alloc_failures++;
400 	return NULL;
401 }
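/*
 * Allocation walks cmgr->cmds round-robin from cmd_mgr->idx under
 * cmd_mgr->lock: check the slot, advance (wrapping at
 * FCOE_PARAMS_NUM_TASKS), and stop at the first entry whose ->alloc is
 * clear.  A full pass with no free slot falls through to out_failed.
 */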
402 
403 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
404 {
405 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
406 	struct qedf_ctx *qedf = io_req->fcport->qedf;
407 	uint64_t sz = sizeof(struct scsi_sge);
408 
409 	/* Free the middle path request/response BDs and buffers */
410 	if (mp_req->mp_req_bd) {
411 		dma_free_coherent(&qedf->pdev->dev, sz,
412 		    mp_req->mp_req_bd, mp_req->mp_req_bd_dma);
413 		mp_req->mp_req_bd = NULL;
414 	}
415 	if (mp_req->mp_resp_bd) {
416 		dma_free_coherent(&qedf->pdev->dev, sz,
417 		    mp_req->mp_resp_bd, mp_req->mp_resp_bd_dma);
418 		mp_req->mp_resp_bd = NULL;
419 	}
420 	if (mp_req->req_buf) {
421 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
422 		    mp_req->req_buf, mp_req->req_buf_dma);
423 		mp_req->req_buf = NULL;
424 	}
425 	if (mp_req->resp_buf) {
426 		dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
427 		    mp_req->resp_buf, mp_req->resp_buf_dma);
428 		mp_req->resp_buf = NULL;
429 	}
430 }
431 
432 void qedf_release_cmd(struct kref *ref)
433 {
434 	struct qedf_ioreq *io_req =
435 	    container_of(ref, struct qedf_ioreq, refcount);
436 	struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
437 	struct qedf_rport *fcport = io_req->fcport;
438 	unsigned long flags;
439 
440 	if (io_req->cmd_type == QEDF_SCSI_CMD) {
441 		QEDF_WARN(&fcport->qedf->dbg_ctx,
442 			  "Cmd released without scsi_done having been called, io_req %p xid=0x%x.\n",
443 			  io_req, io_req->xid);
444 		WARN_ON(io_req->sc_cmd);
445 	}
446 
447 	if (io_req->cmd_type == QEDF_ELS ||
448 	    io_req->cmd_type == QEDF_TASK_MGMT_CMD)
449 		qedf_free_mp_resc(io_req);
450 
451 	atomic_inc(&cmd_mgr->free_list_cnt);
452 	atomic_dec(&fcport->num_active_ios);
453 	atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
454 	if (atomic_read(&fcport->num_active_ios) < 0) {
455 		QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
456 		WARN_ON(1);
457 	}
458 
459 	/* Increment task retry identifier now that the request is released */
460 	io_req->task_retry_identifier++;
461 	io_req->fcport = NULL;
462 
463 	clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
464 	io_req->cpu = 0;
465 	spin_lock_irqsave(&cmd_mgr->lock, flags);
466 	io_req->fcport = NULL;
467 	io_req->alloc = 0;
468 	spin_unlock_irqrestore(&cmd_mgr->lock, flags);
469 }
470 
471 static int qedf_map_sg(struct qedf_ioreq *io_req)
472 {
473 	struct scsi_cmnd *sc = io_req->sc_cmd;
474 	struct Scsi_Host *host = sc->device->host;
475 	struct fc_lport *lport = shost_priv(host);
476 	struct qedf_ctx *qedf = lport_priv(lport);
477 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
478 	struct scatterlist *sg;
479 	int byte_count = 0;
480 	int sg_count = 0;
481 	int bd_count = 0;
482 	u32 sg_len;
483 	u64 addr;
484 	int i = 0;
485 
486 	sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
487 	    scsi_sg_count(sc), sc->sc_data_direction);
488 	sg = scsi_sglist(sc);
489 
490 	io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
491 
492 	if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
493 		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
494 
495 	scsi_for_each_sg(sc, sg, sg_count, i) {
496 		sg_len = (u32)sg_dma_len(sg);
497 		addr = (u64)sg_dma_address(sg);
498 
499 		/*
500 		 * Intermediate s/g element, so check whether it spans a
501 		 * full page.  Only required for writes and only if the
502 		 * number of scatter/gather elements is 8 or more.
503 		 */
504 		if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
505 		    (i != (sg_count - 1)) && sg_len < QEDF_PAGE_SIZE)
506 			io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
507 
508 		bd[bd_count].sge_addr.lo = cpu_to_le32(U64_LO(addr));
509 		bd[bd_count].sge_addr.hi  = cpu_to_le32(U64_HI(addr));
510 		bd[bd_count].sge_len = cpu_to_le32(sg_len);
511 
512 		bd_count++;
513 		byte_count += sg_len;
514 	}
515 
516 	/* If neither FAST nor SLOW was set, default to FAST */
517 	if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
518 		io_req->sge_type = QEDF_IOREQ_FAST_SGE;
519 
520 	if (byte_count != scsi_bufflen(sc))
521 		QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
522 			  "scsi_bufflen = %d, task_id = 0x%x.\n", byte_count,
523 			   scsi_bufflen(sc), io_req->xid);
524 
525 	return bd_count;
526 }
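/*
 * SGE classification summary (restating the logic above): reads, and any
 * request with 8 or fewer elements, are always FAST; a write with more
 * than 8 elements is demoted to SLOW only if some intermediate element is
 * shorter than QEDF_PAGE_SIZE.  For example, a 10-element write whose 5th
 * element is 512 bytes goes SLOW, while one whose only short element is
 * the first or last stays FAST.
 */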
527 
528 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
529 {
530 	struct scsi_cmnd *sc = io_req->sc_cmd;
531 	struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
532 	int bd_count;
533 
534 	if (scsi_sg_count(sc)) {
535 		bd_count = qedf_map_sg(io_req);
536 		if (bd_count == 0)
537 			return -ENOMEM;
538 	} else {
539 		bd_count = 0;
540 		bd[0].sge_addr.lo = bd[0].sge_addr.hi = 0;
541 		bd[0].sge_len = 0;
542 	}
543 	io_req->bd_tbl->bd_valid = bd_count;
544 
545 	return 0;
546 }
547 
548 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
549 				  struct fcp_cmnd *fcp_cmnd)
550 {
551 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
552 
553 	/* fcp_cmnd is 32 bytes */
554 	memset(fcp_cmnd, 0, FCP_CMND_LEN);
555 
556 	/* 8 bytes: SCSI LUN info */
557 	int_to_scsilun(sc_cmd->device->lun,
558 			(struct scsi_lun *)&fcp_cmnd->fc_lun);
559 
560 	/* 4 bytes: flag info */
561 	fcp_cmnd->fc_pri_ta = 0;
562 	fcp_cmnd->fc_tm_flags = io_req->tm_flags;
563 	fcp_cmnd->fc_flags = io_req->io_req_flags;
564 	fcp_cmnd->fc_cmdref = 0;
565 
566 	/* Populate data direction */
567 	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
568 		fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
569 	} else {
570 		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
571 			fcp_cmnd->fc_flags |= FCP_CFL_WRDATA;
572 		else if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE)
573 			fcp_cmnd->fc_flags |= FCP_CFL_RDDATA;
574 	}
575 
576 	fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
577 
578 	/* 16 bytes: CDB information */
579 	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
580 		memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);
581 
582 	/* 4 bytes: FCP data length */
583 	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
584 }
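/*
 * Resulting 32-byte FCP_CMND IU, per struct fcp_cmnd in fc_fcp.h and the
 * offsets noted above:
 *
 *	bytes  0..7   fc_lun                                  (SCSI LUN)
 *	bytes  8..11  fc_cmdref, fc_pri_ta, fc_tm_flags, fc_flags
 *	bytes 12..27  fc_cdb               (CDB, left zeroed for TMFs)
 *	bytes 28..31  fc_dl                (data length, big endian)
 */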
585 
586 static void  qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
587 	struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
588 	struct fcoe_wqe *sqe)
589 {
590 	enum fcoe_task_type task_type;
591 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
592 	struct io_bdt *bd_tbl = io_req->bd_tbl;
593 	u8 fcp_cmnd[32];
594 	u32 tmp_fcp_cmnd[8];
595 	int bd_count = 0;
596 	struct qedf_ctx *qedf = fcport->qedf;
597 	uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
598 	struct regpair sense_data_buffer_phys_addr;
599 	u32 tx_io_size = 0;
600 	u32 rx_io_size = 0;
601 	int i, cnt;
602 
603 	/* Note init_initiator_rw_fcoe_task memsets the task context */
604 	io_req->task = task_ctx;
605 	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
606 	memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
607 	memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
608 
609 	/* Set task type based on DMA direction of command */
610 	if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
611 		task_type = FCOE_TASK_TYPE_READ_INITIATOR;
612 	} else {
613 		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
614 			task_type = FCOE_TASK_TYPE_WRITE_INITIATOR;
615 			tx_io_size = io_req->data_xfer_len;
616 		} else {
617 			task_type = FCOE_TASK_TYPE_READ_INITIATOR;
618 			rx_io_size = io_req->data_xfer_len;
619 		}
620 	}
621 
622 	/* Setup the fields for fcoe_task_params */
623 	io_req->task_params->context = task_ctx;
624 	io_req->task_params->sqe = sqe;
625 	io_req->task_params->task_type = task_type;
626 	io_req->task_params->tx_io_size = tx_io_size;
627 	io_req->task_params->rx_io_size = rx_io_size;
628 	io_req->task_params->conn_cid = fcport->fw_cid;
629 	io_req->task_params->itid = io_req->xid;
630 	io_req->task_params->cq_rss_number = cq_idx;
631 	io_req->task_params->is_tape_device = fcport->dev_type;
632 
633 	/* Fill in information for scatter/gather list */
634 	if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
635 		bd_count = bd_tbl->bd_valid;
636 		io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
637 		io_req->sgl_task_params->sgl_phys_addr.lo =
638 			U64_LO(bd_tbl->bd_tbl_dma);
639 		io_req->sgl_task_params->sgl_phys_addr.hi =
640 			U64_HI(bd_tbl->bd_tbl_dma);
641 		io_req->sgl_task_params->num_sges = bd_count;
642 		io_req->sgl_task_params->total_buffer_size =
643 		    scsi_bufflen(io_req->sc_cmd);
644 		if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
645 			io_req->sgl_task_params->small_mid_sge = 1;
646 		else
647 			io_req->sgl_task_params->small_mid_sge = 0;
648 	}
649 
650 	/* Fill in physical address of sense buffer */
651 	sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
652 	sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
653 
654 	/* fill FCP_CMND IU */
655 	qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
656 
657 	/* Swap fcp_cmnd since FC is big endian */
658 	cnt = sizeof(struct fcp_cmnd) / sizeof(u32);
659 	for (i = 0; i < cnt; i++) {
660 		tmp_fcp_cmnd[i] = cpu_to_be32(tmp_fcp_cmnd[i]);
661 	}
662 	memcpy(fcp_cmnd, tmp_fcp_cmnd, sizeof(struct fcp_cmnd));
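	/*
	 * Worked example of the swap above on a little-endian host: CDB
	 * bytes 12 34 56 78 are read as the u32 0x78563412 and stored back
	 * by cpu_to_be32() as bytes 78 56 34 12, i.e. each 32-bit word of
	 * the FCP_CMND is byte-reversed before the firmware copies it out
	 * in big-endian order.
	 */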
663 
664 	init_initiator_rw_fcoe_task(io_req->task_params,
665 				    io_req->sgl_task_params,
666 				    sense_data_buffer_phys_addr,
667 				    io_req->task_retry_identifier, fcp_cmnd);
668 
669 	/* Increment SGL type counters */
670 	if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
671 		qedf->slow_sge_ios++;
672 	else
673 		qedf->fast_sge_ios++;
674 }
675 
676 void qedf_init_mp_task(struct qedf_ioreq *io_req,
677 	struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
678 {
679 	struct qedf_mp_req *mp_req = &(io_req->mp_req);
680 	struct qedf_rport *fcport = io_req->fcport;
681 	struct qedf_ctx *qedf = io_req->fcport->qedf;
682 	struct fc_frame_header *fc_hdr;
683 	struct fcoe_tx_mid_path_params task_fc_hdr;
684 	struct scsi_sgl_task_params tx_sgl_task_params;
685 	struct scsi_sgl_task_params rx_sgl_task_params;
686 
687 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
688 		  "Initializing MP task for cmd_type=%d\n",
689 		  io_req->cmd_type);
690 
691 	qedf->control_requests++;
692 
693 	memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
694 	memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
695 	memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
696 	memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
697 
698 	/* Setup the task from io_req for easy reference */
699 	io_req->task = task_ctx;
700 
701 	/* Setup the fields for fcoe_task_params */
702 	io_req->task_params->context = task_ctx;
703 	io_req->task_params->sqe = sqe;
704 	io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
705 	io_req->task_params->tx_io_size = io_req->data_xfer_len;
706 	/* rx_io_size tells the f/w how large a response buffer we have */
707 	io_req->task_params->rx_io_size = PAGE_SIZE;
708 	io_req->task_params->conn_cid = fcport->fw_cid;
709 	io_req->task_params->itid = io_req->xid;
710 	/* Return middle path commands on CQ 0 */
711 	io_req->task_params->cq_rss_number = 0;
712 	io_req->task_params->is_tape_device = fcport->dev_type;
713 
714 	fc_hdr = &(mp_req->req_fc_hdr);
715 	/* Set OX_ID and RX_ID based on driver task id */
716 	fc_hdr->fh_ox_id = io_req->xid;
717 	fc_hdr->fh_rx_id = htons(0xffff);
718 
719 	/* Set up FC header information */
720 	task_fc_hdr.parameter = fc_hdr->fh_parm_offset;
721 	task_fc_hdr.r_ctl = fc_hdr->fh_r_ctl;
722 	task_fc_hdr.type = fc_hdr->fh_type;
723 	task_fc_hdr.cs_ctl = fc_hdr->fh_cs_ctl;
724 	task_fc_hdr.df_ctl = fc_hdr->fh_df_ctl;
725 	task_fc_hdr.rx_id = fc_hdr->fh_rx_id;
726 	task_fc_hdr.ox_id = fc_hdr->fh_ox_id;
727 
728 	/* Set up s/g list parameters for request buffer */
729 	tx_sgl_task_params.sgl = mp_req->mp_req_bd;
730 	tx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_req_bd_dma);
731 	tx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_req_bd_dma);
732 	tx_sgl_task_params.num_sges = 1;
733 	/* Transmit size is the actual request payload length */
734 	tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
735 	tx_sgl_task_params.small_mid_sge = 0;
736 
737 	/* Set up s/g list parameters for response buffer */
738 	rx_sgl_task_params.sgl = mp_req->mp_resp_bd;
739 	rx_sgl_task_params.sgl_phys_addr.lo = U64_LO(mp_req->mp_resp_bd_dma);
740 	rx_sgl_task_params.sgl_phys_addr.hi = U64_HI(mp_req->mp_resp_bd_dma);
741 	rx_sgl_task_params.num_sges = 1;
742 	/* Response buffer is a single page, matching rx_io_size above */
743 	rx_sgl_task_params.total_buffer_size = PAGE_SIZE;
744 	rx_sgl_task_params.small_mid_sge = 0;
745 
746 
747 	/*
748 	 * The last argument is 0 because the previous code never asked
749 	 * for the FC header information back.
750 	 */
751 	init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
752 						     &task_fc_hdr,
753 						     &tx_sgl_task_params,
754 						     &rx_sgl_task_params, 0);
755 }
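/*
 * "Middle path" (MP) tasks carry non-SCSI exchanges such as ELS frames:
 * one single-SGE request buffer holds the payload to transmit, and one
 * page-sized single-SGE response buffer receives the reply, both from the
 * qedf_mp_req allocated elsewhere for this io_req.
 */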
756 
757 /* Presumed that fcport->rport_lock is held */
758 u16 qedf_get_sqe_idx(struct qedf_rport *fcport)
759 {
760 	uint16_t total_sqe = (fcport->sq_mem_size) / (sizeof(struct fcoe_wqe));
761 	u16 rval;
762 
763 	rval = fcport->sq_prod_idx;
764 
765 	/* Adjust ring index */
766 	fcport->sq_prod_idx++;
767 	fcport->fw_sq_prod_idx++;
768 	if (fcport->sq_prod_idx == total_sqe)
769 		fcport->sq_prod_idx = 0;
770 
771 	return rval;
772 }
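/*
 * Wraparound sketch: with sq_mem_size sized for, say, 64 fcoe_wqe entries,
 * a call at sq_prod_idx == 63 returns 63 and leaves sq_prod_idx == 0,
 * while fw_sq_prod_idx keeps incrementing monotonically; that absolute
 * 16-bit producer count, not the ring offset, is what gets written to the
 * doorbell below.
 */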
773 
774 void qedf_ring_doorbell(struct qedf_rport *fcport)
775 {
776 	struct fcoe_db_data dbell = { 0 };
777 
778 	dbell.agg_flags = 0;
779 
780 	dbell.params |= DB_DEST_XCM << FCOE_DB_DATA_DEST_SHIFT;
781 	dbell.params |= DB_AGG_CMD_SET << FCOE_DB_DATA_AGG_CMD_SHIFT;
782 	dbell.params |= DQ_XCM_FCOE_SQ_PROD_CMD <<
783 	    FCOE_DB_DATA_AGG_VAL_SEL_SHIFT;
784 
785 	dbell.sq_prod = fcport->fw_sq_prod_idx;
786 	/* wmb makes sure that the BDs data is updated before updating the
787 	 * producer, otherwise FW may read old data from the BDs.
788 	 */
789 	wmb();
790 	barrier();
791 	writel(*(u32 *)&dbell, fcport->p_doorbell);
792 	/*
793 	 * Fence required to flush the write combined buffer, since another
794 	 * CPU may write to the same doorbell address and data may be lost
795 	 * due to relaxed order nature of write combined bar.
796 	 */
797 	wmb();
798 }
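/*
 * The fcoe_db_data above packs the destination, aggregation command and
 * the 16-bit producer into a single 32-bit doorbell word, so one writel()
 * both publishes the new producer and kicks the XCM block; the wmb()
 * before it orders it against the SQE writes, and the wmb() after it
 * flushes the write-combined BAR, as the in-function comments note.
 */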
799 
800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
801 			  int8_t direction)
802 {
803 	struct qedf_ctx *qedf = fcport->qedf;
804 	struct qedf_io_log *io_log;
805 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
806 	unsigned long flags;
807 	uint8_t op;
808 
809 	spin_lock_irqsave(&qedf->io_trace_lock, flags);
810 
811 	io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
812 	io_log->direction = direction;
813 	io_log->task_id = io_req->xid;
814 	io_log->port_id = fcport->rdata->ids.port_id;
815 	io_log->lun = sc_cmd->device->lun;
816 	io_log->op = op = sc_cmd->cmnd[0];
817 	io_log->lba[0] = sc_cmd->cmnd[2];
818 	io_log->lba[1] = sc_cmd->cmnd[3];
819 	io_log->lba[2] = sc_cmd->cmnd[4];
820 	io_log->lba[3] = sc_cmd->cmnd[5];
821 	io_log->bufflen = scsi_bufflen(sc_cmd);
822 	io_log->sg_count = scsi_sg_count(sc_cmd);
823 	io_log->result = sc_cmd->result;
824 	io_log->jiffies = jiffies;
825 	io_log->refcount = kref_read(&io_req->refcount);
826 
827 	if (direction == QEDF_IO_TRACE_REQ) {
828 		/* For requests we only care about the submission CPU */
829 		io_log->req_cpu = io_req->cpu;
830 		io_log->int_cpu = 0;
831 		io_log->rsp_cpu = 0;
832 	} else if (direction == QEDF_IO_TRACE_RSP) {
833 		io_log->req_cpu = io_req->cpu;
834 		io_log->int_cpu = io_req->int_cpu;
835 		io_log->rsp_cpu = smp_processor_id();
836 	}
837 
838 	io_log->sge_type = io_req->sge_type;
839 
840 	qedf->io_trace_idx++;
841 	if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
842 		qedf->io_trace_idx = 0;
843 
844 	spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
845 }
846 
847 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
848 {
849 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
850 	struct Scsi_Host *host = sc_cmd->device->host;
851 	struct fc_lport *lport = shost_priv(host);
852 	struct qedf_ctx *qedf = lport_priv(lport);
853 	struct e4_fcoe_task_context *task_ctx;
854 	u16 xid;
855 	struct fcoe_wqe *sqe;
856 	u16 sqe_idx;
857 
858 	/* Initialize rest of io_req fields */
859 	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
860 	sc_cmd->SCp.ptr = (char *)io_req;
861 	io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
862 
863 	/* Record which cpu this request is associated with */
864 	io_req->cpu = smp_processor_id();
865 
866 	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
867 		io_req->io_req_flags = QEDF_READ;
868 		qedf->input_requests++;
869 	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
870 		io_req->io_req_flags = QEDF_WRITE;
871 		qedf->output_requests++;
872 	} else {
873 		io_req->io_req_flags = 0;
874 		qedf->control_requests++;
875 	}
876 
877 	xid = io_req->xid;
878 
879 	/* Build buffer descriptor list for firmware from sg list */
880 	if (qedf_build_bd_list_from_sg(io_req)) {
881 		QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
882 		/* Release cmd will release io_req, but sc_cmd is assigned */
883 		io_req->sc_cmd = NULL;
884 		kref_put(&io_req->refcount, qedf_release_cmd);
885 		return -EAGAIN;
886 	}
887 
888 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
889 	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
890 		QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
891 		/* Release cmd will release io_req, but sc_cmd is assigned */
892 		io_req->sc_cmd = NULL;
893 		kref_put(&io_req->refcount, qedf_release_cmd);
894 		return -EINVAL;
895 	}
896 
897 	/* Record LUN number for later use if we need it */
898 	io_req->lun = (int)sc_cmd->device->lun;
899 
900 	/* Obtain free SQE */
901 	sqe_idx = qedf_get_sqe_idx(fcport);
902 	sqe = &fcport->sq[sqe_idx];
903 	memset(sqe, 0, sizeof(struct fcoe_wqe));
904 
905 	/* Get the task context */
906 	task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
907 	if (!task_ctx) {
908 		QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
909 			   xid);
910 		/* Release cmd will release io_req, but sc_cmd is assigned */
911 		io_req->sc_cmd = NULL;
912 		kref_put(&io_req->refcount, qedf_release_cmd);
913 		return -EINVAL;
914 	}
915 
916 	qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
917 
918 	/* Ring doorbell */
919 	qedf_ring_doorbell(fcport);
920 
921 	/* Set that command is with the firmware now */
922 	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
923 
924 	if (qedf_io_tracing && io_req->sc_cmd)
925 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
926 
927 	return 0;
928 }
929 
930 int
931 qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd)
932 {
933 	struct fc_lport *lport = shost_priv(host);
934 	struct qedf_ctx *qedf = lport_priv(lport);
935 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
936 	struct fc_rport_libfc_priv *rp = rport->dd_data;
937 	struct qedf_rport *fcport;
938 	struct qedf_ioreq *io_req;
939 	int rc = 0;
940 	int rval;
941 	unsigned long flags = 0;
942 	int num_sgs = 0;
943 
944 	num_sgs = scsi_sg_count(sc_cmd);
945 	if (scsi_sg_count(sc_cmd) > QEDF_MAX_BDS_PER_CMD) {
946 		QEDF_ERR(&qedf->dbg_ctx,
947 			 "Number of SG elements %d exceeds the hardware limitation of %d.\n",
948 			 num_sgs, QEDF_MAX_BDS_PER_CMD);
949 		sc_cmd->result = DID_ERROR << 16;
950 		sc_cmd->scsi_done(sc_cmd);
951 		return 0;
952 	}
953 
954 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
955 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
956 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
957 			  "Returning DNC as unloading or stop io, flags 0x%lx.\n",
958 			  qedf->flags);
959 		sc_cmd->result = DID_NO_CONNECT << 16;
960 		sc_cmd->scsi_done(sc_cmd);
961 		return 0;
962 	}
963 
964 	if (!qedf->pdev->msix_enabled) {
965 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
966 		    "Completing sc_cmd=%p DID_NO_CONNECT as MSI-X is not enabled.\n",
967 		    sc_cmd);
968 		sc_cmd->result = DID_NO_CONNECT << 16;
969 		sc_cmd->scsi_done(sc_cmd);
970 		return 0;
971 	}
972 
973 	rval = fc_remote_port_chkready(rport);
974 	if (rval) {
975 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
976 			  "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n",
977 			  rval, rport->port_id);
978 		sc_cmd->result = rval;
979 		sc_cmd->scsi_done(sc_cmd);
980 		return 0;
981 	}
982 
983 	/* Retry command if we are doing a qed drain operation */
984 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
985 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
986 		rc = SCSI_MLQUEUE_HOST_BUSY;
987 		goto exit_qcmd;
988 	}
989 
990 	if (lport->state != LPORT_ST_READY ||
991 	    atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
992 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
993 		rc = SCSI_MLQUEUE_HOST_BUSY;
994 		goto exit_qcmd;
995 	}
996 
997 	/* rport and fcport are allocated together, so fcport should be non-NULL */
998 	fcport = (struct qedf_rport *)&rp[1];
999 
1000 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags) ||
1001 	    test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1002 		/*
1003 		 * Session is not offloaded yet. Let SCSI-ml retry
1004 		 * the command.
1005 		 */
1006 		rc = SCSI_MLQUEUE_TARGET_BUSY;
1007 		goto exit_qcmd;
1008 	}
1009 
1010 	atomic_inc(&fcport->ios_to_queue);
1011 
1012 	if (fcport->retry_delay_timestamp) {
1013 		/* Take fcport->rport_lock for resetting the delay_timestamp */
1014 		spin_lock_irqsave(&fcport->rport_lock, flags);
1015 		if (time_after(jiffies, fcport->retry_delay_timestamp)) {
1016 			fcport->retry_delay_timestamp = 0;
1017 		} else {
1018 			spin_unlock_irqrestore(&fcport->rport_lock, flags);
1019 			/* If retry_delay timer is active, flow off the ML */
1020 			rc = SCSI_MLQUEUE_TARGET_BUSY;
1021 			atomic_dec(&fcport->ios_to_queue);
1022 			goto exit_qcmd;
1023 		}
1024 		spin_unlock_irqrestore(&fcport->rport_lock, flags);
1025 	}
1026 
1027 	io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1028 	if (!io_req) {
1029 		rc = SCSI_MLQUEUE_HOST_BUSY;
1030 		atomic_dec(&fcport->ios_to_queue);
1031 		goto exit_qcmd;
1032 	}
1033 
1034 	io_req->sc_cmd = sc_cmd;
1035 
1036 	/* Take fcport->rport_lock for posting to fcport send queue */
1037 	spin_lock_irqsave(&fcport->rport_lock, flags);
1038 	if (qedf_post_io_req(fcport, io_req)) {
1039 		QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1040 		/* Return SQE to pool */
1041 		atomic_inc(&fcport->free_sqes);
1042 		rc = SCSI_MLQUEUE_HOST_BUSY;
1043 	}
1044 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1045 	atomic_dec(&fcport->ios_to_queue);
1046 
1047 exit_qcmd:
1048 	return rc;
1049 }
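/*
 * Return-value convention (SCSI midlayer contract): 0 means the command
 * was accepted or was already completed via scsi_done(), which is why the
 * early-error paths above set sc_cmd->result and return 0, while
 * SCSI_MLQUEUE_HOST_BUSY / SCSI_MLQUEUE_TARGET_BUSY tell the midlayer to
 * requeue and retry the command later without failing it.
 */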
1050 
1051 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1052 				 struct fcoe_cqe_rsp_info *fcp_rsp)
1053 {
1054 	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1055 	struct qedf_ctx *qedf = io_req->fcport->qedf;
1056 	u8 rsp_flags = fcp_rsp->rsp_flags.flags;
1057 	int fcp_sns_len = 0;
1058 	int fcp_rsp_len = 0;
1059 	uint8_t *rsp_info, *sense_data;
1060 
1061 	io_req->fcp_status = FC_GOOD;
1062 	io_req->fcp_resid = 0;
1063 	if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER |
1064 	    FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER))
1065 		io_req->fcp_resid = fcp_rsp->fcp_resid;
1066 
1067 	io_req->scsi_comp_flags = rsp_flags;
1068 	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1069 	    fcp_rsp->scsi_status_code;
1070 
1071 	if (rsp_flags &
1072 	    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID)
1073 		fcp_rsp_len = fcp_rsp->fcp_rsp_len;
1074 
1075 	if (rsp_flags &
1076 	    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID)
1077 		fcp_sns_len = fcp_rsp->fcp_sns_len;
1078 
1079 	io_req->fcp_rsp_len = fcp_rsp_len;
1080 	io_req->fcp_sns_len = fcp_sns_len;
1081 	rsp_info = sense_data = io_req->sense_buffer;
1082 
1083 	/* fetch fcp_rsp_code */
1084 	if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
1085 		/* Only for task management function */
1086 		io_req->fcp_rsp_code = rsp_info[3];
1087 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1088 		    "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1089 		/* Adjust sense-data location. */
1090 		sense_data += fcp_rsp_len;
1091 	}
1092 
1093 	if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
1094 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1095 		    "Truncating sense buffer\n");
1096 		fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
1097 	}
1098 
1099 	/* The sense buffer can be NULL for TMF commands */
1100 	if (sc_cmd->sense_buffer) {
1101 		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1102 		if (fcp_sns_len)
1103 			memcpy(sc_cmd->sense_buffer, sense_data,
1104 			    fcp_sns_len);
1105 	}
1106 }
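/*
 * Buffer walk example: the DMA sense buffer holds the FCP_RSP trailer, so
 * with fcp_rsp_len == 8 the rsp_code is byte 3 of the buffer and the
 * sense data starts at offset 8; when RSP_LEN_VALID is not set, the sense
 * data (if any) starts at offset 0.
 */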
1107 
1108 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1109 {
1110 	struct scsi_cmnd *sc = io_req->sc_cmd;
1111 
1112 	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1113 		dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1114 		    scsi_sg_count(sc), sc->sc_data_direction);
1115 		io_req->bd_tbl->bd_valid = 0;
1116 	}
1117 }
1118 
1119 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1120 	struct qedf_ioreq *io_req)
1121 {
1122 	struct scsi_cmnd *sc_cmd;
1123 	struct fcoe_cqe_rsp_info *fcp_rsp;
1124 	struct qedf_rport *fcport;
1125 	int refcount;
1126 	u16 scope, qualifier = 0;
1127 	u8 fw_residual_flag = 0;
1128 	unsigned long flags = 0;
1129 	u16 chk_scope = 0;
1130 
1131 	if (!io_req)
1132 		return;
1133 	if (!cqe)
1134 		return;
1135 
1136 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1137 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1138 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1139 		QEDF_ERR(&qedf->dbg_ctx,
1140 			 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1141 			 io_req->xid);
1142 		return;
1143 	}
1144 
1145 	sc_cmd = io_req->sc_cmd;
1146 	fcp_rsp = &cqe->cqe_info.rsp_info;
1147 
1148 	if (!sc_cmd) {
1149 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1150 		return;
1151 	}
1152 
1153 	if (!sc_cmd->SCp.ptr) {
1154 		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1155 		    "another context.\n");
1156 		return;
1157 	}
1158 
1159 	if (!sc_cmd->device) {
1160 		QEDF_ERR(&qedf->dbg_ctx,
1161 			 "Device for sc_cmd %p is NULL.\n", sc_cmd);
1162 		return;
1163 	}
1164 
1165 	if (!scsi_cmd_to_rq(sc_cmd)->q) {
1166 		QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1167 		   "is not valid, sc_cmd=%p.\n", sc_cmd);
1168 		return;
1169 	}
1170 
1171 	fcport = io_req->fcport;
1172 
1173 	/*
1174 	 * When flush is active, let the cmds be completed from the cleanup
1175 	 * context
1176 	 */
1177 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1178 	    (test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags) &&
1179 	     sc_cmd->device->lun == (u64)fcport->lun_reset_lun)) {
1180 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1181 			  "Dropping good completion xid=0x%x as fcport is flushing.\n",
1182 			  io_req->xid);
1183 		return;
1184 	}
1185 
1186 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
1187 
1188 	qedf_unmap_sg_list(qedf, io_req);
1189 
1190 	/* Check for FCP transport error */
1191 	if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1192 		QEDF_ERR(&(qedf->dbg_ctx),
1193 		    "FCP I/O protocol failure xid=0x%x fcp_rsp_len=%d "
1194 		    "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1195 		    io_req->fcp_rsp_code);
1196 		sc_cmd->result = DID_BUS_BUSY << 16;
1197 		goto out;
1198 	}
1199 
1200 	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
1201 	    FCOE_CQE_RSP_INFO_FW_UNDERRUN);
1202 	if (fw_residual_flag) {
1203 		QEDF_ERR(&qedf->dbg_ctx,
1204 			 "Firmware detected underrun: xid=0x%x fcp_rsp.flags=0x%02x fcp_resid=%d fw_residual=0x%x lba=%02x%02x%02x%02x.\n",
1205 			 io_req->xid, fcp_rsp->rsp_flags.flags,
1206 			 io_req->fcp_resid,
1207 			 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
1208 			 sc_cmd->cmnd[3], sc_cmd->cmnd[4], sc_cmd->cmnd[5]);
1209 
1210 		if (io_req->cdb_status == 0)
1211 			sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1212 		else
1213 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1214 
1215 		/*
1216 		 * Set resid to the whole buffer length so we won't try to reuse
1217 		 * any previously read data.
1218 		 */
1219 		scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1220 		goto out;
1221 	}
1222 
1223 	switch (io_req->fcp_status) {
1224 	case FC_GOOD:
1225 		if (io_req->cdb_status == 0) {
1226 			/* Good I/O completion */
1227 			sc_cmd->result = DID_OK << 16;
1228 		} else {
1229 			refcount = kref_read(&io_req->refcount);
1230 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1231 			    "%d:0:%d:%lld xid=0x%x op=0x%02x "
1232 			    "lba=%02x%02x%02x%02x cdb_status=%d "
1233 			    "fcp_resid=0x%x refcount=%d.\n",
1234 			    qedf->lport->host->host_no, sc_cmd->device->id,
1235 			    sc_cmd->device->lun, io_req->xid,
1236 			    sc_cmd->cmnd[0], sc_cmd->cmnd[2], sc_cmd->cmnd[3],
1237 			    sc_cmd->cmnd[4], sc_cmd->cmnd[5],
1238 			    io_req->cdb_status, io_req->fcp_resid,
1239 			    refcount);
1240 			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1241 
1242 			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1243 			    io_req->cdb_status == SAM_STAT_BUSY) {
1244 				/*
1245 				 * Check whether we need to set retry_delay at
1246 				 * all based on retry_delay module parameter
1247 				 * and the status qualifier.
1248 				 */
1249 
1250 				/* Upper 2 bits */
1251 				scope = (fcp_rsp->retry_delay_timer & 0xC000) >> 14;
1252 				/* Lower 14 bits */
1253 				qualifier = fcp_rsp->retry_delay_timer & 0x3FFF;
1254 
1255 				if (qedf_retry_delay)
1256 					chk_scope = 1;
1257 				/* Record stats */
1258 				if (io_req->cdb_status ==
1259 				    SAM_STAT_TASK_SET_FULL)
1260 					qedf->task_set_fulls++;
1261 				else
1262 					qedf->busy++;
1263 			}
1264 		}
1265 		if (io_req->fcp_resid)
1266 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
1267 
1268 		if (chk_scope == 1) {
1269 			if ((scope == 1 || scope == 2) &&
1270 			    (qualifier > 0 && qualifier <= 0x3FEF)) {
1271 				/* Check we don't go over the max */
1272 				if (qualifier > QEDF_RETRY_DELAY_MAX) {
1273 					qualifier = QEDF_RETRY_DELAY_MAX;
1274 					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1275 						  "qualifier = %d\n",
1276 						  (fcp_rsp->retry_delay_timer &
1277 						  0x3FFF));
1278 				}
1279 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1280 					  "Scope = %d and qualifier = %d.\n",
1281 					  scope, qualifier);
1282 				/*  Take fcport->rport_lock to
1283 				 *  update the retry_delay_timestamp
1284 				 */
1285 				spin_lock_irqsave(&fcport->rport_lock, flags);
1286 				fcport->retry_delay_timestamp =
1287 					jiffies + (qualifier * HZ / 10);
1288 				spin_unlock_irqrestore(&fcport->rport_lock,
1289 						       flags);
1290 
1291 			} else {
1292 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1293 					  "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1294 					  scope, qualifier);
1295 			}
1296 		}
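		/*
		 * Worked example: retry_delay_timer == 0x4005 carries scope
		 * bits 01 (scope 1) and qualifier 5, so the timestamp above
		 * becomes jiffies + 5 * HZ / 10, i.e. new I/O to this port
		 * is flowed off for roughly 500 ms.
		 */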
1297 		break;
1298 	default:
1299 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1300 			   io_req->fcp_status);
1301 		break;
1302 	}
1303 
1304 out:
1305 	if (qedf_io_tracing)
1306 		qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1307 
1308 	/*
1309 	 * We wait till the end of the function to clear the
1310 	 * outstanding bit in case we need to send an abort
1311 	 */
1312 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1313 
1314 	io_req->sc_cmd = NULL;
1315 	sc_cmd->SCp.ptr = NULL;
1316 	sc_cmd->scsi_done(sc_cmd);
1317 	kref_put(&io_req->refcount, qedf_release_cmd);
1318 }
1319 
1320 /* Return a SCSI command in some other context besides a normal completion */
1321 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1322 	int result)
1323 {
1324 	struct scsi_cmnd *sc_cmd;
1325 	int refcount;
1326 
1327 	if (!io_req) {
1328 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1329 		return;
1330 	}
1331 
1332 	if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1333 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1334 			  "io_req:%p scsi_done handling already done\n",
1335 			  io_req);
1336 		return;
1337 	}
1338 
1339 	/*
1340 	 * We will be done with this command after this call so clear the
1341 	 * outstanding bit.
1342 	 */
1343 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1344 
1345 	sc_cmd = io_req->sc_cmd;
1346 
1347 	if (!sc_cmd) {
1348 		QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1349 		return;
1350 	}
1351 
1352 	if (!virt_addr_valid(sc_cmd)) {
1353 		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1354 		goto bad_scsi_ptr;
1355 	}
1356 
1357 	if (!sc_cmd->SCp.ptr) {
1358 		QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1359 		    "another context.\n");
1360 		return;
1361 	}
1362 
1363 	if (!sc_cmd->device) {
1364 		QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1365 			 sc_cmd);
1366 		goto bad_scsi_ptr;
1367 	}
1368 
1369 	if (!virt_addr_valid(sc_cmd->device)) {
1370 		QEDF_ERR(&qedf->dbg_ctx,
1371 			 "Device pointer for sc_cmd %p is bad.\n", sc_cmd);
1372 		goto bad_scsi_ptr;
1373 	}
1374 
1375 	if (!sc_cmd->sense_buffer) {
1376 		QEDF_ERR(&qedf->dbg_ctx,
1377 			 "sc_cmd->sense_buffer for sc_cmd %p is NULL.\n",
1378 			 sc_cmd);
1379 		goto bad_scsi_ptr;
1380 	}
1381 
1382 	if (!virt_addr_valid(sc_cmd->sense_buffer)) {
1383 		QEDF_ERR(&qedf->dbg_ctx,
1384 			 "sc_cmd->sense_buffer for sc_cmd %p is bad.\n",
1385 			 sc_cmd);
1386 		goto bad_scsi_ptr;
1387 	}
1388 
1389 	if (!sc_cmd->scsi_done) {
1390 		QEDF_ERR(&qedf->dbg_ctx,
1391 			 "sc_cmd->scsi_done for sc_cmd %p is NULL.\n",
1392 			 sc_cmd);
1393 		goto bad_scsi_ptr;
1394 	}
1395 
1396 	qedf_unmap_sg_list(qedf, io_req);
1397 
1398 	sc_cmd->result = result << 16;
1399 	refcount = kref_read(&io_req->refcount);
1400 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1401 	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
1402 	    "allowed=%d retries=%d refcount=%d.\n",
1403 	    qedf->lport->host->host_no, sc_cmd->device->id,
1404 	    sc_cmd->device->lun, sc_cmd, sc_cmd->result, sc_cmd->cmnd[0],
1405 	    sc_cmd->cmnd[2], sc_cmd->cmnd[3], sc_cmd->cmnd[4],
1406 	    sc_cmd->cmnd[5], sc_cmd->allowed, sc_cmd->retries,
1407 	    refcount);
1408 
1409 	/*
1410 	 * Set resid to the whole buffer length so we won't try to reuse any
1411 	 * previously read data.
1412 	 */
1413 	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
1414 
1415 	if (qedf_io_tracing)
1416 		qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1417 
1418 	io_req->sc_cmd = NULL;
1419 	sc_cmd->SCp.ptr = NULL;
1420 	sc_cmd->scsi_done(sc_cmd);
1421 	kref_put(&io_req->refcount, qedf_release_cmd);
1422 	return;
1423 
1424 bad_scsi_ptr:
1425 	/*
1426 	 * Clear the io_req->sc_cmd backpointer so we don't try to process
1427 	 * this again
1428 	 */
1429 	io_req->sc_cmd = NULL;
1430 	kref_put(&io_req->refcount, qedf_release_cmd);  /* ID: 001 */
1431 }
1432 
1433 /*
1434  * Handle warning type CQE completions. This is mainly used for REC timer
1435  * popping.
1436  */
1437 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1438 	struct qedf_ioreq *io_req)
1439 {
1440 	int rval, i;
1441 	struct qedf_rport *fcport = io_req->fcport;
1442 	u64 err_warn_bit_map;
1443 	u8 err_warn = 0xff;
1444 
1445 	if (!cqe) {
1446 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1447 			  "cqe is NULL for io_req %p xid=0x%x\n",
1448 			  io_req, io_req->xid);
1449 		return;
1450 	}
1451 
1452 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1453 		  "xid=0x%x\n", io_req->xid);
1454 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1455 		  "err_warn_bitmap=%08x:%08x\n",
1456 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1457 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1458 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1459 		  "rx_buff_off=%08x, rx_id=%04x\n",
1460 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1461 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1462 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1463 
1464 	/* Normalize the error bitmap to the index of its first set bit */
1465 	err_warn_bit_map = (u64)
1466 	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
1467 	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
1468 	for (i = 0; i < 64; i++) {
1469 		if (err_warn_bit_map & (u64)((u64)1 << i)) {
1470 			err_warn = i;
1471 			break;
1472 		}
1473 	}
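	/*
	 * Example: if only bit 9 of the 64-bit map is set, the loop leaves
	 * err_warn == 9, which is then matched against firmware warning
	 * codes such as FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION below;
	 * err_warn stays 0xff when no bit is set.
	 */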
1474 
1475 	/* Check if REC TOV expired if this is a tape device */
1476 	if (fcport->dev_type == QEDF_RPORT_TYPE_TAPE) {
1477 		if (err_warn ==
1478 		    FCOE_WARNING_CODE_REC_TOV_TIMER_EXPIRATION) {
1479 			QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1480 			if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1481 				io_req->rx_buf_off =
1482 				    cqe->cqe_info.err_info.rx_buf_off;
1483 				io_req->tx_buf_off =
1484 				    cqe->cqe_info.err_info.tx_buf_off;
1485 				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1486 				rval = qedf_send_rec(io_req);
1487 				/*
1488 				 * We only want to abort the io_req if we
1489 				 * can't queue the REC command as we want to
1490 				 * keep the exchange open for recovery.
1491 				 */
1492 				if (rval)
1493 					goto send_abort;
1494 			}
1495 			return;
1496 		}
1497 	}
1498 
1499 send_abort:
1500 	init_completion(&io_req->abts_done);
1501 	rval = qedf_initiate_abts(io_req, true);
1502 	if (rval)
1503 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1504 }
1505 
1506 /* Cleanup a command when we receive an error detection completion */
1507 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1508 	struct qedf_ioreq *io_req)
1509 {
1510 	int rval;
1511 
1512 	if (io_req == NULL) {
1513 		QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1514 		return;
1515 	}
1516 
1517 	if (io_req->fcport == NULL) {
1518 		QEDF_INFO(NULL, QEDF_LOG_IO, "fcport is NULL.\n");
1519 		return;
1520 	}
1521 
1522 	if (!cqe) {
1523 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1524 			"cqe is NULL for io_req %p\n", io_req);
1525 		return;
1526 	}
1527 
1528 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1529 		  "xid=0x%x\n", io_req->xid);
1530 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1531 		  "err_warn_bitmap=%08x:%08x\n",
1532 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
1533 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
1534 	QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1535 		  "rx_buff_off=%08x, rx_id=%04x\n",
1536 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
1537 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
1538 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
1539 
1540 	/* When flush is active, let the cmds be flushed out from the cleanup context */
1541 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1542 		(test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1543 		 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1544 		QEDF_ERR(&qedf->dbg_ctx,
1545 			"Dropping EQE for xid=0x%x as fcport is flushing.\n",
1546 			io_req->xid);
1547 		return;
1548 	}
1549 
1550 	if (qedf->stop_io_on_error) {
1551 		qedf_stop_all_io(qedf);
1552 		return;
1553 	}
1554 
1555 	init_completion(&io_req->abts_done);
1556 	rval = qedf_initiate_abts(io_req, true);
1557 	if (rval)
1558 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1559 }
1560 
1561 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1562 	struct qedf_ioreq *els_req)
1563 {
1564 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1565 	    "Flushing ELS request xid=0x%x refcount=%d.\n", els_req->xid,
1566 	    kref_read(&els_req->refcount));
1567 
1568 	/*
1569 	 * Need to distinguish this from a timeout when calling the
1570 	 * els_req->cb_func.
1571 	 */
1572 	els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
1573 
1574 	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
1575 
1576 	/* Cancel the timer */
1577 	cancel_delayed_work_sync(&els_req->timeout_work);
1578 
1579 	/* Call callback function to complete command */
1580 	if (els_req->cb_func && els_req->cb_arg) {
1581 		els_req->cb_func(els_req->cb_arg);
1582 		els_req->cb_arg = NULL;
1583 	}
1584 
1585 	/* Release kref for original initiate_els */
1586 	kref_put(&els_req->refcount, qedf_release_cmd);
1587 }
1588 
1589 /* A value of -1 for lun is a wild card that means flush all
1590  * active SCSI I/Os for the target.
1591  */
1592 void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
1593 {
1594 	struct qedf_ioreq *io_req;
1595 	struct qedf_ctx *qedf;
1596 	struct qedf_cmd_mgr *cmd_mgr;
1597 	int i, rc;
1598 	unsigned long flags;
1599 	int flush_cnt = 0;
1600 	int wait_cnt = 100;
1601 	int refcount = 0;
1602 
1603 	if (!fcport) {
1604 		QEDF_ERR(NULL, "fcport is NULL\n");
1605 		return;
1606 	}
1607 
1608 	/* Check that fcport is still offloaded */
1609 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1610 		QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
1611 		return;
1612 	}
1613 
1614 	qedf = fcport->qedf;
1615 
1616 	if (!qedf) {
1617 		QEDF_ERR(NULL, "qedf is NULL.\n");
1618 		return;
1619 	}
1620 
1621 	/* Only wait for all commands to be queued in the Upload context */
1622 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1623 	    (lun == -1)) {
1624 		while (atomic_read(&fcport->ios_to_queue)) {
1625 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1626 				  "Waiting for %d I/Os to be queued\n",
1627 				  atomic_read(&fcport->ios_to_queue));
1628 			if (wait_cnt == 0) {
1629 				QEDF_ERR(NULL,
1630 					 "%d I/O requests could not be queued\n",
1631 					 atomic_read(&fcport->ios_to_queue));
1632 			}
1633 			msleep(20);
1634 			wait_cnt--;
1635 		}
1636 	}
1637 
1638 	cmd_mgr = qedf->cmd_mgr;
1639 
1640 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1641 		  "Flush active i/o's num=0x%x fcport=0x%p port_id=0x%06x scsi_id=%d.\n",
1642 		  atomic_read(&fcport->num_active_ios), fcport,
1643 		  fcport->rdata->ids.port_id, fcport->rport->scsi_target_id);
1644 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1645 
1646 	mutex_lock(&qedf->flush_mutex);
1647 	if (lun == -1) {
1648 		set_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1649 	} else {
1650 		set_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1651 		fcport->lun_reset_lun = lun;
1652 	}
1653 
1654 	for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1655 		io_req = &cmd_mgr->cmds[i];
1656 
1657 		if (!io_req)
1658 			continue;
1659 		if (!io_req->fcport)
1660 			continue;
1661 
1662 		spin_lock_irqsave(&cmd_mgr->lock, flags);
1663 
1664 		if (io_req->alloc) {
1665 			if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1666 				if (io_req->cmd_type == QEDF_SCSI_CMD)
1667 					QEDF_ERR(&qedf->dbg_ctx,
1668 						 "Allocated but not queued, xid=0x%x\n",
1669 						 io_req->xid);
1670 			}
1671 			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1672 		} else {
1673 			spin_unlock_irqrestore(&cmd_mgr->lock, flags);
1674 			continue;
1675 		}
1676 
1677 		if (io_req->fcport != fcport)
1678 			continue;
1679 
1680 		/* In case of ABTS, CMD_OUTSTANDING is cleared on ABTS response,
1681 		 * but RRQ is still pending.
1682 		 * Workaround: Within qedf_send_rrq, we check if the fcport is
1683 		 * NULL, and we drop the ref on the io_req to clean it up.
1684 		 */
1685 		if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1686 			refcount = kref_read(&io_req->refcount);
1687 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1688 				  "Not outstanding, xid=0x%x, cmd_type=%d refcount=%d.\n",
1689 				  io_req->xid, io_req->cmd_type, refcount);
1690 			/* If RRQ work has been queued, try to cancel it and
1691 			 * free the io_req
1692 			 */
1693 			if (atomic_read(&io_req->state) ==
1694 			    QEDFC_CMD_ST_RRQ_WAIT) {
1695 				if (cancel_delayed_work_sync
1696 				    (&io_req->rrq_work)) {
1697 					QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1698 						  "Putting reference for pending RRQ work xid=0x%x.\n",
1699 						  io_req->xid);
1700 					/* ID: 003 */
1701 					kref_put(&io_req->refcount,
1702 						 qedf_release_cmd);
1703 				}
1704 			}
1705 			continue;
1706 		}
1707 
1708 		/* Only consider flushing ELS during target reset */
1709 		if (io_req->cmd_type == QEDF_ELS &&
1710 		    lun == -1) {
1711 			rc = kref_get_unless_zero(&io_req->refcount);
1712 			if (!rc) {
1713 				QEDF_ERR(&(qedf->dbg_ctx),
1714 				    "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1715 				    io_req, io_req->xid);
1716 				continue;
1717 			}
1718 			qedf_initiate_cleanup(io_req, false);
1719 			flush_cnt++;
1720 			qedf_flush_els_req(qedf, io_req);
1721 
1722 			/*
1723 			 * Release the kref and go back to the top of the
1724 			 * loop.
1725 			 */
1726 			goto free_cmd;
1727 		}
1728 
1729 		if (io_req->cmd_type == QEDF_ABTS) {
1730 			/* ID: 004 */
1731 			rc = kref_get_unless_zero(&io_req->refcount);
1732 			if (!rc) {
1733 				QEDF_ERR(&(qedf->dbg_ctx),
1734 				    "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1735 				    io_req, io_req->xid);
1736 				continue;
1737 			}
1738 			if (lun != -1 && io_req->lun != lun)
1739 				goto free_cmd;
1740 
1741 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1742 			    "Flushing abort xid=0x%x.\n", io_req->xid);
1743 
1744 			if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1745 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1746 					  "Putting ref for cancelled RRQ work xid=0x%x.\n",
1747 					  io_req->xid);
1748 				kref_put(&io_req->refcount, qedf_release_cmd);
1749 			}
1750 
1751 			if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1752 				QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1753 					  "Putting ref for cancelled tmo work xid=0x%x.\n",
1754 					  io_req->xid);
1755 				qedf_initiate_cleanup(io_req, true);
1756 				/* Notify eh_abort handler that ABTS is
1757 				 * complete
1758 				 */
1759 				complete(&io_req->abts_done);
1760 				clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1761 				/* ID: 002 */
1762 				kref_put(&io_req->refcount, qedf_release_cmd);
1763 			}
1764 			flush_cnt++;
1765 			goto free_cmd;
1766 		}
1767 
1768 		if (!io_req->sc_cmd)
1769 			continue;
1770 		if (!io_req->sc_cmd->device) {
1771 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1772 				  "Device backpointer NULL for sc_cmd=%p.\n",
1773 				  io_req->sc_cmd);
1774 			/* Put reference for non-existent scsi_cmnd */
1775 			io_req->sc_cmd = NULL;
1776 			qedf_initiate_cleanup(io_req, false);
1777 			kref_put(&io_req->refcount, qedf_release_cmd);
1778 			continue;
1779 		}
1780 		if (lun > -1) {
1781 			if (io_req->lun != lun)
1782 				continue;
1783 		}
1784 
1785 		/*
1786 		 * Use kref_get_unless_zero in the unlikely case the command
1787 		 * we're about to flush was completed in the normal SCSI path
1788 		 */
1789 		rc = kref_get_unless_zero(&io_req->refcount);
1790 		if (!rc) {
1791 			QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1792 			    "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1793 			continue;
1794 		}
1795 
1796 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1797 		    "Cleanup xid=0x%x.\n", io_req->xid);
1798 		flush_cnt++;
1799 
1800 		/* Cleanup task and return I/O mid-layer */
1801 		qedf_initiate_cleanup(io_req, true);
1802 
1803 free_cmd:
1804 		kref_put(&io_req->refcount, qedf_release_cmd);	/* ID: 004 */
1805 	}
1806 
1807 	wait_cnt = 60;
1808 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1809 		  "Flushed 0x%x I/Os, active=0x%x.\n",
1810 		  flush_cnt, atomic_read(&fcport->num_active_ios));
1811 	/* Only wait for all commands to complete in the Upload context */
1812 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags) &&
1813 	    (lun == -1)) {
1814 		while (atomic_read(&fcport->num_active_ios)) {
1815 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1816 				  "Flushed 0x%x I/Os, active=0x%x cnt=%d.\n",
1817 				  flush_cnt,
1818 				  atomic_read(&fcport->num_active_ios),
1819 				  wait_cnt);
1820 			if (wait_cnt == 0) {
1821 				QEDF_ERR(&qedf->dbg_ctx,
1822 					 "Flushed %d I/Os, active=%d.\n",
1823 					 flush_cnt,
1824 					 atomic_read(&fcport->num_active_ios));
1825 				for (i = 0; i < FCOE_PARAMS_NUM_TASKS; i++) {
1826 					io_req = &cmd_mgr->cmds[i];
1827 					if (io_req->fcport &&
1828 					    io_req->fcport == fcport) {
1829 						refcount =
1830 						kref_read(&io_req->refcount);
1831 						set_bit(QEDF_CMD_DIRTY,
1832 							&io_req->flags);
1833 						QEDF_ERR(&qedf->dbg_ctx,
1834 							 "Outstanding io_req=%p xid=0x%x flags=0x%lx sc_cmd=%p refcount=%d cmd_type=%d.\n",
1835 							 io_req, io_req->xid,
1836 							 io_req->flags,
1837 							 io_req->sc_cmd,
1838 							 refcount,
1839 							 io_req->cmd_type);
1840 					}
1841 				}
1842 				WARN_ON(1);
1843 				break;
1844 			}
1845 			msleep(500);
1846 			wait_cnt--;
1847 		}
1848 	}
1849 
1850 	clear_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags);
1851 	clear_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags);
1852 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1853 	mutex_unlock(&qedf->flush_mutex);
1854 }
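/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * The flush loop above leans on kref_get_unless_zero() so a command
 * that completes on the normal SCSI path (dropping its last
 * reference) is skipped rather than flushed twice.  A minimal
 * standalone form of that pattern, using a hypothetical demo_cmd
 * object (assumes <linux/kref.h> and <linux/slab.h>):
 */
#if 0
struct demo_cmd {
	struct kref refcount;
};

static void demo_cmd_release(struct kref *kref)
{
	struct demo_cmd *cmd = container_of(kref, struct demo_cmd,
					    refcount);

	kfree(cmd);		/* assumes cmd was kmalloc'd */
}

static void demo_flush_one(struct demo_cmd *cmd)
{
	/* Bail out if the last reference is already gone. */
	if (!kref_get_unless_zero(&cmd->refcount))
		return;

	/* ... per-command flush work goes here ... */

	kref_put(&cmd->refcount, demo_cmd_release);
}
#endif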
1855 
1856 /*
1857  * Initiate an ABTS middle path command. Note that we don't have to initialize
1858  * the task context for an ABTS task.
1859  */
1860 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1861 {
1862 	struct fc_lport *lport;
1863 	struct qedf_rport *fcport = io_req->fcport;
1864 	struct fc_rport_priv *rdata;
1865 	struct qedf_ctx *qedf;
1866 	u16 xid;
1867 	int rc = 0;
1868 	unsigned long flags;
1869 	struct fcoe_wqe *sqe;
1870 	u16 sqe_idx;
1871 	int refcount = 0;
1872 
1873 	/* Sanity check qedf_rport before dereferencing any pointers */
1874 	if (!fcport || !test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
1875 		QEDF_ERR(NULL, "tgt not offloaded\n");
1876 		rc = 1;
1877 		goto out;
1878 	}
1879 
1880 	qedf = fcport->qedf;
1881 	rdata = fcport->rdata;
1882 
1883 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
1884 		QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1885 		rc = 1;
1886 		goto out;
1887 	}
1888 
1889 	lport = qedf->lport;
1890 
1891 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
1892 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1893 		rc = 1;
1894 		goto drop_rdata_kref;
1895 	}
1896 
1897 	if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1898 		QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1899 		rc = 1;
1900 		goto drop_rdata_kref;
1901 	}
1902 
1903 	/* Ensure room on SQ */
1904 	if (!atomic_read(&fcport->free_sqes)) {
1905 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1906 		rc = 1;
1907 		goto drop_rdata_kref;
1908 	}
1909 
1910 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
1911 		QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1912 		rc = 1;
1913 		goto drop_rdata_kref;
1914 	}
1915 
1916 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1917 	    test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1918 	    test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1919 		QEDF_ERR(&qedf->dbg_ctx,
1920 			 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1921 			 io_req->xid, io_req->sc_cmd);
1922 		rc = 1;
1923 		goto drop_rdata_kref;
1924 	}
1925 
1926 	kref_get(&io_req->refcount);
1927 
1928 	xid = io_req->xid;
1929 	qedf->control_requests++;
1930 	qedf->packet_aborts++;
1931 
1932 	/* Set the command type to abort */
1933 	io_req->cmd_type = QEDF_ABTS;
1934 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1935 
1936 	set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1937 	refcount = kref_read(&io_req->refcount);
1938 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1939 		  "ABTS io_req xid = 0x%x refcount=%d\n",
1940 		  xid, refcount);
1941 
1942 	qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1943 
1944 	spin_lock_irqsave(&fcport->rport_lock, flags);
1945 
1946 	sqe_idx = qedf_get_sqe_idx(fcport);
1947 	sqe = &fcport->sq[sqe_idx];
1948 	memset(sqe, 0, sizeof(struct fcoe_wqe));
1949 	io_req->task_params->sqe = sqe;
1950 
1951 	init_initiator_abort_fcoe_task(io_req->task_params);
1952 	qedf_ring_doorbell(fcport);
1953 
1954 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
1955 
1956 drop_rdata_kref:
1957 	kref_put(&rdata->kref, fc_rport_destroy);
1958 out:
1959 	return rc;
1960 }
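/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * qedf_initiate_abts() above follows the driver's usual submission
 * order: take a reference, arm the timeout, then build and ring the
 * SQE while holding rport_lock.  A condensed sketch of just the
 * locked posting step, reusing the driver's own helpers:
 */
#if 0
static void demo_post_abts_wqe(struct qedf_rport *fcport,
			       struct qedf_ioreq *io_req)
{
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	spin_lock_irqsave(&fcport->rport_lock, flags);

	/* Claim the next SQ slot and zero it before firmware sees it. */
	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(*sqe));
	io_req->task_params->sqe = sqe;

	init_initiator_abort_fcoe_task(io_req->task_params);
	qedf_ring_doorbell(fcport);

	spin_unlock_irqrestore(&fcport->rport_lock, flags);
}
#endif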
1961 
1962 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1963 	struct qedf_ioreq *io_req)
1964 {
1965 	uint32_t r_ctl;
1966 	int rc;
1967 	struct qedf_rport *fcport = io_req->fcport;
1968 
1969 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1970 		   "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1971 
1972 	r_ctl = cqe->cqe_info.abts_info.r_ctl;
1973 
1974 	/* This was added at a point when we were scheduling abts_compl &
1975 	 * cleanup_compl on different CPUs and there was a possibility of
1976 	 * the io_req being freed from the other context before we got here.
1977 	 */
1978 	if (!fcport) {
1979 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1980 			  "Dropping ABTS completion xid=0x%x as fcport is NULL\n",
1981 			  io_req->xid);
1982 		return;
1983 	}
1984 
1985 	/*
1986 	 * When flush is active, let the cmds be completed from the cleanup
1987 	 * context
1988 	 */
1989 	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
1990 	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
1991 		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1992 			  "Dropping ABTS completion xid=0x%x as fcport is flushing",
1993 			  "Dropping ABTS completion xid=0x%x as fcport is flushing\n",
1994 		return;
1995 	}
1996 
1997 	if (!cancel_delayed_work(&io_req->timeout_work)) {
1998 		QEDF_ERR(&qedf->dbg_ctx,
1999 			 "Wasn't able to cancel abts timeout work.\n");
2000 	}
2001 
2002 	switch (r_ctl) {
2003 	case FC_RCTL_BA_ACC:
2004 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2005 		    "ABTS response - ACC, send RRQ after R_A_TOV\n");
2006 		io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2007 		rc = kref_get_unless_zero(&io_req->refcount);	/* ID: 003 */
2008 		if (!rc) {
2009 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2010 				  "kref is already zero so ABTS was already completed or flushed xid=0x%x.\n",
2011 				  io_req->xid);
2012 			return;
2013 		}
2014 		/*
2015 		 * Don't release this cmd yet. It will be released
2016 		 * after we get the RRQ response.
2017 		 */
2018 		queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2019 		    msecs_to_jiffies(qedf->lport->r_a_tov));
2020 		atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2021 		break;
2022 	/* For error cases let the cleanup return the command */
2023 	case FC_RCTL_BA_RJT:
2024 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2025 		   "ABTS response - RJT\n");
2026 		io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2027 		break;
2028 	default:
2029 		QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
2030 		break;
2031 	}
2032 
2033 	clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2034 
2035 	if (io_req->sc_cmd) {
2036 		if (io_req->return_scsi_cmd_on_abts)
2037 			qedf_scsi_done(qedf, io_req, DID_ERROR);
2038 		else
2039 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2040 				  "Not calling scsi_done for xid=0x%x.\n",
2041 				  io_req->xid);
2042 	}
2043 
2044 	/* Notify eh_abort handler that ABTS is complete */
2045 	complete(&io_req->abts_done);
2046 
2047 	kref_put(&io_req->refcount, qedf_release_cmd);
2048 }
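/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * On BA_ACC the handler above keeps its extra reference (ID: 003)
 * and schedules the RRQ one R_A_TOV later.  The scheduling step in
 * isolation (rrq_work is assumed to have been set up with
 * INIT_DELAYED_WORK() at allocation time, as the driver does):
 */
#if 0
static void demo_schedule_rrq(struct qedf_ctx *qedf,
			      struct qedf_ioreq *io_req)
{
	/* libfc keeps r_a_tov in milliseconds. */
	queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
			   msecs_to_jiffies(qedf->lport->r_a_tov));
	atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
}
#endif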
2049 
2050 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2051 {
2052 	struct qedf_mp_req *mp_req;
2053 	struct scsi_sge *mp_req_bd;
2054 	struct scsi_sge *mp_resp_bd;
2055 	struct qedf_ctx *qedf = io_req->fcport->qedf;
2056 	dma_addr_t addr;
2057 	uint64_t sz;
2058 
2059 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2060 
2061 	mp_req = &io_req->mp_req;
2062 	memset(mp_req, 0, sizeof(struct qedf_mp_req));
2063 
2064 	if (io_req->cmd_type != QEDF_ELS) {
2065 		mp_req->req_len = sizeof(struct fcp_cmnd);
2066 		io_req->data_xfer_len = mp_req->req_len;
2067 	} else
2068 		mp_req->req_len = io_req->data_xfer_len;
2069 
2070 	mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2071 	    &mp_req->req_buf_dma, GFP_KERNEL);
2072 	if (!mp_req->req_buf) {
2073 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2074 		qedf_free_mp_resc(io_req);
2075 		return -ENOMEM;
2076 	}
2077 
2078 	mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2079 	    QEDF_PAGE_SIZE, &mp_req->resp_buf_dma, GFP_KERNEL);
2080 	if (!mp_req->resp_buf) {
2081 		QEDF_ERR(&(qedf->dbg_ctx),
2082 			 "Unable to alloc MP resp buffer\n");
2083 		qedf_free_mp_resc(io_req);
2084 		return -ENOMEM;
2085 	}
2086 
2087 	/* Allocate and map mp_req_bd and mp_resp_bd */
2088 	sz = sizeof(struct scsi_sge);
2089 	mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2090 	    &mp_req->mp_req_bd_dma, GFP_KERNEL);
2091 	if (!mp_req->mp_req_bd) {
2092 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2093 		qedf_free_mp_resc(io_req);
2094 		return -ENOMEM;
2095 	}
2096 
2097 	mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2098 	    &mp_req->mp_resp_bd_dma, GFP_KERNEL);
2099 	if (!mp_req->mp_resp_bd) {
2100 		QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2101 		qedf_free_mp_resc(io_req);
2102 		return -ENOMEM;
2103 	}
2104 
2105 	/* Fill bd table */
2106 	addr = mp_req->req_buf_dma;
2107 	mp_req_bd = mp_req->mp_req_bd;
2108 	mp_req_bd->sge_addr.lo = U64_LO(addr);
2109 	mp_req_bd->sge_addr.hi = U64_HI(addr);
2110 	mp_req_bd->sge_len = QEDF_PAGE_SIZE;
2111 
2112 	/*
2113 	 * The MP buffer holds either a task mgmt command or an ELS,
2114 	 * so it is assumed to consume a single bd entry in the
2115 	 * bd table.
2116 	 */
2117 	mp_resp_bd = mp_req->mp_resp_bd;
2118 	addr = mp_req->resp_buf_dma;
2119 	mp_resp_bd->sge_addr.lo = U64_LO(addr);
2120 	mp_resp_bd->sge_addr.hi = U64_HI(addr);
2121 	mp_resp_bd->sge_len = QEDF_PAGE_SIZE;
2122 
2123 	return 0;
2124 }
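/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * The bd-table fill above splits a dma_addr_t into 32-bit halves for
 * the firmware.  The same split written with the generic kernel
 * helpers instead of the driver's U64_LO()/U64_HI() macros:
 */
#if 0
static void demo_fill_sge(struct scsi_sge *sge, dma_addr_t addr,
			  u32 len)
{
	sge->sge_addr.lo = lower_32_bits(addr);
	sge->sge_addr.hi = upper_32_bits(addr);
	sge->sge_len = len;
}
#endif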
2125 
2126 /*
2127  * Last-ditch effort to clear the port if it's stuck. Used only after a
2128  * cleanup task times out.
2129  */
2130 static void qedf_drain_request(struct qedf_ctx *qedf)
2131 {
2132 	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2133 		QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2134 		return;
2135 	}
2136 
2137 	/* Set bit to return all queuecommand requests as busy */
2138 	set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2139 
2140 	/* Call qed drain request for function. Should be synchronous */
2141 	qed_ops->common->drain(qedf->cdev);
2142 
2143 	/* Settle time for CQEs to be returned */
2144 	msleep(100);
2145 
2146 	/* Unplug and continue */
2147 	clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2148 }
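/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * While QEDF_DRAIN_ACTIVE is set, the submission path is expected to
 * push new I/O back to the midlayer.  A minimal sketch of that gate
 * (demo_queuecommand_gate is hypothetical):
 */
#if 0
static int demo_queuecommand_gate(struct qedf_ctx *qedf)
{
	/* Host-busy makes the midlayer requeue and retry later. */
	if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags))
		return SCSI_MLQUEUE_HOST_BUSY;

	/* ... normal submission path ... */
	return 0;
}
#endif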
2149 
2150 /*
2151  * Returns SUCCESS if the cleanup task does not time out, otherwise returns
2152  * FAILED.
2153  */
2154 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2155 	bool return_scsi_cmd_on_abts)
2156 {
2157 	struct qedf_rport *fcport;
2158 	struct qedf_ctx *qedf;
2159 	int tmo = 0;
2160 	int rc = SUCCESS;
2161 	unsigned long flags;
2162 	struct fcoe_wqe *sqe;
2163 	u16 sqe_idx;
2164 	int refcount = 0;
2165 
2166 	fcport = io_req->fcport;
2167 	if (!fcport) {
2168 		QEDF_ERR(NULL, "fcport is NULL.\n");
2169 		return SUCCESS;
2170 	}
2171 
2172 	/* Sanity check qedf_rport before dereferencing any pointers */
2173 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2174 		QEDF_ERR(NULL, "tgt not offloaded\n");
2175 		return SUCCESS;
2176 	}
2177 
2178 	qedf = fcport->qedf;
2179 	if (!qedf) {
2180 		QEDF_ERR(NULL, "qedf is NULL.\n");
2181 		return SUCCESS;
2182 	}
2183 
2184 	if (io_req->cmd_type == QEDF_ELS) {
2185 		goto process_els;
2186 	}
2187 
2188 	if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2189 	    test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2190 		QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2191 			  "cleanup processing or already completed.\n",
2192 			  io_req->xid);
2193 		return SUCCESS;
2194 	}
2195 	/* QEDF_CMD_IN_CLEANUP was already set by test_and_set_bit() above */
2196 
2197 process_els:
2198 	/* Ensure room on SQ */
2199 	if (!atomic_read(&fcport->free_sqes)) {
2200 		QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2201 		/* Clear the in-cleanup flag that may have been set above */
2202 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2203 		return FAILED;
2204 	}
2205 
2206 	if (io_req->cmd_type == QEDF_CLEANUP) {
2207 		QEDF_ERR(&qedf->dbg_ctx,
2208 			 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2209 			 io_req->xid, io_req->cmd_type);
2210 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2211 		return SUCCESS;
2212 	}
2213 
2214 	refcount = kref_read(&io_req->refcount);
2215 
2216 	QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2217 		  "Entered xid=0x%x sc_cmd=%p cmd_type=%d flags=0x%lx refcount=%d fcport=%p port_id=0x%06x\n",
2218 		  io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2219 		  refcount, fcport, fcport->rdata->ids.port_id);
2220 
2221 	/* Cleanup cmds re-use the same TID as the original I/O */
2222 	io_req->cmd_type = QEDF_CLEANUP;
2223 	io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2224 
2225 	init_completion(&io_req->cleanup_done);
2226 
2227 	spin_lock_irqsave(&fcport->rport_lock, flags);
2228 
2229 	sqe_idx = qedf_get_sqe_idx(fcport);
2230 	sqe = &fcport->sq[sqe_idx];
2231 	memset(sqe, 0, sizeof(struct fcoe_wqe));
2232 	io_req->task_params->sqe = sqe;
2233 
2234 	init_initiator_cleanup_fcoe_task(io_req->task_params);
2235 	qedf_ring_doorbell(fcport);
2236 
2237 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
2238 
2239 	tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2240 					  QEDF_CLEANUP_TIMEOUT * HZ);
2241 
2242 	if (!tmo) {
2243 		rc = FAILED;
2244 		/* Timeout case */
2245 		QEDF_ERR(&(qedf->dbg_ctx),
2246 			 "Cleanup command timeout, xid=0x%x.\n", io_req->xid);
2247 		clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2248 		/* Issue a drain request if cleanup task times out */
2249 		QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2250 		qedf_drain_request(qedf);
2251 	}
2252 
2253 	/* If this is a task management command, handle it; the reference
2254 	 * will be dropped in qedf_execute_tmf().
2255 	 */
2256 	if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2257 	    io_req->tm_flags == FCP_TMF_TGT_RESET) {
2258 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2259 		io_req->sc_cmd = NULL;
2260 		complete(&io_req->tm_done);
2261 	}
2262 
2263 	if (io_req->sc_cmd) {
2264 		if (io_req->return_scsi_cmd_on_abts)
2265 			qedf_scsi_done(qedf, io_req, DID_ERROR);
2266 		else
2267 			QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2268 				  "Not calling scsi_done for xid=0x%x.\n",
2269 				  io_req->xid);
2270 	}
2271 
2272 	if (rc == SUCCESS)
2273 		io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2274 	else
2275 		io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
2276 
2277 	return rc;
2278 }
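/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * The cleanup path above is the driver's standard completion
 * handshake: init_completion() before posting, complete() from the
 * CQE handler, and wait_for_completion_timeout() here, which returns
 * 0 on timeout and the remaining jiffies otherwise.  The skeleton:
 */
#if 0
static int demo_wait_for_cleanup(struct qedf_ioreq *io_req)
{
	init_completion(&io_req->cleanup_done);

	/* ... post the cleanup request to the firmware ... */

	if (!wait_for_completion_timeout(&io_req->cleanup_done,
					 QEDF_CLEANUP_TIMEOUT * HZ))
		return FAILED;	/* timed out */

	return SUCCESS;
}
#endif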
2279 
2280 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2281 	struct qedf_ioreq *io_req)
2282 {
2283 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
2284 		   io_req->xid);
2285 
2286 	clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2287 
2288 	/* Complete so we can finish cleaning up the I/O */
2289 	complete(&io_req->cleanup_done);
2290 }
2291 
2292 static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
2293 	uint8_t tm_flags)
2294 {
2295 	struct qedf_ioreq *io_req;
2296 	struct e4_fcoe_task_context *task;
2297 	struct qedf_ctx *qedf = fcport->qedf;
2298 	struct fc_lport *lport = qedf->lport;
2299 	int rc = 0;
2300 	uint16_t xid;
2301 	int tmo = 0;
2302 	int lun = 0;
2303 	unsigned long flags;
2304 	struct fcoe_wqe *sqe;
2305 	u16 sqe_idx;
2306 
2307 	if (!sc_cmd) {
2308 		QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
2309 		return FAILED;
2310 	}
2311 
2312 	lun = (int)sc_cmd->device->lun;
2313 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
2314 		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2315 		rc = FAILED;
2316 		goto no_flush;
2317 	}
2318 
2319 	io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2320 	if (!io_req) {
2321 		QEDF_ERR(&(qedf->dbg_ctx), "Failed to alloc TMF io_req.\n");
2322 		rc = -EAGAIN;
2323 		goto no_flush;
2324 	}
2325 
2326 	if (tm_flags == FCP_TMF_LUN_RESET)
2327 		qedf->lun_resets++;
2328 	else if (tm_flags == FCP_TMF_TGT_RESET)
2329 		qedf->target_resets++;
2330 
2331 	/* Initialize rest of io_req fields */
2332 	io_req->sc_cmd = sc_cmd;
2333 	io_req->fcport = fcport;
2334 	io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2335 
2336 	/* Record which cpu this request is associated with */
2337 	io_req->cpu = smp_processor_id();
2338 
2339 	/* Set TM flags */
2340 	io_req->io_req_flags = QEDF_READ;
2341 	io_req->data_xfer_len = 0;
2342 	io_req->tm_flags = tm_flags;
2343 
2344 	/* Default is to return a SCSI command when an error occurs */
2345 	io_req->return_scsi_cmd_on_abts = false;
2346 
2347 	/* Obtain exchange id */
2348 	xid = io_req->xid;
2349 
2350 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2351 		  "TMF io_req xid = 0x%x\n", xid);
2352 
2353 	/* Initialize task context for this IO request */
2354 	task = qedf_get_task_mem(&qedf->tasks, xid);
2355 
2356 	init_completion(&io_req->tm_done);
2357 
2358 	spin_lock_irqsave(&fcport->rport_lock, flags);
2359 
2360 	sqe_idx = qedf_get_sqe_idx(fcport);
2361 	sqe = &fcport->sq[sqe_idx];
2362 	memset(sqe, 0, sizeof(struct fcoe_wqe));
2363 
2364 	qedf_init_task(fcport, lport, io_req, task, sqe);
2365 	qedf_ring_doorbell(fcport);
2366 
2367 	spin_unlock_irqrestore(&fcport->rport_lock, flags);
2368 
2369 	set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2370 	tmo = wait_for_completion_timeout(&io_req->tm_done,
2371 	    QEDF_TM_TIMEOUT * HZ);
2372 
2373 	if (!tmo) {
2374 		rc = FAILED;
2375 		QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2376 		/* Clear outstanding bit since command timed out */
2377 		clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2378 		io_req->sc_cmd = NULL;
2379 	} else {
2380 		/* Check TMF response code */
2381 		if (io_req->fcp_rsp_code == 0)
2382 			rc = SUCCESS;
2383 		else
2384 			rc = FAILED;
2385 	}
2386 	/*
2387 	 * Double check that fcport has not gone into an uploading state before
2388 	 * executing the command flush for the LUN/target.
2389 	 */
2390 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2391 		QEDF_ERR(&qedf->dbg_ctx,
2392 			 "fcport is uploading, not executing flush.\n");
2393 		goto no_flush;
2394 	}
2395 	/* We do not need this io_req any more */
2396 	kref_put(&io_req->refcount, qedf_release_cmd);
2397 
2399 	if (tm_flags == FCP_TMF_LUN_RESET)
2400 		qedf_flush_active_ios(fcport, lun);
2401 	else
2402 		qedf_flush_active_ios(fcport, -1);
2403 
2404 no_flush:
2405 	if (rc != SUCCESS) {
2406 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2407 		rc = FAILED;
2408 	} else {
2409 		QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2410 		rc = SUCCESS;
2411 	}
2412 	return rc;
2413 }
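/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * qedf_flush_active_ios() treats lun == -1 as the "all LUNs"
 * sentinel, so the TMF path above derives the flush scope from
 * tm_flags (demo_flush_scope is hypothetical):
 */
#if 0
static void demo_flush_scope(struct qedf_rport *fcport, u8 tm_flags,
			     int lun)
{
	if (tm_flags == FCP_TMF_LUN_RESET)
		qedf_flush_active_ios(fcport, lun);	/* one LUN */
	else
		qedf_flush_active_ios(fcport, -1);	/* whole target */
}
#endif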
2414 
2415 int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
2416 {
2417 	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
2418 	struct fc_rport_libfc_priv *rp = rport->dd_data;
2419 	struct qedf_rport *fcport = (struct qedf_rport *)&rp[1];
2420 	struct qedf_ctx *qedf;
2421 	struct fc_lport *lport = shost_priv(sc_cmd->device->host);
2422 	int rc = SUCCESS;
2423 	int rval;
2424 	struct qedf_ioreq *io_req = NULL;
2425 	int ref_cnt = 0;
2426 	struct fc_rport_priv *rdata = fcport->rdata;
2427 
2428 	QEDF_ERR(NULL,
2429 		 "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n",
2430 		 tm_flags, sc_cmd, sc_cmd->cmd_len ? sc_cmd->cmnd[0] : 0xff,
2431 		 rport->scsi_target_id, (int)sc_cmd->device->lun);
2432 
2433 	if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
2434 		QEDF_ERR(NULL, "stale rport\n");
2435 		return FAILED;
2436 	}
2437 
2438 	QEDF_ERR(NULL, "portid=%06x tm_flags=%s\n", rdata->ids.port_id,
2439 		 (tm_flags == FCP_TMF_TGT_RESET) ? "TARGET RESET" :
2440 		 "LUN RESET");
2441 
2442 	if (sc_cmd->SCp.ptr) {
2443 		io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
2444 		ref_cnt = kref_read(&io_req->refcount);
2445 		QEDF_ERR(NULL,
2446 			 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2447 			 io_req, io_req->xid, ref_cnt);
2448 	}
2449 
2450 	rval = fc_remote_port_chkready(rport);
2451 	if (rval) {
2452 		QEDF_ERR(NULL, "device_reset rport not ready\n");
2453 		rc = FAILED;
2454 		goto tmf_err;
2455 	}
2456 
2457 	rc = fc_block_scsi_eh(sc_cmd);
2458 	if (rc)
2459 		goto tmf_err;
2460 
2461 	if (!fcport) {
2462 		QEDF_ERR(NULL, "device_reset: rport is NULL\n");
2463 		rc = FAILED;
2464 		goto tmf_err;
2465 	}
2466 
2467 	qedf = fcport->qedf;
2468 
2469 	if (!qedf) {
2470 		QEDF_ERR(NULL, "qedf is NULL.\n");
2471 		rc = FAILED;
2472 		goto tmf_err;
2473 	}
2474 
2475 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2476 		QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
2477 		rc = SUCCESS;
2478 		goto tmf_err;
2479 	}
2480 
2481 	if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2482 	    test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2483 		rc = SUCCESS;
2484 		goto tmf_err;
2485 	}
2486 
2487 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
2488 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2489 		rc = FAILED;
2490 		goto tmf_err;
2491 	}
2492 
2493 	if (test_bit(QEDF_RPORT_UPLOADING_CONNECTION, &fcport->flags)) {
2494 		if (!fcport->rdata)
2495 			QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2496 				 fcport);
2497 		else
2498 			QEDF_ERR(&qedf->dbg_ctx,
2499 				 "fcport %p port_id=%06x is uploading.\n",
2500 				 fcport, fcport->rdata->ids.port_id);
2501 		rc = FAILED;
2502 		goto tmf_err;
2503 	}
2504 
2505 	rc = qedf_execute_tmf(fcport, sc_cmd, tm_flags);
2506 
2507 tmf_err:
2508 	kref_put(&rdata->kref, fc_rport_destroy);
2509 	return rc;
2510 }
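/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * qedf_initiate_tmf() brackets its work with a reference on the
 * libfc rport data, the same guard qedf_initiate_abts() uses; a
 * zero refcount means the rport is already being torn down:
 */
#if 0
static int demo_with_rdata_ref(struct fc_rport_priv *rdata)
{
	int rc;

	if (!rdata || !kref_get_unless_zero(&rdata->kref))
		return FAILED;

	/* ... work that may safely dereference rdata ... */
	rc = SUCCESS;

	kref_put(&rdata->kref, fc_rport_destroy);
	return rc;
}
#endif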
2511 
2512 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2513 	struct qedf_ioreq *io_req)
2514 {
2515 	struct fcoe_cqe_rsp_info *fcp_rsp;
2516 
2517 	clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2518 
2519 	fcp_rsp = &cqe->cqe_info.rsp_info;
2520 	qedf_parse_fcp_rsp(io_req, fcp_rsp);
2521 
2522 	io_req->sc_cmd = NULL;
2523 	complete(&io_req->tm_done);
2524 }
2525 
2526 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2527 	struct fcoe_cqe *cqe)
2528 {
2529 	unsigned long flags;
2530 	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
2531 	u32 payload_len, crc;
2532 	struct fc_frame_header *fh;
2533 	struct fc_frame *fp;
2534 	struct qedf_io_work *io_work;
2535 	u32 bdq_idx;
2536 	void *bdq_addr;
2537 	struct scsi_bd *p_bd_info;
2538 
2539 	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
2540 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2541 		  "address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
2542 		  le32_to_cpu(p_bd_info->address.hi),
2543 		  le32_to_cpu(p_bd_info->address.lo),
2544 		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
2545 		  le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
2546 		  qedf->bdq_prod_idx, pktlen);
2547 
2548 	bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
2549 	if (bdq_idx >= QEDF_BDQ_SIZE) {
2550 		QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2551 		    bdq_idx);
2552 		goto increment_prod;
2553 	}
2554 
2555 	bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2556 	if (!bdq_addr) {
2557 		QEDF_ERR(&(qedf->dbg_ctx),
2558 			 "bdq_addr is NULL, dropping unsolicited packet.\n");
2559 		goto increment_prod;
2560 	}
2561 
2562 	if (qedf_dump_frames) {
2563 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2564 		    "BDQ frame is at addr=%p.\n", bdq_addr);
2565 		print_hex_dump(KERN_WARNING, "bdq ", DUMP_PREFIX_OFFSET, 16, 1,
2566 		    (void *)bdq_addr, pktlen, false);
2567 	}
2568 
2569 	/* Allocate frame */
2570 	payload_len = pktlen - sizeof(struct fc_frame_header);
2571 	fp = fc_frame_alloc(qedf->lport, payload_len);
2572 	if (!fp) {
2573 		QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2574 		goto increment_prod;
2575 	}
2576 
2577 	/* Copy data from BDQ buffer into fc_frame struct */
2578 	fh = (struct fc_frame_header *)fc_frame_header_get(fp);
2579 	memcpy(fh, (void *)bdq_addr, pktlen);
2580 
2581 	QEDF_WARN(&qedf->dbg_ctx,
2582 		  "Processing unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n",
2583 		  ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl,
2584 		  fh->fh_type, fc_frame_payload_op(fp));
2585 
2586 	/* Initialize the frame so libfc sees it as a valid frame */
2587 	crc = fcoe_fc_crc(fp);
2588 	fc_frame_init(fp);
2589 	fr_dev(fp) = qedf->lport;
2590 	fr_sof(fp) = FC_SOF_I3;
2591 	fr_eof(fp) = FC_EOF_T;
2592 	fr_crc(fp) = cpu_to_le32(~crc);
2593 
2594 	/*
2595 	 * We need to return the frame back up to libfc in a non-atomic
2596 	 * context
2597 	 */
2598 	io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2599 	if (!io_work) {
2600 		QEDF_WARN(&(qedf->dbg_ctx),
2601 			  "Could not allocate work for I/O completion.\n");
2602 		fc_frame_free(fp);
2603 		goto increment_prod;
2604 	}
2605 	memset(io_work, 0, sizeof(struct qedf_io_work));
2606 
2607 	INIT_WORK(&io_work->work, qedf_fp_io_handler);
2608 
2609 	/* Copy contents of CQE for deferred processing */
2610 	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
2611 
2612 	io_work->qedf = qedf;
2613 	io_work->fp = fp;
2614 
2615 	queue_work_on(smp_processor_id(), qedf_io_wq, &io_work->work);
2616 increment_prod:
2617 	spin_lock_irqsave(&qedf->hba_lock, flags);
2618 
2619 	/* Increment producer to let f/w know we've handled the frame */
2620 	qedf->bdq_prod_idx++;
2621 
2622 	/* Producer index wraps at uint16_t boundary */
2623 	if (qedf->bdq_prod_idx == 0xffff)
2624 		qedf->bdq_prod_idx = 0;
2625 
2626 	writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2627 	readw(qedf->bdq_primary_prod);
2628 	writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2629 	readw(qedf->bdq_secondary_prod);
2630 
2631 	spin_unlock_irqrestore(&qedf->hba_lock, flags);
2632 }
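/*
 * Editor's note -- illustrative sketch only, not part of this file.
 * The producer update above wraps the 16-bit index at 0xffff and
 * reads each doorbell register back to flush the posted write.  The
 * same idiom in isolation (db is a hypothetical ioremap'd register):
 */
#if 0
static void demo_bump_bdq_prod(u16 *prod, void __iomem *db)
{
	(*prod)++;
	if (*prod == 0xffff)
		*prod = 0;

	writew(*prod, db);
	readw(db);	/* read back to flush the posted write */
}
#endif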
2633