xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_nvmet.c (revision f220d3eb)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5  * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30 
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38 
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
42 
43 #include "lpfc_version.h"
44 #include "lpfc_hw4.h"
45 #include "lpfc_hw.h"
46 #include "lpfc_sli.h"
47 #include "lpfc_sli4.h"
48 #include "lpfc_nl.h"
49 #include "lpfc_disc.h"
50 #include "lpfc.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_nvme.h"
53 #include "lpfc_nvmet.h"
54 #include "lpfc_logmsg.h"
55 #include "lpfc_crtn.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_debugfs.h"
58 
59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60 						 struct lpfc_nvmet_rcv_ctx *,
61 						 dma_addr_t rspbuf,
62 						 uint16_t rspsize);
63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64 						  struct lpfc_nvmet_rcv_ctx *);
65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66 					  struct lpfc_nvmet_rcv_ctx *,
67 					  uint32_t, uint16_t);
68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69 					    struct lpfc_nvmet_rcv_ctx *,
70 					    uint32_t, uint16_t);
71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72 					   struct lpfc_nvmet_rcv_ctx *,
73 					   uint32_t, uint16_t);
74 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
75 				    struct lpfc_nvmet_rcv_ctx *);
76 
77 static union lpfc_wqe128 lpfc_tsend_cmd_template;
78 static union lpfc_wqe128 lpfc_treceive_cmd_template;
79 static union lpfc_wqe128 lpfc_trsp_cmd_template;
80 
81 /* Setup WQE templates for NVME IOs */
82 void
83 lpfc_nvmet_cmd_template(void)
84 {
85 	union lpfc_wqe128 *wqe;
86 
87 	/* TSEND template */
88 	wqe = &lpfc_tsend_cmd_template;
89 	memset(wqe, 0, sizeof(union lpfc_wqe128));
90 
91 	/* Word 0, 1, 2 - BDE is variable */
92 
93 	/* Word 3 - payload_offset_len is zero */
94 
95 	/* Word 4 - relative_offset is variable */
96 
97 	/* Word 5 - is zero */
98 
99 	/* Word 6 - ctxt_tag, xri_tag is variable */
100 
101 	/* Word 7 - wqe_ar is variable */
102 	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
103 	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
104 	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
105 	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
106 	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
107 
108 	/* Word 8 - abort_tag is variable */
109 
110 	/* Word 9  - reqtag, rcvoxid is variable */
111 
112 	/* Word 10 - wqes, xc is variable */
113 	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
114 	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
115 	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
116 	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
117 	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
118 	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
119 
120 	/* Word 11 - sup, irsp, irsplen is variable */
121 	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
122 	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
123 	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
124 	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
125 	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
126 	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
127 
128 	/* Word 12 - fcp_data_len is variable */
129 
130 	/* Word 13, 14, 15 - PBDE is zero */
131 
132 	/* TRECEIVE template */
133 	wqe = &lpfc_treceive_cmd_template;
134 	memset(wqe, 0, sizeof(union lpfc_wqe128));
135 
136 	/* Word 0, 1, 2 - BDE is variable */
137 
138 	/* Word 3 */
139 	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
140 
141 	/* Word 4 - relative_offset is variable */
142 
143 	/* Word 5 - is zero */
144 
145 	/* Word 6 - ctxt_tag, xri_tag is variable */
146 
147 	/* Word 7 */
148 	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
149 	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
150 	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
151 	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
152 	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
153 
154 	/* Word 8 - abort_tag is variable */
155 
156 	/* Word 9  - reqtag, rcvoxid is variable */
157 
158 	/* Word 10 - xc is variable */
159 	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
160 	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
161 	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
162 	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
163 	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
164 	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
165 
166 	/* Word 11 - pbde is variable */
167 	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
168 	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
169 	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
170 	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
171 	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
172 	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
173 
174 	/* Word 12 - fcp_data_len is variable */
175 
176 	/* Word 13, 14, 15 - PBDE is variable */
177 
178 	/* TRSP template */
179 	wqe = &lpfc_trsp_cmd_template;
180 	memset(wqe, 0, sizeof(union lpfc_wqe128));
181 
182 	/* Word 0, 1, 2 - BDE is variable */
183 
184 	/* Word 3 - response_len is variable */
185 
186 	/* Word 4, 5 - is zero */
187 
188 	/* Word 6 - ctxt_tag, xri_tag is variable */
189 
190 	/* Word 7 */
191 	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
192 	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
193 	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
194 	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
195 	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
196 
197 	/* Word 8 - abort_tag is variable */
198 
199 	/* Word 9  - reqtag is variable */
200 
201 	/* Word 10 wqes, xc is variable */
202 	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
203 	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
204 	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
205 	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
206 	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
207 	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
208 
209 	/* Word 11 irsp, irsplen is variable */
210 	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
211 	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
212 	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
213 	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
214 	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
215 	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
216 
217 	/* Word 12, 13, 14, 15 - is zero */
218 }
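
/*
 * Illustrative sketch (not the actual prep code): a transmit path starts
 * from one of these templates and fills in only the variable words, e.g.
 * for a TSEND:
 *
 *	wqe = &nvmewqe->wqe;
 *	memcpy(wqe, &lpfc_tsend_cmd_template, sizeof(union lpfc_wqe128));
 *	bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com, rpi);      (Word 6)
 *	bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, xri);       (Word 6)
 *	bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, iotag);      (Word 9)
 *	wqe->fcp_tsend.fcp_data_len = transfer_len;              (Word 12)
 *
 * The real logic lives in lpfc_nvmet_prep_fcp_wqe(); rpi, xri, iotag and
 * transfer_len above are illustrative placeholders.
 */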
219 
220 void
221 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
222 {
223 	unsigned long iflag;
224 
225 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
226 			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
227 			ctxp->oxid, ctxp->flag);
228 
229 	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
230 	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
231 		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
232 				       iflag);
233 		return;
234 	}
235 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
236 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
237 	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
238 }
239 
240 /**
241  * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
242  * @phba: Pointer to HBA context object.
243  * @cmdwqe: Pointer to driver command WQE object.
244  * @wcqe: Pointer to driver response CQE object.
245  *
246  * The function is called from the SLI ring event handler with no lock
247  * held. It is the completion handler for NVME LS commands and frees the
248  * memory resources used for them.
249  **/
250 static void
251 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
252 			  struct lpfc_wcqe_complete *wcqe)
253 {
254 	struct lpfc_nvmet_tgtport *tgtp;
255 	struct nvmefc_tgt_ls_req *rsp;
256 	struct lpfc_nvmet_rcv_ctx *ctxp;
257 	uint32_t status, result;
258 
259 	status = bf_get(lpfc_wcqe_c_status, wcqe);
260 	result = wcqe->parameter;
261 	ctxp = cmdwqe->context2;
262 
263 	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
264 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
265 				"6410 NVMET LS cmpl state mismatch IO x%x: "
266 				"%d %d\n",
267 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
268 	}
269 
270 	if (!phba->targetport)
271 		goto out;
272 
273 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
274 
275 	if (tgtp) {
276 		if (status) {
277 			atomic_inc(&tgtp->xmt_ls_rsp_error);
278 			if (result == IOERR_ABORT_REQUESTED)
279 				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
280 			if (bf_get(lpfc_wcqe_c_xb, wcqe))
281 				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
282 		} else {
283 			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
284 		}
285 	}
286 
287 out:
288 	rsp = &ctxp->ctx.ls_req;
289 
290 	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
291 			 ctxp->oxid, status, result);
292 
293 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
294 			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
295 			status, result, ctxp->oxid);
296 
297 	lpfc_nlp_put(cmdwqe->context1);
298 	cmdwqe->context2 = NULL;
299 	cmdwqe->context3 = NULL;
300 	lpfc_sli_release_iocbq(phba, cmdwqe);
301 	rsp->done(rsp);
302 	kfree(ctxp);
303 }
304 
305 /**
306  * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
307  * @phba: HBA the buffer is associated with
308  * @ctx_buf: NVMET buffer/context to clean up and repost
309  *
310  * Description: Frees the given DMA buffer by reposting it to its
311  * associated RQ so it can be reused, then either hands the context to
312  * a waiting unsolicited command or returns it to its CPU/MRQ free list.
313  *
314  * Notes: Takes nvmet_io_wait_lock and the context list lock.  Can be called with or without other locks held.
315  *
316  * Returns: None
317  **/
318 void
319 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
320 {
321 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
322 	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
323 	struct lpfc_nvmet_tgtport *tgtp;
324 	struct fc_frame_header *fc_hdr;
325 	struct rqb_dmabuf *nvmebuf;
326 	struct lpfc_nvmet_ctx_info *infop;
327 	uint32_t *payload;
328 	uint32_t size, oxid, sid, rc;
329 	int cpu;
330 	unsigned long iflag;
331 
332 	if (ctxp->txrdy) {
333 		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
334 			      ctxp->txrdy_phys);
335 		ctxp->txrdy = NULL;
336 		ctxp->txrdy_phys = 0;
337 	}
338 
339 	if (ctxp->state == LPFC_NVMET_STE_FREE) {
340 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
341 				"6411 NVMET free, already free IO x%x: %d %d\n",
342 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
343 	}
344 	ctxp->state = LPFC_NVMET_STE_FREE;
345 
346 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
347 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
348 		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
349 				 nvmebuf, struct rqb_dmabuf,
350 				 hbuf.list);
351 		phba->sli4_hba.nvmet_io_wait_cnt--;
352 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
353 				       iflag);
354 
355 		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
356 		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
357 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
358 		payload = (uint32_t *)(nvmebuf->dbuf.virt);
359 		size = nvmebuf->bytes_recv;
360 		sid = sli4_sid_from_fc_hdr(fc_hdr);
361 
362 		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
363 		ctxp->wqeq = NULL;
364 		ctxp->txrdy = NULL;
365 		ctxp->offset = 0;
366 		ctxp->phba = phba;
367 		ctxp->size = size;
368 		ctxp->oxid = oxid;
369 		ctxp->sid = sid;
370 		ctxp->state = LPFC_NVMET_STE_RCV;
371 		ctxp->entry_cnt = 1;
372 		ctxp->flag = 0;
373 		ctxp->ctxbuf = ctx_buf;
374 		ctxp->rqb_buffer = (void *)nvmebuf;
375 		spin_lock_init(&ctxp->ctxlock);
376 
377 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
378 		if (ctxp->ts_cmd_nvme) {
379 			ctxp->ts_cmd_nvme = ktime_get_ns();
380 			ctxp->ts_nvme_data = 0;
381 			ctxp->ts_data_wqput = 0;
382 			ctxp->ts_isr_data = 0;
383 			ctxp->ts_data_nvme = 0;
384 			ctxp->ts_nvme_status = 0;
385 			ctxp->ts_status_wqput = 0;
386 			ctxp->ts_isr_status = 0;
387 			ctxp->ts_status_nvme = 0;
388 		}
389 #endif
390 		atomic_inc(&tgtp->rcv_fcp_cmd_in);
391 		/*
392 		 * The calling sequence should be:
393 		 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
394 		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
395 		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
396 		 * in the NVME command / FC header has been stored.
397 		 * A buffer has already been reposted for this IO, so just free
398 		 * the nvmebuf.
399 		 */
400 		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
401 					  payload, size);
402 
403 		/* Process FCP command */
404 		if (rc == 0) {
405 			ctxp->rqb_buffer = NULL;
406 			atomic_inc(&tgtp->rcv_fcp_cmd_out);
407 			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
408 			return;
409 		}
410 
411 		/* Processing of FCP command is deferred */
412 		if (rc == -EOVERFLOW) {
413 			lpfc_nvmeio_data(phba,
414 					 "NVMET RCV BUSY: xri x%x sz %d "
415 					 "from %06x\n",
416 					 oxid, size, sid);
417 			atomic_inc(&tgtp->rcv_fcp_cmd_out);
418 			return;
419 		}
420 		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
421 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
422 				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
423 				ctxp->oxid, rc,
424 				atomic_read(&tgtp->rcv_fcp_cmd_in),
425 				atomic_read(&tgtp->rcv_fcp_cmd_out),
426 				atomic_read(&tgtp->xmt_fcp_release));
427 
428 		lpfc_nvmet_defer_release(phba, ctxp);
429 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
430 		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
431 		return;
432 	}
433 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
434 
435 	/*
436 	 * Return the context structure to the CPU context list of the MRQ
437 	 * the IO was received on (ctxp->idx).
438 	 */
439 	cpu = smp_processor_id();
440 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
441 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
442 	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
443 	infop->nvmet_ctx_list_cnt++;
444 	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
445 #endif
446 }
447 
448 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
449 static void
450 lpfc_nvmet_ktime(struct lpfc_hba *phba,
451 		 struct lpfc_nvmet_rcv_ctx *ctxp)
452 {
453 	uint64_t seg1, seg2, seg3, seg4, seg5;
454 	uint64_t seg6, seg7, seg8, seg9, seg10;
455 	uint64_t segsum;
456 
457 	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
458 	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
459 	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
460 	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
461 	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
462 		return;
463 
464 	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
465 		return;
466 	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
467 		return;
468 	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
469 		return;
470 	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
471 		return;
472 	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
473 		return;
474 	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
475 		return;
476 	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
477 		return;
478 	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
479 		return;
480 	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
481 		return;
482 	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
483 		return;
484 	/*
485 	 * Segment 1 - Time from FCP command received by MSI-X ISR
486 	 * to FCP command is passed to NVME Layer.
487 	 * Segment 2 - Time from FCP command payload handed
488 	 * off to NVME Layer to Driver receives a Command op
489 	 * from NVME Layer.
490 	 * Segment 3 - Time from Driver receives a Command op
491 	 * from NVME Layer to Command is put on WQ.
492 	 * Segment 4 - Time from Driver WQ put is done
493 	 * to MSI-X ISR for Command cmpl.
494 	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
495 	 * Command cmpl is passed to NVME Layer.
496 	 * Segment 6 - Time from Command cmpl is passed to NVME
497 	 * Layer to Driver receives a RSP op from NVME Layer.
498 	 * Segment 7 - Time from Driver receives a RSP op from
499 	 * NVME Layer to WQ put is done on TRSP FCP Status.
500 	 * Segment 8 - Time from Driver WQ put is done on TRSP
501 	 * FCP Status to MSI-X ISR for TRSP cmpl.
502 	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
503 	 * TRSP cmpl is passed to NVME Layer.
504 	 * Segment 10 - Time from FCP command received by
505 	 * MSI-X ISR to command is completed on wire.
506 	 * (Segments 1 thru 8) for READDATA / WRITEDATA
507 	 * (Segments 1 thru 4) for READDATA_RSP
508 	 */
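	/*
	 * Each ts_* timestamp is absolute; the code below keeps a running
	 * sum (segsum) of the segments already accounted for, so each
	 * segN = (ts_N - ts_isr_cmd) - segsum reduces to the duration of
	 * segment N alone, e.g. seg3 = ts_data_wqput - ts_nvme_data.
	 */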
509 	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
510 	segsum = seg1;
511 
512 	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
513 	if (segsum > seg2)
514 		return;
515 	seg2 -= segsum;
516 	segsum += seg2;
517 
518 	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
519 	if (segsum > seg3)
520 		return;
521 	seg3 -= segsum;
522 	segsum += seg3;
523 
524 	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
525 	if (segsum > seg4)
526 		return;
527 	seg4 -= segsum;
528 	segsum += seg4;
529 
530 	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
531 	if (segsum > seg5)
532 		return;
533 	seg5 -= segsum;
534 	segsum += seg5;
535 
536 
537 	/* For auto rsp commands seg6 thru seg9 will be 0 */
538 	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
539 		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
540 		if (segsum > seg6)
541 			return;
542 		seg6 -= segsum;
543 		segsum += seg6;
544 
545 		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
546 		if (segsum > seg7)
547 			return;
548 		seg7 -= segsum;
549 		segsum += seg7;
550 
551 		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
552 		if (segsum > seg8)
553 			return;
554 		seg8 -= segsum;
555 		segsum += seg8;
556 
557 		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
558 		if (segsum > seg9)
559 			return;
560 		seg9 -= segsum;
561 		segsum += seg9;
562 
563 		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
564 			return;
565 		seg10 = (ctxp->ts_isr_status -
566 			ctxp->ts_isr_cmd);
567 	} else {
568 		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
569 			return;
570 		seg6 =  0;
571 		seg7 =  0;
572 		seg8 =  0;
573 		seg9 =  0;
574 		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
575 	}
576 
577 	phba->ktime_seg1_total += seg1;
578 	if (seg1 < phba->ktime_seg1_min)
579 		phba->ktime_seg1_min = seg1;
580 	else if (seg1 > phba->ktime_seg1_max)
581 		phba->ktime_seg1_max = seg1;
582 
583 	phba->ktime_seg2_total += seg2;
584 	if (seg2 < phba->ktime_seg2_min)
585 		phba->ktime_seg2_min = seg2;
586 	else if (seg2 > phba->ktime_seg2_max)
587 		phba->ktime_seg2_max = seg2;
588 
589 	phba->ktime_seg3_total += seg3;
590 	if (seg3 < phba->ktime_seg3_min)
591 		phba->ktime_seg3_min = seg3;
592 	else if (seg3 > phba->ktime_seg3_max)
593 		phba->ktime_seg3_max = seg3;
594 
595 	phba->ktime_seg4_total += seg4;
596 	if (seg4 < phba->ktime_seg4_min)
597 		phba->ktime_seg4_min = seg4;
598 	else if (seg4 > phba->ktime_seg4_max)
599 		phba->ktime_seg4_max = seg4;
600 
601 	phba->ktime_seg5_total += seg5;
602 	if (seg5 < phba->ktime_seg5_min)
603 		phba->ktime_seg5_min = seg5;
604 	else if (seg5 > phba->ktime_seg5_max)
605 		phba->ktime_seg5_max = seg5;
606 
607 	phba->ktime_data_samples++;
608 	if (!seg6)
609 		goto out;
610 
611 	phba->ktime_seg6_total += seg6;
612 	if (seg6 < phba->ktime_seg6_min)
613 		phba->ktime_seg6_min = seg6;
614 	else if (seg6 > phba->ktime_seg6_max)
615 		phba->ktime_seg6_max = seg6;
616 
617 	phba->ktime_seg7_total += seg7;
618 	if (seg7 < phba->ktime_seg7_min)
619 		phba->ktime_seg7_min = seg7;
620 	else if (seg7 > phba->ktime_seg7_max)
621 		phba->ktime_seg7_max = seg7;
622 
623 	phba->ktime_seg8_total += seg8;
624 	if (seg8 < phba->ktime_seg8_min)
625 		phba->ktime_seg8_min = seg8;
626 	else if (seg8 > phba->ktime_seg8_max)
627 		phba->ktime_seg8_max = seg8;
628 
629 	phba->ktime_seg9_total += seg9;
630 	if (seg9 < phba->ktime_seg9_min)
631 		phba->ktime_seg9_min = seg9;
632 	else if (seg9 > phba->ktime_seg9_max)
633 		phba->ktime_seg9_max = seg9;
634 out:
635 	phba->ktime_seg10_total += seg10;
636 	if (seg10 < phba->ktime_seg10_min)
637 		phba->ktime_seg10_min = seg10;
638 	else if (seg10 > phba->ktime_seg10_max)
639 		phba->ktime_seg10_max = seg10;
640 	phba->ktime_status_samples++;
641 }
642 #endif
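
/*
 * The ktime_seg* totals, mins and maxes accumulated above are reported
 * through the driver's debugfs interface (see lpfc_debugfs.c), which is
 * how the per-segment latency statistics reach the user.
 */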
643 
644 /**
645  * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
646  * @phba: Pointer to HBA context object.
647  * @cmdwqe: Pointer to driver command WQE object.
648  * @wcqe: Pointer to driver response CQE object.
649  *
650  * The function is called from the SLI ring event handler with no lock
651  * held. It is the completion handler for NVME FCP commands and frees the
652  * memory resources used for them.
653  **/
654 static void
655 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
656 			  struct lpfc_wcqe_complete *wcqe)
657 {
658 	struct lpfc_nvmet_tgtport *tgtp;
659 	struct nvmefc_tgt_fcp_req *rsp;
660 	struct lpfc_nvmet_rcv_ctx *ctxp;
661 	uint32_t status, result, op, start_clean, logerr;
662 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
663 	uint32_t id;
664 #endif
665 
666 	ctxp = cmdwqe->context2;
667 	ctxp->flag &= ~LPFC_NVMET_IO_INP;
668 
669 	rsp = &ctxp->ctx.fcp_req;
670 	op = rsp->op;
671 
672 	status = bf_get(lpfc_wcqe_c_status, wcqe);
673 	result = wcqe->parameter;
674 
675 	if (phba->targetport)
676 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
677 	else
678 		tgtp = NULL;
679 
680 	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
681 			 ctxp->oxid, op, status);
682 
683 	if (status) {
684 		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
685 		rsp->transferred_length = 0;
686 		if (tgtp) {
687 			atomic_inc(&tgtp->xmt_fcp_rsp_error);
688 			if (result == IOERR_ABORT_REQUESTED)
689 				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
690 		}
691 
692 		logerr = LOG_NVME_IOERR;
693 
694 		/* Pick up the SLI4 exchange busy condition */
695 		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
696 			ctxp->flag |= LPFC_NVMET_XBUSY;
697 			logerr |= LOG_NVME_ABTS;
698 			if (tgtp)
699 				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
700 
701 		} else {
702 			ctxp->flag &= ~LPFC_NVMET_XBUSY;
703 		}
704 
705 		lpfc_printf_log(phba, KERN_INFO, logerr,
706 				"6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
707 				ctxp->oxid, status, result, ctxp->flag);
708 
709 	} else {
710 		rsp->fcp_error = NVME_SC_SUCCESS;
711 		if (op == NVMET_FCOP_RSP)
712 			rsp->transferred_length = rsp->rsplen;
713 		else
714 			rsp->transferred_length = rsp->transfer_length;
715 		if (tgtp)
716 			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
717 	}
718 
719 	if ((op == NVMET_FCOP_READDATA_RSP) ||
720 	    (op == NVMET_FCOP_RSP)) {
721 		/* Sanity check */
722 		ctxp->state = LPFC_NVMET_STE_DONE;
723 		ctxp->entry_cnt++;
724 
725 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
726 		if (ctxp->ts_cmd_nvme) {
727 			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
728 				ctxp->ts_isr_data =
729 					cmdwqe->isr_timestamp;
730 				ctxp->ts_data_nvme =
731 					ktime_get_ns();
732 				ctxp->ts_nvme_status =
733 					ctxp->ts_data_nvme;
734 				ctxp->ts_status_wqput =
735 					ctxp->ts_data_nvme;
736 				ctxp->ts_isr_status =
737 					ctxp->ts_data_nvme;
738 				ctxp->ts_status_nvme =
739 					ctxp->ts_data_nvme;
740 			} else {
741 				ctxp->ts_isr_status =
742 					cmdwqe->isr_timestamp;
743 				ctxp->ts_status_nvme =
744 					ktime_get_ns();
745 			}
746 		}
747 		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
748 			id = smp_processor_id();
749 			if (ctxp->cpu != id)
750 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
751 						"6703 CPU Check cmpl: "
752 						"cpu %d expect %d\n",
753 						id, ctxp->cpu);
754 			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
755 				phba->cpucheck_cmpl_io[id]++;
756 		}
757 #endif
758 		rsp->done(rsp);
759 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
760 		if (ctxp->ts_cmd_nvme)
761 			lpfc_nvmet_ktime(phba, ctxp);
762 #endif
763 		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
764 	} else {
765 		ctxp->entry_cnt++;
766 		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
767 		memset(((char *)cmdwqe) + start_clean, 0,
768 		       (sizeof(struct lpfc_iocbq) - start_clean));
769 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
770 		if (ctxp->ts_cmd_nvme) {
771 			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
772 			ctxp->ts_data_nvme = ktime_get_ns();
773 		}
774 		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
775 			id = smp_processor_id();
776 			if (ctxp->cpu != id)
777 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
778 						"6704 CPU Check cmdcmpl: "
779 						"cpu %d expect %d\n",
780 						id, ctxp->cpu);
781 			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
782 				phba->cpucheck_ccmpl_io[id]++;
783 		}
784 #endif
785 		rsp->done(rsp);
786 	}
787 }
788 
789 static int
790 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
791 		      struct nvmefc_tgt_ls_req *rsp)
792 {
793 	struct lpfc_nvmet_rcv_ctx *ctxp =
794 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
795 	struct lpfc_hba *phba = ctxp->phba;
796 	struct hbq_dmabuf *nvmebuf =
797 		(struct hbq_dmabuf *)ctxp->rqb_buffer;
798 	struct lpfc_iocbq *nvmewqeq;
799 	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
800 	struct lpfc_dmabuf dmabuf;
801 	struct ulp_bde64 bpl;
802 	int rc;
803 
804 	if (phba->pport->load_flag & FC_UNLOADING)
805 		return -ENODEV;
809 
810 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
811 			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
812 
813 	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
814 	    (ctxp->entry_cnt != 1)) {
815 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
816 				"6412 NVMET LS rsp state mismatch "
817 				"oxid x%x: %d %d\n",
818 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
819 	}
820 	ctxp->state = LPFC_NVMET_STE_LS_RSP;
821 	ctxp->entry_cnt++;
822 
823 	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
824 				      rsp->rsplen);
825 	if (nvmewqeq == NULL) {
826 		atomic_inc(&nvmep->xmt_ls_drop);
827 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
828 				"6150 LS Drop IO x%x: Prep\n",
829 				ctxp->oxid);
830 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
831 		atomic_inc(&nvmep->xmt_ls_abort);
832 		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
833 						ctxp->sid, ctxp->oxid);
834 		return -ENOMEM;
835 	}
836 
837 	/* Save numBdes for bpl2sgl */
838 	nvmewqeq->rsvd2 = 1;
839 	nvmewqeq->hba_wqidx = 0;
840 	nvmewqeq->context3 = &dmabuf;
841 	dmabuf.virt = &bpl;
842 	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
843 	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
844 	bpl.tus.f.bdeSize = rsp->rsplen;
845 	bpl.tus.f.bdeFlags = 0;
846 	bpl.tus.w = le32_to_cpu(bpl.tus.w);
847 
848 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
849 	nvmewqeq->iocb_cmpl = NULL;
850 	nvmewqeq->context2 = ctxp;
851 
852 	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
853 			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
854 
855 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
856 	if (rc == WQE_SUCCESS) {
857 		/*
858 		 * Okay to repost buffer here, but wait till cmpl
859 		 * before freeing ctxp and iocbq.
860 		 */
861 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
862 		ctxp->rqb_buffer = NULL;
863 		atomic_inc(&nvmep->xmt_ls_rsp);
864 		return 0;
865 	}
866 	/* Give back resources */
867 	atomic_inc(&nvmep->xmt_ls_drop);
868 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
869 			"6151 LS Drop IO x%x: Issue %d\n",
870 			ctxp->oxid, rc);
871 
872 	lpfc_nlp_put(nvmewqeq->context1);
873 
874 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
875 	atomic_inc(&nvmep->xmt_ls_abort);
876 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
877 	return -ENXIO;
878 }
879 
880 static int
881 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
882 		      struct nvmefc_tgt_fcp_req *rsp)
883 {
884 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
885 	struct lpfc_nvmet_rcv_ctx *ctxp =
886 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
887 	struct lpfc_hba *phba = ctxp->phba;
888 	struct lpfc_queue *wq;
889 	struct lpfc_iocbq *nvmewqeq;
890 	struct lpfc_sli_ring *pring;
891 	unsigned long iflags;
892 	int rc;
893 
894 	if (phba->pport->load_flag & FC_UNLOADING) {
895 		rc = -ENODEV;
896 		goto aerr;
897 	}
903 
904 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
905 	if (ctxp->ts_cmd_nvme) {
906 		if (rsp->op == NVMET_FCOP_RSP)
907 			ctxp->ts_nvme_status = ktime_get_ns();
908 		else
909 			ctxp->ts_nvme_data = ktime_get_ns();
910 	}
911 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
912 		int id = smp_processor_id();
913 		ctxp->cpu = id;
914 		if (id < LPFC_CHECK_CPU_CNT)
915 			phba->cpucheck_xmt_io[id]++;
916 		if (rsp->hwqid != id) {
917 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
918 					"6705 CPU Check OP: "
919 					"cpu %d expect %d\n",
920 					id, rsp->hwqid);
921 			ctxp->cpu = rsp->hwqid;
922 		}
923 	}
924 #endif
925 
926 	/* Sanity check */
927 	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
928 	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
929 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
930 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
931 				"6102 IO xri x%x aborted\n",
932 				ctxp->oxid);
933 		rc = -ENXIO;
934 		goto aerr;
935 	}
936 
937 	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
938 	if (nvmewqeq == NULL) {
939 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
940 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
941 				"6152 FCP Drop IO x%x: Prep\n",
942 				ctxp->oxid);
943 		rc = -ENXIO;
944 		goto aerr;
945 	}
946 
947 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
948 	nvmewqeq->iocb_cmpl = NULL;
949 	nvmewqeq->context2 = ctxp;
950 	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
951 	ctxp->wqeq->hba_wqidx = rsp->hwqid;
952 
953 	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
954 			 ctxp->oxid, rsp->op, rsp->rsplen);
955 
956 	ctxp->flag |= LPFC_NVMET_IO_INP;
957 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
958 	if (rc == WQE_SUCCESS) {
959 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
960 		if (!ctxp->ts_cmd_nvme)
961 			return 0;
962 		if (rsp->op == NVMET_FCOP_RSP)
963 			ctxp->ts_status_wqput = ktime_get_ns();
964 		else
965 			ctxp->ts_data_wqput = ktime_get_ns();
966 #endif
967 		return 0;
968 	}
969 
970 	if (rc == -EBUSY) {
971 		/*
972 		 * WQ was full, so queue nvmewqeq to be sent after
973 		 * WQE release CQE
974 		 */
975 		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
976 		wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
977 		pring = wq->pring;
978 		spin_lock_irqsave(&pring->ring_lock, iflags);
979 		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
980 		wq->q_flag |= HBA_NVMET_WQFULL;
981 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
982 		atomic_inc(&lpfc_nvmep->defer_wqfull);
983 		return 0;
984 	}
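
	/*
	 * A WQE queued on wqfull_list above is re-issued by
	 * lpfc_nvmet_wqfull_process() once a WQE release CQE frees slots,
	 * or is completed with a faked IOERR_ABORT_REQUESTED status by
	 * lpfc_nvmet_wqfull_flush() on abort/teardown.
	 */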
985 
986 	/* Give back resources */
987 	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
988 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
989 			"6153 FCP Drop IO x%x: Issue: %d\n",
990 			ctxp->oxid, rc);
991 
992 	ctxp->wqeq->hba_wqidx = 0;
993 	nvmewqeq->context2 = NULL;
994 	nvmewqeq->context3 = NULL;
995 	rc = -EBUSY;
996 aerr:
997 	return rc;
998 }
999 
1000 static void
1001 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1002 {
1003 	struct lpfc_nvmet_tgtport *tport = targetport->private;
1004 
1005 	/* release any threads waiting for the unreg to complete */
1006 	complete(&tport->tport_unreg_done);
1007 }
1008 
1009 static void
1010 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1011 			 struct nvmefc_tgt_fcp_req *req)
1012 {
1013 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1014 	struct lpfc_nvmet_rcv_ctx *ctxp =
1015 		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1016 	struct lpfc_hba *phba = ctxp->phba;
1017 	struct lpfc_queue *wq;
1018 	unsigned long flags;
1019 
1020 	if (phba->pport->load_flag & FC_UNLOADING)
1021 		return;
1025 
1026 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1027 			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
1028 			ctxp->oxid, ctxp->flag, ctxp->state);
1029 
1030 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1031 			 ctxp->oxid, ctxp->flag, ctxp->state);
1032 
1033 	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1034 
1035 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1036 	ctxp->state = LPFC_NVMET_STE_ABORT;
1037 
1038 	/* Since iaab/iaar are NOT set, we need to check
1039 	 * if the firmware is in the process of aborting this IO
1040 	 */
1041 	if (ctxp->flag & LPFC_NVMET_XBUSY) {
1042 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1043 		return;
1044 	}
1045 	ctxp->flag |= LPFC_NVMET_ABORT_OP;
1046 
1047 	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1048 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1049 						 ctxp->oxid);
1050 		wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
1051 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1052 		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1053 		return;
1054 	}
1055 
1056 	/* A state of LPFC_NVMET_STE_RCV means we have just received
1057 	 * the NVME command and have not started processing it.
1058 	 * (by issuing any IO WQEs on this exchange yet)
1059 	 */
1060 	if (ctxp->state == LPFC_NVMET_STE_RCV)
1061 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1062 						 ctxp->oxid);
1063 	else
1064 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1065 					       ctxp->oxid);
1066 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1067 }
1068 
1069 static void
1070 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1071 			   struct nvmefc_tgt_fcp_req *rsp)
1072 {
1073 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1074 	struct lpfc_nvmet_rcv_ctx *ctxp =
1075 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1076 	struct lpfc_hba *phba = ctxp->phba;
1077 	unsigned long flags;
1078 	bool aborting = false;
1079 
1080 	if (ctxp->state != LPFC_NVMET_STE_DONE &&
1081 	    ctxp->state != LPFC_NVMET_STE_ABORT) {
1082 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1083 				"6413 NVMET release bad state %d %d oxid x%x\n",
1084 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1085 	}
1086 
1087 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1088 	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1089 	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
1090 		aborting = true;
1091 		/* let the abort path do the real release */
1092 		lpfc_nvmet_defer_release(phba, ctxp);
1093 	}
1094 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1095 
1096 	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
1097 			 ctxp->oxid, ctxp->state, aborting);
1098 
1099 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1100 
1101 	if (aborting)
1102 		return;
1103 
1104 	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1105 }
1106 
1107 static void
1108 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1109 		     struct nvmefc_tgt_fcp_req *rsp)
1110 {
1111 	struct lpfc_nvmet_tgtport *tgtp;
1112 	struct lpfc_nvmet_rcv_ctx *ctxp =
1113 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1114 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1115 	struct lpfc_hba *phba = ctxp->phba;
1116 
1117 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1118 			 ctxp->oxid, ctxp->size, smp_processor_id());
1119 
1120 	if (!nvmebuf) {
1121 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1122 				"6425 Defer rcv: no buffer xri x%x: "
1123 				"flg %x ste %x\n",
1124 				ctxp->oxid, ctxp->flag, ctxp->state);
1125 		return;
1126 	}
1127 
1128 	tgtp = phba->targetport->private;
1129 	if (tgtp)
1130 		atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1131 
1132 	/* Free the nvmebuf since a new buffer already replaced it */
1133 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1134 }
1135 
1136 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1137 	.targetport_delete = lpfc_nvmet_targetport_delete,
1138 	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
1139 	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
1140 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
1141 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1142 	.defer_rcv	= lpfc_nvmet_defer_rcv,
1143 
1144 	.max_hw_queues  = 1,
1145 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1146 	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1147 	.dma_boundary = 0xFFFFFFFF,
1148 
1149 	/* optional features */
1150 	.target_features = 0,
1151 	/* sizes of additional private data for data structures */
1152 	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1153 };
1154 
1155 static void
1156 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1157 		struct lpfc_nvmet_ctx_info *infop)
1158 {
1159 	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1160 	unsigned long flags;
1161 
1162 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1163 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1164 				&infop->nvmet_ctx_list, list) {
1165 		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1166 		list_del_init(&ctx_buf->list);
1167 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1168 
1169 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1170 		ctx_buf->sglq->state = SGL_FREED;
1171 		ctx_buf->sglq->ndlp = NULL;
1172 
1173 		spin_lock(&phba->sli4_hba.sgl_list_lock);
1174 		list_add_tail(&ctx_buf->sglq->list,
1175 				&phba->sli4_hba.lpfc_nvmet_sgl_list);
1176 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
1177 
1178 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1179 		kfree(ctx_buf->context);
1180 	}
1181 	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1182 }
1183 
1184 static void
1185 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1186 {
1187 	struct lpfc_nvmet_ctx_info *infop;
1188 	int i, j;
1189 
1190 	/* The first context list, MRQ 0 CPU 0 */
1191 	infop = phba->sli4_hba.nvmet_ctx_info;
1192 	if (!infop)
1193 		return;
1194 
1195 	/* Cycle through the entire CPU context list for every MRQ */
1196 	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1197 		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
1198 			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
1199 			infop++; /* next */
1200 		}
1201 	}
1202 	kfree(phba->sli4_hba.nvmet_ctx_info);
1203 	phba->sli4_hba.nvmet_ctx_info = NULL;
1204 }
1205 
1206 static int
1207 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1208 {
1209 	struct lpfc_nvmet_ctxbuf *ctx_buf;
1210 	struct lpfc_iocbq *nvmewqe;
1211 	union lpfc_wqe128 *wqe;
1212 	struct lpfc_nvmet_ctx_info *last_infop;
1213 	struct lpfc_nvmet_ctx_info *infop;
1214 	int i, j, idx;
1215 
1216 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1217 			"6403 Allocate NVMET resources for %d XRIs\n",
1218 			phba->sli4_hba.nvmet_xri_cnt);
1219 
1220 	phba->sli4_hba.nvmet_ctx_info = kcalloc(
1221 		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
1222 		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1223 	if (!phba->sli4_hba.nvmet_ctx_info) {
1224 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1225 				"6419 Failed allocate memory for "
1226 				"nvmet context lists\n");
1227 		return -ENOMEM;
1228 	}
1229 
1230 	/*
1231 	 * Assuming X CPUs in the system, and Y MRQs, allocate some
1232 	 * lpfc_nvmet_ctx_info structures as follows:
1233 	 *
1234 	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1235 	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1236 	 * ...
1237 	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1238 	 *
1239 	 * Each line represents a MRQ "silo" containing an entry for
1240 	 * every CPU.
1241 	 *
1242 	 * MRQ X is initially assumed to be associated with CPU X, thus
1243 	 * contexts are initially distributed across all MRQs using
1244 	 * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
1245 	 * freed, they are freed to the MRQ silo based on the CPU number
1246 	 * of the IO completion. Thus a context that was allocated for MRQ A
1247 	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
1248 	 */
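	/*
	 * Sketch of the indexing this layout implies (assumption: the
	 * lpfc_get_ctx_list() helper is a flat 2-D lookup):
	 *
	 *	infop = phba->sli4_hba.nvmet_ctx_info +
	 *		(cpu * phba->cfg_nvmet_mrq) + mrq;
	 *
	 * so each (cpu, mrq) silo is one lpfc_nvmet_ctx_info slot.
	 */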
1249 	infop = phba->sli4_hba.nvmet_ctx_info;
1250 	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1251 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1252 			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1253 			spin_lock_init(&infop->nvmet_ctx_list_lock);
1254 			infop->nvmet_ctx_list_cnt = 0;
1255 			infop++;
1256 		}
1257 	}
1258 
1259 	/*
1260 	 * Setup the next CPU context info ptr for each MRQ.
1261 	 * MRQ 0 will cycle thru CPUs 0 - X separately from
1262 	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1263 	 */
1264 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1265 		last_infop = lpfc_get_ctx_list(phba, 0, j);
1266 		for (i = phba->sli4_hba.num_present_cpu - 1;  i >= 0; i--) {
1267 			infop = lpfc_get_ctx_list(phba, i, j);
1268 			infop->nvmet_ctx_next_cpu = last_infop;
1269 			last_infop = infop;
1270 		}
1271 	}
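
	/*
	 * The loop above links the per-CPU lists of each MRQ into a ring
	 * via nvmet_ctx_next_cpu (cpu0 -> cpu1 -> ... -> cpuX -> cpu0), so
	 * a consumer that finds its own CPU list empty can walk the ring to
	 * borrow a context from a neighboring CPU on the same MRQ.
	 */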
1272 
1273 	/* For all nvmet xris, allocate resources needed to process a
1274 	 * received command on a per xri basis.
1275 	 */
1276 	idx = 0;
1277 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1278 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1279 		if (!ctx_buf) {
1280 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1281 					"6404 Ran out of memory for NVMET\n");
1282 			return -ENOMEM;
1283 		}
1284 
1285 		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1286 					   GFP_KERNEL);
1287 		if (!ctx_buf->context) {
1288 			kfree(ctx_buf);
1289 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1290 					"6405 Ran out of NVMET "
1291 					"context memory\n");
1292 			return -ENOMEM;
1293 		}
1294 		ctx_buf->context->ctxbuf = ctx_buf;
1295 		ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1296 
1297 		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1298 		if (!ctx_buf->iocbq) {
1299 			kfree(ctx_buf->context);
1300 			kfree(ctx_buf);
1301 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1302 					"6406 Ran out of NVMET iocb/WQEs\n");
1303 			return -ENOMEM;
1304 		}
1305 		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1306 		nvmewqe = ctx_buf->iocbq;
1307 		wqe = &nvmewqe->wqe;
1308 
1309 		/* Initialize WQE */
1310 		memset(wqe, 0, sizeof(union lpfc_wqe128));
1311 
1312 		ctx_buf->iocbq->context1 = NULL;
1313 		spin_lock(&phba->sli4_hba.sgl_list_lock);
1314 		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1315 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
1316 		if (!ctx_buf->sglq) {
1317 			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1318 			kfree(ctx_buf->context);
1319 			kfree(ctx_buf);
1320 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1321 					"6407 Ran out of NVMET XRIs\n");
1322 			return -ENOMEM;
1323 		}
1324 
1325 		/*
1326 		 * Add ctx to MRQidx context list. Our initial assumption
1327 		 * is MRQidx will be associated with CPUidx. This association
1328 		 * can change on the fly.
1329 		 */
1330 		infop = lpfc_get_ctx_list(phba, idx, idx);
1331 		spin_lock(&infop->nvmet_ctx_list_lock);
1332 		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1333 		infop->nvmet_ctx_list_cnt++;
1334 		spin_unlock(&infop->nvmet_ctx_list_lock);
1335 
1336 		/* Spread ctx structures evenly across all MRQs */
1337 		idx++;
1338 		if (idx >= phba->cfg_nvmet_mrq)
1339 			idx = 0;
1340 	}
1341 
1342 	infop = phba->sli4_hba.nvmet_ctx_info;
1343 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1344 		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1345 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1346 					"6408 TOTAL NVMET ctx for CPU %d "
1347 					"MRQ %d: cnt %d nextcpu %p\n",
1348 					i, j, infop->nvmet_ctx_list_cnt,
1349 					infop->nvmet_ctx_next_cpu);
1350 			infop++;
1351 		}
1352 	}
1353 	return 0;
1354 }
1355 
1356 int
1357 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1358 {
1359 	struct lpfc_vport  *vport = phba->pport;
1360 	struct lpfc_nvmet_tgtport *tgtp;
1361 	struct nvmet_fc_port_info pinfo;
1362 	int error;
1363 
1364 	if (phba->targetport)
1365 		return 0;
1366 
1367 	error = lpfc_nvmet_setup_io_context(phba);
1368 	if (error)
1369 		return error;
1370 
1371 	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1372 	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1373 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1374 	pinfo.port_id = vport->fc_myDID;
1375 
1376 	/* Limit to LPFC_MAX_NVME_SEG_CNT.
1377 	 * For now need + 1 to get around NVME transport logic.
1378 	 */
1379 	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
1380 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1381 				"6400 Reducing sg segment cnt to %d\n",
1382 				LPFC_MAX_NVME_SEG_CNT);
1383 		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
1384 	} else {
1385 		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
1386 	}
1387 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1388 	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
1389 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1390 
1391 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1392 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1393 					     &phba->pcidev->dev,
1394 					     &phba->targetport);
1395 #else
1396 	error = -ENOENT;
1397 #endif
1398 	if (error) {
1399 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1400 				"6025 Cannot register NVME targetport x%x: "
1401 				"portnm %llx nodenm %llx segs %d qs %d\n",
1402 				error,
1403 				pinfo.port_name, pinfo.node_name,
1404 				lpfc_tgttemplate.max_sgl_segments,
1405 				lpfc_tgttemplate.max_hw_queues);
1406 		phba->targetport = NULL;
1407 		phba->nvmet_support = 0;
1408 
1409 		lpfc_nvmet_cleanup_io_context(phba);
1410 
1411 	} else {
1412 		tgtp = (struct lpfc_nvmet_tgtport *)
1413 			phba->targetport->private;
1414 		tgtp->phba = phba;
1415 
1416 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1417 				"6026 Registered NVME "
1418 				"targetport: %p, private %p "
1419 				"portnm %llx nodenm %llx segs %d qs %d\n",
1420 				phba->targetport, tgtp,
1421 				pinfo.port_name, pinfo.node_name,
1422 				lpfc_tgttemplate.max_sgl_segments,
1423 				lpfc_tgttemplate.max_hw_queues);
1424 
1425 		atomic_set(&tgtp->rcv_ls_req_in, 0);
1426 		atomic_set(&tgtp->rcv_ls_req_out, 0);
1427 		atomic_set(&tgtp->rcv_ls_req_drop, 0);
1428 		atomic_set(&tgtp->xmt_ls_abort, 0);
1429 		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1430 		atomic_set(&tgtp->xmt_ls_rsp, 0);
1431 		atomic_set(&tgtp->xmt_ls_drop, 0);
1432 		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1433 		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1434 		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1435 		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1436 		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1437 		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1438 		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1439 		atomic_set(&tgtp->xmt_fcp_drop, 0);
1440 		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1441 		atomic_set(&tgtp->xmt_fcp_read, 0);
1442 		atomic_set(&tgtp->xmt_fcp_write, 0);
1443 		atomic_set(&tgtp->xmt_fcp_rsp, 0);
1444 		atomic_set(&tgtp->xmt_fcp_release, 0);
1445 		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1446 		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1447 		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1448 		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1449 		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1450 		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1451 		atomic_set(&tgtp->xmt_fcp_abort, 0);
1452 		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1453 		atomic_set(&tgtp->xmt_abort_unsol, 0);
1454 		atomic_set(&tgtp->xmt_abort_sol, 0);
1455 		atomic_set(&tgtp->xmt_abort_rsp, 0);
1456 		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1457 		atomic_set(&tgtp->defer_ctx, 0);
1458 		atomic_set(&tgtp->defer_fod, 0);
1459 		atomic_set(&tgtp->defer_wqfull, 0);
1460 	}
1461 	return error;
1462 }
1463 
1464 int
1465 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1466 {
1467 	struct lpfc_vport  *vport = phba->pport;
1468 
1469 	if (!phba->targetport)
1470 		return 0;
1471 
1472 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1473 			 "6007 Update NVMET port %p did x%x\n",
1474 			 phba->targetport, vport->fc_myDID);
1475 
1476 	phba->targetport->port_id = vport->fc_myDID;
1477 	return 0;
1478 }
1479 
1480 /**
1481  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1482  * @phba: pointer to lpfc hba data structure.
1483  * @axri: pointer to the nvmet xri abort wcqe structure.
1484  *
1485  * This routine is invoked by the worker thread to process a SLI4 fast-path
1486  * NVMET aborted xri.
1487  **/
1488 void
1489 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1490 			    struct sli4_wcqe_xri_aborted *axri)
1491 {
1492 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1493 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1494 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1495 	struct lpfc_nvmet_tgtport *tgtp;
1496 	struct lpfc_nodelist *ndlp;
1497 	unsigned long iflag = 0;
1498 	int rrq_empty = 0;
1499 	bool released = false;
1500 
1501 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1502 			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1503 
1504 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1505 		return;
1506 
1507 	if (phba->targetport) {
1508 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1509 		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1510 	}
1511 
1512 	spin_lock_irqsave(&phba->hbalock, iflag);
1513 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1514 	list_for_each_entry_safe(ctxp, next_ctxp,
1515 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1516 				 list) {
1517 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1518 			continue;
1519 
1520 		/* Check if we already received a free context call
1521 		 * and we have completed processing an abort situation.
1522 		 */
1523 		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1524 		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1525 			list_del(&ctxp->list);
1526 			released = true;
1527 		}
1528 		ctxp->flag &= ~LPFC_NVMET_XBUSY;
1529 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1530 
1531 		rrq_empty = list_empty(&phba->active_rrq_list);
1532 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1533 		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1534 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1535 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1536 		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1537 			lpfc_set_rrq_active(phba, ndlp,
1538 				ctxp->ctxbuf->sglq->sli4_lxritag,
1539 				rxid, 1);
1540 			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1541 		}
1542 
1543 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1544 				"6318 XB aborted oxid %x flg x%x (%x)\n",
1545 				ctxp->oxid, ctxp->flag, released);
1546 		if (released)
1547 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1548 
1549 		if (rrq_empty)
1550 			lpfc_worker_wake_up(phba);
1551 		return;
1552 	}
1553 	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1554 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1555 }
1556 
1557 int
1558 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1559 			   struct fc_frame_header *fc_hdr)
1561 {
1562 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1563 	struct lpfc_hba *phba = vport->phba;
1564 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1565 	struct nvmefc_tgt_fcp_req *rsp;
1566 	uint16_t xri;
1567 	unsigned long iflag = 0;
1568 
1569 	xri = be16_to_cpu(fc_hdr->fh_ox_id);
1570 
1571 	spin_lock_irqsave(&phba->hbalock, iflag);
1572 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1573 	list_for_each_entry_safe(ctxp, next_ctxp,
1574 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1575 				 list) {
1576 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1577 			continue;
1578 
1579 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1580 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1581 
1582 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1583 		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1584 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1585 
1586 		lpfc_nvmeio_data(phba,
1587 			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1588 			xri, smp_processor_id(), 0);
1589 
1590 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1591 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1592 
1593 		rsp = &ctxp->ctx.fcp_req;
1594 		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1595 
1596 		/* Respond with BA_ACC accordingly */
1597 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1598 		return 0;
1599 	}
1600 	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1601 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1602 
1603 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1604 			 xri, smp_processor_id(), 1);
1605 
1606 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1607 			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1608 
1609 	/* Respond with BA_RJT accordingly */
1610 	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1611 #endif
1612 	return 0;
1613 }
1614 
1615 static void
1616 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1617 			struct lpfc_nvmet_rcv_ctx *ctxp)
1618 {
1619 	struct lpfc_sli_ring *pring;
1620 	struct lpfc_iocbq *nvmewqeq;
1621 	struct lpfc_iocbq *next_nvmewqeq;
1622 	unsigned long iflags;
1623 	struct lpfc_wcqe_complete wcqe;
1624 	struct lpfc_wcqe_complete *wcqep;
1625 
1626 	pring = wq->pring;
1627 	wcqep = &wcqe;
1628 
1629 	/* Fake an ABORT error code back to cmpl routine */
1630 	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1631 	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1632 	wcqep->parameter = IOERR_ABORT_REQUESTED;
1633 
1634 	spin_lock_irqsave(&pring->ring_lock, iflags);
1635 	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1636 				 &wq->wqfull_list, list) {
1637 		if (ctxp) {
1638 			/* Checking for a specific IO to flush */
1639 			if (nvmewqeq->context2 == ctxp) {
1640 				list_del(&nvmewqeq->list);
1641 				spin_unlock_irqrestore(&pring->ring_lock,
1642 						       iflags);
1643 				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1644 							  wcqep);
1645 				return;
1646 			}
1647 			continue;
1648 		} else {
1649 			/* Flush all IOs */
1650 			list_del(&nvmewqeq->list);
1651 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
1652 			lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1653 			spin_lock_irqsave(&pring->ring_lock, iflags);
1654 		}
1655 	}
1656 	if (!ctxp)
1657 		wq->q_flag &= ~HBA_NVMET_WQFULL;
1658 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
1659 }
1660 
1661 void
1662 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1663 			  struct lpfc_queue *wq)
1664 {
1665 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1666 	struct lpfc_sli_ring *pring;
1667 	struct lpfc_iocbq *nvmewqeq;
1668 	unsigned long iflags;
1669 	int rc;
1670 
1671 	/*
1672 	 * Some WQE slots are available, so try to re-issue anything
1673 	 * on the WQ wqfull_list.
1674 	 */
1675 	pring = wq->pring;
1676 	spin_lock_irqsave(&pring->ring_lock, iflags);
1677 	while (!list_empty(&wq->wqfull_list)) {
1678 		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1679 				 list);
1680 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
1681 		rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
1682 		spin_lock_irqsave(&pring->ring_lock, iflags);
1683 		if (rc == -EBUSY) {
1684 			/* WQ was full again, so put it back on the list */
1685 			list_add(&nvmewqeq->list, &wq->wqfull_list);
1686 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
1687 			return;
1688 		}
1689 	}
1690 	wq->q_flag &= ~HBA_NVMET_WQFULL;
1691 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
1692 
1693 #endif
1694 }
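
/*
 * A minimal sketch of the expected caller, assuming the SLI completion
 * path notices freed WQE slots (placement in lpfc_sli.c is illustrative):
 *
 *	if (wq->q_flag & HBA_NVMET_WQFULL)
 *		lpfc_nvmet_wqfull_process(phba, wq);
 */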
1695 
1696 void
1697 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1698 {
1699 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1700 	struct lpfc_nvmet_tgtport *tgtp;
1701 	struct lpfc_queue *wq;
1702 	uint32_t qidx;
1703 
1704 	if (phba->nvmet_support == 0)
1705 		return;
1706 	if (phba->targetport) {
1707 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1708 		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
1709 			wq = phba->sli4_hba.nvme_wq[qidx];
1710 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1711 		}
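		/*
		 * Unregister from the nvmet transport and wait for
		 * lpfc_nvmet_targetport_delete() to complete
		 * tport_unreg_done before tearing down the IO contexts.
		 */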
1712 		init_completion(&tgtp->tport_unreg_done);
1713 		nvmet_fc_unregister_targetport(phba->targetport);
1714 		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
1715 		lpfc_nvmet_cleanup_io_context(phba);
1716 	}
1717 	phba->targetport = NULL;
1718 #endif
1719 }
1720 
1721 /**
1722  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1723  * @phba: pointer to lpfc hba data structure.
1724  * @pring: pointer to a SLI ring.
1725  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1726  *
1727  * This routine processes an unsolicited NVME LS request. It allocates a
1728  * receive context for the exchange and hands the LS payload to the nvmet
1729  * transport via nvmet_fc_rcv_ls_req(). If the targetport is not
1730  * registered, the context cannot be allocated, or the transport rejects
1731  * the request, the LS is dropped and an abort is issued where
1732  * appropriate.
1733  **/
1734 static void
1735 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1736 			   struct hbq_dmabuf *nvmebuf)
1737 {
1738 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1739 	struct lpfc_nvmet_tgtport *tgtp;
1740 	struct fc_frame_header *fc_hdr;
1741 	struct lpfc_nvmet_rcv_ctx *ctxp;
1742 	uint32_t *payload;
1743 	uint32_t size, oxid, sid, rc;
1744 
1745 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1746 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1747 
1748 	if (!phba->targetport) {
1749 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1750 				"6154 LS Drop IO x%x\n", oxid);
1751 		oxid = 0;
1752 		size = 0;
1753 		sid = 0;
1754 		ctxp = NULL;
1755 		goto dropit;
1756 	}
1757 
1758 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1759 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1760 	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1761 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1762 
1763 	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1764 	if (ctxp == NULL) {
1765 		atomic_inc(&tgtp->rcv_ls_req_drop);
1766 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1767 				"6155 LS Drop IO x%x: Alloc\n",
1768 				oxid);
1769 dropit:
1770 		lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1771 				 "xri x%x sz %d from %06x\n",
1772 				 oxid, size, sid);
1773 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1774 		return;
1775 	}
1776 	ctxp->phba = phba;
1777 	ctxp->size = size;
1778 	ctxp->oxid = oxid;
1779 	ctxp->sid = sid;
1780 	ctxp->wqeq = NULL;
1781 	ctxp->state = LPFC_NVMET_STE_LS_RCV;
1782 	ctxp->entry_cnt = 1;
1783 	ctxp->rqb_buffer = (void *)nvmebuf;
1784 
1785 	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1786 			 oxid, size, sid);
1787 	/*
1788 	 * The calling sequence should be:
1789 	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1790 	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1791 	 */
1792 	atomic_inc(&tgtp->rcv_ls_req_in);
1793 	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1794 				 payload, size);
1795 
1796 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1797 			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1798 			"%08x %08x %08x\n", size, rc,
1799 			*payload, *(payload+1), *(payload+2),
1800 			*(payload+3), *(payload+4), *(payload+5));
1801 
1802 	if (rc == 0) {
1803 		atomic_inc(&tgtp->rcv_ls_req_out);
1804 		return;
1805 	}
1806 
1807 	lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1808 			 oxid, size, sid);
1809 
1810 	atomic_inc(&tgtp->rcv_ls_req_drop);
1811 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1812 			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1813 			ctxp->oxid, rc);
1814 
1815 	/* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1816 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1817 
1818 	atomic_inc(&tgtp->xmt_ls_abort);
1819 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1820 #endif
1821 }
1822 
1823 static struct lpfc_nvmet_ctxbuf *
1824 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1825 			     struct lpfc_nvmet_ctx_info *current_infop)
1826 {
1827 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1828 	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1829 	struct lpfc_nvmet_ctx_info *get_infop;
1830 	int i;
1831 
1832 	/*
1833 	 * The current_infop for the MRQ an NVME command IU was received
1834 	 * on is empty. Our goal is to replenish this MRQ's context
1835 	 * list from another CPU.
1836 	 *
1837 	 * First we need to pick a context list to start looking at:
1838 	 * nvmet_ctx_start_cpu is the CPU that had contexts available
1839 	 * the last time we needed to replenish this CPU, while
1840 	 * nvmet_ctx_next_cpu is just the next sequential CPU for this MRQ.
1841 	 */
1842 	if (current_infop->nvmet_ctx_start_cpu)
1843 		get_infop = current_infop->nvmet_ctx_start_cpu;
1844 	else
1845 		get_infop = current_infop->nvmet_ctx_next_cpu;
1846 
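	/*
	 * Walk at most num_present_cpu lists, skipping our own, and take
	 * the entire list of the first CPU found with contexts to donate.
	 */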
1847 	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1848 		if (get_infop == current_infop) {
1849 			get_infop = get_infop->nvmet_ctx_next_cpu;
1850 			continue;
1851 		}
1852 		spin_lock(&get_infop->nvmet_ctx_list_lock);
1853 
1854 		/* Just take the entire context list, if there are any */
1855 		if (get_infop->nvmet_ctx_list_cnt) {
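			/*
			 * Copy the donor's count minus one: one context is
			 * removed below to satisfy the current IO.
			 */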
1856 			list_splice_init(&get_infop->nvmet_ctx_list,
1857 				    &current_infop->nvmet_ctx_list);
1858 			current_infop->nvmet_ctx_list_cnt =
1859 				get_infop->nvmet_ctx_list_cnt - 1;
1860 			get_infop->nvmet_ctx_list_cnt = 0;
1861 			spin_unlock(&get_infop->nvmet_ctx_list_lock);
1862 
1863 			current_infop->nvmet_ctx_start_cpu = get_infop;
1864 			list_remove_head(&current_infop->nvmet_ctx_list,
1865 					 ctx_buf, struct lpfc_nvmet_ctxbuf,
1866 					 list);
1867 			return ctx_buf;
1868 		}
1869 
1870 		/* Otherwise, move on to the next CPU for this MRQ */
1871 		spin_unlock(&get_infop->nvmet_ctx_list_lock);
1872 		get_infop = get_infop->nvmet_ctx_next_cpu;
1873 	}
1874 
1875 #endif
1876 	/* Nothing found, all contexts for the MRQ are in-flight */
1877 	return NULL;
1878 }
1879 
1880 /**
1881  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1882  * @phba: pointer to lpfc hba data structure.
1883  * @idx: relative index of MRQ vector
1884  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1885  *
1886  * This routine processes an unsolicited NVME FCP command IU received on
1887  * one of the MRQs. It takes a receive context from the per-CPU context
1888  * list (replenishing it from another CPU if the local list is empty),
1889  * initializes the context from the FC header, and hands the command IU
1890  * to the nvmet transport via nvmet_fc_rcv_fcp_req(). If no context is
1891  * available, the command is queued on the io_wait list for later.
1892  **/
1893 static void
1894 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1895 			    uint32_t idx,
1896 			    struct rqb_dmabuf *nvmebuf,
1897 			    uint64_t isr_timestamp)
1898 {
1899 	struct lpfc_nvmet_rcv_ctx *ctxp;
1900 	struct lpfc_nvmet_tgtport *tgtp;
1901 	struct fc_frame_header *fc_hdr;
1902 	struct lpfc_nvmet_ctxbuf *ctx_buf;
1903 	struct lpfc_nvmet_ctx_info *current_infop;
1904 	uint32_t *payload;
1905 	uint32_t size, oxid, sid, rc, qno;
1906 	unsigned long iflag;
1907 	int current_cpu;
1908 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1909 	uint32_t id;
1910 #endif
1911 
1912 	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
1913 		return;
1914 
1915 	ctx_buf = NULL;
1916 	if (!nvmebuf || !phba->targetport) {
1917 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1918 				"6157 NVMET FCP Drop IO\n");
1919 		oxid = 0;
1920 		size = 0;
1921 		sid = 0;
1922 		ctxp = NULL;
1923 		goto dropit;
1924 	}
1925 
1926 	/*
1927 	 * Get a pointer to the context list for this MRQ based on
1928 	 * the CPU this MRQ IRQ is associated with. If the CPU association
1929 	 * changes from our initial assumption, the context list could
1930 	 * be empty, thus it would need to be replenished with the
1931 	 * context list from another CPU for this MRQ.
1932 	 */
1933 	current_cpu = smp_processor_id();
1934 	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
1935 	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
1936 	if (current_infop->nvmet_ctx_list_cnt) {
1937 		list_remove_head(&current_infop->nvmet_ctx_list,
1938 				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1939 		current_infop->nvmet_ctx_list_cnt--;
1940 	} else {
1941 		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
1942 	}
1943 	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
1944 
1945 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1946 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1947 	size = nvmebuf->bytes_recv;
1948 
1949 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1950 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1951 		id = smp_processor_id();
1952 		if (id < LPFC_CHECK_CPU_CNT)
1953 			phba->cpucheck_rcv_io[id]++;
1954 	}
1955 #endif
1956 
1957 	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
1958 			 oxid, size, smp_processor_id());
1959 
1960 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1961 
1962 	if (!ctx_buf) {
1963 		/* Queue this NVME IO to process later */
1964 		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1965 		list_add_tail(&nvmebuf->hbuf.list,
1966 			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1967 		phba->sli4_hba.nvmet_io_wait_cnt++;
1968 		phba->sli4_hba.nvmet_io_wait_total++;
1969 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1970 				       iflag);
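		/*
		 * The deferred command is re-driven by
		 * lpfc_nvmet_ctxbuf_post() when a context is eventually
		 * freed back to the pool.
		 */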
1971 
1972 		/* Post a brand new DMA buffer to RQ */
1973 		qno = nvmebuf->idx;
1974 		lpfc_post_rq_buffer(
1975 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1976 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1977 
1978 		atomic_inc(&tgtp->defer_ctx);
1979 		return;
1980 	}
1981 
1982 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1983 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1984 
1985 	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
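	/* A context fresh from the pool must be in the FREE state */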
1986 	if (ctxp->state != LPFC_NVMET_STE_FREE) {
1987 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1988 				"6414 NVMET Context corrupt %d %d oxid x%x\n",
1989 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1990 	}
1991 	ctxp->wqeq = NULL;
1992 	ctxp->txrdy = NULL;
1993 	ctxp->offset = 0;
1994 	ctxp->phba = phba;
1995 	ctxp->size = size;
1996 	ctxp->oxid = oxid;
1997 	ctxp->sid = sid;
1998 	ctxp->idx = idx;
1999 	ctxp->state = LPFC_NVMET_STE_RCV;
2000 	ctxp->entry_cnt = 1;
2001 	ctxp->flag = 0;
2002 	ctxp->ctxbuf = ctx_buf;
2003 	ctxp->rqb_buffer = (void *)nvmebuf;
2004 	spin_lock_init(&ctxp->ctxlock);
2005 
2006 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2007 	if (isr_timestamp) {
2008 		ctxp->ts_isr_cmd = isr_timestamp;
2009 		ctxp->ts_cmd_nvme = ktime_get_ns();
2010 		ctxp->ts_nvme_data = 0;
2011 		ctxp->ts_data_wqput = 0;
2012 		ctxp->ts_isr_data = 0;
2013 		ctxp->ts_data_nvme = 0;
2014 		ctxp->ts_nvme_status = 0;
2015 		ctxp->ts_status_wqput = 0;
2016 		ctxp->ts_isr_status = 0;
2017 		ctxp->ts_status_nvme = 0;
2018 	} else {
2019 		ctxp->ts_cmd_nvme = 0;
2020 	}
2021 #endif
2022 
2023 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
2024 	/*
2025 	 * The calling sequence should be:
2026 	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2027 	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2028 	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2029 	 * the NVME command / FC header is stored, so we are free to repost
2030 	 * the buffer.
2031 	 */
2032 	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2033 				  payload, size);
2034 
2035 	/* Process FCP command */
2036 	if (rc == 0) {
2037 		ctxp->rqb_buffer = NULL;
2038 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
2039 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2040 		return;
2041 	}
2042 
2043 	/* Processing of FCP command is deferred */
2044 	if (rc == -EOVERFLOW) {
2045 		/*
2046 		 * Post a brand new DMA buffer to RQ and defer
2047 		 * freeing rcv buffer till .defer_rcv callback
2048 		 */
2049 		qno = nvmebuf->idx;
2050 		lpfc_post_rq_buffer(
2051 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2052 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2053 
2054 		lpfc_nvmeio_data(phba,
2055 				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
2056 				 oxid, size, sid);
2057 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
2058 		atomic_inc(&tgtp->defer_fod);
2059 		return;
2060 	}
2061 	ctxp->rqb_buffer = nvmebuf;
2062 
2063 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2064 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2065 			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2066 			ctxp->oxid, rc,
2067 			atomic_read(&tgtp->rcv_fcp_cmd_in),
2068 			atomic_read(&tgtp->rcv_fcp_cmd_out),
2069 			atomic_read(&tgtp->xmt_fcp_release));
2070 dropit:
2071 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2072 			 oxid, size, sid);
2073 	if (oxid) {
2074 		lpfc_nvmet_defer_release(phba, ctxp);
2075 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2076 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2077 		return;
2078 	}
2079 
2080 	if (ctx_buf)
2081 		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
2082 
2083 	if (nvmebuf)
2084 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2085 }
2086 
2087 /**
2088  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2089  * @phba: pointer to lpfc hba data structure.
2090  * @pring: pointer to a SLI ring.
2091  * @piocb: pointer to the iocbq carrying the received nvme buffer.
2092  *
2093  * This routine is used to process an unsolicited event received from a SLI
2094  * (Service Level Interface) ring. The actual processing of the data buffer
2095  * associated with the unsolicited event is done by invoking the routine
2096  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2097  * SLI RQ on which the unsolicited event was received.
2098  **/
2099 void
2100 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2101 			  struct lpfc_iocbq *piocb)
2102 {
2103 	struct lpfc_dmabuf *d_buf;
2104 	struct hbq_dmabuf *nvmebuf;
2105 
2106 	d_buf = piocb->context2;
2107 	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2108 
2109 	if (phba->nvmet_support == 0) {
2110 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2111 		return;
2112 	}
2113 	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2114 }
2115 
2116 /**
2117  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2118  * @phba: pointer to lpfc hba data structure.
2119  * @idx: relative index of MRQ vector
2120  * @nvmebuf: pointer to received nvme data structure.
2121  *
2122  * This routine is used to process an unsolicited event received from a SLI
2123  * (Service Level Interface) ring. The actual processing of the data buffer
2124  * associated with the unsolicited event is done by invoking the routine
2125  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2126  * SLI RQ on which the unsolicited event was received.
2127  **/
2128 void
2129 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2130 			   uint32_t idx,
2131 			   struct rqb_dmabuf *nvmebuf,
2132 			   uint64_t isr_timestamp)
2133 {
2134 	if (phba->nvmet_support == 0) {
2135 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2136 		return;
2137 	}
2138 	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
2139 				    isr_timestamp);
2140 }
2141 
2142 /**
2143  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2144  * @phba: pointer to a host N_Port data structure.
2145  * @ctxp: Context info for NVME LS Request
2146  * @rspbuf: DMA address of the NVME LS response payload.
2147  * @rspsize: size of the NVME LS response payload.
2148  *
2149  * This routine allocates a lpfc WQE data structure (iocbq) from the
2150  * driver's free list and prepares an XMIT_SEQUENCE64 WQE to transmit
2151  * the NVME LS response described by rspbuf and rspsize for the
2152  * exchange tracked by ctxp. It first verifies the link is up, then
2153  * looks up the ndlp using the SID from the original LS request and
2154  * validates that the node is in a mapped or unmapped state. It fills
2155  * in the Buffer Descriptor Entry (BDE) for the response payload and
2156  * echoes the received OX_ID back in the WQE so the response is
2157  * matched to the originator's exchange. The reference count on the
2158  * ndlp is incremented by 1 and the reference to the ndlp is put into
2159  * context1 of the WQE data structure for this WQE to hold the ndlp
2160  * reference for the command's callback function to access later. The
2161  * prepared WQE is returned to the caller for submission.
2162  *
2163  * Return code
2164  *   Pointer to the newly allocated/prepared nvme wqe data structure
2165  *   NULL - when nvme wqe data structure allocation/preparation failed
2166  **/
2167 static struct lpfc_iocbq *
2168 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2169 		       struct lpfc_nvmet_rcv_ctx *ctxp,
2170 		       dma_addr_t rspbuf, uint16_t rspsize)
2171 {
2172 	struct lpfc_nodelist *ndlp;
2173 	struct lpfc_iocbq *nvmewqe;
2174 	union lpfc_wqe128 *wqe;
2175 
2176 	if (!lpfc_is_link_up(phba)) {
2177 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2178 				"6104 NVMET prep LS wqe: link err: "
2179 				"NPORT x%x oxid:x%x ste %d\n",
2180 				ctxp->sid, ctxp->oxid, ctxp->state);
2181 		return NULL;
2182 	}
2183 
2184 	/* Allocate buffer for command wqe */
2185 	nvmewqe = lpfc_sli_get_iocbq(phba);
2186 	if (nvmewqe == NULL) {
2187 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2188 				"6105 NVMET prep LS wqe: No WQE: "
2189 				"NPORT x%x oxid x%x ste %d\n",
2190 				ctxp->sid, ctxp->oxid, ctxp->state);
2191 		return NULL;
2192 	}
2193 
2194 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2195 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2196 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2197 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2198 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2199 				"6106 NVMET prep LS wqe: No ndlp: "
2200 				"NPORT x%x oxid x%x ste %d\n",
2201 				ctxp->sid, ctxp->oxid, ctxp->state);
2202 		goto nvme_wqe_free_wqeq_exit;
2203 	}
2204 	ctxp->wqeq = nvmewqe;
2205 
2206 	/* prevent preparing wqe with NULL ndlp reference */
2207 	nvmewqe->context1 = lpfc_nlp_get(ndlp);
2208 	if (nvmewqe->context1 == NULL)
2209 		goto nvme_wqe_free_wqeq_exit;
2210 	nvmewqe->context2 = ctxp;
2211 
2212 	wqe = &nvmewqe->wqe;
2213 	memset(wqe, 0, sizeof(union lpfc_wqe));
2214 
2215 	/* Words 0 - 2 */
2216 	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2217 	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2218 	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2219 	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2220 
2221 	/* Word 3 */
2222 
2223 	/* Word 4 */
2224 
2225 	/* Word 5 */
2226 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2227 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2228 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2229 	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2230 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2231 
2232 	/* Word 6 */
2233 	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2234 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2235 	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2236 
2237 	/* Word 7 */
2238 	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2239 	       CMD_XMIT_SEQUENCE64_WQE);
2240 	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2241 	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2242 	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2243 
2244 	/* Word 8 */
2245 	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2246 
2247 	/* Word 9 */
2248 	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2249 	/* Echo back the OX_ID of the received LS request */
2250 	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2251 
2252 	/* Word 10 */
2253 	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2254 	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2255 	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2256 	       LPFC_WQE_LENLOC_WORD12);
2257 	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2258 
2259 	/* Word 11 */
2260 	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2261 	       LPFC_WQE_CQ_ID_DEFAULT);
2262 	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2263 	       OTHER_COMMAND);
2264 
2265 	/* Word 12 */
2266 	wqe->xmit_sequence.xmit_len = rspsize;
2267 
2268 	nvmewqe->retry = 1;
2269 	nvmewqe->vport = phba->pport;
2270 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2271 	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2272 
2273 	/* Xmit NVMET response to remote NPORT <did> */
2274 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2275 			"6039 Xmit NVMET LS response to remote "
2276 			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2277 			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2278 			rspsize);
2279 	return nvmewqe;
2280 
2281 nvme_wqe_free_wqeq_exit:
2282 	nvmewqe->context2 = NULL;
2283 	nvmewqe->context3 = NULL;
2284 	lpfc_sli_release_iocbq(phba, nvmewqe);
2285 	return NULL;
2286 }
2287 
2288 
2289 static struct lpfc_iocbq *
2290 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2291 			struct lpfc_nvmet_rcv_ctx *ctxp)
2292 {
2293 	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2294 	struct lpfc_nvmet_tgtport *tgtp;
2295 	struct sli4_sge *sgl;
2296 	struct lpfc_nodelist *ndlp;
2297 	struct lpfc_iocbq *nvmewqe;
2298 	struct scatterlist *sgel;
2299 	union lpfc_wqe128 *wqe;
2300 	struct ulp_bde64 *bde;
2301 	uint32_t *txrdy;
2302 	dma_addr_t physaddr;
2303 	int i, cnt;
2304 	int do_pbde;
2305 	int xc = 1;
2306 
2307 	if (!lpfc_is_link_up(phba)) {
2308 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2309 				"6107 NVMET prep FCP wqe: link err:"
2310 				"NPORT x%x oxid x%x ste %d\n",
2311 				ctxp->sid, ctxp->oxid, ctxp->state);
2312 		return NULL;
2313 	}
2314 
2315 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2316 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2317 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2318 	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2319 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2320 				"6108 NVMET prep FCP wqe: no ndlp: "
2321 				"NPORT x%x oxid x%x ste %d\n",
2322 				ctxp->sid, ctxp->oxid, ctxp->state);
2323 		return NULL;
2324 	}
2325 
2326 	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2327 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2328 				"6109 NVMET prep FCP wqe: seg cnt err: "
2329 				"NPORT x%x oxid x%x ste %d cnt %d\n",
2330 				ctxp->sid, ctxp->oxid, ctxp->state,
2331 				rsp->sg_cnt);
2332 		return NULL;
2333 	}
2334 
2335 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2336 	nvmewqe = ctxp->wqeq;
2337 	if (nvmewqe == NULL) {
2338 		/* Allocate buffer for command wqe */
2339 		nvmewqe = ctxp->ctxbuf->iocbq;
2340 		if (nvmewqe == NULL) {
2341 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2342 					"6110 NVMET prep FCP wqe: No "
2343 					"WQE: NPORT x%x oxid x%x ste %d\n",
2344 					ctxp->sid, ctxp->oxid, ctxp->state);
2345 			return NULL;
2346 		}
2347 		ctxp->wqeq = nvmewqe;
2348 		xc = 0; /* create new XRI */
2349 		nvmewqe->sli4_lxritag = NO_XRI;
2350 		nvmewqe->sli4_xritag = NO_XRI;
2351 	}
2352 
2353 	/* Sanity check */
2354 	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2355 	    (ctxp->entry_cnt == 1)) ||
2356 	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
2357 		wqe = &nvmewqe->wqe;
2358 	} else {
2359 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2360 				"6111 Wrong state NVMET FCP: %d  cnt %d\n",
2361 				ctxp->state, ctxp->entry_cnt);
2362 		return NULL;
2363 	}
2364 
2365 	sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2366 	switch (rsp->op) {
2367 	case NVMET_FCOP_READDATA:
2368 	case NVMET_FCOP_READDATA_RSP:
2369 		/* From the tsend template, initialize words 7 - 11 */
2370 		memcpy(&wqe->words[7],
2371 		       &lpfc_tsend_cmd_template.words[7],
2372 		       sizeof(uint32_t) * 5);
2373 
2374 		/* Words 0 - 2 : The first sg segment */
2375 		sgel = &rsp->sg[0];
2376 		physaddr = sg_dma_address(sgel);
2377 		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2378 		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2379 		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2380 		wqe->fcp_tsend.bde.addrHigh =
2381 			cpu_to_le32(putPaddrHigh(physaddr));
2382 
2383 		/* Word 3 */
2384 		wqe->fcp_tsend.payload_offset_len = 0;
2385 
2386 		/* Word 4 */
2387 		wqe->fcp_tsend.relative_offset = ctxp->offset;
2388 
2389 		/* Word 5 */
2390 		wqe->fcp_tsend.reserved = 0;
2391 
2392 		/* Word 6 */
2393 		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2394 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2395 		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2396 		       nvmewqe->sli4_xritag);
2397 
2398 		/* Word 7 - set ar later */
2399 
2400 		/* Word 8 */
2401 		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2402 
2403 		/* Word 9 */
2404 		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2405 		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2406 
2407 		/* Word 10 - set wqes later, in template xc=1 */
2408 		if (!xc)
2409 			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2410 
2411 		/* Word 11 - set sup, irsp, irsplen later */
2412 		do_pbde = 0;
2413 
2414 		/* Word 12 */
2415 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2416 
2417 		/* Setup 2 SKIP SGEs */
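		/*
		 * The first two SGEs of the sgl are placeholders; the data
		 * SGEs built after the switch start at sgl[2], so mark
		 * these two entries as SKIP.
		 */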
2418 		sgl->addr_hi = 0;
2419 		sgl->addr_lo = 0;
2420 		sgl->word2 = 0;
2421 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2422 		sgl->word2 = cpu_to_le32(sgl->word2);
2423 		sgl->sge_len = 0;
2424 		sgl++;
2425 		sgl->addr_hi = 0;
2426 		sgl->addr_lo = 0;
2427 		sgl->word2 = 0;
2428 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2429 		sgl->word2 = cpu_to_le32(sgl->word2);
2430 		sgl->sge_len = 0;
2431 		sgl++;
2432 		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2433 			atomic_inc(&tgtp->xmt_fcp_read_rsp);
2434 
2435 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2436 
2437 			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2438 				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2439 					bf_set(wqe_sup,
2440 					       &wqe->fcp_tsend.wqe_com, 1);
2441 			} else {
2442 				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2443 				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2444 				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2445 				       ((rsp->rsplen >> 2) - 1));
2446 				memcpy(&wqe->words[16], rsp->rspaddr,
2447 				       rsp->rsplen);
2448 			}
2449 		} else {
2450 			atomic_inc(&tgtp->xmt_fcp_read);
2451 
2452 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2453 			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2454 		}
2455 		break;
2456 
2457 	case NVMET_FCOP_WRITEDATA:
2458 		/* From the treceive template, initialize words 3 - 11 */
2459 		memcpy(&wqe->words[3],
2460 		       &lpfc_treceive_cmd_template.words[3],
2461 		       sizeof(uint32_t) * 9);
2462 
2463 		/* Words 0 - 2 : The first sg segment */
2464 		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2465 				       GFP_KERNEL, &physaddr);
2466 		if (!txrdy) {
2467 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2468 					"6041 Bad txrdy buffer: oxid x%x\n",
2469 					ctxp->oxid);
2470 			return NULL;
2471 		}
2472 		ctxp->txrdy = txrdy;
2473 		ctxp->txrdy_phys = physaddr;
2474 		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2475 		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2476 		wqe->fcp_treceive.bde.addrLow =
2477 			cpu_to_le32(putPaddrLow(physaddr));
2478 		wqe->fcp_treceive.bde.addrHigh =
2479 			cpu_to_le32(putPaddrHigh(physaddr));
2480 
2481 		/* Word 4 */
2482 		wqe->fcp_treceive.relative_offset = ctxp->offset;
2483 
2484 		/* Word 6 */
2485 		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2486 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2487 		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2488 		       nvmewqe->sli4_xritag);
2489 
2490 		/* Word 7 */
2491 
2492 		/* Word 8 */
2493 		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2494 
2495 		/* Word 9 */
2496 		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2497 		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2498 
2499 		/* Word 10 - in template xc=1 */
2500 		if (!xc)
2501 			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2502 
2503 		/* Word 11 - set pbde later */
2504 		if (phba->cfg_enable_pbde) {
2505 			do_pbde = 1;
2506 		} else {
2507 			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2508 			do_pbde = 0;
2509 		}
2510 
2511 		/* Word 12 */
2512 		wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2513 
2514 		/* Setup 1 TXRDY and 1 SKIP SGE */
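		/*
		 * XFER_RDY payload: word 0 = relative offset, word 1 =
		 * burst length (big endian), word 2 = reserved.
		 */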
2515 		txrdy[0] = 0;
2516 		txrdy[1] = cpu_to_be32(rsp->transfer_length);
2517 		txrdy[2] = 0;
2518 
2519 		sgl->addr_hi = putPaddrHigh(physaddr);
2520 		sgl->addr_lo = putPaddrLow(physaddr);
2521 		sgl->word2 = 0;
2522 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2523 		sgl->word2 = cpu_to_le32(sgl->word2);
2524 		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2525 		sgl++;
2526 		sgl->addr_hi = 0;
2527 		sgl->addr_lo = 0;
2528 		sgl->word2 = 0;
2529 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2530 		sgl->word2 = cpu_to_le32(sgl->word2);
2531 		sgl->sge_len = 0;
2532 		sgl++;
2533 		atomic_inc(&tgtp->xmt_fcp_write);
2534 		break;
2535 
2536 	case NVMET_FCOP_RSP:
2537 		/* From the trsp template, initialize words 4 - 11 */
2538 		memcpy(&wqe->words[4],
2539 		       &lpfc_trsp_cmd_template.words[4],
2540 		       sizeof(uint32_t) * 8);
2541 
2542 		/* Words 0 - 2 */
2543 		physaddr = rsp->rspdma;
2544 		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2545 		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2546 		wqe->fcp_trsp.bde.addrLow =
2547 			cpu_to_le32(putPaddrLow(physaddr));
2548 		wqe->fcp_trsp.bde.addrHigh =
2549 			cpu_to_le32(putPaddrHigh(physaddr));
2550 
2551 		/* Word 3 */
2552 		wqe->fcp_trsp.response_len = rsp->rsplen;
2553 
2554 		/* Word 6 */
2555 		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2556 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2557 		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2558 		       nvmewqe->sli4_xritag);
2559 
2560 		/* Word 7 */
2561 
2562 		/* Word 8 */
2563 		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2564 
2565 		/* Word 9 */
2566 		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2567 		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2568 
2569 		/* Word 10 */
2570 		if (xc)
2571 			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2572 
2573 		/* Word 11 */
2574 		/* In template wqes=0 irsp=0 irsplen=0 - good response */
2575 		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2576 			/* Bad response - embed it */
2577 			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2578 			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2579 			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2580 			       ((rsp->rsplen >> 2) - 1));
2581 			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2582 		}
2583 		do_pbde = 0;
2584 
2585 		/* Word 12 */
2586 		wqe->fcp_trsp.rsvd_12_15[0] = 0;
2587 
2588 		/* Use rspbuf, NOT sg list */
2589 		rsp->sg_cnt = 0;
2590 		sgl->word2 = 0;
2591 		atomic_inc(&tgtp->xmt_fcp_rsp);
2592 		break;
2593 
2594 	default:
2595 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2596 				"6064 Unknown Rsp Op %d\n",
2597 				rsp->op);
2598 		return NULL;
2599 	}
2600 
2601 	nvmewqe->retry = 1;
2602 	nvmewqe->vport = phba->pport;
2603 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2604 	nvmewqe->context1 = ndlp;
2605 
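	/*
	 * Build the data SGEs; the first one may also be mirrored into
	 * words 13-15 as a PBDE so the adapter can avoid fetching the
	 * external SGL for single-buffer IOs.
	 */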
2606 	for (i = 0; i < rsp->sg_cnt; i++) {
2607 		sgel = &rsp->sg[i];
2608 		physaddr = sg_dma_address(sgel);
2609 		cnt = sg_dma_len(sgel);
2610 		sgl->addr_hi = putPaddrHigh(physaddr);
2611 		sgl->addr_lo = putPaddrLow(physaddr);
2612 		sgl->word2 = 0;
2613 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2614 		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2615 		if ((i+1) == rsp->sg_cnt)
2616 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2617 		sgl->word2 = cpu_to_le32(sgl->word2);
2618 		sgl->sge_len = cpu_to_le32(cnt);
2619 		if (i == 0) {
2620 			bde = (struct ulp_bde64 *)&wqe->words[13];
2621 			if (do_pbde) {
2622 				/* Words 13-15  (PBDE) */
2623 				bde->addrLow = sgl->addr_lo;
2624 				bde->addrHigh = sgl->addr_hi;
2625 				bde->tus.f.bdeSize =
2626 					le32_to_cpu(sgl->sge_len);
2627 				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2628 				bde->tus.w = cpu_to_le32(bde->tus.w);
2629 			} else {
2630 				memset(bde, 0, sizeof(struct ulp_bde64));
2631 			}
2632 		}
2633 		sgl++;
2634 		ctxp->offset += cnt;
2635 	}
2636 	ctxp->state = LPFC_NVMET_STE_DATA;
2637 	ctxp->entry_cnt++;
2638 	return nvmewqe;
2639 }
2640 
2641 /**
2642  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2643  * @phba: Pointer to HBA context object.
2644  * @cmdwqe: Pointer to driver command WQE object.
2645  * @wcqe: Pointer to driver response CQE object.
2646  *
2647  * The function is called from SLI ring event handler with no
2648  * lock held. This function is the completion handler for NVME ABTS for
2649  * FCP cmds. The function frees memory resources used for the NVME commands.
2650  **/
2651 static void
2652 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2653 			     struct lpfc_wcqe_complete *wcqe)
2654 {
2655 	struct lpfc_nvmet_rcv_ctx *ctxp;
2656 	struct lpfc_nvmet_tgtport *tgtp;
2657 	uint32_t status, result;
2658 	unsigned long flags;
2659 	bool released = false;
2660 
2661 	ctxp = cmdwqe->context2;
2662 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2663 	result = wcqe->parameter;
2664 
2665 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2666 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2667 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2668 
2669 	ctxp->state = LPFC_NVMET_STE_DONE;
2670 
2671 	/* Check if we already received a free context call
2672 	 * and we have completed processing an abort situation.
2673 	 */
2674 	spin_lock_irqsave(&ctxp->ctxlock, flags);
2675 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2676 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2677 		list_del(&ctxp->list);
2678 		released = true;
2679 	}
2680 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2681 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2682 	atomic_inc(&tgtp->xmt_abort_rsp);
2683 
2684 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2685 			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
2686 			"WCQE: %08x %08x %08x %08x\n",
2687 			ctxp->oxid, ctxp->flag, released,
2688 			wcqe->word0, wcqe->total_data_placed,
2689 			result, wcqe->word3);
2690 
2691 	cmdwqe->context2 = NULL;
2692 	cmdwqe->context3 = NULL;
2693 	/*
2694 	 * if transport has released ctx, then can reuse it. Otherwise,
2695 	 * will be recycled by transport release call.
2696 	 */
2697 	if (released)
2698 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2699 
2700 	/* This is the iocbq for the abort, not the command */
2701 	lpfc_sli_release_iocbq(phba, cmdwqe);
2702 
2703 	/* Since iaab/iaar are NOT set, there is no work left.
2704 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2705 	 * should have been called already.
2706 	 */
2707 }
2708 
2709 /**
2710  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2711  * @phba: Pointer to HBA context object.
2712  * @cmdwqe: Pointer to driver command WQE object.
2713  * @wcqe: Pointer to driver response CQE object.
2714  *
2715  * The function is called from SLI ring event handler with no
2716  * lock held. This function is the completion handler for NVME ABTS for
2717  * FCP cmds. The function frees memory resources used for the NVME commands.
2718  **/
2719 static void
2720 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2721 			       struct lpfc_wcqe_complete *wcqe)
2722 {
2723 	struct lpfc_nvmet_rcv_ctx *ctxp;
2724 	struct lpfc_nvmet_tgtport *tgtp;
2725 	unsigned long flags;
2726 	uint32_t status, result;
2727 	bool released = false;
2728 
2729 	ctxp = cmdwqe->context2;
2730 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2731 	result = wcqe->parameter;
2732 
2733 	if (!ctxp) {
2734 		/* if context is clear, the related I/O already completed */
2735 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2736 				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2737 				wcqe->word0, wcqe->total_data_placed,
2738 				result, wcqe->word3);
2739 		return;
2740 	}
2741 
2742 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2743 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2744 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2745 
2746 	/* Sanity check */
2747 	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2748 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2749 				"6112 ABTS Wrong state:%d oxid x%x\n",
2750 				ctxp->state, ctxp->oxid);
2751 	}
2752 
2753 	/* Check if we already received a free context call
2754 	 * and we have completed processing an abort situation.
2755 	 */
2756 	ctxp->state = LPFC_NVMET_STE_DONE;
2757 	spin_lock_irqsave(&ctxp->ctxlock, flags);
2758 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2759 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2760 		list_del(&ctxp->list);
2761 		released = true;
2762 	}
2763 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2764 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2765 	atomic_inc(&tgtp->xmt_abort_rsp);
2766 
2767 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2768 			"6316 ABTS cmpl xri x%x flg x%x (%x) "
2769 			"WCQE: %08x %08x %08x %08x\n",
2770 			ctxp->oxid, ctxp->flag, released,
2771 			wcqe->word0, wcqe->total_data_placed,
2772 			result, wcqe->word3);
2773 
2774 	cmdwqe->context2 = NULL;
2775 	cmdwqe->context3 = NULL;
2776 	/*
2777 	 * if transport has released ctx, then can reuse it. Otherwise,
2778 	 * will be recycled by transport release call.
2779 	 */
2780 	if (released)
2781 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2782 
2783 	/* Since iaab/iaar are NOT set, there is no work left.
2784 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2785 	 * should have been called already.
2786 	 */
2787 }
2788 
2789 /**
2790  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2791  * @phba: Pointer to HBA context object.
2792  * @cmdwqe: Pointer to driver command WQE object.
2793  * @wcqe: Pointer to driver response CQE object.
2794  *
2795  * The function is called from SLI ring event handler with no
2796  * lock held. This function is the completion handler for NVME ABTS for
2797  * LS cmds. The function frees memory resources used for the NVME commands.
2798  **/
2799 static void
2800 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2801 			    struct lpfc_wcqe_complete *wcqe)
2802 {
2803 	struct lpfc_nvmet_rcv_ctx *ctxp;
2804 	struct lpfc_nvmet_tgtport *tgtp;
2805 	uint32_t status, result;
2806 
2807 	ctxp = cmdwqe->context2;
2808 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2809 	result = wcqe->parameter;
2810 
2811 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2812 	atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2813 
2814 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2815 			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2816 			ctxp, wcqe->word0, wcqe->total_data_placed,
2817 			result, wcqe->word3);
2818 
2819 	if (!ctxp) {
2820 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2821 				"6415 NVMET LS Abort No ctx: WCQE: "
2822 				 "%08x %08x %08x %08x\n",
2823 				wcqe->word0, wcqe->total_data_placed,
2824 				result, wcqe->word3);
2825 
2826 		lpfc_sli_release_iocbq(phba, cmdwqe);
2827 		return;
2828 	}
2829 
2830 	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2831 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2832 				"6416 NVMET LS abort cmpl state mismatch: "
2833 				"oxid x%x: %d %d\n",
2834 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2835 	}
2836 
2837 	cmdwqe->context2 = NULL;
2838 	cmdwqe->context3 = NULL;
2839 	lpfc_sli_release_iocbq(phba, cmdwqe);
2840 	kfree(ctxp);
2841 }
2842 
2843 static int
2844 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2845 			     struct lpfc_nvmet_rcv_ctx *ctxp,
2846 			     uint32_t sid, uint16_t xri)
2847 {
2848 	struct lpfc_nvmet_tgtport *tgtp;
2849 	struct lpfc_iocbq *abts_wqeq;
2850 	union lpfc_wqe128 *wqe_abts;
2851 	struct lpfc_nodelist *ndlp;
2852 
2853 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2854 			"6067 ABTS: sid %x xri x%x/x%x\n",
2855 			sid, xri, ctxp->wqeq->sli4_xritag);
2856 
2857 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2858 
2859 	ndlp = lpfc_findnode_did(phba->pport, sid);
2860 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2861 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2862 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2863 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2864 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2865 				"6134 Drop ABTS - wrong NDLP state x%x.\n",
2866 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2867 
2868 		/* No failure to an ABTS request. */
2869 		return 0;
2870 	}
2871 
2872 	abts_wqeq = ctxp->wqeq;
2873 	wqe_abts = &abts_wqeq->wqe;
2874 
2875 	/*
2876 	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
2877 	 * that were initialized in lpfc_sli4_nvmet_alloc.
2878 	 */
2879 	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
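	/*
	 * The unsolicited abort is sent as a raw ABTS basic link service
	 * frame via an XMIT_SEQUENCE64 WQE (R_CTL = BA_ABTS, TYPE = BLS),
	 * rather than aborting a firmware exchange with an ABORT WQE.
	 */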
2880 
2881 	/* Word 5 */
2882 	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2883 	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2884 	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2885 	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2886 	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2887 
2888 	/* Word 6 */
2889 	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2890 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2891 	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2892 	       abts_wqeq->sli4_xritag);
2893 
2894 	/* Word 7 */
2895 	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2896 	       CMD_XMIT_SEQUENCE64_WQE);
2897 	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2898 	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2899 	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2900 
2901 	/* Word 8 */
2902 	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2903 
2904 	/* Word 9 */
2905 	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2906 	/* OX_ID of the exchange being aborted, passed in by the caller */
2907 	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2908 
2909 	/* Word 10 */
2910 	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2911 	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2912 	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2913 	       LPFC_WQE_LENLOC_WORD12);
2914 	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2915 	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2916 
2917 	/* Word 11 */
2918 	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2919 	       LPFC_WQE_CQ_ID_DEFAULT);
2920 	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2921 	       OTHER_COMMAND);
2922 
2923 	abts_wqeq->vport = phba->pport;
2924 	abts_wqeq->context1 = ndlp;
2925 	abts_wqeq->context2 = ctxp;
2926 	abts_wqeq->context3 = NULL;
2927 	abts_wqeq->rsvd2 = 0;
2928 	/* hba_wqidx should already be setup from command we are aborting */
2929 	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2930 	abts_wqeq->iocb.ulpLe = 1;
2931 
2932 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2933 			"6069 Issue ABTS to xri x%x reqtag x%x\n",
2934 			xri, abts_wqeq->iotag);
2935 	return 1;
2936 }
2937 
2938 static int
2939 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2940 			       struct lpfc_nvmet_rcv_ctx *ctxp,
2941 			       uint32_t sid, uint16_t xri)
2942 {
2943 	struct lpfc_nvmet_tgtport *tgtp;
2944 	struct lpfc_iocbq *abts_wqeq;
2945 	union lpfc_wqe128 *abts_wqe;
2946 	struct lpfc_nodelist *ndlp;
2947 	unsigned long flags;
2948 	int rc;
2949 
2950 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2951 	if (!ctxp->wqeq) {
2952 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
2953 		ctxp->wqeq->hba_wqidx = 0;
2954 	}
2955 
2956 	ndlp = lpfc_findnode_did(phba->pport, sid);
2957 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2958 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2959 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2960 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2961 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2962 				"6160 Drop ABORT - wrong NDLP state x%x.\n",
2963 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2964 
2965 		/* No failure to an ABTS request. */
2966 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2967 		return 0;
2968 	}
2969 
2970 	/* Issue ABTS for this WQE based on iotag */
2971 	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2972 	if (!ctxp->abort_wqeq) {
2973 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2974 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2975 				"6161 ABORT failed: No wqeqs: "
2976 				"xri: x%x\n", ctxp->oxid);
2977 		/* No failure to an ABTS request. */
2978 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2979 		return 0;
2980 	}
2981 	abts_wqeq = ctxp->abort_wqeq;
2982 	abts_wqe = &abts_wqeq->wqe;
2983 	ctxp->state = LPFC_NVMET_STE_ABORT;
2984 
2985 	/* Announce entry to new IO submit field. */
2986 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2987 			"6162 ABORT Request to rport DID x%06x "
2988 			"for xri x%x x%x\n",
2989 			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2990 
2991 	/* If the hba is getting reset, this flag is set.  It is
2992 	 * cleared when the reset is complete and rings reestablished.
2993 	 */
2994 	spin_lock_irqsave(&phba->hbalock, flags);
2995 	/* driver queued commands are in process of being flushed */
2996 	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2997 		spin_unlock_irqrestore(&phba->hbalock, flags);
2998 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2999 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3000 				"6163 Driver in reset cleanup - flushing "
3001 				"NVME Req now. hba_flag x%x oxid x%x\n",
3002 				phba->hba_flag, ctxp->oxid);
3003 		lpfc_sli_release_iocbq(phba, abts_wqeq);
3004 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3005 		return 0;
3006 	}
3007 
3008 	/* Outstanding abort is in progress */
3009 	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3010 		spin_unlock_irqrestore(&phba->hbalock, flags);
3011 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3012 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3013 				"6164 Outstanding NVME I/O Abort Request "
3014 				"still pending on oxid x%x\n",
3015 				ctxp->oxid);
3016 		lpfc_sli_release_iocbq(phba, abts_wqeq);
3017 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3018 		return 0;
3019 	}
3020 
3021 	/* Ready - mark outstanding as aborted by driver. */
3022 	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3023 
3024 	/* WQEs are reused.  Clear stale data and set key fields to
3025 	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3026 	 */
3027 	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3028 
3029 	/* word 3 */
3030 	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3031 
3032 	/* word 7 */
3033 	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3034 	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3035 
3036 	/* word 8 - tell the FW to abort the IO associated with this
3037 	 * outstanding exchange ID.
3038 	 */
3039 	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3040 
3041 	/* word 9 - this is the iotag for the abts_wqe completion. */
3042 	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3043 	       abts_wqeq->iotag);
3044 
3045 	/* word 10 */
3046 	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3047 	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3048 
3049 	/* word 11 */
3050 	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3051 	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3052 	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3053 
3054 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
3055 	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3056 	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3057 	abts_wqeq->iocb_cmpl = NULL;
3058 	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3059 	abts_wqeq->context2 = ctxp;
3060 	abts_wqeq->vport = phba->pport;
3061 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3062 	spin_unlock_irqrestore(&phba->hbalock, flags);
3063 	if (rc == WQE_SUCCESS) {
3064 		atomic_inc(&tgtp->xmt_abort_sol);
3065 		return 0;
3066 	}
3067 
3068 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3069 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3070 	lpfc_sli_release_iocbq(phba, abts_wqeq);
3071 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3072 			"6166 Failed ABORT issue_wqe with status x%x "
3073 			"for oxid x%x.\n",
3074 			rc, ctxp->oxid);
3075 	return 1;
3076 }
3077 
3078 
3079 static int
3080 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3081 				 struct lpfc_nvmet_rcv_ctx *ctxp,
3082 				 uint32_t sid, uint16_t xri)
3083 {
3084 	struct lpfc_nvmet_tgtport *tgtp;
3085 	struct lpfc_iocbq *abts_wqeq;
3086 	unsigned long flags;
3087 	int rc;
3088 
3089 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3090 	if (!ctxp->wqeq) {
3091 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
3092 		ctxp->wqeq->hba_wqidx = 0;
3093 	}
3094 
3095 	if (ctxp->state == LPFC_NVMET_STE_FREE) {
3096 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3097 				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3098 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3099 		rc = WQE_BUSY;
3100 		goto aerr;
3101 	}
3102 	ctxp->state = LPFC_NVMET_STE_ABORT;
3103 	ctxp->entry_cnt++;
3104 	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3105 	if (rc == 0)
3106 		goto aerr;
3107 
3108 	spin_lock_irqsave(&phba->hbalock, flags);
3109 	abts_wqeq = ctxp->wqeq;
3110 	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3111 	abts_wqeq->iocb_cmpl = NULL;
3112 	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3113 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3114 	spin_unlock_irqrestore(&phba->hbalock, flags);
3115 	if (rc == WQE_SUCCESS)
3116 		return 0;
3118 
3119 aerr:
3120 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3121 	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
3122 		list_del(&ctxp->list);
3123 	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3124 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3125 
3126 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3127 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3128 			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3129 			ctxp->oxid, rc);
3130 	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3131 	return 1;
3132 }
3133 
3134 static int
3135 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3136 				struct lpfc_nvmet_rcv_ctx *ctxp,
3137 				uint32_t sid, uint16_t xri)
3138 {
3139 	struct lpfc_nvmet_tgtport *tgtp;
3140 	struct lpfc_iocbq *abts_wqeq;
3141 	union lpfc_wqe128 *wqe_abts;
3142 	unsigned long flags;
3143 	int rc;
3144 
3145 	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3146 	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3147 		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3148 		ctxp->entry_cnt++;
3149 	} else {
3150 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3151 				"6418 NVMET LS abort state mismatch "
3152 				"IO x%x: %d %d\n",
3153 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3154 		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3155 	}
3156 
3157 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3158 	if (!ctxp->wqeq) {
3159 		/* Issue ABTS for this WQE based on iotag */
3160 		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3161 		if (!ctxp->wqeq) {
3162 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3163 					"6068 Abort failed: No wqeqs: "
3164 					"xri: x%x\n", xri);
3165 			/* No failure to an ABTS request. */
3166 			kfree(ctxp);
3167 			return 0;
3168 		}
3169 	}
3170 	abts_wqeq = ctxp->wqeq;
3171 	wqe_abts = &abts_wqeq->wqe;
3172 
3173 	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3174 		rc = WQE_BUSY;
3175 		goto out;
3176 	}
3177 
3178 	spin_lock_irqsave(&phba->hbalock, flags);
3179 	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3180 	abts_wqeq->iocb_cmpl = NULL;
3181 	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3182 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
3183 	spin_unlock_irqrestore(&phba->hbalock, flags);
3184 	if (rc == WQE_SUCCESS) {
3185 		atomic_inc(&tgtp->xmt_abort_unsol);
3186 		return 0;
3187 	}
3188 out:
3189 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3190 	abts_wqeq->context2 = NULL;
3191 	abts_wqeq->context3 = NULL;
3192 	lpfc_sli_release_iocbq(phba, abts_wqeq);
3193 	kfree(ctxp);
3194 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3195 			"6056 Failed to Issue ABTS. Status x%x\n", rc);
3196 	return 0;
3197 }
3198