xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_nvmet.c (revision 4cff79e9)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30 
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38 
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
42 
43 #include "lpfc_version.h"
44 #include "lpfc_hw4.h"
45 #include "lpfc_hw.h"
46 #include "lpfc_sli.h"
47 #include "lpfc_sli4.h"
48 #include "lpfc_nl.h"
49 #include "lpfc_disc.h"
50 #include "lpfc.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_nvme.h"
53 #include "lpfc_nvmet.h"
54 #include "lpfc_logmsg.h"
55 #include "lpfc_crtn.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_debugfs.h"
58 
59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60 						 struct lpfc_nvmet_rcv_ctx *,
61 						 dma_addr_t rspbuf,
62 						 uint16_t rspsize);
63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64 						  struct lpfc_nvmet_rcv_ctx *);
65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66 					  struct lpfc_nvmet_rcv_ctx *,
67 					  uint32_t, uint16_t);
68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69 					    struct lpfc_nvmet_rcv_ctx *,
70 					    uint32_t, uint16_t);
71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72 					   struct lpfc_nvmet_rcv_ctx *,
73 					   uint32_t, uint16_t);
74 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
75 				    struct lpfc_nvmet_rcv_ctx *);
76 
77 static union lpfc_wqe128 lpfc_tsend_cmd_template;
78 static union lpfc_wqe128 lpfc_treceive_cmd_template;
79 static union lpfc_wqe128 lpfc_trsp_cmd_template;
80 
81 /* Setup WQE templates for NVME IOs */
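/*
 * Each template pre-loads the fixed WQE words once at init time; the
 * per-IO prep routines then copy a template into the command WQE and
 * patch only the words marked "variable" below. A minimal sketch of
 * that pattern (illustrative, not the driver's literal code):
 *
 *	memcpy(wqe, &lpfc_tsend_cmd_template, sizeof(union lpfc_wqe128));
 *	bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com, xri);
 */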
82 void
83 lpfc_nvmet_cmd_template(void)
84 {
85 	union lpfc_wqe128 *wqe;
86 
87 	/* TSEND template */
88 	wqe = &lpfc_tsend_cmd_template;
89 	memset(wqe, 0, sizeof(union lpfc_wqe128));
90 
91 	/* Word 0, 1, 2 - BDE is variable */
92 
93 	/* Word 3 - payload_offset_len is zero */
94 
95 	/* Word 4 - relative_offset is variable */
96 
97 	/* Word 5 - is zero */
98 
99 	/* Word 6 - ctxt_tag, xri_tag is variable */
100 
101 	/* Word 7 - wqe_ar is variable */
102 	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
103 	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
104 	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
105 	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
106 	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
107 
108 	/* Word 8 - abort_tag is variable */
109 
110 	/* Word 9  - reqtag, rcvoxid is variable */
111 
112 	/* Word 10 - wqes, xc is variable */
113 	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
114 	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
115 	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
116 	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
117 	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
118 	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
119 
120 	/* Word 11 - sup, irsp, irsplen is variable */
121 	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
122 	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
123 	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
124 	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
125 	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
126 	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
127 
128 	/* Word 12 - fcp_data_len is variable */
129 
130 	/* Word 13, 14, 15 - PBDE is zero */
131 
132 	/* TRECEIVE template */
133 	wqe = &lpfc_treceive_cmd_template;
134 	memset(wqe, 0, sizeof(union lpfc_wqe128));
135 
136 	/* Word 0, 1, 2 - BDE is variable */
137 
138 	/* Word 3 */
139 	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
140 
141 	/* Word 4 - relative_offset is variable */
142 
143 	/* Word 5 - is zero */
144 
145 	/* Word 6 - ctxt_tag, xri_tag is variable */
146 
147 	/* Word 7 */
148 	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
149 	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
150 	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
151 	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
152 	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
153 
154 	/* Word 8 - abort_tag is variable */
155 
156 	/* Word 9  - reqtag, rcvoxid is variable */
157 
158 	/* Word 10 - xc is variable */
159 	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
160 	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
161 	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
162 	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
163 	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
164 	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
165 
166 	/* Word 11 - pbde is variable */
167 	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
168 	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
169 	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
170 	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
171 	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
172 	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
173 
174 	/* Word 12 - fcp_data_len is variable */
175 
176 	/* Word 13, 14, 15 - PBDE is variable */
177 
178 	/* TRSP template */
179 	wqe = &lpfc_trsp_cmd_template;
180 	memset(wqe, 0, sizeof(union lpfc_wqe128));
181 
182 	/* Word 0, 1, 2 - BDE is variable */
183 
184 	/* Word 3 - response_len is variable */
185 
186 	/* Word 4, 5 - is zero */
187 
188 	/* Word 6 - ctxt_tag, xri_tag is variable */
189 
190 	/* Word 7 */
191 	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
192 	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
193 	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
194 	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
195 	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
196 
197 	/* Word 8 - abort_tag is variable */
198 
199 	/* Word 9  - reqtag is variable */
200 
201 	/* Word 10 - wqes, xc is variable */
202 	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
203 	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
204 	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
205 	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
206 	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
207 	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
208 
209 	/* Word 11 - irsp, irsplen is variable */
210 	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
211 	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
212 	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
213 	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
214 	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
215 	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
216 
217 	/* Word 12, 13, 14, 15 - is zero */
218 }
219 
220 void
221 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
222 {
223 	unsigned long iflag;
224 
225 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
226 			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
227 			ctxp->oxid, ctxp->flag);
228 
229 	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
230 	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
231 		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
232 				       iflag);
233 		return;
234 	}
235 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
236 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
237 	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
238 }
239 
240 /**
241  * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
242  * @phba: Pointer to HBA context object.
243  * @cmdwqe: Pointer to driver command WQE object.
244  * @wcqe: Pointer to driver response CQE object.
245  *
246  * The function is called from the SLI ring event handler with no
247  * lock held. It is the completion handler for NVME LS commands and
248  * frees the memory resources used for them.
249  **/
250 static void
251 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
252 			  struct lpfc_wcqe_complete *wcqe)
253 {
254 	struct lpfc_nvmet_tgtport *tgtp;
255 	struct nvmefc_tgt_ls_req *rsp;
256 	struct lpfc_nvmet_rcv_ctx *ctxp;
257 	uint32_t status, result;
258 
259 	status = bf_get(lpfc_wcqe_c_status, wcqe);
260 	result = wcqe->parameter;
261 	ctxp = cmdwqe->context2;
262 
263 	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
264 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
265 				"6410 NVMET LS cmpl state mismatch IO x%x: "
266 				"%d %d\n",
267 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
268 	}
269 
270 	if (!phba->targetport)
271 		goto out;
272 
273 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
274 
275 	if (tgtp) {
276 		if (status) {
277 			atomic_inc(&tgtp->xmt_ls_rsp_error);
278 			if (result == IOERR_ABORT_REQUESTED)
279 				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
280 			if (bf_get(lpfc_wcqe_c_xb, wcqe))
281 				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
282 		} else {
283 			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
284 		}
285 	}
286 
287 out:
288 	rsp = &ctxp->ctx.ls_req;
289 
290 	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
291 			 ctxp->oxid, status, result);
292 
293 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
294 			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
295 			status, result, ctxp->oxid);
296 
297 	lpfc_nlp_put(cmdwqe->context1);
298 	cmdwqe->context2 = NULL;
299 	cmdwqe->context3 = NULL;
300 	lpfc_sli_release_iocbq(phba, cmdwqe);
301 	rsp->done(rsp);
302 	kfree(ctxp);
303 }
304 
305 /**
306  * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
307  * @phba: HBA the context buffer is associated with
308  * @ctx_buf: context buffer to clean up and repost
309  *
310  * Description: If a received command is waiting for a free context,
311  * hands this buffer directly to it; otherwise returns the buffer to
312  * the per-CPU, per-MRQ free list so it can be reused.
313  *
314  * Notes: Takes nvmet_io_wait_lock and a context list lock internally;
315  * can be called with or without other locks held.
316  * Returns: None
317  **/
318 void
319 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
320 {
321 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
322 	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
323 	struct lpfc_nvmet_tgtport *tgtp;
324 	struct fc_frame_header *fc_hdr;
325 	struct rqb_dmabuf *nvmebuf;
326 	struct lpfc_nvmet_ctx_info *infop;
327 	uint32_t *payload;
328 	uint32_t size, oxid, sid, rc;
329 	int cpu;
330 	unsigned long iflag;
331 
332 	if (ctxp->txrdy) {
333 		dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
334 			      ctxp->txrdy_phys);
335 		ctxp->txrdy = NULL;
336 		ctxp->txrdy_phys = 0;
337 	}
338 
339 	if (ctxp->state == LPFC_NVMET_STE_FREE) {
340 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
341 				"6411 NVMET free, already free IO x%x: %d %d\n",
342 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
343 	}
344 	ctxp->state = LPFC_NVMET_STE_FREE;
345 
346 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
347 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
348 		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
349 				 nvmebuf, struct rqb_dmabuf,
350 				 hbuf.list);
351 		phba->sli4_hba.nvmet_io_wait_cnt--;
352 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
353 				       iflag);
354 
355 		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
356 		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
357 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
358 		payload = (uint32_t *)(nvmebuf->dbuf.virt);
359 		size = nvmebuf->bytes_recv;
360 		sid = sli4_sid_from_fc_hdr(fc_hdr);
361 
362 		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
363 		ctxp->wqeq = NULL;
364 		ctxp->txrdy = NULL;
365 		ctxp->offset = 0;
366 		ctxp->phba = phba;
367 		ctxp->size = size;
368 		ctxp->oxid = oxid;
369 		ctxp->sid = sid;
370 		ctxp->state = LPFC_NVMET_STE_RCV;
371 		ctxp->entry_cnt = 1;
372 		ctxp->flag = 0;
373 		ctxp->ctxbuf = ctx_buf;
374 		ctxp->rqb_buffer = (void *)nvmebuf;
375 		spin_lock_init(&ctxp->ctxlock);
376 
377 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
378 		if (ctxp->ts_cmd_nvme) {
379 			ctxp->ts_cmd_nvme = ktime_get_ns();
380 			ctxp->ts_nvme_data = 0;
381 			ctxp->ts_data_wqput = 0;
382 			ctxp->ts_isr_data = 0;
383 			ctxp->ts_data_nvme = 0;
384 			ctxp->ts_nvme_status = 0;
385 			ctxp->ts_status_wqput = 0;
386 			ctxp->ts_isr_status = 0;
387 			ctxp->ts_status_nvme = 0;
388 		}
389 #endif
390 		atomic_inc(&tgtp->rcv_fcp_cmd_in);
391 		/*
392 		 * The calling sequence should be:
393 		 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
394 		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
395 		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
396 		 * in the NVME command / FC header has been stored.
397 		 * A buffer has already been reposted for this IO, so just free
398 		 * the nvmebuf.
399 		 */
400 		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
401 					  payload, size);
402 
403 		/* Process FCP command */
404 		if (rc == 0) {
405 			atomic_inc(&tgtp->rcv_fcp_cmd_out);
406 			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
407 			return;
408 		}
409 
410 		/* Processing of FCP command is deferred */
411 		if (rc == -EOVERFLOW) {
412 			lpfc_nvmeio_data(phba,
413 					 "NVMET RCV BUSY: xri x%x sz %d "
414 					 "from %06x\n",
415 					 oxid, size, sid);
416 			atomic_inc(&tgtp->rcv_fcp_cmd_out);
417 			return;
418 		}
419 		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
420 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
421 				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
422 				ctxp->oxid, rc,
423 				atomic_read(&tgtp->rcv_fcp_cmd_in),
424 				atomic_read(&tgtp->rcv_fcp_cmd_out),
425 				atomic_read(&tgtp->xmt_fcp_release));
426 
427 		lpfc_nvmet_defer_release(phba, ctxp);
428 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
429 		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
430 		return;
431 	}
432 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
433 
434 	/*
435 	 * Use the CPU context list, from the MRQ the IO was received on
436 	 * (ctxp->idx), to save the context structure.
437 	 */
438 	cpu = smp_processor_id();
439 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
440 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
441 	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
442 	infop->nvmet_ctx_list_cnt++;
443 	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
444 #endif
445 }
446 
447 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
448 static void
449 lpfc_nvmet_ktime(struct lpfc_hba *phba,
450 		 struct lpfc_nvmet_rcv_ctx *ctxp)
451 {
452 	uint64_t seg1, seg2, seg3, seg4, seg5;
453 	uint64_t seg6, seg7, seg8, seg9, seg10;
454 	uint64_t segsum;
455 
456 	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
457 	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
458 	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
459 	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
460 	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
461 		return;
462 
463 	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
464 		return;
465 	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
466 		return;
467 	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
468 		return;
469 	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
470 		return;
471 	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
472 		return;
473 	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
474 		return;
475 	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
476 		return;
477 	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
478 		return;
479 	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
480 		return;
481 	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
482 		return;
483 	/*
484 	 * Segment 1 - Time from FCP command received by MSI-X ISR
485 	 * to FCP command is passed to NVME Layer.
486 	 * Segment 2 - Time from FCP command payload handed
487 	 * off to NVME Layer to Driver receives a Command op
488 	 * from NVME Layer.
489 	 * Segment 3 - Time from Driver receives a Command op
490 	 * from NVME Layer to Command is put on WQ.
491 	 * Segment 4 - Time from Driver WQ put is done
492 	 * to MSI-X ISR for Command cmpl.
493 	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
494 	 * Command cmpl is passed to NVME Layer.
495 	 * Segment 6 - Time from Command cmpl is passed to NVME
496 	 * Layer to Driver receives a RSP op from NVME Layer.
497 	 * Segment 7 - Time from Driver receives a RSP op from
498 	 * NVME Layer to WQ put is done on TRSP FCP Status.
499 	 * Segment 8 - Time from Driver WQ put is done on TRSP
500 	 * FCP Status to MSI-X ISR for TRSP cmpl.
501 	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
502 	 * TRSP cmpl is passed to NVME Layer.
503 	 * Segment 10 - Time from FCP command received by
504 	 * MSI-X ISR to command is completed on wire.
505 	 * (Segments 1 thru 8) for READDATA / WRITEDATA
506 	 * (Segments 1 thru 4) for READDATA_RSP
507 	 */
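	/*
	 * Worked example with illustrative values: ts_isr_cmd = 0,
	 * ts_cmd_nvme = 10, ts_nvme_data = 25, ts_data_wqput = 40 gives
	 * seg1 = 10, seg2 = (25 - 0) - 10 = 15, seg3 = (40 - 0) - 25 = 15.
	 * Each segment is (timestamp - ts_isr_cmd) minus the sum of all
	 * earlier segments, i.e. the delta between adjacent timestamps.
	 */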
508 	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
509 	segsum = seg1;
510 
511 	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
512 	if (segsum > seg2)
513 		return;
514 	seg2 -= segsum;
515 	segsum += seg2;
516 
517 	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
518 	if (segsum > seg3)
519 		return;
520 	seg3 -= segsum;
521 	segsum += seg3;
522 
523 	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
524 	if (segsum > seg4)
525 		return;
526 	seg4 -= segsum;
527 	segsum += seg4;
528 
529 	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
530 	if (segsum > seg5)
531 		return;
532 	seg5 -= segsum;
533 	segsum += seg5;
534 
536 	/* For auto rsp commands seg6 thru seg9 will be 0 */
537 	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
538 		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
539 		if (segsum > seg6)
540 			return;
541 		seg6 -= segsum;
542 		segsum += seg6;
543 
544 		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
545 		if (segsum > seg7)
546 			return;
547 		seg7 -= segsum;
548 		segsum += seg7;
549 
550 		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
551 		if (segsum > seg8)
552 			return;
553 		seg8 -= segsum;
554 		segsum += seg8;
555 
556 		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
557 		if (segsum > seg9)
558 			return;
559 		seg9 -= segsum;
560 		segsum += seg9;
561 
562 		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
563 			return;
564 		seg10 = (ctxp->ts_isr_status -
565 			ctxp->ts_isr_cmd);
566 	} else {
567 		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
568 			return;
569 		seg6 =  0;
570 		seg7 =  0;
571 		seg8 =  0;
572 		seg9 =  0;
573 		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
574 	}
575 
576 	phba->ktime_seg1_total += seg1;
577 	if (seg1 < phba->ktime_seg1_min)
578 		phba->ktime_seg1_min = seg1;
579 	else if (seg1 > phba->ktime_seg1_max)
580 		phba->ktime_seg1_max = seg1;
581 
582 	phba->ktime_seg2_total += seg2;
583 	if (seg2 < phba->ktime_seg2_min)
584 		phba->ktime_seg2_min = seg2;
585 	else if (seg2 > phba->ktime_seg2_max)
586 		phba->ktime_seg2_max = seg2;
587 
588 	phba->ktime_seg3_total += seg3;
589 	if (seg3 < phba->ktime_seg3_min)
590 		phba->ktime_seg3_min = seg3;
591 	else if (seg3 > phba->ktime_seg3_max)
592 		phba->ktime_seg3_max = seg3;
593 
594 	phba->ktime_seg4_total += seg4;
595 	if (seg4 < phba->ktime_seg4_min)
596 		phba->ktime_seg4_min = seg4;
597 	else if (seg4 > phba->ktime_seg4_max)
598 		phba->ktime_seg4_max = seg4;
599 
600 	phba->ktime_seg5_total += seg5;
601 	if (seg5 < phba->ktime_seg5_min)
602 		phba->ktime_seg5_min = seg5;
603 	else if (seg5 > phba->ktime_seg5_max)
604 		phba->ktime_seg5_max = seg5;
605 
606 	phba->ktime_data_samples++;
607 	if (!seg6)
608 		goto out;
609 
610 	phba->ktime_seg6_total += seg6;
611 	if (seg6 < phba->ktime_seg6_min)
612 		phba->ktime_seg6_min = seg6;
613 	else if (seg6 > phba->ktime_seg6_max)
614 		phba->ktime_seg6_max = seg6;
615 
616 	phba->ktime_seg7_total += seg7;
617 	if (seg7 < phba->ktime_seg7_min)
618 		phba->ktime_seg7_min = seg7;
619 	else if (seg7 > phba->ktime_seg7_max)
620 		phba->ktime_seg7_max = seg7;
621 
622 	phba->ktime_seg8_total += seg8;
623 	if (seg8 < phba->ktime_seg8_min)
624 		phba->ktime_seg8_min = seg8;
625 	else if (seg8 > phba->ktime_seg8_max)
626 		phba->ktime_seg8_max = seg8;
627 
628 	phba->ktime_seg9_total += seg9;
629 	if (seg9 < phba->ktime_seg9_min)
630 		phba->ktime_seg9_min = seg9;
631 	else if (seg9 > phba->ktime_seg9_max)
632 		phba->ktime_seg9_max = seg9;
633 out:
634 	phba->ktime_seg10_total += seg10;
635 	if (seg10 < phba->ktime_seg10_min)
636 		phba->ktime_seg10_min = seg10;
637 	else if (seg10 > phba->ktime_seg10_max)
638 		phba->ktime_seg10_max = seg10;
639 	phba->ktime_status_samples++;
640 }
641 #endif
642 
643 /**
644  * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
645  * @phba: Pointer to HBA context object.
646  * @cmdwqe: Pointer to driver command WQE object.
647  * @wcqe: Pointer to driver response CQE object.
648  *
649  * The function is called from the SLI ring event handler with no
650  * lock held. It is the completion handler for NVME FCP commands and
651  * frees the memory resources used for them.
652  **/
653 static void
654 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
655 			  struct lpfc_wcqe_complete *wcqe)
656 {
657 	struct lpfc_nvmet_tgtport *tgtp;
658 	struct nvmefc_tgt_fcp_req *rsp;
659 	struct lpfc_nvmet_rcv_ctx *ctxp;
660 	uint32_t status, result, op, start_clean, logerr;
661 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
662 	uint32_t id;
663 #endif
664 
665 	ctxp = cmdwqe->context2;
666 	ctxp->flag &= ~LPFC_NVMET_IO_INP;
667 
668 	rsp = &ctxp->ctx.fcp_req;
669 	op = rsp->op;
670 
671 	status = bf_get(lpfc_wcqe_c_status, wcqe);
672 	result = wcqe->parameter;
673 
674 	if (phba->targetport)
675 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
676 	else
677 		tgtp = NULL;
678 
679 	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
680 			 ctxp->oxid, op, status);
681 
682 	if (status) {
683 		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
684 		rsp->transferred_length = 0;
685 		if (tgtp) {
686 			atomic_inc(&tgtp->xmt_fcp_rsp_error);
687 			if (result == IOERR_ABORT_REQUESTED)
688 				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
689 		}
690 
691 		logerr = LOG_NVME_IOERR;
692 
693 		/* pick up SLI4 exchange busy condition */
694 		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
695 			ctxp->flag |= LPFC_NVMET_XBUSY;
696 			logerr |= LOG_NVME_ABTS;
697 			if (tgtp)
698 				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
699 
700 		} else {
701 			ctxp->flag &= ~LPFC_NVMET_XBUSY;
702 		}
703 
704 		lpfc_printf_log(phba, KERN_INFO, logerr,
705 				"6315 IO Error Cmpl xri x%x: %x/%x XBUSY:x%x\n",
706 				ctxp->oxid, status, result, ctxp->flag);
707 
708 	} else {
709 		rsp->fcp_error = NVME_SC_SUCCESS;
710 		if (op == NVMET_FCOP_RSP)
711 			rsp->transferred_length = rsp->rsplen;
712 		else
713 			rsp->transferred_length = rsp->transfer_length;
714 		if (tgtp)
715 			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
716 	}
717 
718 	if ((op == NVMET_FCOP_READDATA_RSP) ||
719 	    (op == NVMET_FCOP_RSP)) {
720 		/* Sanity check */
721 		ctxp->state = LPFC_NVMET_STE_DONE;
722 		ctxp->entry_cnt++;
723 
724 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
725 		if (ctxp->ts_cmd_nvme) {
726 			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
727 				ctxp->ts_isr_data =
728 					cmdwqe->isr_timestamp;
729 				ctxp->ts_data_nvme =
730 					ktime_get_ns();
731 				ctxp->ts_nvme_status =
732 					ctxp->ts_data_nvme;
733 				ctxp->ts_status_wqput =
734 					ctxp->ts_data_nvme;
735 				ctxp->ts_isr_status =
736 					ctxp->ts_data_nvme;
737 				ctxp->ts_status_nvme =
738 					ctxp->ts_data_nvme;
739 			} else {
740 				ctxp->ts_isr_status =
741 					cmdwqe->isr_timestamp;
742 				ctxp->ts_status_nvme =
743 					ktime_get_ns();
744 			}
745 		}
746 		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
747 			id = smp_processor_id();
748 			if (ctxp->cpu != id)
749 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
750 						"6703 CPU Check cmpl: "
751 						"cpu %d expect %d\n",
752 						id, ctxp->cpu);
753 			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
754 				phba->cpucheck_cmpl_io[id]++;
755 		}
756 #endif
757 		rsp->done(rsp);
758 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
759 		if (ctxp->ts_cmd_nvme)
760 			lpfc_nvmet_ktime(phba, ctxp);
761 #endif
762 		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
763 	} else {
764 		ctxp->entry_cnt++;
765 		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
766 		memset(((char *)cmdwqe) + start_clean, 0,
767 		       (sizeof(struct lpfc_iocbq) - start_clean));
768 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
769 		if (ctxp->ts_cmd_nvme) {
770 			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
771 			ctxp->ts_data_nvme = ktime_get_ns();
772 		}
773 		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
774 			id = smp_processor_id();
775 			if (ctxp->cpu != id)
776 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
777 						"6704 CPU Check cmdcmpl: "
778 						"cpu %d expect %d\n",
779 						id, ctxp->cpu);
780 			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
781 				phba->cpucheck_ccmpl_io[id]++;
782 		}
783 #endif
784 		rsp->done(rsp);
785 	}
786 }
787 
788 static int
789 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
790 		      struct nvmefc_tgt_ls_req *rsp)
791 {
792 	struct lpfc_nvmet_rcv_ctx *ctxp =
793 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
794 	struct lpfc_hba *phba = ctxp->phba;
795 	struct hbq_dmabuf *nvmebuf =
796 		(struct hbq_dmabuf *)ctxp->rqb_buffer;
797 	struct lpfc_iocbq *nvmewqeq;
798 	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
799 	struct lpfc_dmabuf dmabuf;
800 	struct ulp_bde64 bpl;
801 	int rc;
802 
803 	if (phba->pport->load_flag & FC_UNLOADING)
804 		return -ENODEV;
805 
809 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
810 			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
811 
812 	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
813 	    (ctxp->entry_cnt != 1)) {
814 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
815 				"6412 NVMET LS rsp state mismatch "
816 				"oxid x%x: %d %d\n",
817 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
818 	}
819 	ctxp->state = LPFC_NVMET_STE_LS_RSP;
820 	ctxp->entry_cnt++;
821 
822 	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
823 				      rsp->rsplen);
824 	if (nvmewqeq == NULL) {
825 		atomic_inc(&nvmep->xmt_ls_drop);
826 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
827 				"6150 LS Drop IO x%x: Prep\n",
828 				ctxp->oxid);
829 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
830 		atomic_inc(&nvmep->xmt_ls_abort);
831 		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
832 						ctxp->sid, ctxp->oxid);
833 		return -ENOMEM;
834 	}
835 
836 	/* Save numBdes for bpl2sgl */
837 	nvmewqeq->rsvd2 = 1;
838 	nvmewqeq->hba_wqidx = 0;
839 	nvmewqeq->context3 = &dmabuf;
840 	dmabuf.virt = &bpl;
841 	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
842 	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
843 	bpl.tus.f.bdeSize = rsp->rsplen;
844 	bpl.tus.f.bdeFlags = 0;
845 	bpl.tus.w = le32_to_cpu(bpl.tus.w);
846 
847 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
848 	nvmewqeq->iocb_cmpl = NULL;
849 	nvmewqeq->context2 = ctxp;
850 
851 	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
852 			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
853 
854 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
855 	if (rc == WQE_SUCCESS) {
856 		/*
857 		 * Okay to repost buffer here, but wait till cmpl
858 		 * before freeing ctxp and iocbq.
859 		 */
860 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
861 		ctxp->rqb_buffer = NULL;
862 		atomic_inc(&nvmep->xmt_ls_rsp);
863 		return 0;
864 	}
865 	/* Give back resources */
866 	atomic_inc(&nvmep->xmt_ls_drop);
867 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
868 			"6151 LS Drop IO x%x: Issue %d\n",
869 			ctxp->oxid, rc);
870 
871 	lpfc_nlp_put(nvmewqeq->context1);
872 
873 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
874 	atomic_inc(&nvmep->xmt_ls_abort);
875 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
876 	return -ENXIO;
877 }
878 
879 static int
880 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
881 		      struct nvmefc_tgt_fcp_req *rsp)
882 {
883 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
884 	struct lpfc_nvmet_rcv_ctx *ctxp =
885 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
886 	struct lpfc_hba *phba = ctxp->phba;
887 	struct lpfc_queue *wq;
888 	struct lpfc_iocbq *nvmewqeq;
889 	struct lpfc_sli_ring *pring;
890 	unsigned long iflags;
891 	int rc;
892 
893 	if (phba->pport->load_flag & FC_UNLOADING) {
894 		rc = -ENODEV;
895 		goto aerr;
896 	}
897 
903 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
904 	if (ctxp->ts_cmd_nvme) {
905 		if (rsp->op == NVMET_FCOP_RSP)
906 			ctxp->ts_nvme_status = ktime_get_ns();
907 		else
908 			ctxp->ts_nvme_data = ktime_get_ns();
909 	}
910 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
911 		int id = smp_processor_id();
912 		ctxp->cpu = id;
913 		if (id < LPFC_CHECK_CPU_CNT)
914 			phba->cpucheck_xmt_io[id]++;
915 		if (rsp->hwqid != id) {
916 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
917 					"6705 CPU Check OP: "
918 					"cpu %d expect %d\n",
919 					id, rsp->hwqid);
920 			ctxp->cpu = rsp->hwqid;
921 		}
922 	}
923 #endif
924 
925 	/* Sanity check */
926 	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
927 	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
928 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
929 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
930 				"6102 IO xri x%x aborted\n",
931 				ctxp->oxid);
932 		rc = -ENXIO;
933 		goto aerr;
934 	}
935 
936 	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
937 	if (nvmewqeq == NULL) {
938 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
939 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
940 				"6152 FCP Drop IO x%x: Prep\n",
941 				ctxp->oxid);
942 		rc = -ENXIO;
943 		goto aerr;
944 	}
945 
946 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
947 	nvmewqeq->iocb_cmpl = NULL;
948 	nvmewqeq->context2 = ctxp;
949 	nvmewqeq->iocb_flag |=  LPFC_IO_NVMET;
950 	ctxp->wqeq->hba_wqidx = rsp->hwqid;
951 
952 	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
953 			 ctxp->oxid, rsp->op, rsp->rsplen);
954 
955 	ctxp->flag |= LPFC_NVMET_IO_INP;
956 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
957 	if (rc == WQE_SUCCESS) {
958 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
959 		if (!ctxp->ts_cmd_nvme)
960 			return 0;
961 		if (rsp->op == NVMET_FCOP_RSP)
962 			ctxp->ts_status_wqput = ktime_get_ns();
963 		else
964 			ctxp->ts_data_wqput = ktime_get_ns();
965 #endif
966 		return 0;
967 	}
968 
969 	if (rc == -EBUSY) {
970 		/*
971 		 * WQ was full, so queue nvmewqeq to be sent after
972 		 * WQE release CQE
973 		 */
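		/*
		 * lpfc_nvmet_wqfull_process() drains wqfull_list and
		 * re-issues these WQEs once slots free up.
		 */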
974 		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
975 		wq = phba->sli4_hba.nvme_wq[rsp->hwqid];
976 		pring = wq->pring;
977 		spin_lock_irqsave(&pring->ring_lock, iflags);
978 		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
979 		wq->q_flag |= HBA_NVMET_WQFULL;
980 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
981 		atomic_inc(&lpfc_nvmep->defer_wqfull);
982 		return 0;
983 	}
984 
985 	/* Give back resources */
986 	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
987 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
988 			"6153 FCP Drop IO x%x: Issue: %d\n",
989 			ctxp->oxid, rc);
990 
991 	ctxp->wqeq->hba_wqidx = 0;
992 	nvmewqeq->context2 = NULL;
993 	nvmewqeq->context3 = NULL;
994 	rc = -EBUSY;
995 aerr:
996 	return rc;
997 }
998 
999 static void
1000 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1001 {
1002 	struct lpfc_nvmet_tgtport *tport = targetport->private;
1003 
1004 	/* release any threads waiting for the unreg to complete */
1005 	complete(&tport->tport_unreg_done);
1006 }
1007 
1008 static void
1009 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1010 			 struct nvmefc_tgt_fcp_req *req)
1011 {
1012 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1013 	struct lpfc_nvmet_rcv_ctx *ctxp =
1014 		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1015 	struct lpfc_hba *phba = ctxp->phba;
1016 	struct lpfc_queue *wq;
1017 	unsigned long flags;
1018 
1019 	if (phba->pport->load_flag & FC_UNLOADING)
1020 		return;
1021 
1025 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1026 			"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
1027 			ctxp->oxid, ctxp->flag, ctxp->state);
1028 
1029 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1030 			 ctxp->oxid, ctxp->flag, ctxp->state);
1031 
1032 	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1033 
1034 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1035 	ctxp->state = LPFC_NVMET_STE_ABORT;
1036 
1037 	/* Since iaab/iaar are NOT set, we need to check
1038 	 * if the firmware is in the process of aborting this IO
1039 	 */
1040 	if (ctxp->flag & LPFC_NVMET_XBUSY) {
1041 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1042 		return;
1043 	}
1044 	ctxp->flag |= LPFC_NVMET_ABORT_OP;
1045 
1046 	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1047 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1048 						 ctxp->oxid);
1049 		wq = phba->sli4_hba.nvme_wq[ctxp->wqeq->hba_wqidx];
1050 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1051 		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1052 		return;
1053 	}
1054 
1055 	/* A state of LPFC_NVMET_STE_RCV means we have just received
1056 	 * the NVME command and have not yet started processing it
1057 	 * (no IO WQEs have been issued on this exchange).
1058 	 */
1059 	if (ctxp->state == LPFC_NVMET_STE_RCV)
1060 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1061 						 ctxp->oxid);
1062 	else
1063 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1064 					       ctxp->oxid);
1065 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1066 }
1067 
1068 static void
1069 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1070 			   struct nvmefc_tgt_fcp_req *rsp)
1071 {
1072 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1073 	struct lpfc_nvmet_rcv_ctx *ctxp =
1074 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1075 	struct lpfc_hba *phba = ctxp->phba;
1076 	unsigned long flags;
1077 	bool aborting = false;
1078 
1079 	if (ctxp->state != LPFC_NVMET_STE_DONE &&
1080 	    ctxp->state != LPFC_NVMET_STE_ABORT) {
1081 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1082 				"6413 NVMET release bad state %d %d oxid x%x\n",
1083 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1084 	}
1085 
1086 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1087 	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1088 	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
1089 		aborting = true;
1090 		/* let the abort path do the real release */
1091 		lpfc_nvmet_defer_release(phba, ctxp);
1092 	}
1093 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1094 
1095 	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
1096 			 ctxp->oxid, ctxp->state, aborting);
1097 
1098 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1099 
1100 	if (aborting)
1101 		return;
1102 
1103 	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1104 }
1105 
1106 static void
1107 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1108 		     struct nvmefc_tgt_fcp_req *rsp)
1109 {
1110 	struct lpfc_nvmet_tgtport *tgtp;
1111 	struct lpfc_nvmet_rcv_ctx *ctxp =
1112 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1113 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1114 	struct lpfc_hba *phba = ctxp->phba;
1115 
1116 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1117 			 ctxp->oxid, ctxp->size, smp_processor_id());
1118 
1119 	tgtp = phba->targetport->private;
1120 	atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1121 
1122 	/* Free the nvmebuf since a new buffer already replaced it */
1123 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1124 }
1125 
1126 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1127 	.targetport_delete = lpfc_nvmet_targetport_delete,
1128 	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
1129 	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
1130 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
1131 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1132 	.defer_rcv	= lpfc_nvmet_defer_rcv,
1133 
1134 	.max_hw_queues  = 1,
1135 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1136 	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1137 	.dma_boundary = 0xFFFFFFFF,
1138 
1139 	/* optional features */
1140 	.target_features = 0,
1141 	/* sizes of additional private data for data structures */
1142 	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1143 };
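/*
 * Note: max_hw_queues, max_sgl_segments and target_features above are
 * placeholders; lpfc_nvmet_create_targetport() recomputes them from the
 * adapter configuration before registering the targetport.
 */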
1144 
1145 static void
1146 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1147 		struct lpfc_nvmet_ctx_info *infop)
1148 {
1149 	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1150 	unsigned long flags;
1151 
1152 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1153 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1154 				&infop->nvmet_ctx_list, list) {
1155 		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1156 		list_del_init(&ctx_buf->list);
1157 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1158 
1159 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1160 		ctx_buf->sglq->state = SGL_FREED;
1161 		ctx_buf->sglq->ndlp = NULL;
1162 
1163 		spin_lock(&phba->sli4_hba.sgl_list_lock);
1164 		list_add_tail(&ctx_buf->sglq->list,
1165 				&phba->sli4_hba.lpfc_nvmet_sgl_list);
1166 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
1167 
1168 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1169 		kfree(ctx_buf->context);
1170 	}
1171 	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1172 }
1173 
1174 static void
1175 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1176 {
1177 	struct lpfc_nvmet_ctx_info *infop;
1178 	int i, j;
1179 
1180 	/* The first context list, MRQ 0 CPU 0 */
1181 	infop = phba->sli4_hba.nvmet_ctx_info;
1182 	if (!infop)
1183 		return;
1184 
1185 	/* Cycle the entire CPU context list for every MRQ */
1186 	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1187 		for (j = 0; j < phba->sli4_hba.num_present_cpu; j++) {
1188 			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
1189 			infop++; /* next */
1190 		}
1191 	}
1192 	kfree(phba->sli4_hba.nvmet_ctx_info);
1193 	phba->sli4_hba.nvmet_ctx_info = NULL;
1194 }
1195 
1196 static int
1197 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1198 {
1199 	struct lpfc_nvmet_ctxbuf *ctx_buf;
1200 	struct lpfc_iocbq *nvmewqe;
1201 	union lpfc_wqe128 *wqe;
1202 	struct lpfc_nvmet_ctx_info *last_infop;
1203 	struct lpfc_nvmet_ctx_info *infop;
1204 	int i, j, idx;
1205 
1206 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1207 			"6403 Allocate NVMET resources for %d XRIs\n",
1208 			phba->sli4_hba.nvmet_xri_cnt);
1209 
1210 	phba->sli4_hba.nvmet_ctx_info = kcalloc(
1211 		phba->sli4_hba.num_present_cpu * phba->cfg_nvmet_mrq,
1212 		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1213 	if (!phba->sli4_hba.nvmet_ctx_info) {
1214 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1215 				"6419 Failed allocate memory for "
1216 				"nvmet context lists\n");
1217 		return -ENOMEM;
1218 	}
1219 
1220 	/*
1221 	 * Assuming X CPUs in the system, and Y MRQs, allocate some
1222 	 * lpfc_nvmet_ctx_info structures as follows:
1223 	 *
1224 	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1225 	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1226 	 * ...
1227 	 * cpuX/mrqY cpuX/mrqY ... cpuX/mrqY
1228 	 *
1229 	 * Each line represents a MRQ "silo" containing an entry for
1230 	 * every CPU.
1231 	 *
1232 	 * MRQ X is initially assumed to be associated with CPU X, thus
1233 	 * contexts are initially distributed across all MRQs using
1234 	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
1235 	 * freed, the are freed to the MRQ silo based on the CPU number
1236 	 * of the IO completion. Thus a context that was allocated for MRQ A
1237 	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
1238 	 */
1239 	infop = phba->sli4_hba.nvmet_ctx_info;
1240 	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1241 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1242 			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1243 			spin_lock_init(&infop->nvmet_ctx_list_lock);
1244 			infop->nvmet_ctx_list_cnt = 0;
1245 			infop++;
1246 		}
1247 	}
1248 
1249 	/*
1250 	 * Setup the next CPU context info ptr for each MRQ.
1251 	 * MRQ 0 will cycle thru CPUs 0 - X separately from
1252 	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1253 	 */
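	/*
	 * The loop below links each CPU's list entry to the next CPU's,
	 * with the last CPU pointing back to CPU 0, so every MRQ gets a
	 * circular ring that can be walked when one CPU's free list is
	 * empty.
	 */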
1254 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1255 		last_infop = lpfc_get_ctx_list(phba, 0, j);
1256 		for (i = phba->sli4_hba.num_present_cpu - 1;  i >= 0; i--) {
1257 			infop = lpfc_get_ctx_list(phba, i, j);
1258 			infop->nvmet_ctx_next_cpu = last_infop;
1259 			last_infop = infop;
1260 		}
1261 	}
1262 
1263 	/* For all nvmet xris, allocate resources needed to process a
1264 	 * received command on a per xri basis.
1265 	 */
1266 	idx = 0;
1267 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1268 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1269 		if (!ctx_buf) {
1270 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1271 					"6404 Ran out of memory for NVMET\n");
1272 			return -ENOMEM;
1273 		}
1274 
1275 		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1276 					   GFP_KERNEL);
1277 		if (!ctx_buf->context) {
1278 			kfree(ctx_buf);
1279 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1280 					"6405 Ran out of NVMET "
1281 					"context memory\n");
1282 			return -ENOMEM;
1283 		}
1284 		ctx_buf->context->ctxbuf = ctx_buf;
1285 		ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1286 
1287 		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1288 		if (!ctx_buf->iocbq) {
1289 			kfree(ctx_buf->context);
1290 			kfree(ctx_buf);
1291 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1292 					"6406 Ran out of NVMET iocb/WQEs\n");
1293 			return -ENOMEM;
1294 		}
1295 		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1296 		nvmewqe = ctx_buf->iocbq;
1297 		wqe = &nvmewqe->wqe;
1298 
1299 		/* Initialize WQE */
1300 		memset(wqe, 0, sizeof(union lpfc_wqe));
1301 
1302 		ctx_buf->iocbq->context1 = NULL;
1303 		spin_lock(&phba->sli4_hba.sgl_list_lock);
1304 		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1305 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
1306 		if (!ctx_buf->sglq) {
1307 			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1308 			kfree(ctx_buf->context);
1309 			kfree(ctx_buf);
1310 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1311 					"6407 Ran out of NVMET XRIs\n");
1312 			return -ENOMEM;
1313 		}
1314 
1315 		/*
1316 		 * Add ctx to MRQidx context list. Our initial assumption
1317 		 * is MRQidx will be associated with CPUidx. This association
1318 		 * can change on the fly.
1319 		 */
1320 		infop = lpfc_get_ctx_list(phba, idx, idx);
1321 		spin_lock(&infop->nvmet_ctx_list_lock);
1322 		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1323 		infop->nvmet_ctx_list_cnt++;
1324 		spin_unlock(&infop->nvmet_ctx_list_lock);
1325 
1326 		/* Spread ctx structures evenly across all MRQs */
1327 		idx++;
1328 		if (idx >= phba->cfg_nvmet_mrq)
1329 			idx = 0;
1330 	}
1331 
1332 	infop = phba->sli4_hba.nvmet_ctx_info;
1333 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1334 		for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1335 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1336 					"6408 TOTAL NVMET ctx for CPU %d "
1337 					"MRQ %d: cnt %d nextcpu %p\n",
1338 					i, j, infop->nvmet_ctx_list_cnt,
1339 					infop->nvmet_ctx_next_cpu);
1340 			infop++;
1341 		}
1342 	}
1343 	return 0;
1344 }
1345 
1346 int
1347 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1348 {
1349 	struct lpfc_vport  *vport = phba->pport;
1350 	struct lpfc_nvmet_tgtport *tgtp;
1351 	struct nvmet_fc_port_info pinfo;
1352 	int error;
1353 
1354 	if (phba->targetport)
1355 		return 0;
1356 
1357 	error = lpfc_nvmet_setup_io_context(phba);
1358 	if (error)
1359 		return error;
1360 
1361 	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1362 	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1363 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1364 	pinfo.port_id = vport->fc_myDID;
1365 
1366 	/* Limit to LPFC_MAX_NVME_SEG_CNT.
1367 	 * For now we need +1 to get around the NVME transport logic.
1368 	 */
1369 	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
1370 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1371 				"6400 Reducing sg segment cnt to %d\n",
1372 				LPFC_MAX_NVME_SEG_CNT);
1373 		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
1374 	} else {
1375 		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
1376 	}
1377 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1378 	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
1379 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1380 
1381 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1382 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1383 					     &phba->pcidev->dev,
1384 					     &phba->targetport);
1385 #else
1386 	error = -ENOENT;
1387 #endif
1388 	if (error) {
1389 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1390 				"6025 Cannot register NVME targetport x%x: "
1391 				"portnm %llx nodenm %llx segs %d qs %d\n",
1392 				error,
1393 				pinfo.port_name, pinfo.node_name,
1394 				lpfc_tgttemplate.max_sgl_segments,
1395 				lpfc_tgttemplate.max_hw_queues);
1396 		phba->targetport = NULL;
1397 		phba->nvmet_support = 0;
1398 
1399 		lpfc_nvmet_cleanup_io_context(phba);
1400 
1401 	} else {
1402 		tgtp = (struct lpfc_nvmet_tgtport *)
1403 			phba->targetport->private;
1404 		tgtp->phba = phba;
1405 
1406 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1407 				"6026 Registered NVME "
1408 				"targetport: %p, private %p "
1409 				"portnm %llx nodenm %llx segs %d qs %d\n",
1410 				phba->targetport, tgtp,
1411 				pinfo.port_name, pinfo.node_name,
1412 				lpfc_tgttemplate.max_sgl_segments,
1413 				lpfc_tgttemplate.max_hw_queues);
1414 
1415 		atomic_set(&tgtp->rcv_ls_req_in, 0);
1416 		atomic_set(&tgtp->rcv_ls_req_out, 0);
1417 		atomic_set(&tgtp->rcv_ls_req_drop, 0);
1418 		atomic_set(&tgtp->xmt_ls_abort, 0);
1419 		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1420 		atomic_set(&tgtp->xmt_ls_rsp, 0);
1421 		atomic_set(&tgtp->xmt_ls_drop, 0);
1422 		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1423 		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1424 		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1425 		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1426 		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1427 		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1428 		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1429 		atomic_set(&tgtp->xmt_fcp_drop, 0);
1430 		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1431 		atomic_set(&tgtp->xmt_fcp_read, 0);
1432 		atomic_set(&tgtp->xmt_fcp_write, 0);
1433 		atomic_set(&tgtp->xmt_fcp_rsp, 0);
1434 		atomic_set(&tgtp->xmt_fcp_release, 0);
1435 		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1436 		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1437 		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1438 		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1439 		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1440 		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1441 		atomic_set(&tgtp->xmt_fcp_abort, 0);
1442 		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1443 		atomic_set(&tgtp->xmt_abort_unsol, 0);
1444 		atomic_set(&tgtp->xmt_abort_sol, 0);
1445 		atomic_set(&tgtp->xmt_abort_rsp, 0);
1446 		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1447 		atomic_set(&tgtp->defer_ctx, 0);
1448 		atomic_set(&tgtp->defer_fod, 0);
1449 		atomic_set(&tgtp->defer_wqfull, 0);
1450 	}
1451 	return error;
1452 }
1453 
1454 int
1455 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1456 {
1457 	struct lpfc_vport  *vport = phba->pport;
1458 
1459 	if (!phba->targetport)
1460 		return 0;
1461 
1462 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1463 			 "6007 Update NVMET port %p did x%x\n",
1464 			 phba->targetport, vport->fc_myDID);
1465 
1466 	phba->targetport->port_id = vport->fc_myDID;
1467 	return 0;
1468 }
1469 
1470 /**
1471  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1472  * @phba: pointer to lpfc hba data structure.
1473  * @axri: pointer to the nvmet xri abort wcqe structure.
1474  *
1475  * This routine is invoked by the worker thread to process a SLI4 fast-path
1476  * NVMET aborted xri.
1477  **/
1478 void
1479 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1480 			    struct sli4_wcqe_xri_aborted *axri)
1481 {
1482 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1483 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1484 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1485 	struct lpfc_nvmet_tgtport *tgtp;
1486 	struct lpfc_nodelist *ndlp;
1487 	unsigned long iflag = 0;
1488 	int rrq_empty = 0;
1489 	bool released = false;
1490 
1491 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1492 			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1493 
1494 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1495 		return;
1496 
1497 	if (phba->targetport) {
1498 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1499 		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1500 	}
1501 
1502 	spin_lock_irqsave(&phba->hbalock, iflag);
1503 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1504 	list_for_each_entry_safe(ctxp, next_ctxp,
1505 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1506 				 list) {
1507 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1508 			continue;
1509 
1510 		/* Check if we already received a free context call
1511 		 * and we have completed processing an abort situation.
1512 		 */
1513 		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1514 		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1515 			list_del(&ctxp->list);
1516 			released = true;
1517 		}
1518 		ctxp->flag &= ~LPFC_NVMET_XBUSY;
1519 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1520 
1521 		rrq_empty = list_empty(&phba->active_rrq_list);
1522 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1523 		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1524 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1525 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1526 		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1527 			lpfc_set_rrq_active(phba, ndlp,
1528 				ctxp->ctxbuf->sglq->sli4_lxritag,
1529 				rxid, 1);
1530 			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1531 		}
1532 
1533 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1534 				"6318 XB aborted oxid %x flg x%x (%x)\n",
1535 				ctxp->oxid, ctxp->flag, released);
1536 		if (released)
1537 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1538 
1539 		if (rrq_empty)
1540 			lpfc_worker_wake_up(phba);
1541 		return;
1542 	}
1543 	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1544 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1545 }
1546 
1547 int
1548 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1549 			   struct fc_frame_header *fc_hdr)
1550 
1551 {
1552 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1553 	struct lpfc_hba *phba = vport->phba;
1554 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1555 	struct nvmefc_tgt_fcp_req *rsp;
1556 	uint16_t xri;
1557 	unsigned long iflag = 0;
1558 
1559 	xri = be16_to_cpu(fc_hdr->fh_ox_id);
1560 
1561 	spin_lock_irqsave(&phba->hbalock, iflag);
1562 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1563 	list_for_each_entry_safe(ctxp, next_ctxp,
1564 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1565 				 list) {
1566 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1567 			continue;
1568 
1569 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1570 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1571 
1572 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1573 		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1574 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1575 
1576 		lpfc_nvmeio_data(phba,
1577 			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1578 			xri, smp_processor_id(), 0);
1579 
1580 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1581 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1582 
1583 		rsp = &ctxp->ctx.fcp_req;
1584 		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1585 
1586 		/* Respond with BA_ACC accordingly */
1587 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1588 		return 0;
1589 	}
1590 	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1591 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1592 
1593 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1594 			 xri, smp_processor_id(), 1);
1595 
1596 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1597 			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1598 
1599 	/* Respond with BA_RJT accordingly */
1600 	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1601 #endif
1602 	return 0;
1603 }
1604 
1605 static void
1606 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1607 			struct lpfc_nvmet_rcv_ctx *ctxp)
1608 {
1609 	struct lpfc_sli_ring *pring;
1610 	struct lpfc_iocbq *nvmewqeq;
1611 	struct lpfc_iocbq *next_nvmewqeq;
1612 	unsigned long iflags;
1613 	struct lpfc_wcqe_complete wcqe;
1614 	struct lpfc_wcqe_complete *wcqep;
1615 
1616 	pring = wq->pring;
1617 	wcqep = &wcqe;
1618 
1619 	/* Fake an ABORT error code back to cmpl routine */
1620 	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1621 	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1622 	wcqep->parameter = IOERR_ABORT_REQUESTED;
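	/*
	 * lpfc_nvmet_xmt_fcp_op_cmp() treats this status/parameter pair
	 * like a real aborted completion, so flushed IOs are returned to
	 * the NVME layer with a data transfer error.
	 */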
1623 
1624 	spin_lock_irqsave(&pring->ring_lock, iflags);
1625 	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1626 				 &wq->wqfull_list, list) {
1627 		if (ctxp) {
1628 			/* Checking for a specific IO to flush */
1629 			if (nvmewqeq->context2 == ctxp) {
1630 				list_del(&nvmewqeq->list);
1631 				spin_unlock_irqrestore(&pring->ring_lock,
1632 						       iflags);
1633 				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1634 							  wcqep);
1635 				return;
1636 			}
1637 			continue;
1638 		} else {
1639 			/* Flush all IOs */
1640 			list_del(&nvmewqeq->list);
1641 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
1642 			lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1643 			spin_lock_irqsave(&pring->ring_lock, iflags);
1644 		}
1645 	}
1646 	if (!ctxp)
1647 		wq->q_flag &= ~HBA_NVMET_WQFULL;
1648 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
1649 }
1650 
1651 void
1652 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1653 			  struct lpfc_queue *wq)
1654 {
1655 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1656 	struct lpfc_sli_ring *pring;
1657 	struct lpfc_iocbq *nvmewqeq;
1658 	unsigned long iflags;
1659 	int rc;
1660 
1661 	/*
1662 	 * Some WQE slots are available, so try to re-issue anything
1663 	 * on the WQ wqfull_list.
1664 	 */
1665 	pring = wq->pring;
1666 	spin_lock_irqsave(&pring->ring_lock, iflags);
1667 	while (!list_empty(&wq->wqfull_list)) {
1668 		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1669 				 list);
1670 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
1671 		rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
1672 		spin_lock_irqsave(&pring->ring_lock, iflags);
1673 		if (rc == -EBUSY) {
1674 			/* WQ was full again; put it back at the head to keep order */
1675 			list_add(&nvmewqeq->list, &wq->wqfull_list);
1676 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
1677 			return;
1678 		}
1679 	}
1680 	wq->q_flag &= ~HBA_NVMET_WQFULL;
1681 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
1682 
1683 #endif
1684 }
1685 
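/**
 * lpfc_nvmet_destroy_targetport - Unregister the driver's NVMET targetport
 * @phba: pointer to lpfc hba data structure.
 *
 * Flushes any WQEs still parked on the per-channel wqfull lists,
 * unregisters the targetport from the NVME-FC transport, waits a
 * bounded time for the unregister completion, and then releases the
 * driver's NVMET IO context resources.
 */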
1686 void
1687 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1688 {
1689 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1690 	struct lpfc_nvmet_tgtport *tgtp;
1691 	struct lpfc_queue *wq;
1692 	uint32_t qidx;
1693 
1694 	if (phba->nvmet_support == 0)
1695 		return;
1696 	if (phba->targetport) {
1697 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1698 		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
1699 			wq = phba->sli4_hba.nvme_wq[qidx];
1700 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1701 		}
1702 		init_completion(&tgtp->tport_unreg_done);
1703 		nvmet_fc_unregister_targetport(phba->targetport);
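		/* Assumption: the intended timeout is 5 seconds, not 5 jiffies */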
1704 		wait_for_completion_timeout(&tgtp->tport_unreg_done, msecs_to_jiffies(5000));
1705 		lpfc_nvmet_cleanup_io_context(phba);
1706 	}
1707 	phba->targetport = NULL;
1708 #endif
1709 }
1710 
1711 /**
1712  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1713  * @phba: pointer to lpfc hba data structure.
1714  * @pring: pointer to a SLI ring.
1715  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1716  *
1717  * This routine processes an unsolicited NVME LS request received from an
1718  * initiator. It allocates a receive context for the exchange, saves the
1719  * OX_ID and S_ID from the FC header, and hands the LS payload to the
1720  * NVME-FC transport via nvmet_fc_rcv_ls_req(). If the transport rejects
1721  * the request, the receive buffer is freed and an ABTS is issued for
1722  * the exchange.
1723  **/
1724 static void
1725 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1726 			   struct hbq_dmabuf *nvmebuf)
1727 {
1728 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1729 	struct lpfc_nvmet_tgtport *tgtp;
1730 	struct fc_frame_header *fc_hdr;
1731 	struct lpfc_nvmet_rcv_ctx *ctxp;
1732 	uint32_t *payload;
1733 	uint32_t size, oxid, sid, rc;
1734 
1735 	if (!nvmebuf || !phba->targetport) {
1736 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1737 				"6154 LS Drop IO\n");
1738 		oxid = 0;
1739 		size = 0;
1740 		sid = 0;
1741 		ctxp = NULL;
1742 		goto dropit;
1743 	}
1744 
1745 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1746 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1747 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1748 	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1749 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1750 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1751 
1752 	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1753 	if (ctxp == NULL) {
1754 		atomic_inc(&tgtp->rcv_ls_req_drop);
1755 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1756 				"6155 LS Drop IO x%x: Alloc\n",
1757 				oxid);
1758 dropit:
1759 		lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1760 				 "xri x%x sz %d from %06x\n",
1761 				 oxid, size, sid);
1762 		if (nvmebuf)
1763 			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1764 		return;
1765 	}
1766 	ctxp->phba = phba;
1767 	ctxp->size = size;
1768 	ctxp->oxid = oxid;
1769 	ctxp->sid = sid;
1770 	ctxp->wqeq = NULL;
1771 	ctxp->state = LPFC_NVMET_STE_LS_RCV;
1772 	ctxp->entry_cnt = 1;
1773 	ctxp->rqb_buffer = (void *)nvmebuf;
1774 
1775 	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1776 			 oxid, size, sid);
1777 	/*
1778 	 * The calling sequence should be:
1779 	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1780 	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1781 	 */
1782 	atomic_inc(&tgtp->rcv_ls_req_in);
1783 	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1784 				 payload, size);
1785 
1786 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1787 			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
1788 			"%08x %08x %08x\n", size, rc,
1789 			*payload, *(payload+1), *(payload+2),
1790 			*(payload+3), *(payload+4), *(payload+5));
1791 
1792 	if (rc == 0) {
1793 		atomic_inc(&tgtp->rcv_ls_req_out);
1794 		return;
1795 	}
1796 
1797 	lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1798 			 oxid, size, sid);
1799 
1800 	atomic_inc(&tgtp->rcv_ls_req_drop);
1801 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1802 			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1803 			ctxp->oxid, rc);
1804 
1805 	/* We assume a received command always fits into one buffer */
1806 	if (nvmebuf)
1807 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1808 
1809 	atomic_inc(&tgtp->xmt_ls_abort);
1810 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1811 #endif
1812 }
1813 
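/**
 * lpfc_nvmet_replenish_context - Refill an empty MRQ context list
 * @phba: pointer to lpfc hba data structure.
 * @current_infop: context list info for the MRQ/CPU that ran dry.
 *
 * Walks the other CPUs' context lists for this MRQ, starting with the
 * CPU that last had contexts available, and splices the first
 * non-empty list found onto @current_infop. Returns one context
 * buffer from the newly acquired list, or NULL if every context for
 * the MRQ is currently in-flight.
 */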
1814 static struct lpfc_nvmet_ctxbuf *
1815 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
1816 			     struct lpfc_nvmet_ctx_info *current_infop)
1817 {
1818 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1819 	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
1820 	struct lpfc_nvmet_ctx_info *get_infop;
1821 	int i;
1822 
1823 	/*
1824 	 * The current_infop for the MRQ an NVME command IU was received
1825 	 * on is empty. Our goal is to replenish this MRQ's context
1826 	 * list from another CPU.
1827 	 *
1828 	 * First we need to pick a context list to start looking on.
1829 	 * nvmet_ctx_start_cpu had an available context the last time
1830 	 * we needed to replenish this CPU, whereas nvmet_ctx_next_cpu
1831 	 * is simply the next sequential CPU for this MRQ.
1832 	 */
1833 	if (current_infop->nvmet_ctx_start_cpu)
1834 		get_infop = current_infop->nvmet_ctx_start_cpu;
1835 	else
1836 		get_infop = current_infop->nvmet_ctx_next_cpu;
1837 
1838 	for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
1839 		if (get_infop == current_infop) {
1840 			get_infop = get_infop->nvmet_ctx_next_cpu;
1841 			continue;
1842 		}
1843 		spin_lock(&get_infop->nvmet_ctx_list_lock);
1844 
1845 		/* Just take the entire context list, if it has any entries */
1846 		if (get_infop->nvmet_ctx_list_cnt) {
1847 			list_splice_init(&get_infop->nvmet_ctx_list,
1848 				    &current_infop->nvmet_ctx_list);
1849 			current_infop->nvmet_ctx_list_cnt =
1850 				get_infop->nvmet_ctx_list_cnt - 1;
1851 			get_infop->nvmet_ctx_list_cnt = 0;
1852 			spin_unlock(&get_infop->nvmet_ctx_list_lock);
1853 
1854 			current_infop->nvmet_ctx_start_cpu = get_infop;
1855 			list_remove_head(&current_infop->nvmet_ctx_list,
1856 					 ctx_buf, struct lpfc_nvmet_ctxbuf,
1857 					 list);
1858 			return ctx_buf;
1859 		}
1860 
1861 		/* Otherwise, move on to the next CPU for this MRQ */
1862 		spin_unlock(&get_infop->nvmet_ctx_list_lock);
1863 		get_infop = get_infop->nvmet_ctx_next_cpu;
1864 	}
1865 
1866 #endif
1867 	/* Nothing found, all contexts for the MRQ are in-flight */
1868 	return NULL;
1869 }
1870 
1871 /**
1872  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1873  * @phba: pointer to lpfc hba data structure.
1874  * @idx: relative index of MRQ vector
1875  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1876  * @isr_timestamp: ISR entry timestamp used for latency statistics.
1877  *
1878  * This routine processes an unsolicited NVME FCP command IU. It pulls a
1879  * receive context buffer from the per-CPU list for this MRQ (replenishing
1880  * it from another CPU if empty), initializes the receive context, and
1881  * hands the command IU to the NVME-FC transport via nvmet_fc_rcv_fcp_req().
1882  * If no context is available, the IO is queued for deferred processing.
1883  **/
1884 static void
1885 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1886 			    uint32_t idx,
1887 			    struct rqb_dmabuf *nvmebuf,
1888 			    uint64_t isr_timestamp)
1889 {
1890 	struct lpfc_nvmet_rcv_ctx *ctxp;
1891 	struct lpfc_nvmet_tgtport *tgtp;
1892 	struct fc_frame_header *fc_hdr;
1893 	struct lpfc_nvmet_ctxbuf *ctx_buf;
1894 	struct lpfc_nvmet_ctx_info *current_infop;
1895 	uint32_t *payload;
1896 	uint32_t size, oxid, sid, rc, qno;
1897 	unsigned long iflag;
1898 	int current_cpu;
1899 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1900 	uint32_t id;
1901 #endif
1902 
1903 	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
1904 		return;
1905 
1906 	ctx_buf = NULL;
1907 	if (!nvmebuf || !phba->targetport) {
1908 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1909 				"6157 NVMET FCP Drop IO\n");
1910 		oxid = 0;
1911 		size = 0;
1912 		sid = 0;
1913 		ctxp = NULL;
1914 		goto dropit;
1915 	}
1916 
1917 	/*
1918 	 * Get a pointer to the context list for this MRQ based on
1919 	 * the CPU this MRQ IRQ is associated with. If the CPU association
1920 	 * changes from our initial assumption, the context list could
1921 	 * be empty, thus it would need to be replenished with the
1922 	 * context list from another CPU for this MRQ.
1923 	 */
1924 	current_cpu = smp_processor_id();
1925 	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
1926 	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
1927 	if (current_infop->nvmet_ctx_list_cnt) {
1928 		list_remove_head(&current_infop->nvmet_ctx_list,
1929 				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1930 		current_infop->nvmet_ctx_list_cnt--;
1931 	} else {
1932 		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
1933 	}
1934 	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
1935 
1936 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1937 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1938 	size = nvmebuf->bytes_recv;
1939 
1940 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1941 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1942 		id = smp_processor_id();
1943 		if (id < LPFC_CHECK_CPU_CNT)
1944 			phba->cpucheck_rcv_io[id]++;
1945 	}
1946 #endif
1947 
1948 	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
1949 			 oxid, size, smp_processor_id());
1950 
1951 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1952 
1953 	if (!ctx_buf) {
1954 		/* Queue this NVME IO to process later */
1955 		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1956 		list_add_tail(&nvmebuf->hbuf.list,
1957 			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1958 		phba->sli4_hba.nvmet_io_wait_cnt++;
1959 		phba->sli4_hba.nvmet_io_wait_total++;
1960 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1961 				       iflag);
1962 
1963 		/* Post a brand new DMA buffer to RQ */
1964 		qno = nvmebuf->idx;
1965 		lpfc_post_rq_buffer(
1966 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1967 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1968 
1969 		atomic_inc(&tgtp->defer_ctx);
1970 		return;
1971 	}
1972 
1973 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1974 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1975 
1976 	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1977 	if (ctxp->state != LPFC_NVMET_STE_FREE) {
1978 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1979 				"6414 NVMET Context corrupt %d %d oxid x%x\n",
1980 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1981 	}
1982 	ctxp->wqeq = NULL;
1983 	ctxp->txrdy = NULL;
1984 	ctxp->offset = 0;
1985 	ctxp->phba = phba;
1986 	ctxp->size = size;
1987 	ctxp->oxid = oxid;
1988 	ctxp->sid = sid;
1989 	ctxp->idx = idx;
1990 	ctxp->state = LPFC_NVMET_STE_RCV;
1991 	ctxp->entry_cnt = 1;
1992 	ctxp->flag = 0;
1993 	ctxp->ctxbuf = ctx_buf;
1994 	ctxp->rqb_buffer = (void *)nvmebuf;
1995 	spin_lock_init(&ctxp->ctxlock);
1996 
1997 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1998 	if (isr_timestamp) {
1999 		ctxp->ts_isr_cmd = isr_timestamp;
2000 		ctxp->ts_cmd_nvme = ktime_get_ns();
2001 		ctxp->ts_nvme_data = 0;
2002 		ctxp->ts_data_wqput = 0;
2003 		ctxp->ts_isr_data = 0;
2004 		ctxp->ts_data_nvme = 0;
2005 		ctxp->ts_nvme_status = 0;
2006 		ctxp->ts_status_wqput = 0;
2007 		ctxp->ts_isr_status = 0;
2008 		ctxp->ts_status_nvme = 0;
2009 	} else {
2010 		ctxp->ts_cmd_nvme = 0;
2011 	}
2012 #endif
2013 
2014 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
2015 	/*
2016 	 * The calling sequence should be:
2017 	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2018 	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2019 	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2020 	 * the NVME command / FC header is stored, so we are free to repost
2021 	 * the buffer.
2022 	 */
2023 	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2024 				  payload, size);
2025 
2026 	/* Process FCP command */
2027 	if (rc == 0) {
2028 		ctxp->rqb_buffer = NULL;
2029 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
2030 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2031 		return;
2032 	}
2033 
2034 	/* Processing of FCP command is deferred */
2035 	if (rc == -EOVERFLOW) {
2036 		/*
2037 		 * Post a brand new DMA buffer to RQ and defer
2038 		 * freeing rcv buffer till .defer_rcv callback
2039 		 */
2040 		qno = nvmebuf->idx;
2041 		lpfc_post_rq_buffer(
2042 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2043 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2044 
2045 		lpfc_nvmeio_data(phba,
2046 				 "NVMET RCV BUSY: xri x%x sz %d from %06x\n",
2047 				 oxid, size, sid);
2048 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
2049 		atomic_inc(&tgtp->defer_fod);
2050 		return;
2051 	}
2052 	ctxp->rqb_buffer = nvmebuf;
2053 
2054 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2055 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2056 			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2057 			ctxp->oxid, rc,
2058 			atomic_read(&tgtp->rcv_fcp_cmd_in),
2059 			atomic_read(&tgtp->rcv_fcp_cmd_out),
2060 			atomic_read(&tgtp->xmt_fcp_release));
2061 dropit:
2062 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2063 			 oxid, size, sid);
2064 	if (oxid) {
2065 		lpfc_nvmet_defer_release(phba, ctxp);
2066 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2067 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2068 		return;
2069 	}
2070 
2071 	if (ctx_buf)
2072 		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
2073 
2074 	if (nvmebuf)
2075 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2076 }
2077 
2078 /**
2079  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2080  * @phba: pointer to lpfc hba data structure.
2081  * @pring: pointer to a SLI ring.
2082  * @piocb: pointer to the driver iocbq carrying the received buffer.
2083  *
2084  * This routine is used to process an unsolicited event received from a SLI
2085  * (Service Level Interface) ring. The actual processing of the data buffer
2086  * is done by invoking lpfc_nvmet_unsol_ls_buffer() after properly setting
2087  * up the buffer from the SLI RQ on which the unsolicited event was
2088  * received. The buffer is freed immediately if NVMET support is disabled.
2089  **/
2090 void
2091 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2092 			  struct lpfc_iocbq *piocb)
2093 {
2094 	struct lpfc_dmabuf *d_buf;
2095 	struct hbq_dmabuf *nvmebuf;
2096 
2097 	d_buf = piocb->context2;
2098 	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2099 
2100 	if (phba->nvmet_support == 0) {
2101 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2102 		return;
2103 	}
2104 	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2105 }
2106 
2107 /**
2108  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2109  * @phba: pointer to lpfc hba data structure.
2110  * @idx: relative index of MRQ vector
2111  * @nvmebuf: pointer to received nvme data structure.
2112  * @isr_timestamp: ISR entry timestamp used for latency statistics.
2113  *
2114  * This routine is used to process an unsolicited event received from a SLI
2115  * (Service Level Interface) ring. The actual processing of the data buffer
2116  * is done by invoking lpfc_nvmet_unsol_fcp_buffer() after properly setting
2117  * up the buffer from the SLI RQ on which the unsolicited event was received.
2118  **/
2119 void
2120 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2121 			   uint32_t idx,
2122 			   struct rqb_dmabuf *nvmebuf,
2123 			   uint64_t isr_timestamp)
2124 {
2125 	if (phba->nvmet_support == 0) {
2126 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2127 		return;
2128 	}
2129 	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf,
2130 				    isr_timestamp);
2131 }
2132 
2133 /**
2134  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2135  * @phba: pointer to a host N_Port data structure.
2136  * @ctxp: Context info for NVME LS Request
2137  * @rspbuf: DMA address of the NVME LS response payload.
2138  * @rspsize: size of the NVME LS response payload.
2139  *
2140  * This routine allocates a lpfc-WQE data structure from the driver
2141  * iocbq free-list and prepares an XMIT_SEQUENCE64 WQE to transmit
2142  * the NVME LS response described by @rspbuf and @rspsize on the
2143  * exchange tracked by @ctxp. It fills in the Buffer Descriptor
2144  * Entry (BDE) for the response payload and sets up the common WQE
2145  * words: the RPI of the destination node, the XRI and request tag
2146  * of the WQE, and the OX_ID received with the original LS request.
2147  * The WQE is marked as an NVME LS IO and given a driver timeout
2148  * derived from R_A_TOV. The reference count on the ndlp is
2149  * incremented by 1 and the reference to the ndlp is put into
2150  * context1 of the WQE data structure for this WQE to hold the
2151  * ndlp reference for the command's completion handler to access
2152  * later.
2153  *
2154  * Return code
2155  *   Pointer to the newly allocated/prepared nvme wqe data structure
2156  *   NULL - when nvme wqe data structure allocation/preparation failed
2157  **/
2158 static struct lpfc_iocbq *
2159 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2160 		       struct lpfc_nvmet_rcv_ctx *ctxp,
2161 		       dma_addr_t rspbuf, uint16_t rspsize)
2162 {
2163 	struct lpfc_nodelist *ndlp;
2164 	struct lpfc_iocbq *nvmewqe;
2165 	union lpfc_wqe128 *wqe;
2166 
2167 	if (!lpfc_is_link_up(phba)) {
2168 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2169 				"6104 NVMET prep LS wqe: link err: "
2170 				"NPORT x%x oxid:x%x ste %d\n",
2171 				ctxp->sid, ctxp->oxid, ctxp->state);
2172 		return NULL;
2173 	}
2174 
2175 	/* Allocate buffer for command wqe */
2176 	nvmewqe = lpfc_sli_get_iocbq(phba);
2177 	if (nvmewqe == NULL) {
2178 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2179 				"6105 NVMET prep LS wqe: No WQE: "
2180 				"NPORT x%x oxid x%x ste %d\n",
2181 				ctxp->sid, ctxp->oxid, ctxp->state);
2182 		return NULL;
2183 	}
2184 
2185 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2186 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2187 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2188 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2189 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2190 				"6106 NVMET prep LS wqe: No ndlp: "
2191 				"NPORT x%x oxid x%x ste %d\n",
2192 				ctxp->sid, ctxp->oxid, ctxp->state);
2193 		goto nvme_wqe_free_wqeq_exit;
2194 	}
2195 	ctxp->wqeq = nvmewqe;
2196 
2197 	/* prevent preparing wqe with NULL ndlp reference */
2198 	nvmewqe->context1 = lpfc_nlp_get(ndlp);
2199 	if (nvmewqe->context1 == NULL)
2200 		goto nvme_wqe_free_wqeq_exit;
2201 	nvmewqe->context2 = ctxp;
2202 
2203 	wqe = &nvmewqe->wqe;
2204 	memset(wqe, 0, sizeof(union lpfc_wqe));
2205 
2206 	/* Words 0 - 2 */
2207 	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2208 	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2209 	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2210 	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2211 
2212 	/* Word 3 */
2213 
2214 	/* Word 4 */
2215 
2216 	/* Word 5 */
2217 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2218 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2219 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2220 	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2221 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2222 
2223 	/* Word 6 */
2224 	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2225 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2226 	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2227 
2228 	/* Word 7 */
2229 	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2230 	       CMD_XMIT_SEQUENCE64_WQE);
2231 	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2232 	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2233 	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2234 
2235 	/* Word 8 */
2236 	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2237 
2238 	/* Word 9 */
2239 	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2240 	/* Needs to be set by caller */
2241 	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2242 
2243 	/* Word 10 */
2244 	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2245 	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2246 	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2247 	       LPFC_WQE_LENLOC_WORD12);
2248 	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2249 
2250 	/* Word 11 */
2251 	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2252 	       LPFC_WQE_CQ_ID_DEFAULT);
2253 	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2254 	       OTHER_COMMAND);
2255 
2256 	/* Word 12 */
2257 	wqe->xmit_sequence.xmit_len = rspsize;
2258 
2259 	nvmewqe->retry = 1;
2260 	nvmewqe->vport = phba->pport;
2261 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2262 	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2263 
2264 	/* Xmit NVMET response to remote NPORT <did> */
2265 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2266 			"6039 Xmit NVMET LS response to remote "
2267 			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2268 			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2269 			rspsize);
2270 	return nvmewqe;
2271 
2272 nvme_wqe_free_wqeq_exit:
2273 	nvmewqe->context2 = NULL;
2274 	nvmewqe->context3 = NULL;
2275 	lpfc_sli_release_iocbq(phba, nvmewqe);
2276 	return NULL;
2277 }
2278 
2279 
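/**
 * lpfc_nvmet_prep_fcp_wqe - Prepare a TSEND, TRECEIVE or TRSP WQE
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: Context info for the NVME FCP request.
 *
 * Builds the WQE that matches the transport op in ctxp->ctx.fcp_req:
 * FCP_TSEND64 for READDATA/READDATA_RSP, FCP_TRECEIVE64 for WRITEDATA
 * (including the TXRDY payload), or FCP_TRSP64 for RSP. Words are
 * seeded from the corresponding command template and the SGL is then
 * populated from the request's scatter/gather list. Returns the
 * prepared WQE, or NULL on any validation or allocation failure.
 */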
2280 static struct lpfc_iocbq *
2281 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2282 			struct lpfc_nvmet_rcv_ctx *ctxp)
2283 {
2284 	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2285 	struct lpfc_nvmet_tgtport *tgtp;
2286 	struct sli4_sge *sgl;
2287 	struct lpfc_nodelist *ndlp;
2288 	struct lpfc_iocbq *nvmewqe;
2289 	struct scatterlist *sgel;
2290 	union lpfc_wqe128 *wqe;
2291 	struct ulp_bde64 *bde;
2292 	uint32_t *txrdy;
2293 	dma_addr_t physaddr;
2294 	int i, cnt;
2295 	int do_pbde;
2296 	int xc = 1;
2297 
2298 	if (!lpfc_is_link_up(phba)) {
2299 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2300 				"6107 NVMET prep FCP wqe: link err:"
2301 				"NPORT x%x oxid x%x ste %d\n",
2302 				ctxp->sid, ctxp->oxid, ctxp->state);
2303 		return NULL;
2304 	}
2305 
2306 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2307 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2308 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2309 	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2310 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2311 				"6108 NVMET prep FCP wqe: no ndlp: "
2312 				"NPORT x%x oxid x%x ste %d\n",
2313 				ctxp->sid, ctxp->oxid, ctxp->state);
2314 		return NULL;
2315 	}
2316 
2317 	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2318 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2319 				"6109 NVMET prep FCP wqe: seg cnt err: "
2320 				"NPORT x%x oxid x%x ste %d cnt %d\n",
2321 				ctxp->sid, ctxp->oxid, ctxp->state,
2322 				phba->cfg_nvme_seg_cnt);
2323 		return NULL;
2324 	}
2325 
2326 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2327 	nvmewqe = ctxp->wqeq;
2328 	if (nvmewqe == NULL) {
2329 		/* Allocate buffer for command wqe */
2330 		nvmewqe = ctxp->ctxbuf->iocbq;
2331 		if (nvmewqe == NULL) {
2332 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2333 					"6110 NVMET prep FCP wqe: No "
2334 					"WQE: NPORT x%x oxid x%x ste %d\n",
2335 					ctxp->sid, ctxp->oxid, ctxp->state);
2336 			return NULL;
2337 		}
2338 		ctxp->wqeq = nvmewqe;
2339 		xc = 0; /* create new XRI */
2340 		nvmewqe->sli4_lxritag = NO_XRI;
2341 		nvmewqe->sli4_xritag = NO_XRI;
2342 	}
2343 
2344 	/* Sanity check */
2345 	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2346 	    (ctxp->entry_cnt == 1)) ||
2347 	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
2348 		wqe = &nvmewqe->wqe;
2349 	} else {
2350 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2351 				"6111 Wrong state NVMET FCP: %d  cnt %d\n",
2352 				ctxp->state, ctxp->entry_cnt);
2353 		return NULL;
2354 	}
2355 
2356 	sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
2357 	switch (rsp->op) {
2358 	case NVMET_FCOP_READDATA:
2359 	case NVMET_FCOP_READDATA_RSP:
2360 		/* From the tsend template, initialize words 7 - 11 */
2361 		memcpy(&wqe->words[7],
2362 		       &lpfc_tsend_cmd_template.words[7],
2363 		       sizeof(uint32_t) * 5);
2364 
2365 		/* Words 0 - 2 : The first sg segment */
2366 		sgel = &rsp->sg[0];
2367 		physaddr = sg_dma_address(sgel);
2368 		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2369 		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2370 		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2371 		wqe->fcp_tsend.bde.addrHigh =
2372 			cpu_to_le32(putPaddrHigh(physaddr));
2373 
2374 		/* Word 3 */
2375 		wqe->fcp_tsend.payload_offset_len = 0;
2376 
2377 		/* Word 4 */
2378 		wqe->fcp_tsend.relative_offset = ctxp->offset;
2379 
2380 		/* Word 5 */
2381 		wqe->fcp_tsend.reserved = 0;
2382 
2383 		/* Word 6 */
2384 		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2385 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2386 		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2387 		       nvmewqe->sli4_xritag);
2388 
2389 		/* Word 7 - set ar later */
2390 
2391 		/* Word 8 */
2392 		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2393 
2394 		/* Word 9 */
2395 		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2396 		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2397 
2398 		/* Word 10 - set wqes later, in template xc=1 */
2399 		if (!xc)
2400 			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2401 
2402 		/* Word 11 - set sup, irsp, irsplen later */
2403 		do_pbde = 0;
2404 
2405 		/* Word 12 */
2406 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2407 
2408 		/* Setup 2 SKIP SGEs */
2409 		sgl->addr_hi = 0;
2410 		sgl->addr_lo = 0;
2411 		sgl->word2 = 0;
2412 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2413 		sgl->word2 = cpu_to_le32(sgl->word2);
2414 		sgl->sge_len = 0;
2415 		sgl++;
2416 		sgl->addr_hi = 0;
2417 		sgl->addr_lo = 0;
2418 		sgl->word2 = 0;
2419 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2420 		sgl->word2 = cpu_to_le32(sgl->word2);
2421 		sgl->sge_len = 0;
2422 		sgl++;
2423 		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2424 			atomic_inc(&tgtp->xmt_fcp_read_rsp);
2425 
2426 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2427 
2428 			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2429 				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2430 					bf_set(wqe_sup,
2431 					       &wqe->fcp_tsend.wqe_com, 1);
2432 			} else {
2433 				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2434 				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2435 				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2436 				       ((rsp->rsplen >> 2) - 1));
2437 				memcpy(&wqe->words[16], rsp->rspaddr,
2438 				       rsp->rsplen);
2439 			}
2440 		} else {
2441 			atomic_inc(&tgtp->xmt_fcp_read);
2442 
2443 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2444 			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2445 		}
2446 		break;
2447 
2448 	case NVMET_FCOP_WRITEDATA:
2449 		/* From the treceive template, initialize words 3 - 11 */
2450 		memcpy(&wqe->words[3],
2451 		       &lpfc_treceive_cmd_template.words[3],
2452 		       sizeof(uint32_t) * 9);
2453 
2454 		/* Words 0 - 2 : The first sg segment */
2455 		txrdy = dma_pool_alloc(phba->txrdy_payload_pool,
2456 				       GFP_KERNEL, &physaddr);
2457 		if (!txrdy) {
2458 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2459 					"6041 Bad txrdy buffer: oxid x%x\n",
2460 					ctxp->oxid);
2461 			return NULL;
2462 		}
2463 		ctxp->txrdy = txrdy;
2464 		ctxp->txrdy_phys = physaddr;
2465 		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2466 		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
2467 		wqe->fcp_treceive.bde.addrLow =
2468 			cpu_to_le32(putPaddrLow(physaddr));
2469 		wqe->fcp_treceive.bde.addrHigh =
2470 			cpu_to_le32(putPaddrHigh(physaddr));
2471 
2472 		/* Word 4 */
2473 		wqe->fcp_treceive.relative_offset = ctxp->offset;
2474 
2475 		/* Word 6 */
2476 		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2477 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2478 		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2479 		       nvmewqe->sli4_xritag);
2480 
2481 		/* Word 7 */
2482 
2483 		/* Word 8 */
2484 		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2485 
2486 		/* Word 9 */
2487 		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2488 		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2489 
2490 		/* Word 10 - in template xc=1 */
2491 		if (!xc)
2492 			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2493 
2494 		/* Word 11 - set pbde later */
2495 		if (phba->nvme_embed_pbde) {
2496 			do_pbde = 1;
2497 		} else {
2498 			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2499 			do_pbde = 0;
2500 		}
2501 
2502 		/* Word 12 */
2503 		wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
2504 
2505 		/* Setup 1 TXRDY and 1 SKIP SGE */
2506 		txrdy[0] = 0;
2507 		txrdy[1] = cpu_to_be32(rsp->transfer_length);
2508 		txrdy[2] = 0;
2509 
2510 		sgl->addr_hi = putPaddrHigh(physaddr);
2511 		sgl->addr_lo = putPaddrLow(physaddr);
2512 		sgl->word2 = 0;
2513 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2514 		sgl->word2 = cpu_to_le32(sgl->word2);
2515 		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
2516 		sgl++;
2517 		sgl->addr_hi = 0;
2518 		sgl->addr_lo = 0;
2519 		sgl->word2 = 0;
2520 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2521 		sgl->word2 = cpu_to_le32(sgl->word2);
2522 		sgl->sge_len = 0;
2523 		sgl++;
2524 		atomic_inc(&tgtp->xmt_fcp_write);
2525 		break;
2526 
2527 	case NVMET_FCOP_RSP:
2528 		/* From the trsp template, initialize words 4 - 11 */
2529 		memcpy(&wqe->words[4],
2530 		       &lpfc_trsp_cmd_template.words[4],
2531 		       sizeof(uint32_t) * 8);
2532 
2533 		/* Words 0 - 2 */
2534 		physaddr = rsp->rspdma;
2535 		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2536 		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2537 		wqe->fcp_trsp.bde.addrLow =
2538 			cpu_to_le32(putPaddrLow(physaddr));
2539 		wqe->fcp_trsp.bde.addrHigh =
2540 			cpu_to_le32(putPaddrHigh(physaddr));
2541 
2542 		/* Word 3 */
2543 		wqe->fcp_trsp.response_len = rsp->rsplen;
2544 
2545 		/* Word 6 */
2546 		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2547 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2548 		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2549 		       nvmewqe->sli4_xritag);
2550 
2551 		/* Word 7 */
2552 
2553 		/* Word 8 */
2554 		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2555 
2556 		/* Word 9 */
2557 		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2558 		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2559 
2560 		/* Word 10 */
2561 		if (xc)
2562 			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2563 
2564 		/* Word 11 */
2565 		/* In template wqes=0 irsp=0 irsplen=0 - good response */
2566 		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2567 			/* Bad response - embed it */
2568 			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2569 			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2570 			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2571 			       ((rsp->rsplen >> 2) - 1));
2572 			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2573 		}
2574 		do_pbde = 0;
2575 
2576 		/* Word 12 */
2577 		wqe->fcp_trsp.rsvd_12_15[0] = 0;
2578 
2579 		/* Use rspbuf, NOT sg list */
2580 		rsp->sg_cnt = 0;
2581 		sgl->word2 = 0;
2582 		atomic_inc(&tgtp->xmt_fcp_rsp);
2583 		break;
2584 
2585 	default:
2586 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2587 				"6064 Unknown Rsp Op %d\n",
2588 				rsp->op);
2589 		return NULL;
2590 	}
2591 
2592 	nvmewqe->retry = 1;
2593 	nvmewqe->vport = phba->pport;
2594 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2595 	nvmewqe->context1 = ndlp;
2596 
2597 	for (i = 0; i < rsp->sg_cnt; i++) {
2598 		sgel = &rsp->sg[i];
2599 		physaddr = sg_dma_address(sgel);
2600 		cnt = sg_dma_len(sgel);
2601 		sgl->addr_hi = putPaddrHigh(physaddr);
2602 		sgl->addr_lo = putPaddrLow(physaddr);
2603 		sgl->word2 = 0;
2604 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2605 		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2606 		if ((i+1) == rsp->sg_cnt)
2607 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2608 		sgl->word2 = cpu_to_le32(sgl->word2);
2609 		sgl->sge_len = cpu_to_le32(cnt);
2610 		if (do_pbde && i == 0) {
2611 			bde = (struct ulp_bde64 *)&wqe->words[13];
2612 			memset(bde, 0, sizeof(struct ulp_bde64));
2613 			/* Words 13-15 (PBDE) */
2614 			bde->addrLow = sgl->addr_lo;
2615 			bde->addrHigh = sgl->addr_hi;
2616 			bde->tus.f.bdeSize =
2617 				le32_to_cpu(sgl->sge_len);
2618 			bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2619 			bde->tus.w = cpu_to_le32(bde->tus.w);
2620 		}
2621 		sgl++;
2622 		ctxp->offset += cnt;
2623 	}
2624 	ctxp->state = LPFC_NVMET_STE_DATA;
2625 	ctxp->entry_cnt++;
2626 	return nvmewqe;
2627 }
2628 
2629 /**
2630  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2631  * @phba: Pointer to HBA context object.
2632  * @cmdwqe: Pointer to driver command WQE object.
2633  * @wcqe: Pointer to driver response CQE object.
2634  *
2635  * The function is called from the SLI ring event handler with no
2636  * lock held. This function is the completion handler for an NVME
2637  * ABTS for FCP cmds. It frees memory resources used for the command.
2638  **/
2639 static void
2640 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2641 			     struct lpfc_wcqe_complete *wcqe)
2642 {
2643 	struct lpfc_nvmet_rcv_ctx *ctxp;
2644 	struct lpfc_nvmet_tgtport *tgtp;
2645 	uint32_t status, result;
2646 	unsigned long flags;
2647 	bool released = false;
2648 
2649 	ctxp = cmdwqe->context2;
2650 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2651 	result = wcqe->parameter;
2652 
2653 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2654 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2655 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2656 
2657 	ctxp->state = LPFC_NVMET_STE_DONE;
2658 
2659 	/* Check if we already received a free context call
2660 	 * and we have completed processing an abort situation.
2661 	 */
2662 	spin_lock_irqsave(&ctxp->ctxlock, flags);
2663 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2664 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2665 		list_del(&ctxp->list);
2666 		released = true;
2667 	}
2668 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2669 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2670 	atomic_inc(&tgtp->xmt_abort_rsp);
2671 
2672 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2673 			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
2674 			"WCQE: %08x %08x %08x %08x\n",
2675 			ctxp->oxid, ctxp->flag, released,
2676 			wcqe->word0, wcqe->total_data_placed,
2677 			result, wcqe->word3);
2678 
2679 	cmdwqe->context2 = NULL;
2680 	cmdwqe->context3 = NULL;
2681 	/*
2682 	 * If the transport has released the ctx, we can reuse it.
2683 	 * Otherwise, it will be recycled by the transport release call.
2684 	 */
2685 	if (released)
2686 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2687 
2688 	/* This is the iocbq for the abort, not the command */
2689 	lpfc_sli_release_iocbq(phba, cmdwqe);
2690 
2691 	/* Since iaab/iaar are NOT set, there is no work left.
2692 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2693 	 * should have been called already.
2694 	 */
2695 }
2696 
2697 /**
2698  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2699  * @phba: Pointer to HBA context object.
2700  * @cmdwqe: Pointer to driver command WQE object.
2701  * @wcqe: Pointer to driver response CQE object.
2702  *
2703  * The function is called from the SLI ring event handler with no
2704  * lock held. This function is the completion handler for an NVME
2705  * ABTS for FCP cmds. It frees memory resources used for the command.
2706  **/
2707 static void
2708 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2709 			       struct lpfc_wcqe_complete *wcqe)
2710 {
2711 	struct lpfc_nvmet_rcv_ctx *ctxp;
2712 	struct lpfc_nvmet_tgtport *tgtp;
2713 	unsigned long flags;
2714 	uint32_t status, result;
2715 	bool released = false;
2716 
2717 	ctxp = cmdwqe->context2;
2718 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2719 	result = wcqe->parameter;
2720 
2721 	if (!ctxp) {
2722 		/* if the context is clear, the related IO already completed */
2723 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2724 				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2725 				wcqe->word0, wcqe->total_data_placed,
2726 				result, wcqe->word3);
2727 		return;
2728 	}
2729 
2730 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2731 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2732 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2733 
2734 	/* Sanity check */
2735 	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2736 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2737 				"6112 ABTS Wrong state:%d oxid x%x\n",
2738 				ctxp->state, ctxp->oxid);
2739 	}
2740 
2741 	/* Check if we already received a free context call
2742 	 * and we have completed processing an abort situation.
2743 	 */
2744 	ctxp->state = LPFC_NVMET_STE_DONE;
2745 	spin_lock_irqsave(&ctxp->ctxlock, flags);
2746 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2747 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2748 		list_del(&ctxp->list);
2749 		released = true;
2750 	}
2751 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2752 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2753 	atomic_inc(&tgtp->xmt_abort_rsp);
2754 
2755 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2756 			"6316 ABTS cmpl xri x%x flg x%x (%x) "
2757 			"WCQE: %08x %08x %08x %08x\n",
2758 			ctxp->oxid, ctxp->flag, released,
2759 			wcqe->word0, wcqe->total_data_placed,
2760 			result, wcqe->word3);
2761 
2762 	cmdwqe->context2 = NULL;
2763 	cmdwqe->context3 = NULL;
2764 	/*
2765 	 * If the transport has released the ctx, we can reuse it.
2766 	 * Otherwise, it will be recycled by the transport release call.
2767 	 */
2768 	if (released)
2769 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2770 
2771 	/* Since iaab/iaar are NOT set, there is no work left.
2772 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2773 	 * should have been called already.
2774 	 */
2775 }
2776 
2777 /**
2778  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2779  * @phba: Pointer to HBA context object.
2780  * @cmdwqe: Pointer to driver command WQE object.
2781  * @wcqe: Pointer to driver response CQE object.
2782  *
2783  * The function is called from the SLI ring event handler with no
2784  * lock held. This function is the completion handler for an NVME
2785  * ABTS for LS cmds. It frees memory resources used for the command.
2786  **/
2787 static void
2788 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2789 			    struct lpfc_wcqe_complete *wcqe)
2790 {
2791 	struct lpfc_nvmet_rcv_ctx *ctxp;
2792 	struct lpfc_nvmet_tgtport *tgtp;
2793 	uint32_t status, result;
2794 
2795 	ctxp = cmdwqe->context2;
2796 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2797 	result = wcqe->parameter;
2798 
2799 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2800 	atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2801 
2802 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2803 			"6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n",
2804 			ctxp, wcqe->word0, wcqe->total_data_placed,
2805 			result, wcqe->word3);
2806 
2807 	if (!ctxp) {
2808 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2809 				"6415 NVMET LS Abort No ctx: WCQE: "
2810 				 "%08x %08x %08x %08x\n",
2811 				wcqe->word0, wcqe->total_data_placed,
2812 				result, wcqe->word3);
2813 
2814 		lpfc_sli_release_iocbq(phba, cmdwqe);
2815 		return;
2816 	}
2817 
2818 	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
2819 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2820 				"6416 NVMET LS abort cmpl state mismatch: "
2821 				"oxid x%x: %d %d\n",
2822 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
2823 	}
2824 
2825 	cmdwqe->context2 = NULL;
2826 	cmdwqe->context3 = NULL;
2827 	lpfc_sli_release_iocbq(phba, cmdwqe);
2828 	kfree(ctxp);
2829 }
2830 
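/**
 * lpfc_nvmet_unsol_issue_abort - Build an ABTS sequence WQE
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context for the exchange being aborted.
 * @sid: S_ID of the initiator that owns the exchange.
 * @xri: exchange identifier placed in the WQE's OX_ID field.
 *
 * Prepares, but does not issue, an XMIT_SEQUENCE64 WQE carrying a
 * BA_ABTS basic link service for the exchange. Returns 1 when the
 * WQE was prepared, or 0 when it was dropped because the ndlp was
 * in an unusable state.
 */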
2831 static int
2832 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2833 			     struct lpfc_nvmet_rcv_ctx *ctxp,
2834 			     uint32_t sid, uint16_t xri)
2835 {
2836 	struct lpfc_nvmet_tgtport *tgtp;
2837 	struct lpfc_iocbq *abts_wqeq;
2838 	union lpfc_wqe128 *wqe_abts;
2839 	struct lpfc_nodelist *ndlp;
2840 
2841 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2842 			"6067 ABTS: sid %x xri x%x/x%x\n",
2843 			sid, xri, ctxp->wqeq->sli4_xritag);
2844 
2845 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2846 
2847 	ndlp = lpfc_findnode_did(phba->pport, sid);
2848 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2849 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2850 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2851 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2852 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2853 				"6134 Drop ABTS - wrong NDLP state x%x.\n",
2854 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2855 
2856 		/* Do not report a failure for an ABTS request. */
2857 		return 0;
2858 	}
2859 
2860 	abts_wqeq = ctxp->wqeq;
2861 	wqe_abts = &abts_wqeq->wqe;
2862 
2863 	/*
2864 	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
2865 	 * that were initialized in lpfc_sli4_nvmet_alloc.
2866 	 */
2867 	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2868 
2869 	/* Word 5 */
2870 	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2871 	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2872 	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2873 	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2874 	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2875 
2876 	/* Word 6 */
2877 	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2878 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2879 	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2880 	       abts_wqeq->sli4_xritag);
2881 
2882 	/* Word 7 */
2883 	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2884 	       CMD_XMIT_SEQUENCE64_WQE);
2885 	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2886 	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2887 	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2888 
2889 	/* Word 8 */
2890 	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2891 
2892 	/* Word 9 */
2893 	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2894 	/* Needs to be set by caller */
2895 	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2896 
2897 	/* Word 10 */
2898 	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2899 	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2900 	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2901 	       LPFC_WQE_LENLOC_WORD12);
2902 	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2903 	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2904 
2905 	/* Word 11 */
2906 	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2907 	       LPFC_WQE_CQ_ID_DEFAULT);
2908 	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2909 	       OTHER_COMMAND);
2910 
2911 	abts_wqeq->vport = phba->pport;
2912 	abts_wqeq->context1 = ndlp;
2913 	abts_wqeq->context2 = ctxp;
2914 	abts_wqeq->context3 = NULL;
2915 	abts_wqeq->rsvd2 = 0;
2916 	/* hba_wqidx should already be set up from the command we are aborting */
2917 	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2918 	abts_wqeq->iocb.ulpLe = 1;
2919 
2920 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2921 			"6069 Issue ABTS to xri x%x reqtag x%x\n",
2922 			xri, abts_wqeq->iotag);
2923 	return 1;
2924 }
2925 
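/**
 * lpfc_nvmet_sol_fcp_issue_abort - Abort a solicited FCP exchange
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context for the exchange being aborted.
 * @sid: S_ID of the initiator that owns the exchange.
 * @xri: exchange identifier being aborted.
 *
 * Issues an ABORT_XRI_CX WQE against the WQE outstanding for this
 * exchange. The abort is silently dropped (returning 0) when the
 * ndlp is unusable, the HBA is flushing IO for a reset, or an abort
 * is already pending; 1 is returned only if issuing the abort WQE
 * itself fails.
 */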
2926 static int
2927 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2928 			       struct lpfc_nvmet_rcv_ctx *ctxp,
2929 			       uint32_t sid, uint16_t xri)
2930 {
2931 	struct lpfc_nvmet_tgtport *tgtp;
2932 	struct lpfc_iocbq *abts_wqeq;
2933 	union lpfc_wqe128 *abts_wqe;
2934 	struct lpfc_nodelist *ndlp;
2935 	unsigned long flags;
2936 	int rc;
2937 
2938 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2939 	if (!ctxp->wqeq) {
2940 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
2941 		ctxp->wqeq->hba_wqidx = 0;
2942 	}
2943 
2944 	ndlp = lpfc_findnode_did(phba->pport, sid);
2945 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2946 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2947 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2948 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2949 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2950 				"6160 Drop ABORT - wrong NDLP state x%x.\n",
2951 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2952 
2953 		/* Do not report a failure for an ABTS request. */
2954 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2955 		return 0;
2956 	}
2957 
2958 	/* Issue ABTS for this WQE based on iotag */
2959 	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2960 	if (!ctxp->abort_wqeq) {
2961 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2962 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2963 				"6161 ABORT failed: No wqeqs: "
2964 				"xri: x%x\n", ctxp->oxid);
2965 		/* Do not report a failure for an ABTS request. */
2966 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2967 		return 0;
2968 	}
2969 	abts_wqeq = ctxp->abort_wqeq;
2970 	abts_wqe = &abts_wqeq->wqe;
2971 	ctxp->state = LPFC_NVMET_STE_ABORT;
2972 
2973 	/* Log the ABORT request for this exchange. */
2974 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2975 			"6162 ABORT Request to rport DID x%06x "
2976 			"for xri x%x x%x\n",
2977 			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2978 
2979 	/* If the hba is getting reset, this flag is set.  It is
2980 	 * cleared when the reset is complete and rings reestablished.
2981 	 */
2982 	spin_lock_irqsave(&phba->hbalock, flags);
2983 	/* driver-queued commands are in the process of being flushed */
2984 	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2985 		spin_unlock_irqrestore(&phba->hbalock, flags);
2986 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2987 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2988 				"6163 Driver in reset cleanup - flushing "
2989 				"NVME Req now. hba_flag x%x oxid x%x\n",
2990 				phba->hba_flag, ctxp->oxid);
2991 		lpfc_sli_release_iocbq(phba, abts_wqeq);
2992 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2993 		return 0;
2994 	}
2995 
2996 	/* Outstanding abort is in progress */
2997 	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
2998 		spin_unlock_irqrestore(&phba->hbalock, flags);
2999 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3000 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3001 				"6164 Outstanding NVME I/O Abort Request "
3002 				"still pending on oxid x%x\n",
3003 				ctxp->oxid);
3004 		lpfc_sli_release_iocbq(phba, abts_wqeq);
3005 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3006 		return 0;
3007 	}
3008 
3009 	/* Ready - mark outstanding as aborted by driver. */
3010 	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3011 
3012 	/* WQEs are reused.  Clear stale data and set key fields to
3013 	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
3014 	 */
3015 	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
3016 
3017 	/* word 3 */
3018 	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
3019 
3020 	/* word 7 */
3021 	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
3022 	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
3023 
3024 	/* word 8 - tell the FW to abort the IO associated with this
3025 	 * outstanding exchange ID.
3026 	 */
3027 	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
3028 
3029 	/* word 9 - this is the iotag for the abts_wqe completion. */
3030 	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
3031 	       abts_wqeq->iotag);
3032 
3033 	/* word 10 */
3034 	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
3035 	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
3036 
3037 	/* word 11 */
3038 	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
3039 	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
3040 	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
3041 
3042 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
3043 	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3044 	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3045 	abts_wqeq->iocb_cmpl = NULL;
3046 	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3047 	abts_wqeq->context2 = ctxp;
3048 	abts_wqeq->vport = phba->pport;
3049 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3050 	spin_unlock_irqrestore(&phba->hbalock, flags);
3051 	if (rc == WQE_SUCCESS) {
3052 		atomic_inc(&tgtp->xmt_abort_sol);
3053 		return 0;
3054 	}
3055 
3056 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3057 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3058 	lpfc_sli_release_iocbq(phba, abts_wqeq);
3059 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3060 			"6166 Failed ABORT issue_wqe with status x%x "
3061 			"for oxid x%x.\n",
3062 			rc, ctxp->oxid);
3063 	return 1;
3064 }
3065 
3066 
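/**
 * lpfc_nvmet_unsol_fcp_issue_abort - ABTS an unsolicited FCP exchange
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context for the exchange being aborted.
 * @sid: S_ID of the initiator that owns the exchange.
 * @xri: exchange identifier being aborted.
 *
 * Builds an ABTS sequence via lpfc_nvmet_unsol_issue_abort() and
 * issues it on the FCP ring with the unsolicited abort completion
 * handler. Returns 0 on success and 1 on any failure.
 */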
3067 static int
3068 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3069 				 struct lpfc_nvmet_rcv_ctx *ctxp,
3070 				 uint32_t sid, uint16_t xri)
3071 {
3072 	struct lpfc_nvmet_tgtport *tgtp;
3073 	struct lpfc_iocbq *abts_wqeq;
3074 	unsigned long flags;
3075 	int rc;
3076 
3077 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3078 	if (!ctxp->wqeq) {
3079 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
3080 		ctxp->wqeq->hba_wqidx = 0;
3081 	}
3082 
3083 	if (ctxp->state == LPFC_NVMET_STE_FREE) {
3084 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3085 				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3086 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3087 		rc = WQE_BUSY;
3088 		goto aerr;
3089 	}
3090 	ctxp->state = LPFC_NVMET_STE_ABORT;
3091 	ctxp->entry_cnt++;
3092 	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3093 	if (rc == 0)
3094 		goto aerr;
3095 
3096 	spin_lock_irqsave(&phba->hbalock, flags);
3097 	abts_wqeq = ctxp->wqeq;
3098 	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3099 	abts_wqeq->iocb_cmpl = NULL;
3100 	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3101 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
3102 	spin_unlock_irqrestore(&phba->hbalock, flags);
3103 	if (rc == WQE_SUCCESS) {
3104 		return 0;
3105 	}
3106 
3107 aerr:
3108 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3109 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3110 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3111 			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
3112 			ctxp->oxid, rc);
3113 	return 1;
3114 }
3115 
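/**
 * lpfc_nvmet_unsol_ls_issue_abort - ABTS an unsolicited LS exchange
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: receive context for the LS exchange being aborted.
 * @sid: S_ID of the initiator that owns the exchange.
 * @xri: exchange identifier being aborted.
 *
 * Builds an ABTS sequence via lpfc_nvmet_unsol_issue_abort() and
 * issues it on the ELS ring with the LS abort completion handler.
 * On failure the iocbq is released and the context freed; 0 is
 * returned in all cases.
 */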
3116 static int
3117 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3118 				struct lpfc_nvmet_rcv_ctx *ctxp,
3119 				uint32_t sid, uint16_t xri)
3120 {
3121 	struct lpfc_nvmet_tgtport *tgtp;
3122 	struct lpfc_iocbq *abts_wqeq;
3123 	union lpfc_wqe128 *wqe_abts;
3124 	unsigned long flags;
3125 	int rc;
3126 
3127 	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3128 	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3129 		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3130 		ctxp->entry_cnt++;
3131 	} else {
3132 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3133 				"6418 NVMET LS abort state mismatch "
3134 				"IO x%x: %d %d\n",
3135 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3136 		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3137 	}
3138 
3139 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3140 	if (!ctxp->wqeq) {
3141 		/* Issue ABTS for this WQE based on iotag */
3142 		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3143 		if (!ctxp->wqeq) {
3144 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3145 					"6068 Abort failed: No wqeqs: "
3146 					"xri: x%x\n", xri);
3147 			/* Do not report a failure for an ABTS request. */
3148 			kfree(ctxp);
3149 			return 0;
3150 		}
3151 	}
3152 	abts_wqeq = ctxp->wqeq;
3153 	wqe_abts = &abts_wqeq->wqe;
3154 
3155 	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3156 		rc = WQE_BUSY;
3157 		goto out;
3158 	}
3159 
3160 	spin_lock_irqsave(&phba->hbalock, flags);
3161 	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3162 	abts_wqeq->iocb_cmpl = NULL;
3163 	abts_wqeq->iocb_flag |=  LPFC_IO_NVME_LS;
3164 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
3165 	spin_unlock_irqrestore(&phba->hbalock, flags);
3166 	if (rc == WQE_SUCCESS) {
3167 		atomic_inc(&tgtp->xmt_abort_unsol);
3168 		return 0;
3169 	}
3170 out:
3171 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3172 	abts_wqeq->context2 = NULL;
3173 	abts_wqeq->context3 = NULL;
3174 	lpfc_sli_release_iocbq(phba, abts_wqeq);
3175 	kfree(ctxp);
3176 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3177 			"6056 Failed to Issue ABTS. Status x%x\n", rc);
3178 	return 0;
3179 }
3180