xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_nvmet.c (revision f7d84fa7)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
5  * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30 
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38 
39 #include <../drivers/nvme/host/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 
42 #include "lpfc_version.h"
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_scsi.h"
51 #include "lpfc_nvme.h"
52 #include "lpfc_nvmet.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_crtn.h"
55 #include "lpfc_vport.h"
56 #include "lpfc_debugfs.h"
57 
58 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
59 						 struct lpfc_nvmet_rcv_ctx *,
60 						 dma_addr_t rspbuf,
61 						 uint16_t rspsize);
62 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
63 						  struct lpfc_nvmet_rcv_ctx *);
64 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
65 					  struct lpfc_nvmet_rcv_ctx *,
66 					  uint32_t, uint16_t);
67 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
68 					    struct lpfc_nvmet_rcv_ctx *,
69 					    uint32_t, uint16_t);
70 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
71 					   struct lpfc_nvmet_rcv_ctx *,
72 					   uint32_t, uint16_t);
73 
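/**
 * lpfc_nvmet_defer_release - Defer releasing a NVMET receive context
 * @phba: Pointer to HBA context object.
 * @ctxp: Context to defer the release of.
 *
 * Marks the context with LPFC_NVMET_CTX_RLS and queues it on the
 * lpfc_abts_nvmet_ctx_list so that the abort completion path performs
 * the actual release. If the flag is already set, the context has
 * already been queued and the call does nothing.
 **/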
74 void
75 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
76 {
77 	unsigned long iflag;
78 
79 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
80 			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
81 			ctxp->oxid, ctxp->flag);
82 
83 	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
84 	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
85 		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
86 				       iflag);
87 		return;
88 	}
89 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
90 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
91 	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
92 }
93 
94 /**
95  * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
96  * @phba: Pointer to HBA context object.
97  * @cmdwqe: Pointer to driver command WQE object.
98  * @wcqe: Pointer to driver response CQE object.
99  *
100  * The function is called from the SLI ring event handler with no
101  * lock held. It is the completion handler for NVME LS commands and
102  * frees the memory resources used for the NVME LS command.
103  **/
104 static void
105 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
106 			  struct lpfc_wcqe_complete *wcqe)
107 {
108 	struct lpfc_nvmet_tgtport *tgtp;
109 	struct nvmefc_tgt_ls_req *rsp;
110 	struct lpfc_nvmet_rcv_ctx *ctxp;
111 	uint32_t status, result;
112 
113 	status = bf_get(lpfc_wcqe_c_status, wcqe);
114 	result = wcqe->parameter;
115 	if (!phba->targetport)
116 		goto out;
117 
118 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
119 
120 	if (status)
121 		atomic_inc(&tgtp->xmt_ls_rsp_error);
122 	else
123 		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
124 
125 out:
126 	ctxp = cmdwqe->context2;
127 	rsp = &ctxp->ctx.ls_req;
128 
129 	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
130 			 ctxp->oxid, status, result);
131 
132 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
133 			"6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
134 			ctxp, status, result);
135 
136 	lpfc_nlp_put(cmdwqe->context1);
137 	cmdwqe->context2 = NULL;
138 	cmdwqe->context3 = NULL;
139 	lpfc_sli_release_iocbq(phba, cmdwqe);
140 	rsp->done(rsp);
141 	kfree(ctxp);
142 }
143 
144 /**
145  * lpfc_nvmet_ctxbuf_post - Recycle a NVMET receive context buffer
146  * @phba: HBA the context buffer is associated with
147  * @ctx_buf: context buffer to recycle
148  *
149  * Description: If a received FCP command is waiting for a free context,
150  * reuses this context buffer to service it immediately; otherwise the
151  * buffer is returned to the lpfc_nvmet_ctx_list for reuse.
152  *
153  * Notes: Takes the nvmet_io_wait and nvmet_io locks internally.
154  *
155  * Returns: None
156  **/
157 void
158 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
159 {
160 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
161 	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
162 	struct lpfc_nvmet_tgtport *tgtp;
163 	struct fc_frame_header *fc_hdr;
164 	struct rqb_dmabuf *nvmebuf;
166 	uint32_t *payload;
167 	uint32_t size, oxid, sid, rc;
168 	unsigned long iflag;
169 
170 	if (ctxp->txrdy) {
171 		pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
172 			      ctxp->txrdy_phys);
173 		ctxp->txrdy = NULL;
174 		ctxp->txrdy_phys = 0;
175 	}
176 	ctxp->state = LPFC_NVMET_STE_FREE;
177 
178 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
179 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
181 		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
182 				 nvmebuf, struct rqb_dmabuf,
183 				 hbuf.list);
184 		phba->sli4_hba.nvmet_io_wait_cnt--;
185 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
186 				       iflag);
187 
188 		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
189 		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
190 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
191 		payload = (uint32_t *)(nvmebuf->dbuf.virt);
192 		size = nvmebuf->bytes_recv;
193 		sid = sli4_sid_from_fc_hdr(fc_hdr);
194 
195 		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
196 		memset(ctxp, 0, sizeof(ctxp->ctx));
197 		ctxp->wqeq = NULL;
198 		ctxp->txrdy = NULL;
199 		ctxp->offset = 0;
200 		ctxp->phba = phba;
201 		ctxp->size = size;
202 		ctxp->oxid = oxid;
203 		ctxp->sid = sid;
204 		ctxp->state = LPFC_NVMET_STE_RCV;
205 		ctxp->entry_cnt = 1;
206 		ctxp->flag = 0;
207 		ctxp->ctxbuf = ctx_buf;
208 		spin_lock_init(&ctxp->ctxlock);
209 
210 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
211 		if (phba->ktime_on) {
212 			ctxp->ts_cmd_nvme = ktime_get_ns();
213 			ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
214 			ctxp->ts_nvme_data = 0;
215 			ctxp->ts_data_wqput = 0;
216 			ctxp->ts_isr_data = 0;
217 			ctxp->ts_data_nvme = 0;
218 			ctxp->ts_nvme_status = 0;
219 			ctxp->ts_status_wqput = 0;
220 			ctxp->ts_isr_status = 0;
221 			ctxp->ts_status_nvme = 0;
222 		}
223 #endif
224 		atomic_inc(&tgtp->rcv_fcp_cmd_in);
225 		/*
226 		 * The calling sequence should be:
227 		 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
228 		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
229 		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
230 		 * in the NVME command / FC header is stored.
231 		 * A buffer has already been reposted for this IO, so just free
232 		 * the nvmebuf.
233 		 */
234 		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
235 					  payload, size);
236 
237 		/* Process FCP command */
238 		if (rc == 0) {
239 			atomic_inc(&tgtp->rcv_fcp_cmd_out);
240 			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
241 			return;
242 		}
243 
244 		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
245 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
246 				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
247 				ctxp->oxid, rc,
248 				atomic_read(&tgtp->rcv_fcp_cmd_in),
249 				atomic_read(&tgtp->rcv_fcp_cmd_out),
250 				atomic_read(&tgtp->xmt_fcp_release));
251 
252 		lpfc_nvmet_defer_release(phba, ctxp);
253 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
254 		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
255 		return;
256 	}
257 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
258 
259 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
260 	list_add_tail(&ctx_buf->list,
261 		      &phba->sli4_hba.lpfc_nvmet_ctx_list);
262 	phba->sli4_hba.nvmet_ctx_cnt++;
263 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
264 #endif
265 }
266 
267 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
268 static void
269 lpfc_nvmet_ktime(struct lpfc_hba *phba,
270 		 struct lpfc_nvmet_rcv_ctx *ctxp)
271 {
272 	uint64_t seg1, seg2, seg3, seg4, seg5;
273 	uint64_t seg6, seg7, seg8, seg9, seg10;
274 
275 	if (!phba->ktime_on)
276 		return;
277 
278 	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
279 	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
280 	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
281 	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
282 	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
283 		return;
284 
285 	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
286 		return;
287 	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
288 		return;
289 	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
290 		return;
291 	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
292 		return;
293 	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
294 		return;
295 	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
296 		return;
297 	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
298 		return;
299 	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
300 		return;
301 	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
302 		return;
303 	/*
304 	 * Segment 1 - Time from FCP command received by MSI-X ISR
305 	 * to FCP command is passed to NVME Layer.
306 	 * Segment 2 - Time from FCP command payload handed
307 	 * off to NVME Layer to Driver receives a Command op
308 	 * from NVME Layer.
309 	 * Segment 3 - Time from Driver receives a Command op
310 	 * from NVME Layer to Command is put on WQ.
311 	 * Segment 4 - Time from Driver WQ put is done
312 	 * to MSI-X ISR for Command cmpl.
313 	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
314 	 * Command cmpl is passed to NVME Layer.
315 	 * Segment 6 - Time from Command cmpl is passed to NVME
316 	 * Layer to Driver receives a RSP op from NVME Layer.
317 	 * Segment 7 - Time from Driver receives a RSP op from
318 	 * NVME Layer to WQ put is done on TRSP FCP Status.
319 	 * Segment 8 - Time from Driver WQ put is done on TRSP
320 	 * FCP Status to MSI-X ISR for TRSP cmpl.
321 	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
322 	 * TRSP cmpl is passed to NVME Layer.
323 	 * Segment 10 - Time from FCP command received by
324 	 * MSI-X ISR to command is completed on wire.
325 	 * (Segments 1 thru 8) for READDATA / WRITEDATA
326 	 * (Segments 1 thru 4) for READDATA_RSP
327 	 */
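	/*
	 * Each segment below is computed as a cumulative delta from
	 * ts_isr_cmd with the earlier segments subtracted back out;
	 * algebraically seg2 equals ts_nvme_data - ts_cmd_nvme, seg3
	 * equals ts_data_wqput - ts_nvme_data, and so on.
	 */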
328 	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
329 	seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
330 	seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
331 		seg1 - seg2;
332 	seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
333 		seg1 - seg2 - seg3;
334 	seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
335 		seg1 - seg2 - seg3 - seg4;
336 
337 	/* For auto rsp commands seg6 thru seg10 will be 0 */
338 	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
339 		seg6 = (ctxp->ts_nvme_status -
340 			ctxp->ts_isr_cmd) -
341 			seg1 - seg2 - seg3 - seg4 - seg5;
342 		seg7 = (ctxp->ts_status_wqput -
343 			ctxp->ts_isr_cmd) -
344 			seg1 - seg2 - seg3 -
345 			seg4 - seg5 - seg6;
346 		seg8 = (ctxp->ts_isr_status -
347 			ctxp->ts_isr_cmd) -
348 			seg1 - seg2 - seg3 - seg4 -
349 			seg5 - seg6 - seg7;
350 		seg9 = (ctxp->ts_status_nvme -
351 			ctxp->ts_isr_cmd) -
352 			seg1 - seg2 - seg3 - seg4 -
353 			seg5 - seg6 - seg7 - seg8;
354 		seg10 = (ctxp->ts_isr_status -
355 			ctxp->ts_isr_cmd);
356 	} else {
357 		seg6 =  0;
358 		seg7 =  0;
359 		seg8 =  0;
360 		seg9 =  0;
361 		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
362 	}
363 
364 	phba->ktime_seg1_total += seg1;
365 	if (seg1 < phba->ktime_seg1_min)
366 		phba->ktime_seg1_min = seg1;
367 	else if (seg1 > phba->ktime_seg1_max)
368 		phba->ktime_seg1_max = seg1;
369 
370 	phba->ktime_seg2_total += seg2;
371 	if (seg2 < phba->ktime_seg2_min)
372 		phba->ktime_seg2_min = seg2;
373 	else if (seg2 > phba->ktime_seg2_max)
374 		phba->ktime_seg2_max = seg2;
375 
376 	phba->ktime_seg3_total += seg3;
377 	if (seg3 < phba->ktime_seg3_min)
378 		phba->ktime_seg3_min = seg3;
379 	else if (seg3 > phba->ktime_seg3_max)
380 		phba->ktime_seg3_max = seg3;
381 
382 	phba->ktime_seg4_total += seg4;
383 	if (seg4 < phba->ktime_seg4_min)
384 		phba->ktime_seg4_min = seg4;
385 	else if (seg4 > phba->ktime_seg4_max)
386 		phba->ktime_seg4_max = seg4;
387 
388 	phba->ktime_seg5_total += seg5;
389 	if (seg5 < phba->ktime_seg5_min)
390 		phba->ktime_seg5_min = seg5;
391 	else if (seg5 > phba->ktime_seg5_max)
392 		phba->ktime_seg5_max = seg5;
393 
394 	phba->ktime_data_samples++;
395 	if (!seg6)
396 		goto out;
397 
398 	phba->ktime_seg6_total += seg6;
399 	if (seg6 < phba->ktime_seg6_min)
400 		phba->ktime_seg6_min = seg6;
401 	else if (seg6 > phba->ktime_seg6_max)
402 		phba->ktime_seg6_max = seg6;
403 
404 	phba->ktime_seg7_total += seg7;
405 	if (seg7 < phba->ktime_seg7_min)
406 		phba->ktime_seg7_min = seg7;
407 	else if (seg7 > phba->ktime_seg7_max)
408 		phba->ktime_seg7_max = seg7;
409 
410 	phba->ktime_seg8_total += seg8;
411 	if (seg8 < phba->ktime_seg8_min)
412 		phba->ktime_seg8_min = seg8;
413 	else if (seg8 > phba->ktime_seg8_max)
414 		phba->ktime_seg8_max = seg8;
415 
416 	phba->ktime_seg9_total += seg9;
417 	if (seg9 < phba->ktime_seg9_min)
418 		phba->ktime_seg9_min = seg9;
419 	else if (seg9 > phba->ktime_seg9_max)
420 		phba->ktime_seg9_max = seg9;
421 out:
422 	phba->ktime_seg10_total += seg10;
423 	if (seg10 < phba->ktime_seg10_min)
424 		phba->ktime_seg10_min = seg10;
425 	else if (seg10 > phba->ktime_seg10_max)
426 		phba->ktime_seg10_max = seg10;
427 	phba->ktime_status_samples++;
428 }
429 #endif
430 
431 /**
432  * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
433  * @phba: Pointer to HBA context object.
434  * @cmdwqe: Pointer to driver command WQE object.
435  * @wcqe: Pointer to driver response CQE object.
436  *
437  * The function is called from the SLI ring event handler with no
438  * lock held. It is the completion handler for NVME FCP operations
439  * and passes completion status back to the transport via rsp->done().
440  **/
441 static void
442 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
443 			  struct lpfc_wcqe_complete *wcqe)
444 {
445 	struct lpfc_nvmet_tgtport *tgtp;
446 	struct nvmefc_tgt_fcp_req *rsp;
447 	struct lpfc_nvmet_rcv_ctx *ctxp;
448 	uint32_t status, result, op, start_clean;
449 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
450 	uint32_t id;
451 #endif
452 
453 	ctxp = cmdwqe->context2;
454 	ctxp->flag &= ~LPFC_NVMET_IO_INP;
455 
456 	rsp = &ctxp->ctx.fcp_req;
457 	op = rsp->op;
458 
459 	status = bf_get(lpfc_wcqe_c_status, wcqe);
460 	result = wcqe->parameter;
461 
462 	if (phba->targetport)
463 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
464 	else
465 		tgtp = NULL;
466 
467 	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
468 			 ctxp->oxid, op, status);
469 
470 	if (status) {
471 		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
472 		rsp->transferred_length = 0;
473 		if (tgtp)
474 			atomic_inc(&tgtp->xmt_fcp_rsp_error);
475 
476 		/* pick up SLI4 exchange busy condition */
477 		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
478 			ctxp->flag |= LPFC_NVMET_XBUSY;
479 
480 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
481 					"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
482 					ctxp->oxid, status, result);
483 		} else {
484 			ctxp->flag &= ~LPFC_NVMET_XBUSY;
485 		}
486 
487 	} else {
488 		rsp->fcp_error = NVME_SC_SUCCESS;
489 		if (op == NVMET_FCOP_RSP)
490 			rsp->transferred_length = rsp->rsplen;
491 		else
492 			rsp->transferred_length = rsp->transfer_length;
493 		if (tgtp)
494 			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
495 	}
496 
497 	if ((op == NVMET_FCOP_READDATA_RSP) ||
498 	    (op == NVMET_FCOP_RSP)) {
499 		/* Sanity check */
500 		ctxp->state = LPFC_NVMET_STE_DONE;
501 		ctxp->entry_cnt++;
502 
503 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
504 		if (phba->ktime_on) {
505 			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
506 				ctxp->ts_isr_data =
507 					cmdwqe->isr_timestamp;
508 				ctxp->ts_data_nvme =
509 					ktime_get_ns();
510 				ctxp->ts_nvme_status =
511 					ctxp->ts_data_nvme;
512 				ctxp->ts_status_wqput =
513 					ctxp->ts_data_nvme;
514 				ctxp->ts_isr_status =
515 					ctxp->ts_data_nvme;
516 				ctxp->ts_status_nvme =
517 					ctxp->ts_data_nvme;
518 			} else {
519 				ctxp->ts_isr_status =
520 					cmdwqe->isr_timestamp;
521 				ctxp->ts_status_nvme =
522 					ktime_get_ns();
523 			}
524 		}
525 		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
526 			id = smp_processor_id();
527 			if (ctxp->cpu != id)
528 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
529 						"6703 CPU Check cmpl: "
530 						"cpu %d expect %d\n",
531 						id, ctxp->cpu);
532 			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
533 				phba->cpucheck_cmpl_io[id]++;
534 		}
535 #endif
536 		rsp->done(rsp);
537 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
538 		if (phba->ktime_on)
539 			lpfc_nvmet_ktime(phba, ctxp);
540 #endif
541 		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
542 	} else {
543 		ctxp->entry_cnt++;
544 		start_clean = offsetof(struct lpfc_iocbq, wqe);
545 		memset(((char *)cmdwqe) + start_clean, 0,
546 		       (sizeof(struct lpfc_iocbq) - start_clean));
547 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
548 		if (phba->ktime_on) {
549 			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
550 			ctxp->ts_data_nvme = ktime_get_ns();
551 		}
552 		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
553 			id = smp_processor_id();
554 			if (ctxp->cpu != id)
555 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
556 						"6704 CPU Check cmdcmpl: "
557 						"cpu %d expect %d\n",
558 						id, ctxp->cpu);
559 			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
560 				phba->cpucheck_ccmpl_io[id]++;
561 		}
562 #endif
563 		rsp->done(rsp);
564 	}
565 }
566 
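/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit an NVME LS response
 * @tgtport: Pointer to the target port the response is issued on.
 * @rsp: Pointer to the transport LS response request.
 *
 * Entry point called by the nvmet_fc transport to send an LS response.
 * Builds an XMIT_SEQUENCE WQE around the response DMA buffer and posts
 * it to the ELS WQ; lpfc_nvmet_xmt_ls_rsp_cmp() completes the exchange.
 *
 * Returns 0 on success; -ENOMEM or -ENXIO on failure, after aborting
 * the exchange and freeing the receive buffer.
 **/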
567 static int
568 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
569 		      struct nvmefc_tgt_ls_req *rsp)
570 {
571 	struct lpfc_nvmet_rcv_ctx *ctxp =
572 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
573 	struct lpfc_hba *phba = ctxp->phba;
574 	struct hbq_dmabuf *nvmebuf =
575 		(struct hbq_dmabuf *)ctxp->rqb_buffer;
576 	struct lpfc_iocbq *nvmewqeq;
577 	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
578 	struct lpfc_dmabuf dmabuf;
579 	struct ulp_bde64 bpl;
580 	int rc;
581 
582 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
583 			"6023 %s: Entrypoint ctx %p %p\n", __func__,
584 			ctxp, tgtport);
585 
586 	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
587 				      rsp->rsplen);
588 	if (nvmewqeq == NULL) {
589 		atomic_inc(&nvmep->xmt_ls_drop);
590 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
591 				"6150 LS Drop IO x%x: Prep\n",
592 				ctxp->oxid);
593 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
594 		atomic_inc(&nvmep->xmt_ls_abort);
595 		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
596 						ctxp->sid, ctxp->oxid);
597 		return -ENOMEM;
598 	}
599 
600 	/* Save numBdes for bpl2sgl */
601 	nvmewqeq->rsvd2 = 1;
602 	nvmewqeq->hba_wqidx = 0;
603 	nvmewqeq->context3 = &dmabuf;
604 	dmabuf.virt = &bpl;
605 	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
606 	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
607 	bpl.tus.f.bdeSize = rsp->rsplen;
608 	bpl.tus.f.bdeFlags = 0;
609 	bpl.tus.w = le32_to_cpu(bpl.tus.w);
610 
611 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
612 	nvmewqeq->iocb_cmpl = NULL;
613 	nvmewqeq->context2 = ctxp;
614 
615 	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
616 			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
617 
618 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
619 	if (rc == WQE_SUCCESS) {
620 		/*
621 		 * Okay to repost buffer here, but wait till cmpl
622 		 * before freeing ctxp and iocbq.
623 		 */
624 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
625 		ctxp->rqb_buffer = NULL;
626 		atomic_inc(&nvmep->xmt_ls_rsp);
627 		return 0;
628 	}
629 	/* Give back resources */
630 	atomic_inc(&nvmep->xmt_ls_drop);
631 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
632 			"6151 LS Drop IO x%x: Issue %d\n",
633 			ctxp->oxid, rc);
634 
635 	lpfc_nlp_put(nvmewqeq->context1);
636 
637 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
638 	atomic_inc(&nvmep->xmt_ls_abort);
639 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
640 	return -ENXIO;
641 }
642 
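/**
 * lpfc_nvmet_xmt_fcp_op - Issue an NVME FCP operation (data and/or response)
 * @tgtport: Pointer to the target port the operation is issued on.
 * @rsp: Pointer to the transport FCP request describing the operation.
 *
 * Entry point called by the nvmet_fc transport for TSEND, TRECEIVE and
 * TRSP operations. Prepares an FCP WQE for the exchange and posts it to
 * the hardware queue selected by the transport;
 * lpfc_nvmet_xmt_fcp_op_cmp() handles the completion.
 *
 * Returns 0 on success, -ENXIO if the exchange is already being
 * aborted or the WQE could not be prepared, and -EBUSY if the WQE
 * could not be issued.
 **/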
643 static int
644 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
645 		      struct nvmefc_tgt_fcp_req *rsp)
646 {
647 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
648 	struct lpfc_nvmet_rcv_ctx *ctxp =
649 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
650 	struct lpfc_hba *phba = ctxp->phba;
651 	struct lpfc_iocbq *nvmewqeq;
652 	int rc;
653 
654 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
655 	if (phba->ktime_on) {
656 		if (rsp->op == NVMET_FCOP_RSP)
657 			ctxp->ts_nvme_status = ktime_get_ns();
658 		else
659 			ctxp->ts_nvme_data = ktime_get_ns();
660 	}
661 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
662 		int id = smp_processor_id();
663 		ctxp->cpu = id;
664 		if (id < LPFC_CHECK_CPU_CNT)
665 			phba->cpucheck_xmt_io[id]++;
666 		if (rsp->hwqid != id) {
667 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
668 					"6705 CPU Check OP: "
669 					"cpu %d expect %d\n",
670 					id, rsp->hwqid);
671 			ctxp->cpu = rsp->hwqid;
672 		}
673 	}
674 #endif
675 
676 	/* Sanity check */
677 	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
678 	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
679 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
680 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
681 				"6102 IO xri x%x aborted\n",
682 				ctxp->oxid);
683 		rc = -ENXIO;
684 		goto aerr;
685 	}
686 
687 	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
688 	if (nvmewqeq == NULL) {
689 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
690 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
691 				"6152 FCP Drop IO x%x: Prep\n",
692 				ctxp->oxid);
693 		rc = -ENXIO;
694 		goto aerr;
695 	}
696 
697 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
698 	nvmewqeq->iocb_cmpl = NULL;
699 	nvmewqeq->context2 = ctxp;
700 	nvmewqeq->iocb_flag |=  LPFC_IO_NVMET;
701 	ctxp->wqeq->hba_wqidx = rsp->hwqid;
702 
703 	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
704 			 ctxp->oxid, rsp->op, rsp->rsplen);
705 
706 	ctxp->flag |= LPFC_NVMET_IO_INP;
707 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
708 	if (rc == WQE_SUCCESS) {
709 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
710 		if (!phba->ktime_on)
711 			return 0;
712 		if (rsp->op == NVMET_FCOP_RSP)
713 			ctxp->ts_status_wqput = ktime_get_ns();
714 		else
715 			ctxp->ts_data_wqput = ktime_get_ns();
716 #endif
717 		return 0;
718 	}
719 
720 	/* Give back resources */
721 	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
722 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
723 			"6153 FCP Drop IO x%x: Issue: %d\n",
724 			ctxp->oxid, rc);
725 
726 	ctxp->wqeq->hba_wqidx = 0;
727 	nvmewqeq->context2 = NULL;
728 	nvmewqeq->context3 = NULL;
729 	rc = -EBUSY;
730 aerr:
731 	return rc;
732 }
733 
734 static void
735 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
736 {
737 	struct lpfc_nvmet_tgtport *tport = targetport->private;
738 
739 	/* release any threads waiting for the unreg to complete */
740 	complete(&tport->tport_unreg_done);
741 }
742 
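/**
 * lpfc_nvmet_xmt_fcp_abort - Abort an outstanding NVME FCP exchange
 * @tgtport: Pointer to the target port the exchange belongs to.
 * @req: Pointer to the transport FCP request being aborted.
 *
 * Entry point called by the nvmet_fc transport to abort an exchange.
 * If the exchange is still busy in the firmware (LPFC_NVMET_XBUSY) the
 * abort is left to the XRI-aborted handler; otherwise a solicited or
 * unsolicited abort is issued depending on whether the IO is currently
 * in progress on a WQ.
 **/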
743 static void
744 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
745 			 struct nvmefc_tgt_fcp_req *req)
746 {
747 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
748 	struct lpfc_nvmet_rcv_ctx *ctxp =
749 		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
750 	struct lpfc_hba *phba = ctxp->phba;
751 	unsigned long flags;
752 
753 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
754 			"6103 Abort op: oxri x%x flg x%x cnt %d\n",
755 			ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
756 
757 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
758 			 "xri x%x flg x%x cnt x%x\n",
759 			 ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
760 
761 	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
762 	ctxp->entry_cnt++;
763 	spin_lock_irqsave(&ctxp->ctxlock, flags);
764 
765 	/* Since iaab/iaar are NOT set, we need to check
766 	 * if the firmware is in the process of aborting this IO
767 	 */
768 	if (ctxp->flag & LPFC_NVMET_XBUSY) {
769 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
770 		return;
771 	}
772 	ctxp->flag |= LPFC_NVMET_ABORT_OP;
773 	if (ctxp->flag & LPFC_NVMET_IO_INP)
774 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
775 					       ctxp->oxid);
776 	else
777 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
778 						 ctxp->oxid);
779 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
780 }
781 
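/**
 * lpfc_nvmet_xmt_fcp_release - Release an NVME FCP exchange context
 * @tgtport: Pointer to the target port the exchange belongs to.
 * @rsp: Pointer to the transport FCP request being released.
 *
 * Entry point called by the nvmet_fc transport when it is finished
 * with the IO context. If an abort or exchange-busy condition is
 * pending, the release is deferred to the abort path; otherwise the
 * context buffer is recycled immediately.
 **/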
782 static void
783 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
784 			   struct nvmefc_tgt_fcp_req *rsp)
785 {
786 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
787 	struct lpfc_nvmet_rcv_ctx *ctxp =
788 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
789 	struct lpfc_hba *phba = ctxp->phba;
790 	unsigned long flags;
791 	bool aborting = false;
792 
793 	spin_lock_irqsave(&ctxp->ctxlock, flags);
794 	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
795 	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
796 		aborting = true;
797 		/* let the abort path do the real release */
798 		lpfc_nvmet_defer_release(phba, ctxp);
799 	}
800 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
801 
802 	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n",
803 			 ctxp->oxid, ctxp->state, aborting);
804 
805 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
806 
807 	if (aborting)
808 		return;
809 
810 	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
811 }
812 
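/*
 * Target port template handed to the nvmet_fc transport at registration
 * time. max_hw_queues, max_sgl_segments and target_features are
 * overwritten with HBA-specific values in lpfc_nvmet_create_targetport().
 */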
813 static struct nvmet_fc_target_template lpfc_tgttemplate = {
814 	.targetport_delete = lpfc_nvmet_targetport_delete,
815 	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
816 	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
817 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
818 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
819 
820 	.max_hw_queues  = 1,
821 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
822 	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
823 	.dma_boundary = 0xFFFFFFFF,
824 
825 	/* optional features */
826 	.target_features = 0,
827 	/* sizes of additional private data for data structures */
828 	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
829 };
830 
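/**
 * lpfc_nvmet_cleanup_io_context - Free per-XRI NVMET receive resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Walks the lpfc_nvmet_ctx_list, returning each context's SGL to the
 * NVMET SGL list, releasing its iocbq and freeing the context memory.
 * Undoes the work of lpfc_nvmet_setup_io_context().
 **/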
831 void
832 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
833 {
834 	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
835 	unsigned long flags;
836 
837 	list_for_each_entry_safe(
838 		ctx_buf, next_ctx_buf,
839 		&phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
840 		spin_lock_irqsave(
841 			&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
842 		list_del_init(&ctx_buf->list);
843 		spin_unlock_irqrestore(
844 			&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
845 		__lpfc_clear_active_sglq(phba,
846 					 ctx_buf->sglq->sli4_lxritag);
847 		ctx_buf->sglq->state = SGL_FREED;
848 		ctx_buf->sglq->ndlp = NULL;
849 
850 		spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
851 		list_add_tail(&ctx_buf->sglq->list,
852 			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
853 		spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
854 				       flags);
855 
856 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
857 		kfree(ctx_buf->context);
858 	}
859 }
860 
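/**
 * lpfc_nvmet_setup_io_context - Allocate per-XRI NVMET receive resources
 * @phba: pointer to lpfc hba data structure.
 *
 * For every NVMET XRI, allocates a context buffer, an iocbq with a
 * partially pre-initialized 128-byte WQE and an NVMET SGL, then queues
 * the context buffer on the lpfc_nvmet_ctx_list for use by the receive
 * path.
 *
 * Returns 0 on success, -ENOMEM if any allocation fails.
 **/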
861 int
862 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
863 {
864 	struct lpfc_nvmet_ctxbuf *ctx_buf;
865 	struct lpfc_iocbq *nvmewqe;
866 	union lpfc_wqe128 *wqe;
867 	int i;
868 
869 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
870 			"6403 Allocate NVMET resources for %d XRIs\n",
871 			phba->sli4_hba.nvmet_xri_cnt);
872 
873 	/* For all nvmet xris, allocate resources needed to process a
874 	 * received command on a per xri basis.
875 	 */
876 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
877 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
878 		if (!ctx_buf) {
879 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
880 					"6404 Ran out of memory for NVMET\n");
881 			return -ENOMEM;
882 		}
883 
884 		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
885 					   GFP_KERNEL);
886 		if (!ctx_buf->context) {
887 			kfree(ctx_buf);
888 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
889 					"6405 Ran out of NVMET "
890 					"context memory\n");
891 			return -ENOMEM;
892 		}
893 		ctx_buf->context->ctxbuf = ctx_buf;
894 
895 		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
896 		if (!ctx_buf->iocbq) {
897 			kfree(ctx_buf->context);
898 			kfree(ctx_buf);
899 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
900 					"6406 Ran out of NVMET iocb/WQEs\n");
901 			return -ENOMEM;
902 		}
903 		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
904 		nvmewqe = ctx_buf->iocbq;
905 		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
906 		/* Initialize WQE */
907 		memset(wqe, 0, sizeof(union lpfc_wqe));
908 		/* Word 7 */
909 		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
910 		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
911 		bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
912 		/* Word 10 */
913 		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
914 		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
915 		bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
916 
917 		ctx_buf->iocbq->context1 = NULL;
918 		spin_lock(&phba->sli4_hba.sgl_list_lock);
919 		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
920 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
921 		if (!ctx_buf->sglq) {
922 			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
923 			kfree(ctx_buf->context);
924 			kfree(ctx_buf);
925 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
926 					"6407 Ran out of NVMET XRIs\n");
927 			return -ENOMEM;
928 		}
929 		spin_lock(&phba->sli4_hba.nvmet_io_lock);
930 		list_add_tail(&ctx_buf->list,
931 			      &phba->sli4_hba.lpfc_nvmet_ctx_list);
932 		spin_unlock(&phba->sli4_hba.nvmet_io_lock);
933 	}
934 	phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
935 	return 0;
936 }
937 
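/**
 * lpfc_nvmet_create_targetport - Register the HBA as an NVME target port
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the per-XRI IO contexts, fills in the port info and the
 * template limits from the HBA configuration, and registers the target
 * port with the nvmet_fc transport. On success the tgtport private
 * area is initialized and all statistics counters are zeroed.
 *
 * Returns 0 on success (or if a targetport is already registered),
 * otherwise a negative error code.
 **/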
938 int
939 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
940 {
941 	struct lpfc_vport  *vport = phba->pport;
942 	struct lpfc_nvmet_tgtport *tgtp;
943 	struct nvmet_fc_port_info pinfo;
944 	int error;
945 
946 	if (phba->targetport)
947 		return 0;
948 
949 	error = lpfc_nvmet_setup_io_context(phba);
950 	if (error)
951 		return error;
952 
953 	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
954 	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
955 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
956 	pinfo.port_id = vport->fc_myDID;
957 
958 	/* Limit to LPFC_MAX_NVME_SEG_CNT.
959 	 * For now need + 1 to get around NVME transport logic.
960 	 */
961 	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
962 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
963 				"6400 Reducing sg segment cnt to %d\n",
964 				LPFC_MAX_NVME_SEG_CNT);
965 		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
966 	} else {
967 		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
968 	}
969 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
970 	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
971 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
972 					   NVMET_FCTGTFEAT_CMD_IN_ISR |
973 					   NVMET_FCTGTFEAT_OPDONE_IN_ISR;
974 
975 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
976 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
977 					     &phba->pcidev->dev,
978 					     &phba->targetport);
979 #else
980 	error = -ENOENT;
981 #endif
982 	if (error) {
983 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
984 				"6025 Cannot register NVME targetport "
985 				"x%x\n", error);
986 		phba->targetport = NULL;
987 
988 		lpfc_nvmet_cleanup_io_context(phba);
989 
990 	} else {
991 		tgtp = (struct lpfc_nvmet_tgtport *)
992 			phba->targetport->private;
993 		tgtp->phba = phba;
994 
995 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
996 				"6026 Registered NVME "
997 				"targetport: %p, private %p "
998 				"portnm %llx nodenm %llx\n",
999 				phba->targetport, tgtp,
1000 				pinfo.port_name, pinfo.node_name);
1001 
1002 		atomic_set(&tgtp->rcv_ls_req_in, 0);
1003 		atomic_set(&tgtp->rcv_ls_req_out, 0);
1004 		atomic_set(&tgtp->rcv_ls_req_drop, 0);
1005 		atomic_set(&tgtp->xmt_ls_abort, 0);
1006 		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1007 		atomic_set(&tgtp->xmt_ls_rsp, 0);
1008 		atomic_set(&tgtp->xmt_ls_drop, 0);
1009 		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1010 		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1011 		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1012 		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1013 		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1014 		atomic_set(&tgtp->xmt_fcp_drop, 0);
1015 		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1016 		atomic_set(&tgtp->xmt_fcp_read, 0);
1017 		atomic_set(&tgtp->xmt_fcp_write, 0);
1018 		atomic_set(&tgtp->xmt_fcp_rsp, 0);
1019 		atomic_set(&tgtp->xmt_fcp_release, 0);
1020 		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1021 		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1022 		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1023 		atomic_set(&tgtp->xmt_fcp_abort, 0);
1024 		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1025 		atomic_set(&tgtp->xmt_abort_unsol, 0);
1026 		atomic_set(&tgtp->xmt_abort_sol, 0);
1027 		atomic_set(&tgtp->xmt_abort_rsp, 0);
1028 		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1029 	}
1030 	return error;
1031 }
1032 
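/**
 * lpfc_nvmet_update_targetport - Update the targetport with the current D_ID
 * @phba: pointer to lpfc hba data structure.
 *
 * Copies the physical port's current fc_myDID into the registered
 * targetport's port_id. Returns 0.
 **/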
1033 int
1034 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1035 {
1036 	struct lpfc_vport  *vport = phba->pport;
1037 
1038 	if (!phba->targetport)
1039 		return 0;
1040 
1041 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1042 			 "6007 Update NVMET port %p did x%x\n",
1043 			 phba->targetport, vport->fc_myDID);
1044 
1045 	phba->targetport->port_id = vport->fc_myDID;
1046 	return 0;
1047 }
1048 
1049 /**
1050  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1051  * @phba: pointer to lpfc hba data structure.
1052  * @axri: pointer to the nvmet xri abort wcqe structure.
1053  *
1054  * This routine is invoked by the worker thread to process a SLI4 fast-path
1055  * NVMET aborted xri.
1056  **/
1057 void
1058 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1059 			    struct sli4_wcqe_xri_aborted *axri)
1060 {
1061 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1062 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1063 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1064 	struct lpfc_nodelist *ndlp;
1065 	unsigned long iflag = 0;
1066 	int rrq_empty = 0;
1067 	bool released = false;
1068 
1069 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1070 			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1071 
1072 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1073 		return;
1074 	spin_lock_irqsave(&phba->hbalock, iflag);
1075 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1076 	list_for_each_entry_safe(ctxp, next_ctxp,
1077 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1078 				 list) {
1079 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1080 			continue;
1081 
1082 		/* Check if we already received a free context call
1083 		 * and we have completed processing an abort situation.
1084 		 */
1085 		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1086 		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1087 			list_del(&ctxp->list);
1088 			released = true;
1089 		}
1090 		ctxp->flag &= ~LPFC_NVMET_XBUSY;
1091 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1092 
1093 		rrq_empty = list_empty(&phba->active_rrq_list);
1094 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1095 		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1096 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1097 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1098 		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1099 			lpfc_set_rrq_active(phba, ndlp,
1100 				ctxp->ctxbuf->sglq->sli4_lxritag,
1101 				rxid, 1);
1102 			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1103 		}
1104 
1105 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1106 				"6318 XB aborted %x flg x%x (%x)\n",
1107 				ctxp->oxid, ctxp->flag, released);
1108 		if (released)
1109 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1110 
1111 		if (rrq_empty)
1112 			lpfc_worker_wake_up(phba);
1113 		return;
1114 	}
1115 	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1116 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1117 }
1118 
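/**
 * lpfc_nvmet_rcv_unsol_abort - Process an unsolicited ABTS for a NVMET exchange
 * @vport: pointer to a virtual port data structure.
 * @fc_hdr: pointer to the FC frame header of the received ABTS.
 *
 * Matches the OX_ID of the ABTS against the aborted-context list. On a
 * match, marks the context with LPFC_NVMET_ABTS_RCV, notifies the
 * transport through nvmet_fc_rcv_fcp_abort() and responds with BA_ACC;
 * otherwise responds with BA_RJT. Returns 0.
 **/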
1119 int
1120 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1121 			   struct fc_frame_header *fc_hdr)
1122 
1123 {
1124 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1125 	struct lpfc_hba *phba = vport->phba;
1126 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1127 	struct nvmefc_tgt_fcp_req *rsp;
1128 	uint16_t xri;
1129 	unsigned long iflag = 0;
1130 
1131 	xri = be16_to_cpu(fc_hdr->fh_ox_id);
1132 
1133 	spin_lock_irqsave(&phba->hbalock, iflag);
1134 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1135 	list_for_each_entry_safe(ctxp, next_ctxp,
1136 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1137 				 list) {
1138 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1139 			continue;
1140 
1141 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1142 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1143 
1144 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1145 		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1146 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1147 
1148 		lpfc_nvmeio_data(phba,
1149 			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1150 			xri, smp_processor_id(), 0);
1151 
1152 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1153 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1154 
1155 		rsp = &ctxp->ctx.fcp_req;
1156 		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1157 
1158 		/* Respond with BA_ACC accordingly */
1159 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1160 		return 0;
1161 	}
1162 	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
1163 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1164 
1165 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1166 			 xri, smp_processor_id(), 1);
1167 
1168 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1169 			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
1170 
1171 	/* Respond with BA_RJT accordingly */
1172 	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1173 #endif
1174 	return 0;
1175 }
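/**
 * lpfc_nvmet_destroy_targetport - Unregister the NVME target port
 * @phba: pointer to lpfc hba data structure.
 *
 * Unregisters the targetport from the nvmet_fc transport, waits (with
 * a timeout) for the delete callback to complete, and frees the
 * per-XRI IO contexts.
 **/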
1176 
1177 void
1178 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1179 {
1180 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1181 	struct lpfc_nvmet_tgtport *tgtp;
1182 
1183 	if (phba->nvmet_support == 0)
1184 		return;
1185 	if (phba->targetport) {
1186 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1187 		init_completion(&tgtp->tport_unreg_done);
1188 		nvmet_fc_unregister_targetport(phba->targetport);
1189 		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
1190 		lpfc_nvmet_cleanup_io_context(phba);
1191 	}
1192 	phba->targetport = NULL;
1193 #endif
1194 }
1195 
1196 /**
1197  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1198  * @phba: pointer to lpfc hba data structure.
1199  * @pring: pointer to a SLI ring.
1200  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1201  *
1202  * This routine is used for processing an unsolicited NVME LS request
1203  * received on an SLI ring. It allocates a receive context for the
1204  * exchange and passes the LS payload to the NVME transport via
1205  * nvmet_fc_rcv_ls_req(). If the transport cannot accept the request,
1206  * the exchange is aborted and the receive buffer is freed back to
1207  * the firmware.
1208  **/
1209 static void
1210 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1211 			   struct hbq_dmabuf *nvmebuf)
1212 {
1213 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1214 	struct lpfc_nvmet_tgtport *tgtp;
1215 	struct fc_frame_header *fc_hdr;
1216 	struct lpfc_nvmet_rcv_ctx *ctxp;
1217 	uint32_t *payload;
1218 	uint32_t size, oxid, sid, rc;
1219 
1220 	if (!nvmebuf || !phba->targetport) {
1221 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1222 				"6154 LS Drop IO\n");
1223 		oxid = 0;
1224 		size = 0;
1225 		sid = 0;
1226 		ctxp = NULL;
1227 		goto dropit;
1228 	}
1229 
1230 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1231 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1232 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1233 	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1234 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1235 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1236 
1237 	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1238 	if (ctxp == NULL) {
1239 		atomic_inc(&tgtp->rcv_ls_req_drop);
1240 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1241 				"6155 LS Drop IO x%x: Alloc\n",
1242 				oxid);
1243 dropit:
1244 		lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1245 				 "xri x%x sz %d from %06x\n",
1246 				 oxid, size, sid);
1247 		if (nvmebuf)
1248 			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1249 		return;
1250 	}
1251 	ctxp->phba = phba;
1252 	ctxp->size = size;
1253 	ctxp->oxid = oxid;
1254 	ctxp->sid = sid;
1255 	ctxp->wqeq = NULL;
1256 	ctxp->state = LPFC_NVMET_STE_RCV;
1257 	ctxp->rqb_buffer = (void *)nvmebuf;
1258 
1259 	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1260 			 oxid, size, sid);
1261 	/*
1262 	 * The calling sequence should be:
1263 	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
1264 	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1265 	 */
1266 	atomic_inc(&tgtp->rcv_ls_req_in);
1267 	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1268 				 payload, size);
1269 
1270 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1271 			"6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
1272 			"%08x %08x %08x\n", __func__, ctxp, size, rc,
1273 			*payload, *(payload+1), *(payload+2),
1274 			*(payload+3), *(payload+4), *(payload+5));
1275 
1276 	if (rc == 0) {
1277 		atomic_inc(&tgtp->rcv_ls_req_out);
1278 		return;
1279 	}
1280 
1281 	lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1282 			 oxid, size, sid);
1283 
1284 	atomic_inc(&tgtp->rcv_ls_req_drop);
1285 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1286 			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1287 			ctxp->oxid, rc);
1288 
1289 	/* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1290 	if (nvmebuf)
1291 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1292 
1293 	atomic_inc(&tgtp->xmt_ls_abort);
1294 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1295 #endif
1296 }
1297 
1298 /**
1299  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1300  * @phba: pointer to lpfc hba data structure.
1301  * @pring: pointer to a SLI ring.
1302  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1303  *
1304  * This routine is used for processing an unsolicited NVME FCP command
1305  * received on an SLI ring. It takes a free receive context from the
1306  * lpfc_nvmet_ctx_list (queueing the command if none is available) and
1307  * passes the command payload to the NVME transport via
1308  * nvmet_fc_rcv_fcp_req(). On failure the exchange is aborted and the
1309  * receive buffer is reposted.
1310  **/
1311 static void
1312 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1313 			    struct lpfc_sli_ring *pring,
1314 			    struct rqb_dmabuf *nvmebuf,
1315 			    uint64_t isr_timestamp)
1316 {
1317 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1318 	struct lpfc_nvmet_rcv_ctx *ctxp;
1319 	struct lpfc_nvmet_tgtport *tgtp;
1320 	struct fc_frame_header *fc_hdr;
1321 	struct lpfc_nvmet_ctxbuf *ctx_buf;
1322 	uint32_t *payload;
1323 	uint32_t size, oxid, sid, rc, qno;
1324 	unsigned long iflag;
1325 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1326 	uint32_t id;
1327 #endif
1328 
1329 	ctx_buf = NULL;
1330 	if (!nvmebuf || !phba->targetport) {
1331 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1332 				"6157 NVMET FCP Drop IO\n");
1333 		oxid = 0;
1334 		size = 0;
1335 		sid = 0;
1336 		ctxp = NULL;
1337 		goto dropit;
1338 	}
1339 
1340 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
1341 	if (phba->sli4_hba.nvmet_ctx_cnt) {
1342 		list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
1343 				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
1344 		phba->sli4_hba.nvmet_ctx_cnt--;
1345 	}
1346 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
1347 
1348 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1349 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1350 	size = nvmebuf->bytes_recv;
1351 
1352 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1353 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1354 		id = smp_processor_id();
1355 		if (id < LPFC_CHECK_CPU_CNT)
1356 			phba->cpucheck_rcv_io[id]++;
1357 	}
1358 #endif
1359 
1360 	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
1361 			 oxid, size, smp_processor_id());
1362 
1363 	if (!ctx_buf) {
1364 		/* Queue this NVME IO to process later */
1365 		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1366 		list_add_tail(&nvmebuf->hbuf.list,
1367 			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
1368 		phba->sli4_hba.nvmet_io_wait_cnt++;
1369 		phba->sli4_hba.nvmet_io_wait_total++;
1370 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1371 				       iflag);
1372 
1373 		/* Post a brand new DMA buffer to RQ */
1374 		qno = nvmebuf->idx;
1375 		lpfc_post_rq_buffer(
1376 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
1377 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
1378 		return;
1379 	}
1380 
1381 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1382 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1383 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1384 
1385 	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
1386 	memset(ctxp, 0, sizeof(ctxp->ctx));
1387 	ctxp->wqeq = NULL;
1388 	ctxp->txrdy = NULL;
1389 	ctxp->offset = 0;
1390 	ctxp->phba = phba;
1391 	ctxp->size = size;
1392 	ctxp->oxid = oxid;
1393 	ctxp->sid = sid;
1394 	ctxp->state = LPFC_NVMET_STE_RCV;
1395 	ctxp->entry_cnt = 1;
1396 	ctxp->flag = 0;
1397 	ctxp->ctxbuf = ctx_buf;
1398 	spin_lock_init(&ctxp->ctxlock);
1399 
1400 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1401 	if (phba->ktime_on) {
1402 		ctxp->ts_isr_cmd = isr_timestamp;
1403 		ctxp->ts_cmd_nvme = ktime_get_ns();
1404 		ctxp->ts_nvme_data = 0;
1405 		ctxp->ts_data_wqput = 0;
1406 		ctxp->ts_isr_data = 0;
1407 		ctxp->ts_data_nvme = 0;
1408 		ctxp->ts_nvme_status = 0;
1409 		ctxp->ts_status_wqput = 0;
1410 		ctxp->ts_isr_status = 0;
1411 		ctxp->ts_status_nvme = 0;
1412 	}
1413 #endif
1414 
1415 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
1416 	/*
1417 	 * The calling sequence should be:
1418 	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
1419 	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1420 	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
1421 	 * the NVME command / FC header is stored, so we are free to repost
1422 	 * the buffer.
1423 	 */
1424 	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1425 				  payload, size);
1426 
1427 	/* Process FCP command */
1428 	if (rc == 0) {
1429 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
1430 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1431 		return;
1432 	}
1433 
1434 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1435 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1436 			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
1437 			ctxp->oxid, rc,
1438 			atomic_read(&tgtp->rcv_fcp_cmd_in),
1439 			atomic_read(&tgtp->rcv_fcp_cmd_out),
1440 			atomic_read(&tgtp->xmt_fcp_release));
1441 dropit:
1442 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1443 			 oxid, size, sid);
1444 	if (oxid) {
1445 		lpfc_nvmet_defer_release(phba, ctxp);
1446 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
1447 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1448 		return;
1449 	}
1450 
1451 	if (ctx_buf)
1452 		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);
1453 
1454 	if (nvmebuf)
1455 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
1456 #endif
1457 }
1458 
1459 /**
1460  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
1461  * @phba: pointer to lpfc hba data structure.
1462  * @pring: pointer to a SLI ring.
1463  * @nvmebuf: pointer to received nvme data structure.
1464  *
1465  * This routine is used to process an unsolicited event received from a SLI
1466  * (Service Level Interface) ring. The actual processing of the data buffer
1467  * associated with the unsolicited event is done by invoking the routine
1468  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
1469  * SLI RQ on which the unsolicited event was received.
1470  **/
1471 void
1472 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1473 			  struct lpfc_iocbq *piocb)
1474 {
1475 	struct lpfc_dmabuf *d_buf;
1476 	struct hbq_dmabuf *nvmebuf;
1477 
1478 	d_buf = piocb->context2;
1479 	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1480 
1481 	if (phba->nvmet_support == 0) {
1482 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1483 		return;
1484 	}
1485 	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
1486 }
1487 
1488 /**
1489  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
1490  * @phba: pointer to lpfc hba data structure.
1491  * @pring: pointer to a SLI ring.
1492  * @nvmebuf: pointer to received nvme data structure.
1493  *
1494  * This routine is used to process an unsolicited event received from a SLI
1495  * (Service Level Interface) ring. The actual processing of the data buffer
1496  * associated with the unsolicited event is done by invoking the routine
1497  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
1498  * SLI RQ on which the unsolicited event was received.
1499  **/
1500 void
1501 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1502 			   struct lpfc_sli_ring *pring,
1503 			   struct rqb_dmabuf *nvmebuf,
1504 			   uint64_t isr_timestamp)
1505 {
1506 	if (phba->nvmet_support == 0) {
1507 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
1508 		return;
1509 	}
1510 	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
1511 				    isr_timestamp);
1512 }
1513 
1514 /**
1515  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
1516  * @phba: pointer to a host N_Port data structure.
1517  * @ctxp: Context info for NVME LS Request
1518  * @rspbuf: DMA buffer of NVME command.
1519  * @rspsize: size of the NVME command.
1520  *
1521  * This routine is used for allocating a lpfc-WQE data structure from
1522  * the driver lpfc-WQE free-list and preparing the WQE with the
1523  * parameters passed into the routine, so that an NVME LS response
1524  * can be transmitted on the exchange recorded in @ctxp. It fills in
1525  * the Buffer Descriptor Entry (BDE) for the response payload at
1526  * @rspbuf/@rspsize and sets up an XMIT_SEQUENCE64 WQE addressed to
1527  * the remote port that sent the original LS request. The remote port
1528  * (ndlp) is looked up by the SID of the request and must be in the
1529  * UNMAPPED or MAPPED state for the response to be sent. The
1530  * reference count on the ndlp is incremented by 1 and the reference
1531  * to the ndlp is put into context1 of the WQE data structure for
1532  * this WQE to hold the ndlp reference for the command's callback
1533  * function to access later.
1534  *
1535  * Return code
1536  *   Pointer to the newly allocated/prepared nvme wqe data structure
1537  *   NULL - when nvme wqe data structure allocation/preparation failed
1538  **/
1539 static struct lpfc_iocbq *
1540 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
1541 		       struct lpfc_nvmet_rcv_ctx *ctxp,
1542 		       dma_addr_t rspbuf, uint16_t rspsize)
1543 {
1544 	struct lpfc_nodelist *ndlp;
1545 	struct lpfc_iocbq *nvmewqe;
1546 	union lpfc_wqe *wqe;
1547 
1548 	if (!lpfc_is_link_up(phba)) {
1549 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1550 				"6104 lpfc_nvmet_prep_ls_wqe: link err: "
1551 				"NPORT x%x oxid:x%x\n",
1552 				ctxp->sid, ctxp->oxid);
1553 		return NULL;
1554 	}
1555 
1556 	/* Allocate buffer for command wqe */
1557 	nvmewqe = lpfc_sli_get_iocbq(phba);
1558 	if (nvmewqe == NULL) {
1559 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1560 				"6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
1561 				"NPORT x%x oxid:x%x\n",
1562 				ctxp->sid, ctxp->oxid);
1563 		return NULL;
1564 	}
1565 
1566 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1567 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1568 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1569 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1570 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1571 				"6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
1572 				"NPORT x%x oxid:x%x\n",
1573 				ctxp->sid, ctxp->oxid);
1574 		goto nvme_wqe_free_wqeq_exit;
1575 	}
1576 	ctxp->wqeq = nvmewqe;
1577 
1578 	/* prevent preparing wqe with NULL ndlp reference */
1579 	nvmewqe->context1 = lpfc_nlp_get(ndlp);
1580 	if (nvmewqe->context1 == NULL)
1581 		goto nvme_wqe_free_wqeq_exit;
1582 	nvmewqe->context2 = ctxp;
1583 
1584 	wqe = &nvmewqe->wqe;
1585 	memset(wqe, 0, sizeof(union lpfc_wqe));
1586 
1587 	/* Words 0 - 2 */
1588 	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1589 	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
1590 	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
1591 	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
1592 
1593 	/* Word 3 */
1594 
1595 	/* Word 4 */
1596 
1597 	/* Word 5 */
1598 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
1599 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
1600 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
1601 	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
1602 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
1603 
1604 	/* Word 6 */
1605 	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
1606 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1607 	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
1608 
1609 	/* Word 7 */
1610 	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
1611 	       CMD_XMIT_SEQUENCE64_WQE);
1612 	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
1613 	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
1614 	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
1615 
1616 	/* Word 8 */
1617 	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
1618 
1619 	/* Word 9 */
1620 	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
1621 	/* Needs to be set by caller */
1622 	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
1623 
1624 	/* Word 10 */
1625 	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
1626 	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
1627 	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
1628 	       LPFC_WQE_LENLOC_WORD12);
1629 	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
1630 
1631 	/* Word 11 */
1632 	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
1633 	       LPFC_WQE_CQ_ID_DEFAULT);
1634 	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
1635 	       OTHER_COMMAND);
1636 
1637 	/* Word 12 */
1638 	wqe->xmit_sequence.xmit_len = rspsize;
1639 
1640 	nvmewqe->retry = 1;
1641 	nvmewqe->vport = phba->pport;
1642 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1643 	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
1644 
1645 	/* Xmit NVME response to remote NPORT <did> */
1646 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1647 			"6039 Xmit NVME LS response to remote "
1648 			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
1649 			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
1650 			rspsize);
1651 	return nvmewqe;
1652 
1653 nvme_wqe_free_wqeq_exit:
1654 	nvmewqe->context2 = NULL;
1655 	nvmewqe->context3 = NULL;
1656 	lpfc_sli_release_iocbq(phba, nvmewqe);
1657 	return NULL;
1658 }
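
/*
 * Illustrative caller sketch for lpfc_nvmet_prep_ls_wqe() (a rough sketch
 * only; local names are hypothetical, but the LS response path in this
 * file drives it along these lines):
 *
 *	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
 *					  rsp->rsplen);
 *	if (!nvmewqeq)
 *		return -ENXIO;
 *	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
 *	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
 *
 * LS WQEs are posted on the ELS ring; the FCP WQEs prepared below go to
 * LPFC_FCP_RING instead.
 */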
1659 
1660 
1661 static struct lpfc_iocbq *
1662 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1663 			struct lpfc_nvmet_rcv_ctx *ctxp)
1664 {
1665 	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
1666 	struct lpfc_nvmet_tgtport *tgtp;
1667 	struct sli4_sge *sgl;
1668 	struct lpfc_nodelist *ndlp;
1669 	struct lpfc_iocbq *nvmewqe;
1670 	struct scatterlist *sgel;
1671 	union lpfc_wqe128 *wqe;
1672 	uint32_t *txrdy;
1673 	dma_addr_t physaddr;
1674 	int i, cnt;
1675 	int xc = 1;
1676 
1677 	if (!lpfc_is_link_up(phba)) {
1678 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1679 				"6107 lpfc_nvmet_prep_fcp_wqe: link err: "
1680 				"NPORT x%x oxid:x%x\n", ctxp->sid,
1681 				ctxp->oxid);
1682 		return NULL;
1683 	}
1684 
1685 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1686 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1687 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1688 	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1689 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1690 				"6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
1691 				"NPORT x%x oxid:x%x\n",
1692 				ctxp->sid, ctxp->oxid);
1693 		return NULL;
1694 	}
1695 
1696 	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
1697 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1698 				"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
1699 				"NPORT x%x oxid:x%x cnt %d\n",
1700 				ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
1701 		return NULL;
1702 	}
1703 
1704 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1705 	nvmewqe = ctxp->wqeq;
1706 	if (nvmewqe == NULL) {
1707 		/* Allocate buffer for command wqe */
1708 		nvmewqe = ctxp->ctxbuf->iocbq;
1709 		if (nvmewqe == NULL) {
1710 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1711 					"6110 lpfc_nvmet_prep_fcp_wqe: No "
1712 					"WQE: NPORT x%x oxid:x%x\n",
1713 					ctxp->sid, ctxp->oxid);
1714 			return NULL;
1715 		}
1716 		ctxp->wqeq = nvmewqe;
1717 		xc = 0; /* create new XRI */
1718 		nvmewqe->sli4_lxritag = NO_XRI;
1719 		nvmewqe->sli4_xritag = NO_XRI;
1720 	}
1721 
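	/* A data WQE is only valid on first entry after the command was
	 * received (STE_RCV with entry_cnt == 1) or on a follow-on data
	 * burst (STE_DATA with entry_cnt > 1); any other combination means
	 * the context is being driven out of order.
	 */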
1722 	/* Sanity check */
1723 	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
1724 	    (ctxp->entry_cnt == 1)) ||
1725 	    ((ctxp->state == LPFC_NVMET_STE_DATA) &&
1726 	    (ctxp->entry_cnt > 1))) {
1727 		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1728 	} else {
1729 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1730 				"6111 Wrong state %s: %d  cnt %d\n",
1731 				__func__, ctxp->state, ctxp->entry_cnt);
1732 		return NULL;
1733 	}
1734 
1735 	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
1736 	switch (rsp->op) {
1737 	case NVMET_FCOP_READDATA:
1738 	case NVMET_FCOP_READDATA_RSP:
1739 		/* Words 0 - 2 : The first sg segment */
1740 		sgel = &rsp->sg[0];
1741 		physaddr = sg_dma_address(sgel);
1742 		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1743 		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
1744 		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
1745 		wqe->fcp_tsend.bde.addrHigh =
1746 			cpu_to_le32(putPaddrHigh(physaddr));
1747 
1748 		/* Word 3 */
1749 		wqe->fcp_tsend.payload_offset_len = 0;
1750 
1751 		/* Word 4 */
1752 		wqe->fcp_tsend.relative_offset = ctxp->offset;
1753 
1754 		/* Word 5 */
1755 
1756 		/* Word 6 */
1757 		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
1758 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1759 		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
1760 		       nvmewqe->sli4_xritag);
1761 
1762 		/* Word 7 */
1763 		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
1764 
1765 		/* Word 8 */
1766 		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
1767 
1768 		/* Word 9 */
1769 		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
1770 		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
1771 
1772 		/* Word 10 */
1773 		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1774 		bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
1775 		bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
1776 		bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
1777 		       LPFC_WQE_LENLOC_WORD12);
1778 		bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
1779 		bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
1781 		if (phba->cfg_nvme_oas)
1782 			bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
1783 
1784 		/* Word 11 */
1785 		bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
1786 		       LPFC_WQE_CQ_ID_DEFAULT);
1787 		bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
1788 		       FCP_COMMAND_TSEND);
1789 
1790 		/* Word 12 */
1791 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
1792 
1793 		/* Setup 2 SKIP SGEs */
1794 		sgl->addr_hi = 0;
1795 		sgl->addr_lo = 0;
1796 		sgl->word2 = 0;
1797 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1798 		sgl->word2 = cpu_to_le32(sgl->word2);
1799 		sgl->sge_len = 0;
1800 		sgl++;
1801 		sgl->addr_hi = 0;
1802 		sgl->addr_lo = 0;
1803 		sgl->word2 = 0;
1804 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1805 		sgl->word2 = cpu_to_le32(sgl->word2);
1806 		sgl->sge_len = 0;
1807 		sgl++;
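		/*
		 * For READDATA_RSP the FCP response rides on the TSEND
		 * itself (auto-rsp): if the initiator negotiated response
		 * suppression (NLP_SUPPRESS_RSP) and this is the 12-byte
		 * success ERSP, no response payload is sent at all;
		 * otherwise the ERSP is embedded in WQE words 16 and up as
		 * an immediate response (wqe_wqes/wqe_irsp/wqe_irsplen).
		 */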
1808 		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
1809 			atomic_inc(&tgtp->xmt_fcp_read_rsp);
1810 			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
1811 			if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
1812 			    (rsp->rsplen == 12)) {
1813 				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
1814 				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1815 				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1816 				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1817 			} else {
1818 				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1819 				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
1820 				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
1821 				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
1822 				       ((rsp->rsplen >> 2) - 1));
1823 				memcpy(&wqe->words[16], rsp->rspaddr,
1824 				       rsp->rsplen);
1825 			}
1826 		} else {
1827 			atomic_inc(&tgtp->xmt_fcp_read);
1828 
1829 			bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1830 			bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1831 			bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1832 			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
1833 			bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1834 		}
1835 		ctxp->state = LPFC_NVMET_STE_DATA;
1836 		break;
1837 
1838 	case NVMET_FCOP_WRITEDATA:
1839 		/* Words 0 - 2 : The first sg segment */
1840 		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
1841 				       GFP_KERNEL, &physaddr);
1842 		if (!txrdy) {
1843 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1844 					"6041 Bad txrdy buffer: oxid x%x\n",
1845 					ctxp->oxid);
1846 			return NULL;
1847 		}
1848 		ctxp->txrdy = txrdy;
1849 		ctxp->txrdy_phys = physaddr;
1850 		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1851 		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
1852 		wqe->fcp_treceive.bde.addrLow =
1853 			cpu_to_le32(putPaddrLow(physaddr));
1854 		wqe->fcp_treceive.bde.addrHigh =
1855 			cpu_to_le32(putPaddrHigh(physaddr));
1856 
1857 		/* Word 3 */
1858 		wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
1859 
1860 		/* Word 4 */
1861 		wqe->fcp_treceive.relative_offset = ctxp->offset;
1862 
1863 		/* Word 5 */
1864 
1865 		/* Word 6 */
1866 		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
1867 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1868 		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
1869 		       nvmewqe->sli4_xritag);
1870 
1871 		/* Word 7 */
1872 		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
1873 		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
1874 		       CMD_FCP_TRECEIVE64_WQE);
1875 
1876 		/* Word 8 */
1877 		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
1878 
1879 		/* Word 9 */
1880 		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
1881 		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
1882 
1883 		/* Word 10 */
1884 		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
1885 		bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
1886 		bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
1887 		bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
1888 		       LPFC_WQE_LENLOC_WORD12);
1889 		bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
1890 		bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
1891 		bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
1892 		bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
1894 		if (phba->cfg_nvme_oas)
1895 			bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
1896 
1897 		/* Word 11 */
1898 		bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
1899 		       LPFC_WQE_CQ_ID_DEFAULT);
1900 		bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
1901 		       FCP_COMMAND_TRECEIVE);
1902 		bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
1903 
1904 		/* Word 12 */
1905 		wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
1906 
1907 		/* Setup 1 TXRDY and 1 SKIP SGE */
1908 		txrdy[0] = 0;
1909 		txrdy[1] = cpu_to_be32(rsp->transfer_length);
1910 		txrdy[2] = 0;
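		/*
		 * The three words above form the FCP XFER_RDY payload:
		 * relative offset (0), burst length (the full transfer
		 * length, big-endian on the wire), and a reserved word.
		 */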
1911 
1912 		sgl->addr_hi = putPaddrHigh(physaddr);
1913 		sgl->addr_lo = putPaddrLow(physaddr);
1914 		sgl->word2 = 0;
1915 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1916 		sgl->word2 = cpu_to_le32(sgl->word2);
1917 		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
1918 		sgl++;
1919 		sgl->addr_hi = 0;
1920 		sgl->addr_lo = 0;
1921 		sgl->word2 = 0;
1922 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1923 		sgl->word2 = cpu_to_le32(sgl->word2);
1924 		sgl->sge_len = 0;
1925 		sgl++;
1926 		ctxp->state = LPFC_NVMET_STE_DATA;
1927 		atomic_inc(&tgtp->xmt_fcp_write);
1928 		break;
1929 
1930 	case NVMET_FCOP_RSP:
1931 		/* Words 0 - 2 */
1932 		physaddr = rsp->rspdma;
1933 		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1934 		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
1935 		wqe->fcp_trsp.bde.addrLow =
1936 			cpu_to_le32(putPaddrLow(physaddr));
1937 		wqe->fcp_trsp.bde.addrHigh =
1938 			cpu_to_le32(putPaddrHigh(physaddr));
1939 
1940 		/* Word 3 */
1941 		wqe->fcp_trsp.response_len = rsp->rsplen;
1942 
1943 		/* Word 4 */
1944 		wqe->fcp_trsp.rsvd_4_5[0] = 0;
1945 
1946 
1947 		/* Word 5 */
1948 
1949 		/* Word 6 */
1950 		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
1951 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1952 		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
1953 		       nvmewqe->sli4_xritag);
1954 
1955 		/* Word 7 */
1956 		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
1957 		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
1958 
1959 		/* Word 8 */
1960 		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
1961 
1962 		/* Word 9 */
1963 		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
1964 		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
1965 
1966 		/* Word 10 */
1967 		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
1968 		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
1969 		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
1970 		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
1971 		       LPFC_WQE_LENLOC_WORD3);
1972 		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
1974 		if (phba->cfg_nvme_oas)
1975 			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
1976 
1977 		/* Word 11 */
1978 		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
1979 		       LPFC_WQE_CQ_ID_DEFAULT);
1980 		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
1981 		       FCP_COMMAND_TRSP);
1982 		bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
1983 		ctxp->state = LPFC_NVMET_STE_RSP;
1984 
1985 		if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
1986 			/* Good response - all zero's on wire */
1987 			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
1988 			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
1989 			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
1990 		} else {
1991 			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
1992 			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
1993 			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
1994 			       ((rsp->rsplen >> 2) - 1));
1995 			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
1996 		}
1997 
1998 		/* Use rspbuf, NOT sg list */
1999 		rsp->sg_cnt = 0;
2000 		sgl->word2 = 0;
2001 		atomic_inc(&tgtp->xmt_fcp_rsp);
2002 		break;
2003 
2004 	default:
2005 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2006 				"6064 Unknown Rsp Op %d\n",
2007 				rsp->op);
2008 		return NULL;
2009 	}
2010 
2011 	nvmewqe->retry = 1;
2012 	nvmewqe->vport = phba->pport;
2013 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2014 	nvmewqe->context1 = ndlp;
2015 
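	/*
	 * Translate the transport scatterlist into SLI4 data SGEs behind
	 * the template entries set up above. The last SGE is flagged so the
	 * hardware knows where the list ends, and ctxp->offset accumulates
	 * the relative offset for any follow-on data WQEs.
	 */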
2016 	for (i = 0; i < rsp->sg_cnt; i++) {
2017 		sgel = &rsp->sg[i];
2018 		physaddr = sg_dma_address(sgel);
2019 		cnt = sg_dma_len(sgel);
2020 		sgl->addr_hi = putPaddrHigh(physaddr);
2021 		sgl->addr_lo = putPaddrLow(physaddr);
2022 		sgl->word2 = 0;
2023 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2024 		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2025 		if ((i+1) == rsp->sg_cnt)
2026 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2027 		sgl->word2 = cpu_to_le32(sgl->word2);
2028 		sgl->sge_len = cpu_to_le32(cnt);
2029 		sgl++;
2030 		ctxp->offset += cnt;
2031 	}
2032 	return nvmewqe;
2033 }
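
/*
 * Illustrative caller sketch for lpfc_nvmet_prep_fcp_wqe() (a rough
 * sketch; local names are hypothetical):
 *
 *	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
 *	if (!nvmewqeq)
 *		return -ENXIO;
 *	nvmewqeq->wqe_cmpl = <FCP op completion handler>;
 *	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
 *
 * The routine only builds the WQE and SGL in place; nothing reaches the
 * hardware until the caller issues the WQE on the FCP ring.
 */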
2034 
2035 /**
2036  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2037  * @phba: Pointer to HBA context object.
2038  * @cmdwqe: Pointer to driver command WQE object.
2039  * @wcqe: Pointer to driver response CQE object.
2040  *
2041  * This function is called from the SLI ring event handler with no lock
2042  * held. It is the completion handler for an NVME ABTS for FCP commands
2043  * and frees the memory resources used for those commands.
2044  **/
2045 static void
2046 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2047 			     struct lpfc_wcqe_complete *wcqe)
2048 {
2049 	struct lpfc_nvmet_rcv_ctx *ctxp;
2050 	struct lpfc_nvmet_tgtport *tgtp;
2051 	uint32_t status, result;
2052 	unsigned long flags;
2053 	bool released = false;
2054 
2055 	ctxp = cmdwqe->context2;
2056 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2057 	result = wcqe->parameter;
2058 
2059 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2060 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2061 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2062 
2063 	ctxp->state = LPFC_NVMET_STE_DONE;
2064 
2065 	/* Check if we already received a free context call
2066 	 * and we have completed processing an abort situation.
2067 	 */
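	/* LPFC_NVMET_CTX_RLS is set once the transport's ctx_release call
	 * has been seen; LPFC_NVMET_XBUSY means the exchange is still held
	 * by the hardware. Only when the release has been seen and the
	 * exchange is no longer busy may the context buffer be reposted.
	 */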
2068 	spin_lock_irqsave(&ctxp->ctxlock, flags);
2069 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2070 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2071 		list_del(&ctxp->list);
2072 		released = true;
2073 	}
2074 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2075 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2076 	atomic_inc(&tgtp->xmt_abort_rsp);
2077 
2078 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2079 			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
2080 			"WCQE: %08x %08x %08x %08x\n",
2081 			ctxp->oxid, ctxp->flag, released,
2082 			wcqe->word0, wcqe->total_data_placed,
2083 			result, wcqe->word3);
2084 
2085 	cmdwqe->context2 = NULL;
2086 	cmdwqe->context3 = NULL;
2087 	/*
2088 	 * If the transport has already released the ctx, it can be reused
2089 	 * now; otherwise it will be recycled by the transport release call.
2090 	 */
2091 	if (released)
2092 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2093 
2094 	/* This is the iocbq for the abort, not the command */
2095 	lpfc_sli_release_iocbq(phba, cmdwqe);
2096 
2097 	/* Since iaab/iaar are NOT set, there is no work left.
2098 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2099 	 * should have been called already.
2100 	 */
2101 }
2102 
2103 /**
2104  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2105  * @phba: Pointer to HBA context object.
2106  * @cmdwqe: Pointer to driver command WQE object.
2107  * @wcqe: Pointer to driver response CQE object.
2108  *
2109  * This function is called from the SLI ring event handler with no lock
2110  * held. It is the completion handler for an NVME ABTS for FCP commands
2111  * and frees the memory resources used for those commands.
2112  **/
2113 static void
2114 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2115 			       struct lpfc_wcqe_complete *wcqe)
2116 {
2117 	struct lpfc_nvmet_rcv_ctx *ctxp;
2118 	struct lpfc_nvmet_tgtport *tgtp;
2119 	unsigned long flags;
2120 	uint32_t status, result;
2121 	bool released = false;
2122 
2123 	ctxp = cmdwqe->context2;
2124 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2125 	result = wcqe->parameter;
2126 
2127 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2128 	if (!ctxp) {
2129 		/* if context is clear, the related I/O already completed */
2130 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2131 				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
2132 				wcqe->word0, wcqe->total_data_placed,
2133 				result, wcqe->word3);
2134 		return;
2135 	}
2136 
2137 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2138 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2139 
2140 	/* Sanity check */
2141 	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
2142 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2143 				"6112 ABTS Wrong state:%d oxid x%x\n",
2144 				ctxp->state, ctxp->oxid);
2145 	}
2146 
2147 	/* Check if we already received a free context call
2148 	 * and we have completed processing an abort situation.
2149 	 */
2150 	ctxp->state = LPFC_NVMET_STE_DONE;
2151 	spin_lock_irqsave(&ctxp->ctxlock, flags);
2152 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2153 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2154 		list_del(&ctxp->list);
2155 		released = true;
2156 	}
2157 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2158 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2159 	atomic_inc(&tgtp->xmt_abort_rsp);
2160 
2161 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2162 			"6316 ABTS cmpl xri x%x flg x%x (%x) "
2163 			"WCQE: %08x %08x %08x %08x\n",
2164 			ctxp->oxid, ctxp->flag, released,
2165 			wcqe->word0, wcqe->total_data_placed,
2166 			result, wcqe->word3);
2167 
2168 	cmdwqe->context2 = NULL;
2169 	cmdwqe->context3 = NULL;
2170 	/*
2171 	 * If the transport has already released the ctx, it can be reused
2172 	 * now; otherwise it will be recycled by the transport release call.
2173 	 */
2174 	if (released)
2175 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2176 
2177 	/* Since iaab/iaar are NOT set, there is no work left.
2178 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2179 	 * should have been called already.
2180 	 */
2181 }
2182 
2183 /**
2184  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
2185  * @phba: Pointer to HBA context object.
2186  * @cmdwqe: Pointer to driver command WQE object.
2187  * @wcqe: Pointer to driver response CQE object.
2188  *
2189  * This function is called from the SLI ring event handler with no lock
2190  * held. It is the completion handler for an NVME ABTS for LS commands
2191  * and frees the memory resources used for those commands.
2192  **/
2193 static void
2194 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2195 			    struct lpfc_wcqe_complete *wcqe)
2196 {
2197 	struct lpfc_nvmet_rcv_ctx *ctxp;
2198 	struct lpfc_nvmet_tgtport *tgtp;
2199 	uint32_t status, result;
2200 
2201 	ctxp = cmdwqe->context2;
2202 	status = bf_get(lpfc_wcqe_c_status, wcqe);
2203 	result = wcqe->parameter;
2204 
2205 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2206 	atomic_inc(&tgtp->xmt_ls_abort_cmpl);
2207 
2208 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2209 			"6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
2210 			ctxp, wcqe->word0, wcqe->total_data_placed,
2211 			result, wcqe->word3);
2212 
2213 	cmdwqe->context2 = NULL;
2214 	cmdwqe->context3 = NULL;
2215 	lpfc_sli_release_iocbq(phba, cmdwqe);
2216 	kfree(ctxp);	/* kfree() of a NULL ctxp is a no-op */
2220 }
2221 
2222 static int
2223 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
2224 			     struct lpfc_nvmet_rcv_ctx *ctxp,
2225 			     uint32_t sid, uint16_t xri)
2226 {
2227 	struct lpfc_nvmet_tgtport *tgtp;
2228 	struct lpfc_iocbq *abts_wqeq;
2229 	union lpfc_wqe *wqe_abts;
2230 	struct lpfc_nodelist *ndlp;
2231 
2232 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2233 			"6067 ABTS: sid %x xri x%x/x%x\n",
2234 			sid, xri, ctxp->wqeq->sli4_xritag);
2235 
2236 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2237 
2238 	ndlp = lpfc_findnode_did(phba->pport, sid);
2239 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2240 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2241 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2242 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2243 		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2244 				"6134 Drop ABTS - wrong NDLP state x%x.\n",
2245 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2246 
2247 		/* No failure to an ABTS request. */
2248 		return 0;
2249 	}
2250 
2251 	abts_wqeq = ctxp->wqeq;
2252 	wqe_abts = &abts_wqeq->wqe;
2253 	ctxp->state = LPFC_NVMET_STE_ABORT;
2254 
2255 	/*
2256 	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
2257 	 * that were initialized in lpfc_sli4_nvmet_alloc.
2258 	 */
2259 	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2260 
2261 	/* Word 5 */
2262 	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2263 	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2264 	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2265 	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2266 	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2267 
2268 	/* Word 6 */
2269 	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2270 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2271 	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2272 	       abts_wqeq->sli4_xritag);
2273 
2274 	/* Word 7 */
2275 	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2276 	       CMD_XMIT_SEQUENCE64_WQE);
2277 	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2278 	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2279 	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2280 
2281 	/* Word 8 */
2282 	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2283 
2284 	/* Word 9 */
2285 	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
2286 	/* Needs to be set by caller */
2287 	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2288 
2289 	/* Word 10 */
2290 	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2291 	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2292 	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2293 	       LPFC_WQE_LENLOC_WORD12);
2294 	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2295 	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2296 
2297 	/* Word 11 */
2298 	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2299 	       LPFC_WQE_CQ_ID_DEFAULT);
2300 	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2301 	       OTHER_COMMAND);
2302 
2303 	abts_wqeq->vport = phba->pport;
2304 	abts_wqeq->context1 = ndlp;
2305 	abts_wqeq->context2 = ctxp;
2306 	abts_wqeq->context3 = NULL;
2307 	abts_wqeq->rsvd2 = 0;
2308 	/* hba_wqidx should already be set up from the command we are aborting */
2309 	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2310 	abts_wqeq->iocb.ulpLe = 1;
2311 
2312 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2313 			"6069 Issue ABTS to xri x%x reqtag x%x\n",
2314 			xri, abts_wqeq->iotag);
2315 	return 1;
2316 }
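
/*
 * Illustrative flow for lpfc_nvmet_unsol_issue_abort() (a sketch; the two
 * callers below wire it up roughly like this):
 *
 *	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri)) {
 *		abts_wqeq->wqe_cmpl = <abort completion handler>;
 *		rc = lpfc_sli4_issue_wqe(phba, <ring>, abts_wqeq);
 *	}
 *
 * The helper only builds the BLS ABTS as an XMIT_SEQUENCE WQE; nothing is
 * posted until a caller attaches a completion handler and issues it.
 */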
2317 
2318 static int
2319 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2320 			       struct lpfc_nvmet_rcv_ctx *ctxp,
2321 			       uint32_t sid, uint16_t xri)
2322 {
2323 	struct lpfc_nvmet_tgtport *tgtp;
2324 	struct lpfc_iocbq *abts_wqeq;
2325 	union lpfc_wqe *abts_wqe;
2326 	struct lpfc_nodelist *ndlp;
2327 	unsigned long flags;
2328 	int rc;
2329 
2330 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2331 	if (!ctxp->wqeq) {
2332 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
2333 		ctxp->wqeq->hba_wqidx = 0;
2334 	}
2335 
2336 	ndlp = lpfc_findnode_did(phba->pport, sid);
2337 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2338 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2339 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2340 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2341 		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2342 				"6160 Drop ABORT - wrong NDLP state x%x.\n",
2343 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2344 
2345 		/* No failure to an ABTS request. */
2346 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2347 		return 0;
2348 	}
2349 
2350 	/* Issue ABTS for this WQE based on iotag */
2351 	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2352 	if (!ctxp->abort_wqeq) {
2353 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2354 		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2355 				"6161 ABORT failed: No wqeqs: "
2356 				"xri: x%x\n", ctxp->oxid);
2357 		/* No failure to an ABTS request. */
2358 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2359 		return 0;
2360 	}
2361 	abts_wqeq = ctxp->abort_wqeq;
2362 	abts_wqe = &abts_wqeq->wqe;
2363 	ctxp->state = LPFC_NVMET_STE_ABORT;
2364 
2365 	/* Log the abort request before it is submitted. */
2366 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2367 			"6162 ABORT Request to rport DID x%06x "
2368 			"for xri x%x x%x\n",
2369 			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2370 
2371 	/* If the hba is getting reset, this flag is set.  It is
2372 	 * cleared when the reset is complete and rings reestablished.
2373 	 */
2374 	spin_lock_irqsave(&phba->hbalock, flags);
2375 	/* driver queued commands are in process of being flushed */
2376 	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2377 		spin_unlock_irqrestore(&phba->hbalock, flags);
2378 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2379 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2380 				"6163 Driver in reset cleanup - flushing "
2381 				"NVME Req now. hba_flag x%x oxid x%x\n",
2382 				phba->hba_flag, ctxp->oxid);
2383 		lpfc_sli_release_iocbq(phba, abts_wqeq);
2384 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2385 		return 0;
2386 	}
2387 
2388 	/* Outstanding abort is in progress */
2389 	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
2390 		spin_unlock_irqrestore(&phba->hbalock, flags);
2391 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2392 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2393 				"6164 Outstanding NVME I/O Abort Request "
2394 				"still pending on oxid x%x\n",
2395 				ctxp->oxid);
2396 		lpfc_sli_release_iocbq(phba, abts_wqeq);
2397 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2398 		return 0;
2399 	}
2400 
2401 	/* Ready - mark outstanding as aborted by driver. */
2402 	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
2403 
2404 	/* WQEs are reused.  Clear stale data and set key fields to
2405 	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
2406 	 */
2407 	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
2408 
2409 	/* word 3 */
2410 	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
2411 
2412 	/* word 7 */
2413 	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
2414 	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
2415 
2416 	/* word 8 - tell the FW to abort the IO associated with this
2417 	 * outstanding exchange ID.
2418 	 */
2419 	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
2420 
2421 	/* word 9 - this is the iotag for the abts_wqe completion. */
2422 	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
2423 	       abts_wqeq->iotag);
2424 
2425 	/* word 10 */
2426 	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
2427 	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
2428 
2429 	/* word 11 */
2430 	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
2431 	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
2432 	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
2433 
2434 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
2435 	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
2436 	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
2437 	abts_wqeq->iocb_cmpl = NULL;
2438 	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
2439 	abts_wqeq->context2 = ctxp;
2440 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2441 	spin_unlock_irqrestore(&phba->hbalock, flags);
2442 	if (rc == WQE_SUCCESS) {
2443 		atomic_inc(&tgtp->xmt_abort_sol);
2444 		return 0;
2445 	}
2446 
2447 	atomic_inc(&tgtp->xmt_abort_rsp_error);
2448 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2449 	lpfc_sli_release_iocbq(phba, abts_wqeq);
2450 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2451 			"6166 Failed ABORT issue_wqe with status x%x "
2452 			"for oxid x%x.\n",
2453 			rc, ctxp->oxid);
2454 	return 1;
2455 }
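
/*
 * Note the return convention shared by the FCP abort issuers: 0 means the
 * abort was either queued or intentionally dropped (an ABTS request is
 * never failed back to the transport), while a nonzero return reports a
 * genuine issue_wqe failure. The LS variant below always returns 0 and
 * only logs the failure.
 */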
2456 
2457 
2458 static int
2459 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
2460 				 struct lpfc_nvmet_rcv_ctx *ctxp,
2461 				 uint32_t sid, uint16_t xri)
2462 {
2463 	struct lpfc_nvmet_tgtport *tgtp;
2464 	struct lpfc_iocbq *abts_wqeq;
2465 	unsigned long flags;
2466 	int rc;
2467 
2468 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2469 	if (!ctxp->wqeq) {
2470 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
2471 		ctxp->wqeq->hba_wqidx = 0;
2472 	}
2473 
2474 	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
2475 	if (rc == 0)
2476 		goto aerr;
2477 
2478 	spin_lock_irqsave(&phba->hbalock, flags);
2479 	abts_wqeq = ctxp->wqeq;
2480 	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
2481 	abts_wqeq->iocb_cmpl = NULL;
2482 	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
2483 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2484 	spin_unlock_irqrestore(&phba->hbalock, flags);
2485 	if (rc == WQE_SUCCESS)
2486 		return 0;
2488 
2489 aerr:
2490 	atomic_inc(&tgtp->xmt_abort_rsp_error);
2491 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2493 	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2494 			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
2495 			ctxp->oxid, rc);
2496 	return 1;
2497 }
2498 
2499 static int
2500 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
2501 				struct lpfc_nvmet_rcv_ctx *ctxp,
2502 				uint32_t sid, uint16_t xri)
2503 {
2504 	struct lpfc_nvmet_tgtport *tgtp;
2505 	struct lpfc_iocbq *abts_wqeq;
2506 	union lpfc_wqe *wqe_abts;
2507 	unsigned long flags;
2508 	int rc;
2509 
2510 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2511 	if (!ctxp->wqeq) {
2512 		/* Issue ABTS for this WQE based on iotag */
2513 		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
2514 		if (!ctxp->wqeq) {
2515 			lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2516 					"6068 Abort failed: No wqeqs: "
2517 					"xri: x%x\n", xri);
2518 			/* No failure to an ABTS request. */
2519 			kfree(ctxp);
2520 			return 0;
2521 		}
2522 	}
2523 	abts_wqeq = ctxp->wqeq;
2524 	wqe_abts = &abts_wqeq->wqe;
2525 
2526 	lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
2527 
2528 	spin_lock_irqsave(&phba->hbalock, flags);
2529 	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
2530 	abts_wqeq->iocb_cmpl = NULL;
2531 	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
2532 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
2533 	spin_unlock_irqrestore(&phba->hbalock, flags);
2534 	if (rc == WQE_SUCCESS) {
2535 		atomic_inc(&tgtp->xmt_abort_unsol);
2536 		return 0;
2537 	}
2538 
2539 	atomic_inc(&tgtp->xmt_abort_rsp_error);
2540 	abts_wqeq->context2 = NULL;
2541 	abts_wqeq->context3 = NULL;
2542 	lpfc_sli_release_iocbq(phba, abts_wqeq);
2543 	kfree(ctxp);
2544 	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2545 			"6056 Failed to Issue ABTS. Status x%x\n", rc);
2546 	return 0;
2547 }
2548