xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_nvmet.c (revision d236d361)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
5  * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30 
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38 
39 #include <../drivers/nvme/host/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 
42 #include "lpfc_version.h"
43 #include "lpfc_hw4.h"
44 #include "lpfc_hw.h"
45 #include "lpfc_sli.h"
46 #include "lpfc_sli4.h"
47 #include "lpfc_nl.h"
48 #include "lpfc_disc.h"
49 #include "lpfc.h"
50 #include "lpfc_scsi.h"
51 #include "lpfc_nvme.h"
52 #include "lpfc_nvmet.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_crtn.h"
55 #include "lpfc_vport.h"
56 #include "lpfc_debugfs.h"
57 
58 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
59 						 struct lpfc_nvmet_rcv_ctx *,
60 						 dma_addr_t rspbuf,
61 						 uint16_t rspsize);
62 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
63 						  struct lpfc_nvmet_rcv_ctx *);
64 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
65 					  struct lpfc_nvmet_rcv_ctx *,
66 					  uint32_t, uint16_t);
67 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
68 					    struct lpfc_nvmet_rcv_ctx *,
69 					    uint32_t, uint16_t);
70 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
71 					   struct lpfc_nvmet_rcv_ctx *,
72 					   uint32_t, uint16_t);
73 
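/**
 * lpfc_nvmet_defer_release - Queue a context for deferred release
 * @phba: Pointer to HBA context object.
 * @ctxp: Receive context to defer.
 *
 * Marks the context with LPFC_NVMET_CTX_RLS and adds it to the
 * lpfc_abts_nvmet_ctx_list so the abort / XRI-aborted path performs the
 * actual release.  If the context is already queued, nothing is done.
 **/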
74 void
75 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
76 {
77 	unsigned long iflag;
78 
79 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
80 			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
81 			ctxp->oxid, ctxp->flag);
82 
83 	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
84 	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
85 		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
86 				       iflag);
87 		return;
88 	}
89 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
90 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
91 	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
92 }
93 
94 /**
95  * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
96  * @phba: Pointer to HBA context object.
97  * @cmdwqe: Pointer to driver command WQE object.
98  * @wcqe: Pointer to driver response CQE object.
99  *
100  * The function is called from the SLI ring event handler with no
101  * lock held. It is the completion handler for NVME LS commands and
102  * frees the memory resources used for the NVME command.
103  **/
104 static void
105 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
106 			  struct lpfc_wcqe_complete *wcqe)
107 {
108 	struct lpfc_nvmet_tgtport *tgtp;
109 	struct nvmefc_tgt_ls_req *rsp;
110 	struct lpfc_nvmet_rcv_ctx *ctxp;
111 	uint32_t status, result;
112 
113 	status = bf_get(lpfc_wcqe_c_status, wcqe);
114 	result = wcqe->parameter;
115 	if (!phba->targetport)
116 		goto out;
117 
118 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
119 
120 	if (status)
121 		atomic_inc(&tgtp->xmt_ls_rsp_error);
122 	else
123 		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
124 
125 out:
126 	ctxp = cmdwqe->context2;
127 	rsp = &ctxp->ctx.ls_req;
128 
129 	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
130 			 ctxp->oxid, status, result);
131 
132 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
133 			"6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
134 			ctxp, status, result);
135 
136 	lpfc_nlp_put(cmdwqe->context1);
137 	cmdwqe->context2 = NULL;
138 	cmdwqe->context3 = NULL;
139 	lpfc_sli_release_iocbq(phba, cmdwqe);
140 	rsp->done(rsp);
141 	kfree(ctxp);
142 }
143 
144 /**
145  * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
146  * @phba: HBA the buffer is associated with
147  * @ctxp: context to clean up
148  * @mp: Buffer to free
149  *
150  * Description: Frees the given DMA buffer by reposting it to its
151  * associated RQ so it can be reused.
152  *
153  * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
154  *
155  * Returns: None
156  **/
157 void
158 lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
159 		   struct lpfc_dmabuf *mp)
160 {
161 	if (ctxp) {
162 		if (ctxp->flag)
163 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
164 				"6314 rq_post ctx xri x%x flag x%x\n",
165 				ctxp->oxid, ctxp->flag);
166 
167 		if (ctxp->txrdy) {
168 			pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
169 				      ctxp->txrdy_phys);
170 			ctxp->txrdy = NULL;
171 			ctxp->txrdy_phys = 0;
172 		}
173 		ctxp->state = LPFC_NVMET_STE_FREE;
174 	}
175 	lpfc_rq_buf_free(phba, mp);
176 }
177 
178 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
179 static void
180 lpfc_nvmet_ktime(struct lpfc_hba *phba,
181 		 struct lpfc_nvmet_rcv_ctx *ctxp)
182 {
183 	uint64_t seg1, seg2, seg3, seg4, seg5;
184 	uint64_t seg6, seg7, seg8, seg9, seg10;
185 
186 	if (!phba->ktime_on)
187 		return;
188 
189 	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
190 	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
191 	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
192 	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
193 	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
194 		return;
195 
196 	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
197 		return;
198 	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
199 		return;
200 	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
201 		return;
202 	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
203 		return;
204 	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
205 		return;
206 	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
207 		return;
208 	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
209 		return;
210 	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
211 		return;
212 	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
213 		return;
214 	/*
215 	 * Segment 1 - Time from FCP command received by MSI-X ISR
216 	 * to FCP command is passed to NVME Layer.
217 	 * Segment 2 - Time from FCP command payload handed
218 	 * off to NVME Layer to Driver receives a Command op
219 	 * from NVME Layer.
220 	 * Segment 3 - Time from Driver receives a Command op
221 	 * from NVME Layer to Command is put on WQ.
222 	 * Segment 4 - Time from Driver WQ put is done
223 	 * to MSI-X ISR for Command cmpl.
224 	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
225 	 * Command cmpl is passed to NVME Layer.
226 	 * Segment 6 - Time from Command cmpl is passed to NVME
227 	 * Layer to Driver receives a RSP op from NVME Layer.
228 	 * Segment 7 - Time from Driver receives a RSP op from
229 	 * NVME Layer to WQ put is done on TRSP FCP Status.
230 	 * Segment 8 - Time from Driver WQ put is done on TRSP
231 	 * FCP Status to MSI-X ISR for TRSP cmpl.
232 	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
233 	 * TRSP cmpl is passed to NVME Layer.
234 	 * Segment 10 - Time from FCP command received by
235 	 * MSI-X ISR to command is completed on wire.
236 	 * (Segments 1 thru 8) for READDATA / WRITEDATA
237 	 * (Segments 1 thru 4) for READDATA_RSP
238 	 */
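	/*
	 * The cumulative subtractions below telescope, so each segment
	 * reduces to the delta between two adjacent timestamps, e.g.
	 * seg2 = ts_nvme_data - ts_cmd_nvme.
	 */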
239 	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
240 	seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
241 	seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
242 		seg1 - seg2;
243 	seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
244 		seg1 - seg2 - seg3;
245 	seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
246 		seg1 - seg2 - seg3 - seg4;
247 
248 	/* For auto rsp commands seg6 thru seg9 will be 0 */
249 	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
250 		seg6 = (ctxp->ts_nvme_status -
251 			ctxp->ts_isr_cmd) -
252 			seg1 - seg2 - seg3 - seg4 - seg5;
253 		seg7 = (ctxp->ts_status_wqput -
254 			ctxp->ts_isr_cmd) -
255 			seg1 - seg2 - seg3 -
256 			seg4 - seg5 - seg6;
257 		seg8 = (ctxp->ts_isr_status -
258 			ctxp->ts_isr_cmd) -
259 			seg1 - seg2 - seg3 - seg4 -
260 			seg5 - seg6 - seg7;
261 		seg9 = (ctxp->ts_status_nvme -
262 			ctxp->ts_isr_cmd) -
263 			seg1 - seg2 - seg3 - seg4 -
264 			seg5 - seg6 - seg7 - seg8;
265 		seg10 = (ctxp->ts_isr_status -
266 			ctxp->ts_isr_cmd);
267 	} else {
268 		seg6 =  0;
269 		seg7 =  0;
270 		seg8 =  0;
271 		seg9 =  0;
272 		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
273 	}
274 
275 	phba->ktime_seg1_total += seg1;
276 	if (seg1 < phba->ktime_seg1_min)
277 		phba->ktime_seg1_min = seg1;
278 	else if (seg1 > phba->ktime_seg1_max)
279 		phba->ktime_seg1_max = seg1;
280 
281 	phba->ktime_seg2_total += seg2;
282 	if (seg2 < phba->ktime_seg2_min)
283 		phba->ktime_seg2_min = seg2;
284 	else if (seg2 > phba->ktime_seg2_max)
285 		phba->ktime_seg2_max = seg2;
286 
287 	phba->ktime_seg3_total += seg3;
288 	if (seg3 < phba->ktime_seg3_min)
289 		phba->ktime_seg3_min = seg3;
290 	else if (seg3 > phba->ktime_seg3_max)
291 		phba->ktime_seg3_max = seg3;
292 
293 	phba->ktime_seg4_total += seg4;
294 	if (seg4 < phba->ktime_seg4_min)
295 		phba->ktime_seg4_min = seg4;
296 	else if (seg4 > phba->ktime_seg4_max)
297 		phba->ktime_seg4_max = seg4;
298 
299 	phba->ktime_seg5_total += seg5;
300 	if (seg5 < phba->ktime_seg5_min)
301 		phba->ktime_seg5_min = seg5;
302 	else if (seg5 > phba->ktime_seg5_max)
303 		phba->ktime_seg5_max = seg5;
304 
305 	phba->ktime_data_samples++;
306 	if (!seg6)
307 		goto out;
308 
309 	phba->ktime_seg6_total += seg6;
310 	if (seg6 < phba->ktime_seg6_min)
311 		phba->ktime_seg6_min = seg6;
312 	else if (seg6 > phba->ktime_seg6_max)
313 		phba->ktime_seg6_max = seg6;
314 
315 	phba->ktime_seg7_total += seg7;
316 	if (seg7 < phba->ktime_seg7_min)
317 		phba->ktime_seg7_min = seg7;
318 	else if (seg7 > phba->ktime_seg7_max)
319 		phba->ktime_seg7_max = seg7;
320 
321 	phba->ktime_seg8_total += seg8;
322 	if (seg8 < phba->ktime_seg8_min)
323 		phba->ktime_seg8_min = seg8;
324 	else if (seg8 > phba->ktime_seg8_max)
325 		phba->ktime_seg8_max = seg8;
326 
327 	phba->ktime_seg9_total += seg9;
328 	if (seg9 < phba->ktime_seg9_min)
329 		phba->ktime_seg9_min = seg9;
330 	else if (seg9 > phba->ktime_seg9_max)
331 		phba->ktime_seg9_max = seg9;
332 out:
333 	phba->ktime_seg10_total += seg10;
334 	if (seg10 < phba->ktime_seg10_min)
335 		phba->ktime_seg10_min = seg10;
336 	else if (seg10 > phba->ktime_seg10_max)
337 		phba->ktime_seg10_max = seg10;
338 	phba->ktime_status_samples++;
339 }
340 #endif
341 
342 /**
343  * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
344  * @phba: Pointer to HBA context object.
345  * @cmdwqe: Pointer to driver command WQE object.
346  * @wcqe: Pointer to driver response CQE object.
347  *
348  * The function is called from the SLI ring event handler with no
349  * lock held. It is the completion handler for NVME FCP commands and
350  * frees the memory resources used for the NVME command.
351  **/
352 static void
353 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
354 			  struct lpfc_wcqe_complete *wcqe)
355 {
356 	struct lpfc_nvmet_tgtport *tgtp;
357 	struct nvmefc_tgt_fcp_req *rsp;
358 	struct lpfc_nvmet_rcv_ctx *ctxp;
359 	uint32_t status, result, op, start_clean;
360 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
361 	uint32_t id;
362 #endif
363 
364 	ctxp = cmdwqe->context2;
365 	ctxp->flag &= ~LPFC_NVMET_IO_INP;
366 
367 	rsp = &ctxp->ctx.fcp_req;
368 	op = rsp->op;
369 
370 	status = bf_get(lpfc_wcqe_c_status, wcqe);
371 	result = wcqe->parameter;
372 
373 	if (phba->targetport)
374 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
375 	else
376 		tgtp = NULL;
377 
378 	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
379 			 ctxp->oxid, op, status);
380 
381 	if (status) {
382 		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
383 		rsp->transferred_length = 0;
384 		if (tgtp)
385 			atomic_inc(&tgtp->xmt_fcp_rsp_error);
386 
387 		/* pick up SLI4 exchange busy condition */
388 		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
389 			ctxp->flag |= LPFC_NVMET_XBUSY;
390 
391 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
392 					"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
393 					ctxp->oxid, status, result);
394 		} else {
395 			ctxp->flag &= ~LPFC_NVMET_XBUSY;
396 		}
397 
398 	} else {
399 		rsp->fcp_error = NVME_SC_SUCCESS;
400 		if (op == NVMET_FCOP_RSP)
401 			rsp->transferred_length = rsp->rsplen;
402 		else
403 			rsp->transferred_length = rsp->transfer_length;
404 		if (tgtp)
405 			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
406 	}
407 
408 	if ((op == NVMET_FCOP_READDATA_RSP) ||
409 	    (op == NVMET_FCOP_RSP)) {
410 		/* Sanity check */
411 		ctxp->state = LPFC_NVMET_STE_DONE;
412 		ctxp->entry_cnt++;
413 
414 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
415 		if (phba->ktime_on) {
416 			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
417 				ctxp->ts_isr_data =
418 					cmdwqe->isr_timestamp;
419 				ctxp->ts_data_nvme =
420 					ktime_get_ns();
421 				ctxp->ts_nvme_status =
422 					ctxp->ts_data_nvme;
423 				ctxp->ts_status_wqput =
424 					ctxp->ts_data_nvme;
425 				ctxp->ts_isr_status =
426 					ctxp->ts_data_nvme;
427 				ctxp->ts_status_nvme =
428 					ctxp->ts_data_nvme;
429 			} else {
430 				ctxp->ts_isr_status =
431 					cmdwqe->isr_timestamp;
432 				ctxp->ts_status_nvme =
433 					ktime_get_ns();
434 			}
435 		}
436 		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
437 			id = smp_processor_id();
438 			if (ctxp->cpu != id)
439 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
440 						"6703 CPU Check cmpl: "
441 						"cpu %d expect %d\n",
442 						id, ctxp->cpu);
443 			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
444 				phba->cpucheck_cmpl_io[id]++;
445 		}
446 #endif
447 		rsp->done(rsp);
448 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
449 		if (phba->ktime_on)
450 			lpfc_nvmet_ktime(phba, ctxp);
451 #endif
452 		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
453 	} else {
454 		ctxp->entry_cnt++;
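		/* Prepare the iocbq for reuse by the next op on this
		 * exchange: clear the WQE and every field after it while
		 * preserving the list linkage and iotag/XRI fields that
		 * precede it in struct lpfc_iocbq.
		 */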
455 		start_clean = offsetof(struct lpfc_iocbq, wqe);
456 		memset(((char *)cmdwqe) + start_clean, 0,
457 		       (sizeof(struct lpfc_iocbq) - start_clean));
458 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
459 		if (phba->ktime_on) {
460 			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
461 			ctxp->ts_data_nvme = ktime_get_ns();
462 		}
463 		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
464 			id = smp_processor_id();
465 			if (ctxp->cpu != id)
466 				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
467 						"6704 CPU Check cmdcmpl: "
468 						"cpu %d expect %d\n",
469 						id, ctxp->cpu);
470 			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
471 				phba->cpucheck_ccmpl_io[id]++;
472 		}
473 #endif
474 		rsp->done(rsp);
475 	}
476 }
477 
478 static int
479 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
480 		      struct nvmefc_tgt_ls_req *rsp)
481 {
482 	struct lpfc_nvmet_rcv_ctx *ctxp =
483 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
484 	struct lpfc_hba *phba = ctxp->phba;
485 	struct hbq_dmabuf *nvmebuf =
486 		(struct hbq_dmabuf *)ctxp->rqb_buffer;
487 	struct lpfc_iocbq *nvmewqeq;
488 	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
489 	struct lpfc_dmabuf dmabuf;
490 	struct ulp_bde64 bpl;
491 	int rc;
492 
493 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
494 			"6023 %s: Entrypoint ctx %p %p\n", __func__,
495 			ctxp, tgtport);
496 
497 	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
498 				      rsp->rsplen);
499 	if (nvmewqeq == NULL) {
500 		atomic_inc(&nvmep->xmt_ls_drop);
501 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
502 				"6150 LS Drop IO x%x: Prep\n",
503 				ctxp->oxid);
504 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
505 		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
506 						ctxp->sid, ctxp->oxid);
507 		return -ENOMEM;
508 	}
509 
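	/* Note: dmabuf and bpl are on the stack.  This assumes the issue
	 * path (lpfc_sli4_issue_wqe -> bpl2sgl) consumes the BPL before
	 * returning, so nothing references it after this function exits.
	 */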
510 	/* Save numBdes for bpl2sgl */
511 	nvmewqeq->rsvd2 = 1;
512 	nvmewqeq->hba_wqidx = 0;
513 	nvmewqeq->context3 = &dmabuf;
514 	dmabuf.virt = &bpl;
515 	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
516 	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
517 	bpl.tus.f.bdeSize = rsp->rsplen;
518 	bpl.tus.f.bdeFlags = 0;
519 	bpl.tus.w = le32_to_cpu(bpl.tus.w);
520 
521 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
522 	nvmewqeq->iocb_cmpl = NULL;
523 	nvmewqeq->context2 = ctxp;
524 
525 	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
526 			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
527 
528 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
529 	if (rc == WQE_SUCCESS) {
530 		/*
531 		 * Okay to repost buffer here, but wait till cmpl
532 		 * before freeing ctxp and iocbq.
533 		 */
534 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
535 		ctxp->rqb_buffer = NULL;
536 		atomic_inc(&nvmep->xmt_ls_rsp);
537 		return 0;
538 	}
539 	/* Give back resources */
540 	atomic_inc(&nvmep->xmt_ls_drop);
541 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
542 			"6151 LS Drop IO x%x: Issue %d\n",
543 			ctxp->oxid, rc);
544 
545 	lpfc_nlp_put(nvmewqeq->context1);
546 
547 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
548 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
549 	return -ENXIO;
550 }
551 
552 static int
553 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
554 		      struct nvmefc_tgt_fcp_req *rsp)
555 {
556 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
557 	struct lpfc_nvmet_rcv_ctx *ctxp =
558 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
559 	struct lpfc_hba *phba = ctxp->phba;
560 	struct lpfc_iocbq *nvmewqeq;
561 	int rc;
562 
563 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
564 	if (phba->ktime_on) {
565 		if (rsp->op == NVMET_FCOP_RSP)
566 			ctxp->ts_nvme_status = ktime_get_ns();
567 		else
568 			ctxp->ts_nvme_data = ktime_get_ns();
569 	}
570 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
571 		int id = smp_processor_id();
572 		ctxp->cpu = id;
573 		if (id < LPFC_CHECK_CPU_CNT)
574 			phba->cpucheck_xmt_io[id]++;
575 		if (rsp->hwqid != id) {
576 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
577 					"6705 CPU Check OP: "
578 					"cpu %d expect %d\n",
579 					id, rsp->hwqid);
580 			ctxp->cpu = rsp->hwqid;
581 		}
582 	}
583 #endif
584 
585 	/* Sanity check */
586 	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
587 	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
588 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
589 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
590 				"6102 IO xri x%x aborted\n",
591 				ctxp->oxid);
592 		rc = -ENXIO;
593 		goto aerr;
594 	}
595 
596 	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
597 	if (nvmewqeq == NULL) {
598 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
599 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
600 				"6152 FCP Drop IO x%x: Prep\n",
601 				ctxp->oxid);
602 		rc = -ENXIO;
603 		goto aerr;
604 	}
605 
606 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
607 	nvmewqeq->iocb_cmpl = NULL;
608 	nvmewqeq->context2 = ctxp;
609 	nvmewqeq->iocb_flag |=  LPFC_IO_NVMET;
610 	ctxp->wqeq->hba_wqidx = rsp->hwqid;
611 
612 	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
613 			 ctxp->oxid, rsp->op, rsp->rsplen);
614 
615 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
616 	if (rc == WQE_SUCCESS) {
617 		ctxp->flag |= LPFC_NVMET_IO_INP;
618 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
619 		if (!phba->ktime_on)
620 			return 0;
621 		if (rsp->op == NVMET_FCOP_RSP)
622 			ctxp->ts_status_wqput = ktime_get_ns();
623 		else
624 			ctxp->ts_data_wqput = ktime_get_ns();
625 #endif
626 		return 0;
627 	}
628 
629 	/* Give back resources */
630 	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
631 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
632 			"6153 FCP Drop IO x%x: Issue: %d\n",
633 			ctxp->oxid, rc);
634 
635 	ctxp->wqeq->hba_wqidx = 0;
636 	nvmewqeq->context2 = NULL;
637 	nvmewqeq->context3 = NULL;
638 	rc = -EBUSY;
639 aerr:
640 	return rc;
641 }
642 
643 static void
644 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
645 {
646 	struct lpfc_nvmet_tgtport *tport = targetport->private;
647 
648 	/* release any threads waiting for the unreg to complete */
649 	complete(&tport->tport_unreg_done);
650 }
651 
652 static void
653 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
654 			 struct nvmefc_tgt_fcp_req *req)
655 {
656 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
657 	struct lpfc_nvmet_rcv_ctx *ctxp =
658 		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
659 	struct lpfc_hba *phba = ctxp->phba;
660 	unsigned long flags;
661 
662 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
663 			"6103 Abort op: oxri x%x flg x%x cnt %d\n",
664 			ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
665 
666 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
667 			 "xri x%x flg x%x cnt x%x\n",
668 			 ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
669 
670 	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
671 	ctxp->entry_cnt++;
672 	spin_lock_irqsave(&ctxp->ctxlock, flags);
673 
674 	/* Since iaab/iaar are NOT set, we need to check
675 	 * if the firmware is in the process of aborting the IO
676 	 */
677 	if (ctxp->flag & LPFC_NVMET_XBUSY) {
678 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
679 		return;
680 	}
681 	ctxp->flag |= LPFC_NVMET_ABORT_OP;
682 	if (ctxp->flag & LPFC_NVMET_IO_INP)
683 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
684 					       ctxp->oxid);
685 	else
686 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
687 						 ctxp->oxid);
688 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
689 }
690 
691 static void
692 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
693 			   struct nvmefc_tgt_fcp_req *rsp)
694 {
695 	struct lpfc_nvmet_rcv_ctx *ctxp =
696 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
697 	struct lpfc_hba *phba = ctxp->phba;
698 	unsigned long flags;
699 	bool aborting = false;
700 
701 	spin_lock_irqsave(&ctxp->ctxlock, flags);
702 	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
703 	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
704 		aborting = true;
705 		/* let the abort path do the real release */
706 		lpfc_nvmet_defer_release(phba, ctxp);
707 	}
708 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
709 
710 	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
711 			 ctxp->state, 0);
712 
713 	if (aborting)
714 		return;
715 
716 	lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
717 }
718 
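/* NVMe target transport template: the entry points the nvmet-fc
 * transport invokes to send LS responses, perform FCP data/response
 * operations, abort commands, and release exchange contexts.  The
 * queue, SGL, and feature fields are finalized when the targetport
 * is registered in lpfc_nvmet_create_targetport().
 */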
719 static struct nvmet_fc_target_template lpfc_tgttemplate = {
720 	.targetport_delete = lpfc_nvmet_targetport_delete,
721 	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
722 	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
723 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
724 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
725 
726 	.max_hw_queues  = 1,
727 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
728 	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
729 	.dma_boundary = 0xFFFFFFFF,
730 
731 	/* optional features */
732 	.target_features = 0,
733 	/* sizes of additional private data for data structures */
734 	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
735 };
736 
737 int
738 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
739 {
740 	struct lpfc_vport  *vport = phba->pport;
741 	struct lpfc_nvmet_tgtport *tgtp;
742 	struct nvmet_fc_port_info pinfo;
743 	int error = 0;
744 
745 	if (phba->targetport)
746 		return 0;
747 
748 	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
749 	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
750 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
751 	pinfo.port_id = vport->fc_myDID;
752 
753 	/* Limit to LPFC_MAX_NVME_SEG_CNT.
754 	 * For now the + 1 is needed to get around NVME transport logic.
755 	 */
756 	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
757 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
758 				"6400 Reducing sg segment cnt to %d\n",
759 				LPFC_MAX_NVME_SEG_CNT);
760 		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
761 	} else {
762 		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
763 	}
764 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
765 	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
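	/* READDATA_RSP advertises support for the combined read-data plus
	 * response operation; the *_IN_ISR flags tell the transport that
	 * command receive and op completions may be called from interrupt
	 * context, so it must defer its own processing accordingly.
	 */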
766 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
767 					   NVMET_FCTGTFEAT_CMD_IN_ISR |
768 					   NVMET_FCTGTFEAT_OPDONE_IN_ISR;
769 
770 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
771 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
772 					     &phba->pcidev->dev,
773 					     &phba->targetport);
774 #else
775 	error = -ENOMEM;
776 #endif
777 	if (error) {
778 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
779 				"6025 Cannot register NVME targetport "
780 				"x%x\n", error);
781 		phba->targetport = NULL;
782 	} else {
783 		tgtp = (struct lpfc_nvmet_tgtport *)
784 			phba->targetport->private;
785 		tgtp->phba = phba;
786 
787 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
788 				"6026 Registered NVME "
789 				"targetport: %p, private %p "
790 				"portnm %llx nodenm %llx\n",
791 				phba->targetport, tgtp,
792 				pinfo.port_name, pinfo.node_name);
793 
794 		atomic_set(&tgtp->rcv_ls_req_in, 0);
795 		atomic_set(&tgtp->rcv_ls_req_out, 0);
796 		atomic_set(&tgtp->rcv_ls_req_drop, 0);
797 		atomic_set(&tgtp->xmt_ls_abort, 0);
798 		atomic_set(&tgtp->xmt_ls_rsp, 0);
799 		atomic_set(&tgtp->xmt_ls_drop, 0);
800 		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
801 		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
802 		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
803 		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
804 		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
805 		atomic_set(&tgtp->xmt_fcp_abort, 0);
806 		atomic_set(&tgtp->xmt_fcp_drop, 0);
807 		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
808 		atomic_set(&tgtp->xmt_fcp_read, 0);
809 		atomic_set(&tgtp->xmt_fcp_write, 0);
810 		atomic_set(&tgtp->xmt_fcp_rsp, 0);
811 		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
812 		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
813 		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
814 		atomic_set(&tgtp->xmt_abort_rsp, 0);
815 		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
816 		atomic_set(&tgtp->xmt_abort_cmpl, 0);
817 	}
818 	return error;
819 }
820 
821 int
822 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
823 {
824 	struct lpfc_vport  *vport = phba->pport;
825 
826 	if (!phba->targetport)
827 		return 0;
828 
829 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
830 			 "6007 Update NVMET port %p did x%x\n",
831 			 phba->targetport, vport->fc_myDID);
832 
833 	phba->targetport->port_id = vport->fc_myDID;
834 	return 0;
835 }
836 
837 /**
838  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
839  * @phba: pointer to lpfc hba data structure.
840  * @axri: pointer to the nvmet xri abort wcqe structure.
841  *
842  * This routine is invoked by the worker thread to process a SLI4 fast-path
843  * NVMET aborted xri.
844  **/
845 void
846 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
847 			    struct sli4_wcqe_xri_aborted *axri)
848 {
849 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
850 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
851 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
852 	struct lpfc_nodelist *ndlp;
853 	unsigned long iflag = 0;
854 	int rrq_empty = 0;
855 	bool released = false;
856 
857 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
858 			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
859 
860 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
861 		return;
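	/* Search the deferred-release list for the aborted XRI; both the
	 * hbalock and the abts buffer list lock are dropped before calling
	 * back up the stack for a matching context.
	 */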
862 	spin_lock_irqsave(&phba->hbalock, iflag);
863 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
864 	list_for_each_entry_safe(ctxp, next_ctxp,
865 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
866 				 list) {
867 		if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
868 			continue;
869 
870 		/* Check if we already received a free context call
871 		 * and we have completed processing an abort situation.
872 		 */
873 		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
874 		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
875 			list_del(&ctxp->list);
876 			released = true;
877 		}
878 		ctxp->flag &= ~LPFC_NVMET_XBUSY;
879 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
880 
881 		rrq_empty = list_empty(&phba->active_rrq_list);
882 		spin_unlock_irqrestore(&phba->hbalock, iflag);
883 		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
884 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
885 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
886 		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
887 			lpfc_set_rrq_active(phba, ndlp,
888 				ctxp->rqb_buffer->sglq->sli4_lxritag,
889 				rxid, 1);
890 			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
891 		}
892 
893 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
894 				"6318 XB aborted %x flg x%x (%x)\n",
895 				ctxp->oxid, ctxp->flag, released);
896 		if (released)
897 			lpfc_nvmet_rq_post(phba, ctxp,
898 					   &ctxp->rqb_buffer->hbuf);
899 		if (rrq_empty)
900 			lpfc_worker_wake_up(phba);
901 		return;
902 	}
903 	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
904 	spin_unlock_irqrestore(&phba->hbalock, iflag);
905 }
906 
907 int
908 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
909 			   struct fc_frame_header *fc_hdr)
910 
911 {
912 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
913 	struct lpfc_hba *phba = vport->phba;
914 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
915 	struct nvmefc_tgt_fcp_req *rsp;
916 	uint16_t xri;
917 	unsigned long iflag = 0;
918 
919 	xri = be16_to_cpu(fc_hdr->fh_ox_id);
920 
921 	spin_lock_irqsave(&phba->hbalock, iflag);
922 	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
923 	list_for_each_entry_safe(ctxp, next_ctxp,
924 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
925 				 list) {
926 		if (ctxp->rqb_buffer->sglq->sli4_xritag != xri)
927 			continue;
928 
929 		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
930 		spin_unlock_irqrestore(&phba->hbalock, iflag);
931 
932 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
933 		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
934 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
935 
936 		lpfc_nvmeio_data(phba,
937 			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
938 			xri, smp_processor_id(), 0);
939 
940 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
941 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
942 
943 		rsp = &ctxp->ctx.fcp_req;
944 		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
945 
946 		/* Respond with BA_ACC accordingly */
947 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
948 		return 0;
949 	}
950 	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
951 	spin_unlock_irqrestore(&phba->hbalock, iflag);
952 
953 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
954 			 xri, smp_processor_id(), 1);
955 
956 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
957 			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);
958 
959 	/* Respond with BA_RJT accordingly */
960 	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
961 #endif
962 	return 0;
963 }
964 
965 void
966 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
967 {
968 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
969 	struct lpfc_nvmet_tgtport *tgtp;
970 
971 	if (phba->nvmet_support == 0)
972 		return;
973 	if (phba->targetport) {
974 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
975 		init_completion(&tgtp->tport_unreg_done);
976 		nvmet_fc_unregister_targetport(phba->targetport);
977 		/* timeout is in jiffies; wait up to five seconds */
		wait_for_completion_timeout(&tgtp->tport_unreg_done,
					    msecs_to_jiffies(5000));
978 	}
979 	phba->targetport = NULL;
980 #endif
981 }
982 
983 /**
984  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
985  * @phba: pointer to lpfc hba data structure.
986  * @pring: pointer to a SLI ring.
987  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
988  *
989  * This routine processes an unsolicited NVME LS request received in an
990  * HBQ buffer. It allocates a receive context for the exchange and hands
991  * the LS payload off to the NVMe target transport by calling
992  * nvmet_fc_rcv_ls_req(). If the request cannot be delivered, the
993  * receive buffer is freed and an abort is issued for the exchange
994  * via lpfc_nvmet_unsol_ls_issue_abort().
995  **/
996 static void
997 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
998 			   struct hbq_dmabuf *nvmebuf)
999 {
1000 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1001 	struct lpfc_nvmet_tgtport *tgtp;
1002 	struct fc_frame_header *fc_hdr;
1003 	struct lpfc_nvmet_rcv_ctx *ctxp;
1004 	uint32_t *payload;
1005 	uint32_t size, oxid, sid, rc;
1006 
1007 	if (!nvmebuf || !phba->targetport) {
1008 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1009 				"6154 LS Drop IO\n");
1010 		oxid = 0;
1011 		size = 0;
1012 		sid = 0;
1013 		goto dropit;
1014 	}
1015 
1016 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1017 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1018 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1019 	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1020 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1021 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1022 
1023 	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1024 	if (ctxp == NULL) {
1025 		atomic_inc(&tgtp->rcv_ls_req_drop);
1026 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1027 				"6155 LS Drop IO x%x: Alloc\n",
1028 				oxid);
1029 dropit:
1030 		lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1031 				 "xri x%x sz %d from %06x\n",
1032 				 oxid, size, sid);
1033 		if (nvmebuf)
1034 			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1035 		return;
1036 	}
1037 	ctxp->phba = phba;
1038 	ctxp->size = size;
1039 	ctxp->oxid = oxid;
1040 	ctxp->sid = sid;
1041 	ctxp->wqeq = NULL;
1042 	ctxp->state = LPFC_NVMET_STE_RCV;
1043 	ctxp->rqb_buffer = (void *)nvmebuf;
1044 
1045 	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1046 			 oxid, size, sid);
1047 	/*
1048 	 * The calling sequence should be:
1049 	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1050 	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
1051 	 */
1052 	atomic_inc(&tgtp->rcv_ls_req_in);
1053 	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
1054 				 payload, size);
1055 
1056 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1057 			"6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
1058 			"%08x %08x %08x\n", __func__, ctxp, size, rc,
1059 			*payload, *(payload+1), *(payload+2),
1060 			*(payload+3), *(payload+4), *(payload+5));
1061 
1062 	if (rc == 0) {
1063 		atomic_inc(&tgtp->rcv_ls_req_out);
1064 		return;
1065 	}
1066 
1067 	lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
1068 			 oxid, size, sid);
1069 
1070 	atomic_inc(&tgtp->rcv_ls_req_drop);
1071 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1072 			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
1073 			ctxp->oxid, rc);
1074 
1075 	/* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1076 	if (nvmebuf)
1077 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1078 
1079 	atomic_inc(&tgtp->xmt_ls_abort);
1080 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
1081 #endif
1082 }
1083 
1084 /**
1085  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
1086  * @phba: pointer to lpfc hba data structure.
1087  * @pring: pointer to a SLI ring.
1088  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1089  *
1090  * This routine processes an unsolicited NVME FCP command received in an
1091  * RQ buffer. It initializes the receive context attached to the buffer
1092  * and hands the command payload off to the NVMe target transport by
1093  * calling nvmet_fc_rcv_fcp_req(). If the command cannot be delivered,
1094  * the exchange is aborted when an OX_ID is known; otherwise the buffer
1095  * is simply reposted to its RQ.
1096  **/
1097 static void
1098 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
1099 			    struct lpfc_sli_ring *pring,
1100 			    struct rqb_dmabuf *nvmebuf,
1101 			    uint64_t isr_timestamp)
1102 {
1103 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1104 	struct lpfc_nvmet_rcv_ctx *ctxp;
1105 	struct lpfc_nvmet_tgtport *tgtp;
1106 	struct fc_frame_header *fc_hdr;
1107 	uint32_t *payload;
1108 	uint32_t size, oxid, sid, rc;
1109 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1110 	uint32_t id;
1111 #endif
1112 
1113 	if (!nvmebuf || !phba->targetport) {
1114 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1115 				"6157 FCP Drop IO\n");
1116 		oxid = 0;
1117 		size = 0;
1118 		sid = 0;
1119 		goto dropit;
1120 	}
1121 
1122 
1123 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1124 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1125 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1126 	size = nvmebuf->bytes_recv;
1127 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1128 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1129 
1130 	ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
1131 	if (ctxp == NULL) {
1132 		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1133 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1134 				"6158 FCP Drop IO x%x: Alloc\n",
1135 				oxid);
1136 		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
1137 		/* Cannot send ABTS without context */
1138 		return;
1139 	}
1140 	memset(ctxp, 0, sizeof(ctxp->ctx));
1141 	ctxp->wqeq = NULL;
1142 	ctxp->txrdy = NULL;
1143 	ctxp->offset = 0;
1144 	ctxp->phba = phba;
1145 	ctxp->size = size;
1146 	ctxp->oxid = oxid;
1147 	ctxp->sid = sid;
1148 	ctxp->state = LPFC_NVMET_STE_RCV;
1149 	ctxp->rqb_buffer = nvmebuf;
1150 	ctxp->entry_cnt = 1;
1151 	ctxp->flag = 0;
1152 	spin_lock_init(&ctxp->ctxlock);
1153 
1154 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1155 	if (phba->ktime_on) {
1156 		ctxp->ts_isr_cmd = isr_timestamp;
1157 		ctxp->ts_cmd_nvme = ktime_get_ns();
1158 		ctxp->ts_nvme_data = 0;
1159 		ctxp->ts_data_wqput = 0;
1160 		ctxp->ts_isr_data = 0;
1161 		ctxp->ts_data_nvme = 0;
1162 		ctxp->ts_nvme_status = 0;
1163 		ctxp->ts_status_wqput = 0;
1164 		ctxp->ts_isr_status = 0;
1165 		ctxp->ts_status_nvme = 0;
1166 	}
1167 
1168 	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
1169 		id = smp_processor_id();
1170 		if (id < LPFC_CHECK_CPU_CNT)
1171 			phba->cpucheck_rcv_io[id]++;
1172 	}
1173 #endif
1174 
1175 	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
1176 			 oxid, size, smp_processor_id());
1177 
1178 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
1179 	/*
1180 	 * The calling sequence should be:
1181 	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
1182 	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
1183 	 */
1184 	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
1185 				  payload, size);
1186 
1187 	/* Process FCP command */
1188 	if (rc == 0) {
1189 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
1190 		return;
1191 	}
1192 
1193 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
1194 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1195 			"6159 FCP Drop IO x%x: err x%x\n",
1196 			ctxp->oxid, rc);
1197 dropit:
1198 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
1199 			 oxid, size, sid);
1200 	if (oxid) {
1201 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
1202 		return;
1203 	}
1204 
1205 	if (nvmebuf) {
1206 		nvmebuf->iocbq->hba_wqidx = 0;
1207 		/* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
1208 		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
1209 	}
1210 #endif
1211 }
1212 
1213 /**
1214  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
1215  * @phba: pointer to lpfc hba data structure.
1216  * @pring: pointer to a SLI ring.
1217  * @nvmebuf: pointer to received nvme data structure.
1218  *
1219  * This routine is used to process an unsolicited event received from an SLI
1220  * (Service Level Interface) ring. The actual processing of the data buffer
1221  * associated with the unsolicited event is done by invoking the routine
1222  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from
1223  * the SLI RQ on which the unsolicited event was received.
1224  **/
1225 void
1226 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1227 			  struct lpfc_iocbq *piocb)
1228 {
1229 	struct lpfc_dmabuf *d_buf;
1230 	struct hbq_dmabuf *nvmebuf;
1231 
1232 	d_buf = piocb->context2;
1233 	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
1234 
1235 	if (phba->nvmet_support == 0) {
1236 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1237 		return;
1238 	}
1239 	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
1240 }
1241 
1242 /**
1243  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
1244  * @phba: pointer to lpfc hba data structure.
1245  * @pring: pointer to a SLI ring.
1246  * @nvmebuf: pointer to received nvme data structure.
1247  *
1248  * This routine is used to process an unsolicited event received from an SLI
1249  * (Service Level Interface) ring. The actual processing of the data buffer
1250  * associated with the unsolicited event is done by invoking the routine
1251  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from
1252  * the SLI RQ on which the unsolicited event was received.
1253  **/
1254 void
1255 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
1256 			   struct lpfc_sli_ring *pring,
1257 			   struct rqb_dmabuf *nvmebuf,
1258 			   uint64_t isr_timestamp)
1259 {
1260 	if (phba->nvmet_support == 0) {
1261 		lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
1262 		return;
1263 	}
1264 	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
1265 				    isr_timestamp);
1266 }
1267 
1268 /**
1269  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
1270  * @phba: pointer to a host N_Port data structure.
1271  * @ctxp: Context info for NVME LS Request
1272  * @rspbuf: DMA buffer of NVME command.
1273  * @rspsize: size of the NVME command.
1274  *
1275  * This routine allocates an lpfc WQE data structure from the driver's
1276  * lpfc-WQE free-list and prepares it as an XMIT_SEQUENCE64 WQE that
1277  * carries the NVME LS response payload described by @rspbuf and
1278  * @rspsize back to the remote NPORT that originated the LS request.
1279  * It fills in the Buffer Descriptor Entry (BDE) for the response
1280  * payload and the WQE words that identify the exchange: the RPI of
1281  * the remote node, the XRI of the WQE, and the OX_ID that was
1282  * received with the original LS request. The completion handler
1283  * (lpfc_nvmet_xmt_ls_rsp_cmp) is set by the caller. The reference
1284  * count on the ndlp is incremented by 1 and the reference to the
1285  * ndlp is put into context1 of the WQE data structure for this WQE
1286  * to hold the ndlp reference for the command's callback function to
1287  * access later.
1288  *
1289  * Return code
1290  *   Pointer to the newly allocated/prepared nvme wqe data structure
1291  *   NULL - when nvme wqe data structure allocation/preparation failed
1292  **/
1293 static struct lpfc_iocbq *
1294 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
1295 		       struct lpfc_nvmet_rcv_ctx *ctxp,
1296 		       dma_addr_t rspbuf, uint16_t rspsize)
1297 {
1298 	struct lpfc_nodelist *ndlp;
1299 	struct lpfc_iocbq *nvmewqe;
1300 	union lpfc_wqe *wqe;
1301 
1302 	if (!lpfc_is_link_up(phba)) {
1303 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1304 				"6104 lpfc_nvmet_prep_ls_wqe: link err: "
1305 				"NPORT x%x oxid:x%x\n",
1306 				ctxp->sid, ctxp->oxid);
1307 		return NULL;
1308 	}
1309 
1310 	/* Allocate buffer for  command wqe */
1311 	nvmewqe = lpfc_sli_get_iocbq(phba);
1312 	if (nvmewqe == NULL) {
1313 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1314 				"6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
1315 				"NPORT x%x oxid:x%x\n",
1316 				ctxp->sid, ctxp->oxid);
1317 		return NULL;
1318 	}
1319 
1320 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1321 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1322 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1323 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1324 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1325 				"6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
1326 				"NPORT x%x oxid:x%x\n",
1327 				ctxp->sid, ctxp->oxid);
1328 		goto nvme_wqe_free_wqeq_exit;
1329 	}
1330 	ctxp->wqeq = nvmewqe;
1331 
1332 	/* prevent preparing wqe with NULL ndlp reference */
1333 	nvmewqe->context1 = lpfc_nlp_get(ndlp);
1334 	if (nvmewqe->context1 == NULL)
1335 		goto nvme_wqe_free_wqeq_exit;
1336 	nvmewqe->context2 = ctxp;
1337 
1338 	wqe = &nvmewqe->wqe;
1339 	memset(wqe, 0, sizeof(union lpfc_wqe));
1340 
1341 	/* Words 0 - 2 */
1342 	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1343 	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
1344 	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
1345 	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
1346 
1347 	/* Word 3 */
1348 
1349 	/* Word 4 */
1350 
1351 	/* Word 5 */
1352 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
1353 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
1354 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
1355 	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
1356 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
1357 
1358 	/* Word 6 */
1359 	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
1360 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1361 	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
1362 
1363 	/* Word 7 */
1364 	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
1365 	       CMD_XMIT_SEQUENCE64_WQE);
1366 	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
1367 	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
1368 	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
1369 
1370 	/* Word 8 */
1371 	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
1372 
1373 	/* Word 9 */
1374 	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
1375 	/* Needs to be set by caller */
1376 	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
1377 
1378 	/* Word 10 */
1379 	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
1380 	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
1381 	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
1382 	       LPFC_WQE_LENLOC_WORD12);
1383 	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
1384 
1385 	/* Word 11 */
1386 	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
1387 	       LPFC_WQE_CQ_ID_DEFAULT);
1388 	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
1389 	       OTHER_COMMAND);
1390 
1391 	/* Word 12 */
1392 	wqe->xmit_sequence.xmit_len = rspsize;
1393 
1394 	nvmewqe->retry = 1;
1395 	nvmewqe->vport = phba->pport;
1396 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1397 	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
1398 
1399 	/* Xmit NVME response to remote NPORT <did> */
1400 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1401 			"6039 Xmit NVME LS response to remote "
1402 			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
1403 			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
1404 			rspsize);
1405 	return nvmewqe;
1406 
1407 nvme_wqe_free_wqeq_exit:
1408 	nvmewqe->context2 = NULL;
1409 	nvmewqe->context3 = NULL;
1410 	lpfc_sli_release_iocbq(phba, nvmewqe);
1411 	return NULL;
1412 }
1413 
1414 
1415 static struct lpfc_iocbq *
1416 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1417 			struct lpfc_nvmet_rcv_ctx *ctxp)
1418 {
1419 	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
1420 	struct lpfc_nvmet_tgtport *tgtp;
1421 	struct sli4_sge *sgl;
1422 	struct lpfc_nodelist *ndlp;
1423 	struct lpfc_iocbq *nvmewqe;
1424 	struct scatterlist *sgel;
1425 	union lpfc_wqe128 *wqe;
1426 	uint32_t *txrdy;
1427 	dma_addr_t physaddr;
1428 	int i, cnt;
1429 	int xc = 1;
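	/* Assume the exchange is already created (XC=1); cleared below
	 * when a new XRI must be created for this exchange.
	 */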
1430 
1431 	if (!lpfc_is_link_up(phba)) {
1432 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1433 				"6107 lpfc_nvmet_prep_fcp_wqe: link err:"
1434 				"NPORT x%x oxid:x%x\n", ctxp->sid,
1435 				ctxp->oxid);
1436 		return NULL;
1437 	}
1438 
1439 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1440 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1441 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1442 	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1443 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1444 				"6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
1445 				"NPORT x%x oxid:x%x\n",
1446 				ctxp->sid, ctxp->oxid);
1447 		return NULL;
1448 	}
1449 
1450 	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
1451 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1452 				"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
1453 				"NPORT x%x oxid:x%x cnt %d\n",
1454 				ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
1455 		return NULL;
1456 	}
1457 
1458 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1459 	nvmewqe = ctxp->wqeq;
1460 	if (nvmewqe == NULL) {
1461 		/* Allocate buffer for  command wqe */
1462 		nvmewqe = ctxp->rqb_buffer->iocbq;
1463 		if (nvmewqe == NULL) {
1464 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1465 					"6110 lpfc_nvmet_prep_fcp_wqe: No "
1466 					"WQE: NPORT x%x oxid:x%x\n",
1467 					ctxp->sid, ctxp->oxid);
1468 			return NULL;
1469 		}
1470 		ctxp->wqeq = nvmewqe;
1471 		xc = 0; /* create new XRI */
1472 		nvmewqe->sli4_lxritag = NO_XRI;
1473 		nvmewqe->sli4_xritag = NO_XRI;
1474 	}
1475 
1476 	/* Sanity check */
1477 	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
1478 	    (ctxp->entry_cnt == 1)) ||
1479 	    ((ctxp->state == LPFC_NVMET_STE_DATA) &&
1480 	    (ctxp->entry_cnt > 1))) {
1481 		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1482 	} else {
1483 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1484 				"6111 Wrong state %s: %d  cnt %d\n",
1485 				__func__, ctxp->state, ctxp->entry_cnt);
1486 		return NULL;
1487 	}
1488 
1489 	sgl  = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
1490 	switch (rsp->op) {
1491 	case NVMET_FCOP_READDATA:
1492 	case NVMET_FCOP_READDATA_RSP:
1493 		/* Words 0 - 2 : The first sg segment */
1494 		sgel = &rsp->sg[0];
1495 		physaddr = sg_dma_address(sgel);
1496 		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1497 		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
1498 		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
1499 		wqe->fcp_tsend.bde.addrHigh =
1500 			cpu_to_le32(putPaddrHigh(physaddr));
1501 
1502 		/* Word 3 */
1503 		wqe->fcp_tsend.payload_offset_len = 0;
1504 
1505 		/* Word 4 */
1506 		wqe->fcp_tsend.relative_offset = ctxp->offset;
1507 
1508 		/* Word 5 */
1509 
1510 		/* Word 6 */
1511 		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
1512 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1513 		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
1514 		       nvmewqe->sli4_xritag);
1515 
1516 		/* Word 7 */
1517 		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
1518 
1519 		/* Word 8 */
1520 		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
1521 
1522 		/* Word 9 */
1523 		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
1524 		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
1525 
1526 		/* Word 10 */
1527 		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1528 		bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
1529 		bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
1530 		bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
1531 		       LPFC_WQE_LENLOC_WORD12);
1532 		bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
1533 		bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
1534 		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1535 		if (phba->cfg_nvme_oas)
1536 			bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
1537 
1538 		/* Word 11 */
1539 		bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
1540 		       LPFC_WQE_CQ_ID_DEFAULT);
1541 		bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
1542 		       FCP_COMMAND_TSEND);
1543 
1544 		/* Word 12 */
1545 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
1546 
1547 		/* Setup 2 SKIP SGEs */
1548 		sgl->addr_hi = 0;
1549 		sgl->addr_lo = 0;
1550 		sgl->word2 = 0;
1551 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1552 		sgl->word2 = cpu_to_le32(sgl->word2);
1553 		sgl->sge_len = 0;
1554 		sgl++;
1555 		sgl->addr_hi = 0;
1556 		sgl->addr_lo = 0;
1557 		sgl->word2 = 0;
1558 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1559 		sgl->word2 = cpu_to_le32(sgl->word2);
1560 		sgl->sge_len = 0;
1561 		sgl++;
1562 		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
1563 			atomic_inc(&tgtp->xmt_fcp_read_rsp);
1564 			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
1565 			if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
1566 			    (rsp->rsplen == 12)) {
1567 				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
1568 				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1569 				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1570 				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1571 			} else {
1572 				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1573 				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
1574 				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
1575 				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
1576 				       ((rsp->rsplen >> 2) - 1));
1577 				memcpy(&wqe->words[16], rsp->rspaddr,
1578 				       rsp->rsplen);
1579 			}
1580 		} else {
1581 			atomic_inc(&tgtp->xmt_fcp_read);
1582 
1583 			bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1584 			bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1585 			bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1586 			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
1587 			bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1588 		}
1589 		ctxp->state = LPFC_NVMET_STE_DATA;
1590 		break;
1591 
1592 	case NVMET_FCOP_WRITEDATA:
1593 		/* Words 0 - 2 : The first sg segment */
1594 		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
1595 				       GFP_KERNEL, &physaddr);
1596 		if (!txrdy) {
1597 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1598 					"6041 Bad txrdy buffer: oxid x%x\n",
1599 					ctxp->oxid);
1600 			return NULL;
1601 		}
1602 		ctxp->txrdy = txrdy;
1603 		ctxp->txrdy_phys = physaddr;
1604 		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1605 		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
1606 		wqe->fcp_treceive.bde.addrLow =
1607 			cpu_to_le32(putPaddrLow(physaddr));
1608 		wqe->fcp_treceive.bde.addrHigh =
1609 			cpu_to_le32(putPaddrHigh(physaddr));
1610 
1611 		/* Word 3 */
1612 		wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
1613 
1614 		/* Word 4 */
1615 		wqe->fcp_treceive.relative_offset = ctxp->offset;
1616 
1617 		/* Word 5 */
1618 
1619 		/* Word 6 */
1620 		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
1621 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1622 		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
1623 		       nvmewqe->sli4_xritag);
1624 
1625 		/* Word 7 */
1626 		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
1627 		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
1628 		       CMD_FCP_TRECEIVE64_WQE);
1629 
1630 		/* Word 8 */
1631 		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
1632 
1633 		/* Word 9 */
1634 		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
1635 		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
1636 
1637 		/* Word 10 */
1638 		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
1639 		bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
1640 		bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
1641 		bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
1642 		       LPFC_WQE_LENLOC_WORD12);
1643 		bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
1644 		bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
1645 		bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
1646 		bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
1648 		if (phba->cfg_nvme_oas)
1649 			bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
1650 
1651 		/* Word 11 */
1652 		bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
1653 		       LPFC_WQE_CQ_ID_DEFAULT);
1654 		bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
1655 		       FCP_COMMAND_TRECEIVE);
		bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);

		/* Word 12 */
		wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
1660 
		/* Set up 1 TXRDY and 1 SKIP SGE */
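		/*
		 * Transfer-ready payload layout: word 0 is the relative
		 * offset, word 1 the burst length (big-endian), word 2
		 * reserved.
		 */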
1662 		txrdy[0] = 0;
1663 		txrdy[1] = cpu_to_be32(rsp->transfer_length);
1664 		txrdy[2] = 0;
1665 
1666 		sgl->addr_hi = putPaddrHigh(physaddr);
1667 		sgl->addr_lo = putPaddrLow(physaddr);
1668 		sgl->word2 = 0;
1669 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1670 		sgl->word2 = cpu_to_le32(sgl->word2);
1671 		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
1672 		sgl++;
1673 		sgl->addr_hi = 0;
1674 		sgl->addr_lo = 0;
1675 		sgl->word2 = 0;
1676 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1677 		sgl->word2 = cpu_to_le32(sgl->word2);
1678 		sgl->sge_len = 0;
1679 		sgl++;
1680 		ctxp->state = LPFC_NVMET_STE_DATA;
1681 		atomic_inc(&tgtp->xmt_fcp_write);
1682 		break;
1683 
1684 	case NVMET_FCOP_RSP:
1685 		/* Words 0 - 2 */
1686 		physaddr = rsp->rspdma;
1687 		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1688 		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
1689 		wqe->fcp_trsp.bde.addrLow =
1690 			cpu_to_le32(putPaddrLow(physaddr));
1691 		wqe->fcp_trsp.bde.addrHigh =
1692 			cpu_to_le32(putPaddrHigh(physaddr));
1693 
1694 		/* Word 3 */
1695 		wqe->fcp_trsp.response_len = rsp->rsplen;
1696 
1697 		/* Word 4 */
		wqe->fcp_trsp.rsvd_4_5[0] = 0;

1701 		/* Word 5 */
1702 
1703 		/* Word 6 */
1704 		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
1705 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1706 		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
1707 		       nvmewqe->sli4_xritag);
1708 
1709 		/* Word 7 */
1710 		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
1711 		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
1712 
1713 		/* Word 8 */
1714 		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
1715 
1716 		/* Word 9 */
1717 		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
1718 		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
1719 
1720 		/* Word 10 */
1721 		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
1722 		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
1723 		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
1724 		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
1725 		       LPFC_WQE_LENLOC_WORD3);
1726 		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
1728 		if (phba->cfg_nvme_oas)
1729 			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
1730 
1731 		/* Word 11 */
1732 		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
1733 		       LPFC_WQE_CQ_ID_DEFAULT);
1734 		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
1735 		       FCP_COMMAND_TRSP);
		bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
1737 		ctxp->state = LPFC_NVMET_STE_RSP;
1738 
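		/*
		 * A LPFC_NVMET_SUCCESS_LEN response is all zeros on the
		 * wire, so nothing is embedded in the WQE; longer (error)
		 * responses are carried as immediate data in words 16
		 * onward.
		 */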
1739 		if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
			/* Good response - all zeros on the wire */
1741 			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
1742 			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
1743 			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
1744 		} else {
1745 			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
1746 			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
1747 			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
1748 			       ((rsp->rsplen >> 2) - 1));
1749 			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
1750 		}
1751 
		/* Use rspbuf, NOT the sg list; zeroing sg_cnt skips the
		 * data-SGE fill loop below.
		 */
1753 		rsp->sg_cnt = 0;
1754 		sgl->word2 = 0;
1755 		atomic_inc(&tgtp->xmt_fcp_rsp);
1756 		break;
1757 
1758 	default:
1759 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1760 				"6064 Unknown Rsp Op %d\n",
1761 				rsp->op);
1762 		return NULL;
1763 	}
1764 
1765 	nvmewqe->retry = 1;
1766 	nvmewqe->vport = phba->pport;
1767 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1768 	nvmewqe->context1 = ndlp;
1769 
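	/*
	 * Common exit path: map each scatter/gather element into a data
	 * SGE, marking the last entry and advancing ctxp->offset so a
	 * subsequent WQE for the same exchange resumes at the correct
	 * relative offset.
	 */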
1770 	for (i = 0; i < rsp->sg_cnt; i++) {
1771 		sgel = &rsp->sg[i];
1772 		physaddr = sg_dma_address(sgel);
1773 		cnt = sg_dma_len(sgel);
1774 		sgl->addr_hi = putPaddrHigh(physaddr);
1775 		sgl->addr_lo = putPaddrLow(physaddr);
1776 		sgl->word2 = 0;
1777 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1778 		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
1779 		if ((i+1) == rsp->sg_cnt)
1780 			bf_set(lpfc_sli4_sge_last, sgl, 1);
1781 		sgl->word2 = cpu_to_le32(sgl->word2);
1782 		sgl->sge_len = cpu_to_le32(cnt);
1783 		sgl++;
1784 		ctxp->offset += cnt;
1785 	}
1786 	return nvmewqe;
1787 }
1788 
1789 /**
1790  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
1791  * @phba: Pointer to HBA context object.
1792  * @cmdwqe: Pointer to driver command WQE object.
1793  * @wcqe: Pointer to driver response CQE object.
1794  *
 * This function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for an FCP
 * command, and it frees the memory resources used by that command.
1798  **/
1799 static void
1800 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1801 			     struct lpfc_wcqe_complete *wcqe)
1802 {
1803 	struct lpfc_nvmet_rcv_ctx *ctxp;
1804 	struct lpfc_nvmet_tgtport *tgtp;
1805 	uint32_t status, result;
1806 	unsigned long flags;
1807 	bool released = false;
1808 
1809 	ctxp = cmdwqe->context2;
1810 	status = bf_get(lpfc_wcqe_c_status, wcqe);
1811 	result = wcqe->parameter;
1812 
1813 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1814 	atomic_inc(&tgtp->xmt_abort_cmpl);
1815 
1816 	ctxp->state = LPFC_NVMET_STE_DONE;
1817 
	/* Check whether a free-context call has already been received and
	 * abort processing is now complete; if so, release the context.
	 */
1821 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1822 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
1823 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
1824 		list_del(&ctxp->list);
1825 		released = true;
1826 	}
1827 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
1828 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1829 
1830 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1831 			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
1832 			"WCQE: %08x %08x %08x %08x\n",
1833 			ctxp->oxid, ctxp->flag, released,
1834 			wcqe->word0, wcqe->total_data_placed,
1835 			result, wcqe->word3);
1836 
	/*
	 * If the transport has already released the ctx, repost it now;
	 * otherwise it will be recycled by the transport's release call.
	 */
1841 	if (released)
1842 		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1843 
1844 	cmdwqe->context2 = NULL;
1845 	cmdwqe->context3 = NULL;
1846 	lpfc_sli_release_iocbq(phba, cmdwqe);
1847 
1848 	/* Since iaab/iaar are NOT set, there is no work left.
1849 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
1850 	 * should have been called already.
1851 	 */
1852 }
1853 
1854 /**
1855  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
1856  * @phba: Pointer to HBA context object.
1857  * @cmdwqe: Pointer to driver command WQE object.
1858  * @wcqe: Pointer to driver response CQE object.
1859  *
 * This function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for an FCP
 * command, and it frees the memory resources used by that command.
1863  **/
1864 static void
1865 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1866 			       struct lpfc_wcqe_complete *wcqe)
1867 {
1868 	struct lpfc_nvmet_rcv_ctx *ctxp;
1869 	struct lpfc_nvmet_tgtport *tgtp;
1870 	unsigned long flags;
1871 	uint32_t status, result;
1872 	bool released = false;
1873 
1874 	ctxp = cmdwqe->context2;
1875 	status = bf_get(lpfc_wcqe_c_status, wcqe);
1876 	result = wcqe->parameter;
1877 
1878 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1879 	atomic_inc(&tgtp->xmt_abort_cmpl);
1880 
1881 	if (!ctxp) {
		/* If the context is gone, the related I/O already completed. */
1883 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1884 				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
1885 				wcqe->word0, wcqe->total_data_placed,
1886 				result, wcqe->word3);
1887 		return;
1888 	}
1889 
1890 	/* Sanity check */
1891 	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
1892 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1893 				"6112 ABTS Wrong state:%d oxid x%x\n",
1894 				ctxp->state, ctxp->oxid);
1895 	}
1896 
	/* Check whether a free-context call has already been received and
	 * abort processing is now complete; if so, release the context.
	 */
1900 	ctxp->state = LPFC_NVMET_STE_DONE;
1901 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1902 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
1903 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
1904 		list_del(&ctxp->list);
1905 		released = true;
1906 	}
1907 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
1908 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1909 
1910 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1911 			"6316 ABTS cmpl xri x%x flg x%x (%x) "
1912 			"WCQE: %08x %08x %08x %08x\n",
1913 			ctxp->oxid, ctxp->flag, released,
1914 			wcqe->word0, wcqe->total_data_placed,
1915 			result, wcqe->word3);
	/*
	 * If the transport has already released the ctx, repost it now;
	 * otherwise it will be recycled by the transport's release call.
	 */
1920 	if (released)
1921 		lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1922 
1923 	cmdwqe->context2 = NULL;
1924 	cmdwqe->context3 = NULL;
1925 
1926 	/* Since iaab/iaar are NOT set, there is no work left.
1927 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
1928 	 * should have been called already.
1929 	 */
1930 }
1931 
1932 /**
1933  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
1934  * @phba: Pointer to HBA context object.
1935  * @cmdwqe: Pointer to driver command WQE object.
1936  * @wcqe: Pointer to driver response CQE object.
1937  *
 * This function is called from the SLI ring event handler with no lock
 * held. It is the completion handler for an NVME ABTS issued for an LS
 * command, and it frees the memory resources used by that command.
1941  **/
1942 static void
1943 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1944 			    struct lpfc_wcqe_complete *wcqe)
1945 {
1946 	struct lpfc_nvmet_rcv_ctx *ctxp;
1947 	struct lpfc_nvmet_tgtport *tgtp;
1948 	uint32_t status, result;
1949 
1950 	ctxp = cmdwqe->context2;
1951 	status = bf_get(lpfc_wcqe_c_status, wcqe);
1952 	result = wcqe->parameter;
1953 
1954 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1955 	atomic_inc(&tgtp->xmt_abort_cmpl);
1956 
1957 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1958 			"6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
1959 			ctxp, wcqe->word0, wcqe->total_data_placed,
1960 			result, wcqe->word3);
1961 
1962 	if (ctxp) {
1963 		cmdwqe->context2 = NULL;
1964 		cmdwqe->context3 = NULL;
1965 		lpfc_sli_release_iocbq(phba, cmdwqe);
1966 		kfree(ctxp);
	} else {
		lpfc_sli_release_iocbq(phba, cmdwqe);
	}
1969 }
1970 
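/*
 * Build (but do not post) an XMIT_SEQUENCE WQE carrying a BLS ABTS for
 * the exchange identified by sid/xri.  Returns 1 if the WQE was built,
 * 0 if the ABTS was dropped because the remote node is not in a
 * MAPPED or UNMAPPED state.
 */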
1971 static int
1972 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
1973 			     struct lpfc_nvmet_rcv_ctx *ctxp,
1974 			     uint32_t sid, uint16_t xri)
1975 {
1976 	struct lpfc_nvmet_tgtport *tgtp;
1977 	struct lpfc_iocbq *abts_wqeq;
1978 	union lpfc_wqe *wqe_abts;
1979 	struct lpfc_nodelist *ndlp;
1980 
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	/* Log only after wqeq is known to be valid */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);
1990 
1991 	ndlp = lpfc_findnode_did(phba->pport, sid);
1992 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1993 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1994 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1995 		atomic_inc(&tgtp->xmt_abort_rsp_error);
1996 		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1997 				"6134 Drop ABTS - wrong NDLP state x%x.\n",
1998 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
1999 
		/* Dropping an ABTS is not reported as a failure. */
2001 		return 0;
2002 	}
2003 
2004 	abts_wqeq = ctxp->wqeq;
2005 	wqe_abts = &abts_wqeq->wqe;
2006 	ctxp->state = LPFC_NVMET_STE_ABORT;
2007 
	/*
	 * Since we zero the whole WQE, we must re-initialize the fields
	 * that lpfc_sli4_nvmet_alloc set up before this WQE is reused.
	 */
2012 	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
2013 
2014 	/* Word 5 */
2015 	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
2016 	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
2017 	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
2018 	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
2019 	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
2020 
2021 	/* Word 6 */
2022 	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
2023 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2024 	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
2025 	       abts_wqeq->sli4_xritag);
2026 
2027 	/* Word 7 */
2028 	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
2029 	       CMD_XMIT_SEQUENCE64_WQE);
2030 	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
2031 	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
2032 	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
2033 
2034 	/* Word 8 */
2035 	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
2036 
2037 	/* Word 9 */
2038 	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* The OXID of the exchange to abort is supplied by the caller */
2040 	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
2041 
2042 	/* Word 10 */
2043 	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
2044 	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2045 	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
2046 	       LPFC_WQE_LENLOC_WORD12);
2047 	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
2048 	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
2049 
2050 	/* Word 11 */
2051 	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
2052 	       LPFC_WQE_CQ_ID_DEFAULT);
2053 	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
2054 	       OTHER_COMMAND);
2055 
2056 	abts_wqeq->vport = phba->pport;
2057 	abts_wqeq->context1 = ndlp;
2058 	abts_wqeq->context2 = ctxp;
2059 	abts_wqeq->context3 = NULL;
2060 	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be set up from the command we are aborting */
2062 	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2063 	abts_wqeq->iocb.ulpLe = 1;
2064 
2065 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2066 			"6069 Issue ABTS to xri x%x reqtag x%x\n",
2067 			xri, abts_wqeq->iotag);
2068 	return 1;
2069 }
2070 
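/*
 * Abort a solicited FCP exchange by issuing an ABORT_XRI_CX WQE against
 * the outstanding command's XRI.  Returns 0 when the abort is issued or
 * intentionally dropped; returns 1 only if lpfc_sli4_issue_wqe() fails.
 */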
2071 static int
2072 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
2073 			       struct lpfc_nvmet_rcv_ctx *ctxp,
2074 			       uint32_t sid, uint16_t xri)
2075 {
2076 	struct lpfc_nvmet_tgtport *tgtp;
2077 	struct lpfc_iocbq *abts_wqeq;
2078 	union lpfc_wqe *abts_wqe;
2079 	struct lpfc_nodelist *ndlp;
2080 	unsigned long flags;
2081 	int rc;
2082 
2083 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2084 	if (!ctxp->wqeq) {
2085 		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
2086 		ctxp->wqeq->hba_wqidx = 0;
2087 	}
2088 
2089 	ndlp = lpfc_findnode_did(phba->pport, sid);
2090 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2091 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2092 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2093 		atomic_inc(&tgtp->xmt_abort_rsp_error);
2094 		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2095 				"6160 Drop ABORT - wrong NDLP state x%x.\n",
2096 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
2097 
		/* Dropping an ABTS is not reported as a failure. */
2099 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2100 		return 0;
2101 	}
2102 
2103 	/* Issue ABTS for this WQE based on iotag */
2104 	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
2105 	if (!ctxp->abort_wqeq) {
2106 		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2107 				"6161 ABORT failed: No wqeqs: "
2108 				"xri: x%x\n", ctxp->oxid);
		/* Dropping an ABTS is not reported as a failure. */
2110 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2111 		return 0;
2112 	}
2113 	abts_wqeq = ctxp->abort_wqeq;
2114 	abts_wqe = &abts_wqeq->wqe;
2115 	ctxp->state = LPFC_NVMET_STE_ABORT;
2116 
	/* Log the ABORT request before building and issuing the WQE. */
2118 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2119 			"6162 ABORT Request to rport DID x%06x "
2120 			"for xri x%x x%x\n",
2121 			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
2122 
2123 	/* If the hba is getting reset, this flag is set.  It is
2124 	 * cleared when the reset is complete and rings reestablished.
2125 	 */
2126 	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver-queued commands are in the process of being flushed */
2128 	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
2129 		spin_unlock_irqrestore(&phba->hbalock, flags);
2130 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2131 				"6163 Driver in reset cleanup - flushing "
2132 				"NVME Req now. hba_flag x%x oxid x%x\n",
2133 				phba->hba_flag, ctxp->oxid);
2134 		lpfc_sli_release_iocbq(phba, abts_wqeq);
2135 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2136 		return 0;
2137 	}
2138 
2139 	/* Outstanding abort is in progress */
2140 	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
2141 		spin_unlock_irqrestore(&phba->hbalock, flags);
2142 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2143 				"6164 Outstanding NVME I/O Abort Request "
2144 				"still pending on oxid x%x\n",
2145 				ctxp->oxid);
2146 		lpfc_sli_release_iocbq(phba, abts_wqeq);
2147 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2148 		return 0;
2149 	}
2150 
2151 	/* Ready - mark outstanding as aborted by driver. */
2152 	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
2153 
2154 	/* WQEs are reused.  Clear stale data and set key fields to
2155 	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
2156 	 */
2157 	memset(abts_wqe, 0, sizeof(union lpfc_wqe));
2158 
2159 	/* word 3 */
2160 	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
2161 
2162 	/* word 7 */
2163 	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
2164 	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
2165 
2166 	/* word 8 - tell the FW to abort the IO associated with this
2167 	 * outstanding exchange ID.
2168 	 */
2169 	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
2170 
2171 	/* word 9 - this is the iotag for the abts_wqe completion. */
2172 	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
2173 	       abts_wqeq->iotag);
2174 
2175 	/* word 10 */
2176 	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
2177 	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
2178 
2179 	/* word 11 */
2180 	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
2181 	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
2182 	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
2183 
2184 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
2185 	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
2186 	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
2188 	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
2189 	abts_wqeq->context2 = ctxp;
2190 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2191 	spin_unlock_irqrestore(&phba->hbalock, flags);
2192 	if (rc == WQE_SUCCESS)
2193 		return 0;
2194 
2195 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2196 	lpfc_sli_release_iocbq(phba, abts_wqeq);
2197 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
2198 			"6166 Failed ABORT issue_wqe with status x%x "
2199 			"for oxid x%x.\n",
2200 			rc, ctxp->oxid);
2201 	return 1;
}

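/*
 * Abort an unsolicited FCP exchange: build a BLS ABTS with
 * lpfc_nvmet_unsol_issue_abort() and post it on the FCP ring.
 */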
2205 static int
2206 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
2207 				 struct lpfc_nvmet_rcv_ctx *ctxp,
2208 				 uint32_t sid, uint16_t xri)
2209 {
2210 	struct lpfc_nvmet_tgtport *tgtp;
2211 	struct lpfc_iocbq *abts_wqeq;
2212 	unsigned long flags;
2213 	int rc;
2214 
2215 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2216 	if (!ctxp->wqeq) {
2217 		ctxp->wqeq = ctxp->rqb_buffer->iocbq;
2218 		ctxp->wqeq->hba_wqidx = 0;
2219 	}
2220 
2221 	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
2222 	if (rc == 0)
2223 		goto aerr;
2224 
2225 	spin_lock_irqsave(&phba->hbalock, flags);
2226 	abts_wqeq = ctxp->wqeq;
2227 	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
2228 	abts_wqeq->iocb_cmpl = NULL;
2229 	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
2230 	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
2231 	spin_unlock_irqrestore(&phba->hbalock, flags);
2232 	if (rc == WQE_SUCCESS) {
2233 		atomic_inc(&tgtp->xmt_abort_rsp);
2234 		return 0;
2235 	}
2236 
2237 aerr:
2238 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2239 	atomic_inc(&tgtp->xmt_abort_rsp_error);
2240 	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2241 			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
2242 			ctxp->oxid, rc);
2243 	return 1;
2244 }
2245 
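/*
 * Abort an unsolicited LS exchange on the ELS ring.  The receive
 * context is freed by the abort completion handler on success, or
 * immediately here on failure.
 */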
2246 static int
2247 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
2248 				struct lpfc_nvmet_rcv_ctx *ctxp,
2249 				uint32_t sid, uint16_t xri)
2250 {
2251 	struct lpfc_nvmet_tgtport *tgtp;
2252 	struct lpfc_iocbq *abts_wqeq;
2253 	union lpfc_wqe *wqe_abts;
2254 	unsigned long flags;
2255 	int rc;
2256 
2257 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2258 	if (!ctxp->wqeq) {
2259 		/* Issue ABTS for this WQE based on iotag */
2260 		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
2261 		if (!ctxp->wqeq) {
2262 			lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2263 					"6068 Abort failed: No wqeqs: "
2264 					"xri: x%x\n", xri);
			/* Dropping an ABTS is not reported as a failure. */
2266 			kfree(ctxp);
2267 			return 0;
2268 		}
2269 	}
2270 	abts_wqeq = ctxp->wqeq;
2271 	wqe_abts = &abts_wqeq->wqe;
2272 	lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
2273 
2274 	spin_lock_irqsave(&phba->hbalock, flags);
2275 	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
2278 	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
2279 	spin_unlock_irqrestore(&phba->hbalock, flags);
2280 	if (rc == WQE_SUCCESS) {
2281 		atomic_inc(&tgtp->xmt_abort_rsp);
2282 		return 0;
2283 	}
2284 
2285 	atomic_inc(&tgtp->xmt_abort_rsp_error);
2286 	abts_wqeq->context2 = NULL;
2287 	abts_wqeq->context3 = NULL;
2288 	lpfc_sli_release_iocbq(phba, abts_wqeq);
2289 	kfree(ctxp);
2290 	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2291 			"6056 Failed to Issue ABTS. Status x%x\n", rc);
2292 	return 0;
2293 }
2294