xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_nvmet.c (revision b58c6630)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
5  * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7  * EMULEX and SLI are trademarks of Emulex.                        *
8  * www.broadcom.com                                                *
9  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10  *                                                                 *
11  * This program is free software; you can redistribute it and/or   *
12  * modify it under the terms of version 2 of the GNU General       *
13  * Public License as published by the Free Software Foundation.    *
14  * This program is distributed in the hope that it will be useful. *
15  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20  * more details, a copy of which can be found in the file COPYING  *
21  * included with this package.                                     *
22  ********************************************************************/
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/unaligned.h>
28 #include <linux/crc-t10dif.h>
29 #include <net/checksum.h>
30 
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_device.h>
33 #include <scsi/scsi_eh.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_tcq.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38 
39 #include <linux/nvme.h>
40 #include <linux/nvme-fc-driver.h>
41 #include <linux/nvme-fc.h>
42 
43 #include "lpfc_version.h"
44 #include "lpfc_hw4.h"
45 #include "lpfc_hw.h"
46 #include "lpfc_sli.h"
47 #include "lpfc_sli4.h"
48 #include "lpfc_nl.h"
49 #include "lpfc_disc.h"
50 #include "lpfc.h"
51 #include "lpfc_scsi.h"
52 #include "lpfc_nvme.h"
53 #include "lpfc_nvmet.h"
54 #include "lpfc_logmsg.h"
55 #include "lpfc_crtn.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_debugfs.h"
58 
59 static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
60 						 struct lpfc_nvmet_rcv_ctx *,
61 						 dma_addr_t rspbuf,
62 						 uint16_t rspsize);
63 static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
64 						  struct lpfc_nvmet_rcv_ctx *);
65 static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
66 					  struct lpfc_nvmet_rcv_ctx *,
67 					  uint32_t, uint16_t);
68 static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
69 					    struct lpfc_nvmet_rcv_ctx *,
70 					    uint32_t, uint16_t);
71 static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
72 					   struct lpfc_nvmet_rcv_ctx *,
73 					   uint32_t, uint16_t);
74 static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
75 				    struct lpfc_nvmet_rcv_ctx *);
76 static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);
77 
78 static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);
79 
80 static union lpfc_wqe128 lpfc_tsend_cmd_template;
81 static union lpfc_wqe128 lpfc_treceive_cmd_template;
82 static union lpfc_wqe128 lpfc_trsp_cmd_template;
83 
84 /* Setup WQE templates for NVME IOs */
85 void
86 lpfc_nvmet_cmd_template(void)
87 {
88 	union lpfc_wqe128 *wqe;
89 
90 	/* TSEND template */
91 	wqe = &lpfc_tsend_cmd_template;
92 	memset(wqe, 0, sizeof(union lpfc_wqe128));
93 
94 	/* Word 0, 1, 2 - BDE is variable */
95 
96 	/* Word 3 - payload_offset_len is zero */
97 
98 	/* Word 4 - relative_offset is variable */
99 
100 	/* Word 5 - is zero */
101 
102 	/* Word 6 - ctxt_tag, xri_tag is variable */
103 
104 	/* Word 7 - wqe_ar is variable */
105 	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
106 	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
107 	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
108 	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
109 	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
110 
111 	/* Word 8 - abort_tag is variable */
112 
113 	/* Word 9  - reqtag, rcvoxid is variable */
114 
115 	/* Word 10 - wqes, xc is variable */
116 	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
117 	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
118 	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
119 	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
120 	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
121 	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);
122 
123 	/* Word 11 - sup, irsp, irsplen is variable */
124 	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
125 	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
126 	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
127 	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
128 	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
129 	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);
130 
131 	/* Word 12 - fcp_data_len is variable */
132 
133 	/* Word 13, 14, 15 - PBDE is zero */
134 
135 	/* TRECEIVE template */
136 	wqe = &lpfc_treceive_cmd_template;
137 	memset(wqe, 0, sizeof(union lpfc_wqe128));
138 
139 	/* Word 0, 1, 2 - BDE is variable */
140 
141 	/* Word 3 */
142 	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
143 
144 	/* Word 4 - relative_offset is variable */
145 
146 	/* Word 5 - is zero */
147 
148 	/* Word 6 - ctxt_tag, xri_tag is variable */
149 
150 	/* Word 7 */
151 	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
152 	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
153 	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
154 	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
155 	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
156 
157 	/* Word 8 - abort_tag is variable */
158 
159 	/* Word 9  - reqtag, rcvoxid is variable */
160 
161 	/* Word 10 - xc is variable */
162 	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
163 	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
164 	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
165 	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
166 	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
167 	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);
168 
169 	/* Word 11 - pbde is variable */
170 	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
171 	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
172 	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
173 	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
174 	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
175 	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
176 
177 	/* Word 12 - fcp_data_len is variable */
178 
179 	/* Word 13, 14, 15 - PBDE is variable */
180 
181 	/* TRSP template */
182 	wqe = &lpfc_trsp_cmd_template;
183 	memset(wqe, 0, sizeof(union lpfc_wqe128));
184 
185 	/* Word 0, 1, 2 - BDE is variable */
186 
187 	/* Word 3 - response_len is variable */
188 
189 	/* Word 4, 5 - is zero */
190 
191 	/* Word 6 - ctxt_tag, xri_tag is variable */
192 
193 	/* Word 7 */
194 	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
195 	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
196 	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
197 	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
198 	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */
199 
200 	/* Word 8 - abort_tag is variable */
201 
202 	/* Word 9  - reqtag is variable */
203 
204 	/* Word 10 wqes, xc is variable */
205 	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
206 	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
207 	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
208 	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
209 	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
210 	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);
211 
212 	/* Word 11 irsp, irsplen is variable */
213 	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
214 	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
215 	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
216 	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
217 	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
218 	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);
219 
220 	/* Word 12, 13, 14, 15 - is zero */
221 }
222 
223 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
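/* Search the active receive context list for the context whose SGL XRI
 * matches the given xri.  Returns the matching context, or NULL if none
 * is found.
 */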
224 static struct lpfc_nvmet_rcv_ctx *
225 lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
226 {
227 	struct lpfc_nvmet_rcv_ctx *ctxp;
228 	unsigned long iflag;
229 	bool found = false;
230 
231 	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
232 	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
233 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
234 			continue;
235 
236 		found = true;
237 		break;
238 	}
239 	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
240 	if (found)
241 		return ctxp;
242 
243 	return NULL;
244 }
245 
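/* Search the active receive context list for the context that matches both
 * the exchange OX_ID and the source ID.  Returns the matching context, or
 * NULL if none is found.
 */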
246 static struct lpfc_nvmet_rcv_ctx *
247 lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
248 {
249 	struct lpfc_nvmet_rcv_ctx *ctxp;
250 	unsigned long iflag;
251 	bool found = false;
252 
253 	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
254 	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
255 		if (ctxp->oxid != oxid || ctxp->sid != sid)
256 			continue;
257 
258 		found = true;
259 		break;
260 	}
261 	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
262 	if (found)
263 		return ctxp;
264 
265 	return NULL;
266 }
267 #endif
268 
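/* Mark a context for deferred release: move it from the active context list
 * to the ABTS context list so the abort / XRI-aborted path performs the
 * final release.  Caller must hold ctxp->ctxlock.
 */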
269 static void
270 lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
271 {
272 	lockdep_assert_held(&ctxp->ctxlock);
273 
274 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
275 			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
276 			ctxp->oxid, ctxp->flag);
277 
278 	if (ctxp->flag & LPFC_NVMET_CTX_RLS)
279 		return;
280 
281 	ctxp->flag |= LPFC_NVMET_CTX_RLS;
282 	spin_lock(&phba->sli4_hba.t_active_list_lock);
283 	list_del(&ctxp->list);
284 	spin_unlock(&phba->sli4_hba.t_active_list_lock);
285 	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
286 	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
287 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
288 }
289 
290 /**
291  * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
292  * @phba: Pointer to HBA context object.
293  * @cmdwqe: Pointer to driver command WQE object.
294  * @wcqe: Pointer to driver response CQE object.
295  *
296  * The function is called from the SLI ring event handler with no
297  * lock held. This function is the completion handler for NVME LS commands.
298  * The function frees memory resources used for the NVME commands.
299  **/
300 static void
301 lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
302 			  struct lpfc_wcqe_complete *wcqe)
303 {
304 	struct lpfc_nvmet_tgtport *tgtp;
305 	struct nvmefc_tgt_ls_req *rsp;
306 	struct lpfc_nvmet_rcv_ctx *ctxp;
307 	uint32_t status, result;
308 
309 	status = bf_get(lpfc_wcqe_c_status, wcqe);
310 	result = wcqe->parameter;
311 	ctxp = cmdwqe->context2;
312 
313 	if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
314 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
315 				"6410 NVMET LS cmpl state mismatch IO x%x: "
316 				"%d %d\n",
317 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
318 	}
319 
320 	if (!phba->targetport)
321 		goto out;
322 
323 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
324 
325 	if (tgtp) {
326 		if (status) {
327 			atomic_inc(&tgtp->xmt_ls_rsp_error);
328 			if (result == IOERR_ABORT_REQUESTED)
329 				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
330 			if (bf_get(lpfc_wcqe_c_xb, wcqe))
331 				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
332 		} else {
333 			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
334 		}
335 	}
336 
337 out:
338 	rsp = &ctxp->ctx.ls_req;
339 
340 	lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
341 			 ctxp->oxid, status, result);
342 
343 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
344 			"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
345 			status, result, ctxp->oxid);
346 
347 	lpfc_nlp_put(cmdwqe->context1);
348 	cmdwqe->context2 = NULL;
349 	cmdwqe->context3 = NULL;
350 	lpfc_sli_release_iocbq(phba, cmdwqe);
351 	rsp->done(rsp);
352 	kfree(ctxp);
353 }
354 
355 /**
356  * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
357  * @phba: HBA the buffer is associated with
358  * @ctx_buf: context buffer to clean up and repost
359  *
360  * Description: Reposts the given DMA buffer to its associated RQ so it can
361  * be reused, or reuses the context directly for a deferred unsolicited
362  * command that is waiting for one.
363  *
364  * Notes: Takes ctxlock and several sli4_hba locks.
365  *
366  * Returns: None
367  **/
368 void
369 lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
370 {
371 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
372 	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
373 	struct lpfc_nvmet_tgtport *tgtp;
374 	struct fc_frame_header *fc_hdr;
375 	struct rqb_dmabuf *nvmebuf;
376 	struct lpfc_nvmet_ctx_info *infop;
377 	uint32_t size, oxid, sid;
378 	int cpu;
379 	unsigned long iflag;
380 
381 	if (ctxp->state == LPFC_NVMET_STE_FREE) {
382 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
383 				"6411 NVMET free, already free IO x%x: %d %d\n",
384 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
385 	}
386 
387 	if (ctxp->rqb_buffer) {
388 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
389 		nvmebuf = ctxp->rqb_buffer;
390 		/* check if freed in another path whilst acquiring lock */
391 		if (nvmebuf) {
392 			ctxp->rqb_buffer = NULL;
393 			if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
394 				ctxp->flag &= ~LPFC_NVMET_CTX_REUSE_WQ;
395 				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
396 				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
397 								    nvmebuf);
398 			} else {
399 				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
400 				/* repost */
401 				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
402 			}
403 		} else {
404 			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
405 		}
406 	}
407 	ctxp->state = LPFC_NVMET_STE_FREE;
408 
409 	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
410 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
411 		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
412 				 nvmebuf, struct rqb_dmabuf,
413 				 hbuf.list);
414 		phba->sli4_hba.nvmet_io_wait_cnt--;
415 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
416 				       iflag);
417 
418 		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
419 		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
420 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
421 		size = nvmebuf->bytes_recv;
422 		sid = sli4_sid_from_fc_hdr(fc_hdr);
423 
424 		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
425 		ctxp->wqeq = NULL;
426 		ctxp->offset = 0;
427 		ctxp->phba = phba;
428 		ctxp->size = size;
429 		ctxp->oxid = oxid;
430 		ctxp->sid = sid;
431 		ctxp->state = LPFC_NVMET_STE_RCV;
432 		ctxp->entry_cnt = 1;
433 		ctxp->flag = 0;
434 		ctxp->ctxbuf = ctx_buf;
435 		ctxp->rqb_buffer = (void *)nvmebuf;
436 		spin_lock_init(&ctxp->ctxlock);
437 
438 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
439 		/* NOTE: isr time stamp is stale when context is re-assigned */
440 		if (ctxp->ts_isr_cmd) {
441 			ctxp->ts_cmd_nvme = 0;
442 			ctxp->ts_nvme_data = 0;
443 			ctxp->ts_data_wqput = 0;
444 			ctxp->ts_isr_data = 0;
445 			ctxp->ts_data_nvme = 0;
446 			ctxp->ts_nvme_status = 0;
447 			ctxp->ts_status_wqput = 0;
448 			ctxp->ts_isr_status = 0;
449 			ctxp->ts_status_nvme = 0;
450 		}
451 #endif
452 		atomic_inc(&tgtp->rcv_fcp_cmd_in);
453 
454 		/* Indicate that a replacement buffer has been posted */
455 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
456 		ctxp->flag |= LPFC_NVMET_CTX_REUSE_WQ;
457 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
458 
459 		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
460 			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
461 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
462 					"6181 Unable to queue deferred work "
463 					"for oxid x%x. "
464 					"FCP Drop IO [x%x x%x x%x]\n",
465 					ctxp->oxid,
466 					atomic_read(&tgtp->rcv_fcp_cmd_in),
467 					atomic_read(&tgtp->rcv_fcp_cmd_out),
468 					atomic_read(&tgtp->xmt_fcp_release));
469 
470 			spin_lock_irqsave(&ctxp->ctxlock, iflag);
471 			lpfc_nvmet_defer_release(phba, ctxp);
472 			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
473 			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
474 		}
475 		return;
476 	}
477 	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
478 
479 	/*
480 	 * Use the CPU context list, indexed by the MRQ the IO was received on
481 	 * (ctxp->idx), to return the context structure to its free list.
482 	 */
483 	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
484 	list_del_init(&ctxp->list);
485 	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
486 	cpu = raw_smp_processor_id();
487 	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
488 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
489 	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
490 	infop->nvmet_ctx_list_cnt++;
491 	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
492 #endif
493 }
494 
495 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
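/* Fold the per-IO timestamps of a completed exchange into the debugfs ktime
 * statistics (per-segment min/max/total counters).  Bails out if any
 * timestamp is missing or the sequence is not monotonically increasing.
 */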
496 static void
497 lpfc_nvmet_ktime(struct lpfc_hba *phba,
498 		 struct lpfc_nvmet_rcv_ctx *ctxp)
499 {
500 	uint64_t seg1, seg2, seg3, seg4, seg5;
501 	uint64_t seg6, seg7, seg8, seg9, seg10;
502 	uint64_t segsum;
503 
504 	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
505 	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
506 	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
507 	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
508 	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
509 		return;
510 
511 	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
512 		return;
513 	if (ctxp->ts_isr_cmd  > ctxp->ts_cmd_nvme)
514 		return;
515 	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
516 		return;
517 	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
518 		return;
519 	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
520 		return;
521 	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
522 		return;
523 	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
524 		return;
525 	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
526 		return;
527 	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
528 		return;
529 	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
530 		return;
531 	/*
532 	 * Segment 1 - Time from FCP command received by MSI-X ISR
533 	 * to FCP command is passed to NVME Layer.
534 	 * Segment 2 - Time from FCP command payload handed
535 	 * off to NVME Layer to Driver receives a Command op
536 	 * from NVME Layer.
537 	 * Segment 3 - Time from Driver receives a Command op
538 	 * from NVME Layer to Command is put on WQ.
539 	 * Segment 4 - Time from Driver WQ put is done
540 	 * to MSI-X ISR for Command cmpl.
541 	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
542 	 * Command cmpl is passed to NVME Layer.
543 	 * Segment 6 - Time from Command cmpl is passed to NVME
544 	 * Layer to Driver receives a RSP op from NVME Layer.
545 	 * Segment 7 - Time from Driver receives a RSP op from
546 	 * NVME Layer to WQ put is done on TRSP FCP Status.
547 	 * Segment 8 - Time from Driver WQ put is done on TRSP
548 	 * FCP Status to MSI-X ISR for TRSP cmpl.
549 	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
550 	 * TRSP cmpl is passed to NVME Layer.
551 	 * Segment 10 - Time from FCP command received by
552 	 * MSI-X ISR to command is completed on wire.
553 	 * (Segments 1 thru 8) for READDATA / WRITEDATA
554 	 * (Segments 1 thru 4) for READDATA_RSP
555 	 */
556 	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
557 	segsum = seg1;
558 
559 	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
560 	if (segsum > seg2)
561 		return;
562 	seg2 -= segsum;
563 	segsum += seg2;
564 
565 	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
566 	if (segsum > seg3)
567 		return;
568 	seg3 -= segsum;
569 	segsum += seg3;
570 
571 	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
572 	if (segsum > seg4)
573 		return;
574 	seg4 -= segsum;
575 	segsum += seg4;
576 
577 	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
578 	if (segsum > seg5)
579 		return;
580 	seg5 -= segsum;
581 	segsum += seg5;
582 
584 	/* For auto rsp commands seg6 thru seg10 will be 0 */
585 	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
586 		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
587 		if (segsum > seg6)
588 			return;
589 		seg6 -= segsum;
590 		segsum += seg6;
591 
592 		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
593 		if (segsum > seg7)
594 			return;
595 		seg7 -= segsum;
596 		segsum += seg7;
597 
598 		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
599 		if (segsum > seg8)
600 			return;
601 		seg8 -= segsum;
602 		segsum += seg8;
603 
604 		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
605 		if (segsum > seg9)
606 			return;
607 		seg9 -= segsum;
608 		segsum += seg9;
609 
610 		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
611 			return;
612 		seg10 = (ctxp->ts_isr_status -
613 			ctxp->ts_isr_cmd);
614 	} else {
615 		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
616 			return;
617 		seg6 =  0;
618 		seg7 =  0;
619 		seg8 =  0;
620 		seg9 =  0;
621 		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
622 	}
623 
624 	phba->ktime_seg1_total += seg1;
625 	if (seg1 < phba->ktime_seg1_min)
626 		phba->ktime_seg1_min = seg1;
627 	else if (seg1 > phba->ktime_seg1_max)
628 		phba->ktime_seg1_max = seg1;
629 
630 	phba->ktime_seg2_total += seg2;
631 	if (seg2 < phba->ktime_seg2_min)
632 		phba->ktime_seg2_min = seg2;
633 	else if (seg2 > phba->ktime_seg2_max)
634 		phba->ktime_seg2_max = seg2;
635 
636 	phba->ktime_seg3_total += seg3;
637 	if (seg3 < phba->ktime_seg3_min)
638 		phba->ktime_seg3_min = seg3;
639 	else if (seg3 > phba->ktime_seg3_max)
640 		phba->ktime_seg3_max = seg3;
641 
642 	phba->ktime_seg4_total += seg4;
643 	if (seg4 < phba->ktime_seg4_min)
644 		phba->ktime_seg4_min = seg4;
645 	else if (seg4 > phba->ktime_seg4_max)
646 		phba->ktime_seg4_max = seg4;
647 
648 	phba->ktime_seg5_total += seg5;
649 	if (seg5 < phba->ktime_seg5_min)
650 		phba->ktime_seg5_min = seg5;
651 	else if (seg5 > phba->ktime_seg5_max)
652 		phba->ktime_seg5_max = seg5;
653 
654 	phba->ktime_data_samples++;
655 	if (!seg6)
656 		goto out;
657 
658 	phba->ktime_seg6_total += seg6;
659 	if (seg6 < phba->ktime_seg6_min)
660 		phba->ktime_seg6_min = seg6;
661 	else if (seg6 > phba->ktime_seg6_max)
662 		phba->ktime_seg6_max = seg6;
663 
664 	phba->ktime_seg7_total += seg7;
665 	if (seg7 < phba->ktime_seg7_min)
666 		phba->ktime_seg7_min = seg7;
667 	else if (seg7 > phba->ktime_seg7_max)
668 		phba->ktime_seg7_max = seg7;
669 
670 	phba->ktime_seg8_total += seg8;
671 	if (seg8 < phba->ktime_seg8_min)
672 		phba->ktime_seg8_min = seg8;
673 	else if (seg8 > phba->ktime_seg8_max)
674 		phba->ktime_seg8_max = seg8;
675 
676 	phba->ktime_seg9_total += seg9;
677 	if (seg9 < phba->ktime_seg9_min)
678 		phba->ktime_seg9_min = seg9;
679 	else if (seg9 > phba->ktime_seg9_max)
680 		phba->ktime_seg9_max = seg9;
681 out:
682 	phba->ktime_seg10_total += seg10;
683 	if (seg10 < phba->ktime_seg10_min)
684 		phba->ktime_seg10_min = seg10;
685 	else if (seg10 > phba->ktime_seg10_max)
686 		phba->ktime_seg10_max = seg10;
687 	phba->ktime_status_samples++;
688 }
689 #endif
690 
691 /**
692  * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
693  * @phba: Pointer to HBA context object.
694  * @cmdwqe: Pointer to driver command WQE object.
695  * @wcqe: Pointer to driver response CQE object.
696  *
697  * The function is called from the SLI ring event handler with no
698  * lock held. This function is the completion handler for NVME FCP commands.
699  * The function frees memory resources used for the NVME commands.
700  **/
701 static void
702 lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
703 			  struct lpfc_wcqe_complete *wcqe)
704 {
705 	struct lpfc_nvmet_tgtport *tgtp;
706 	struct nvmefc_tgt_fcp_req *rsp;
707 	struct lpfc_nvmet_rcv_ctx *ctxp;
708 	uint32_t status, result, op, start_clean, logerr;
709 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
710 	int id;
711 #endif
712 
713 	ctxp = cmdwqe->context2;
714 	ctxp->flag &= ~LPFC_NVMET_IO_INP;
715 
716 	rsp = &ctxp->ctx.fcp_req;
717 	op = rsp->op;
718 
719 	status = bf_get(lpfc_wcqe_c_status, wcqe);
720 	result = wcqe->parameter;
721 
722 	if (phba->targetport)
723 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
724 	else
725 		tgtp = NULL;
726 
727 	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
728 			 ctxp->oxid, op, status);
729 
730 	if (status) {
731 		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
732 		rsp->transferred_length = 0;
733 		if (tgtp) {
734 			atomic_inc(&tgtp->xmt_fcp_rsp_error);
735 			if (result == IOERR_ABORT_REQUESTED)
736 				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
737 		}
738 
739 		logerr = LOG_NVME_IOERR;
740 
741 		/* pick up SLI4 exchange busy condition */
742 		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
743 			ctxp->flag |= LPFC_NVMET_XBUSY;
744 			logerr |= LOG_NVME_ABTS;
745 			if (tgtp)
746 				atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
747 
748 		} else {
749 			ctxp->flag &= ~LPFC_NVMET_XBUSY;
750 		}
751 
752 		lpfc_printf_log(phba, KERN_INFO, logerr,
753 				"6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
754 				"XBUSY:x%x\n",
755 				ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
756 				status, result, ctxp->flag);
757 
758 	} else {
759 		rsp->fcp_error = NVME_SC_SUCCESS;
760 		if (op == NVMET_FCOP_RSP)
761 			rsp->transferred_length = rsp->rsplen;
762 		else
763 			rsp->transferred_length = rsp->transfer_length;
764 		if (tgtp)
765 			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
766 	}
767 
768 	if ((op == NVMET_FCOP_READDATA_RSP) ||
769 	    (op == NVMET_FCOP_RSP)) {
770 		/* Sanity check */
771 		ctxp->state = LPFC_NVMET_STE_DONE;
772 		ctxp->entry_cnt++;
773 
774 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
775 		if (ctxp->ts_cmd_nvme) {
776 			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
777 				ctxp->ts_isr_data =
778 					cmdwqe->isr_timestamp;
779 				ctxp->ts_data_nvme =
780 					ktime_get_ns();
781 				ctxp->ts_nvme_status =
782 					ctxp->ts_data_nvme;
783 				ctxp->ts_status_wqput =
784 					ctxp->ts_data_nvme;
785 				ctxp->ts_isr_status =
786 					ctxp->ts_data_nvme;
787 				ctxp->ts_status_nvme =
788 					ctxp->ts_data_nvme;
789 			} else {
790 				ctxp->ts_isr_status =
791 					cmdwqe->isr_timestamp;
792 				ctxp->ts_status_nvme =
793 					ktime_get_ns();
794 			}
795 		}
796 #endif
797 		rsp->done(rsp);
798 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
799 		if (ctxp->ts_cmd_nvme)
800 			lpfc_nvmet_ktime(phba, ctxp);
801 #endif
802 		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
803 	} else {
804 		ctxp->entry_cnt++;
805 		start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
806 		memset(((char *)cmdwqe) + start_clean, 0,
807 		       (sizeof(struct lpfc_iocbq) - start_clean));
808 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
809 		if (ctxp->ts_cmd_nvme) {
810 			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
811 			ctxp->ts_data_nvme = ktime_get_ns();
812 		}
813 #endif
814 		rsp->done(rsp);
815 	}
816 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
817 	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
818 		id = raw_smp_processor_id();
819 		this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
820 		if (ctxp->cpu != id)
821 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
822 					"6704 CPU Check cmdcmpl: "
823 					"cpu %d expect %d\n",
824 					id, ctxp->cpu);
825 	}
826 #endif
827 }
828 
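/**
 * lpfc_nvmet_xmt_ls_rsp - Transmit an NVME LS response
 * @tgtport: Pointer to the nvmet-fc target port.
 * @rsp: Pointer to the transport LS response request.
 *
 * Called by the nvmet-fc transport to send an LS response.  Builds an
 * XMIT_SEQUENCE WQE for the response payload and issues it.  On failure
 * the receive buffer is freed and the exchange is aborted.
 **/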
829 static int
830 lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
831 		      struct nvmefc_tgt_ls_req *rsp)
832 {
833 	struct lpfc_nvmet_rcv_ctx *ctxp =
834 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
835 	struct lpfc_hba *phba = ctxp->phba;
836 	struct hbq_dmabuf *nvmebuf =
837 		(struct hbq_dmabuf *)ctxp->rqb_buffer;
838 	struct lpfc_iocbq *nvmewqeq;
839 	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
840 	struct lpfc_dmabuf dmabuf;
841 	struct ulp_bde64 bpl;
842 	int rc;
843 
844 	if (phba->pport->load_flag & FC_UNLOADING)
845 		return -ENODEV;
846 
850 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
851 			"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);
852 
853 	if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
854 	    (ctxp->entry_cnt != 1)) {
855 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
856 				"6412 NVMET LS rsp state mismatch "
857 				"oxid x%x: %d %d\n",
858 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
859 	}
860 	ctxp->state = LPFC_NVMET_STE_LS_RSP;
861 	ctxp->entry_cnt++;
862 
863 	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
864 				      rsp->rsplen);
865 	if (nvmewqeq == NULL) {
866 		atomic_inc(&nvmep->xmt_ls_drop);
867 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
868 				"6150 LS Drop IO x%x: Prep\n",
869 				ctxp->oxid);
870 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
871 		atomic_inc(&nvmep->xmt_ls_abort);
872 		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
873 						ctxp->sid, ctxp->oxid);
874 		return -ENOMEM;
875 	}
876 
877 	/* Save numBdes for bpl2sgl */
878 	nvmewqeq->rsvd2 = 1;
879 	nvmewqeq->hba_wqidx = 0;
880 	nvmewqeq->context3 = &dmabuf;
881 	dmabuf.virt = &bpl;
882 	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
883 	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
884 	bpl.tus.f.bdeSize = rsp->rsplen;
885 	bpl.tus.f.bdeFlags = 0;
886 	bpl.tus.w = le32_to_cpu(bpl.tus.w);
887 
888 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
889 	nvmewqeq->iocb_cmpl = NULL;
890 	nvmewqeq->context2 = ctxp;
891 
892 	lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
893 			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
894 
895 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
896 	if (rc == WQE_SUCCESS) {
897 		/*
898 		 * Okay to repost buffer here, but wait till cmpl
899 		 * before freeing ctxp and iocbq.
900 		 */
901 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
902 		atomic_inc(&nvmep->xmt_ls_rsp);
903 		return 0;
904 	}
905 	/* Give back resources */
906 	atomic_inc(&nvmep->xmt_ls_drop);
907 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
908 			"6151 LS Drop IO x%x: Issue %d\n",
909 			ctxp->oxid, rc);
910 
911 	lpfc_nlp_put(nvmewqeq->context1);
912 
913 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
914 	atomic_inc(&nvmep->xmt_ls_abort);
915 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
916 	return -ENXIO;
917 }
918 
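/**
 * lpfc_nvmet_xmt_fcp_op - Issue a TSEND/TRECEIVE/TRSP operation
 * @tgtport: Pointer to the nvmet-fc target port.
 * @rsp: Pointer to the transport FCP request describing the operation.
 *
 * Called by the nvmet-fc transport to move data or send a response for an
 * exchange.  Prepares the FCP WQE and posts it to the hardware queue.  If
 * the WQ is full, the WQE is queued on the wqfull list and sent after a
 * WQE release CQE is received.
 **/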
919 static int
920 lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
921 		      struct nvmefc_tgt_fcp_req *rsp)
922 {
923 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
924 	struct lpfc_nvmet_rcv_ctx *ctxp =
925 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
926 	struct lpfc_hba *phba = ctxp->phba;
927 	struct lpfc_queue *wq;
928 	struct lpfc_iocbq *nvmewqeq;
929 	struct lpfc_sli_ring *pring;
930 	unsigned long iflags;
931 	int rc;
932 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
933 	int id;
934 #endif
935 
936 	if (phba->pport->load_flag & FC_UNLOADING) {
937 		rc = -ENODEV;
938 		goto aerr;
939 	}
940 
946 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
947 	if (ctxp->ts_cmd_nvme) {
948 		if (rsp->op == NVMET_FCOP_RSP)
949 			ctxp->ts_nvme_status = ktime_get_ns();
950 		else
951 			ctxp->ts_nvme_data = ktime_get_ns();
952 	}
953 
954 	/* Setup the hdw queue if not already set */
955 	if (!ctxp->hdwq)
956 		ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
957 
958 	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
959 		id = raw_smp_processor_id();
960 		this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
961 		if (rsp->hwqid != id)
962 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
963 					"6705 CPU Check OP: "
964 					"cpu %d expect %d\n",
965 					id, rsp->hwqid);
966 		ctxp->cpu = id; /* Setup cpu for cmpl check */
967 	}
968 #endif
969 
970 	/* Sanity check */
971 	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
972 	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
973 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
974 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
975 				"6102 IO oxid x%x aborted\n",
976 				ctxp->oxid);
977 		rc = -ENXIO;
978 		goto aerr;
979 	}
980 
981 	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
982 	if (nvmewqeq == NULL) {
983 		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
984 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
985 				"6152 FCP Drop IO x%x: Prep\n",
986 				ctxp->oxid);
987 		rc = -ENXIO;
988 		goto aerr;
989 	}
990 
991 	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
992 	nvmewqeq->iocb_cmpl = NULL;
993 	nvmewqeq->context2 = ctxp;
994 	nvmewqeq->iocb_flag |=  LPFC_IO_NVMET;
995 	ctxp->wqeq->hba_wqidx = rsp->hwqid;
996 
997 	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
998 			 ctxp->oxid, rsp->op, rsp->rsplen);
999 
1000 	ctxp->flag |= LPFC_NVMET_IO_INP;
1001 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1002 	if (rc == WQE_SUCCESS) {
1003 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1004 		if (!ctxp->ts_cmd_nvme)
1005 			return 0;
1006 		if (rsp->op == NVMET_FCOP_RSP)
1007 			ctxp->ts_status_wqput = ktime_get_ns();
1008 		else
1009 			ctxp->ts_data_wqput = ktime_get_ns();
1010 #endif
1011 		return 0;
1012 	}
1013 
1014 	if (rc == -EBUSY) {
1015 		/*
1016 		 * WQ was full, so queue nvmewqeq to be sent after
1017 		 * WQE release CQE
1018 		 */
1019 		ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
1020 		wq = ctxp->hdwq->io_wq;
1021 		pring = wq->pring;
1022 		spin_lock_irqsave(&pring->ring_lock, iflags);
1023 		list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
1024 		wq->q_flag |= HBA_NVMET_WQFULL;
1025 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
1026 		atomic_inc(&lpfc_nvmep->defer_wqfull);
1027 		return 0;
1028 	}
1029 
1030 	/* Give back resources */
1031 	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
1032 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1033 			"6153 FCP Drop IO x%x: Issue: %d\n",
1034 			ctxp->oxid, rc);
1035 
1036 	ctxp->wqeq->hba_wqidx = 0;
1037 	nvmewqeq->context2 = NULL;
1038 	nvmewqeq->context3 = NULL;
1039 	rc = -EBUSY;
1040 aerr:
1041 	return rc;
1042 }
1043 
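/* Transport callback invoked once the targetport has been fully deleted;
 * wakes any thread waiting for the unregistration to complete.
 */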
1044 static void
1045 lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
1046 {
1047 	struct lpfc_nvmet_tgtport *tport = targetport->private;
1048 
1049 	/* release any threads waiting for the unreg to complete */
1050 	if (tport->phba->targetport)
1051 		complete(tport->tport_unreg_cmp);
1052 }
1053 
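/**
 * lpfc_nvmet_xmt_fcp_abort - Abort an outstanding FCP exchange
 * @tgtport: Pointer to the nvmet-fc target port.
 * @req: Pointer to the transport FCP request to abort.
 *
 * Issues an ABTS for the exchange unless an abort is already in progress.
 * An unsolicited abort is used if the command was deferred for a full WQ
 * or has not been started yet; otherwise a solicited abort is issued.
 **/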
1054 static void
1055 lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
1056 			 struct nvmefc_tgt_fcp_req *req)
1057 {
1058 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1059 	struct lpfc_nvmet_rcv_ctx *ctxp =
1060 		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1061 	struct lpfc_hba *phba = ctxp->phba;
1062 	struct lpfc_queue *wq;
1063 	unsigned long flags;
1064 
1065 	if (phba->pport->load_flag & FC_UNLOADING)
1066 		return;
1067 
1071 	if (!ctxp->hdwq)
1072 		ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1073 
1074 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1075 			"6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
1076 			ctxp->oxid, ctxp->flag, ctxp->state);
1077 
1078 	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
1079 			 ctxp->oxid, ctxp->flag, ctxp->state);
1080 
1081 	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
1082 
1083 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1084 
1085 	/* Since iaab/iaar are NOT set, we need to check
1086 	 * if the firmware is in the process of aborting this IO
1087 	 */
1088 	if (ctxp->flag & (LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP)) {
1089 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1090 		return;
1091 	}
1092 	ctxp->flag |= LPFC_NVMET_ABORT_OP;
1093 
1094 	if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
1095 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1096 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1097 						 ctxp->oxid);
1098 		wq = ctxp->hdwq->io_wq;
1099 		lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
1100 		return;
1101 	}
1102 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1103 
1104 	/* A state of LPFC_NVMET_STE_RCV means we have just received
1105 	 * the NVME command and have not yet started processing it
1106 	 * (no IO WQEs have been issued on this exchange yet).
1107 	 */
1108 	if (ctxp->state == LPFC_NVMET_STE_RCV)
1109 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1110 						 ctxp->oxid);
1111 	else
1112 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1113 					       ctxp->oxid);
1114 }
1115 
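/**
 * lpfc_nvmet_xmt_fcp_release - Release an FCP exchange context
 * @tgtport: Pointer to the nvmet-fc target port.
 * @rsp: Pointer to the transport FCP request being released.
 *
 * Called when the transport is done with an exchange.  If an abort or an
 * exchange-busy condition is still pending, the release is deferred to the
 * abort path; otherwise the context buffer is reposted immediately.
 **/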
1116 static void
1117 lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
1118 			   struct nvmefc_tgt_fcp_req *rsp)
1119 {
1120 	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
1121 	struct lpfc_nvmet_rcv_ctx *ctxp =
1122 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1123 	struct lpfc_hba *phba = ctxp->phba;
1124 	unsigned long flags;
1125 	bool aborting = false;
1126 
1127 	spin_lock_irqsave(&ctxp->ctxlock, flags);
1128 	if (ctxp->flag & LPFC_NVMET_XBUSY)
1129 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1130 				"6027 NVMET release with XBUSY flag x%x"
1131 				" oxid x%x\n",
1132 				ctxp->flag, ctxp->oxid);
1133 	else if (ctxp->state != LPFC_NVMET_STE_DONE &&
1134 		 ctxp->state != LPFC_NVMET_STE_ABORT)
1135 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1136 				"6413 NVMET release bad state %d %d oxid x%x\n",
1137 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
1138 
1139 	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
1140 	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
1141 		aborting = true;
1142 		/* let the abort path do the real release */
1143 		lpfc_nvmet_defer_release(phba, ctxp);
1144 	}
1145 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
1146 
1147 	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
1148 			 ctxp->state, aborting);
1149 
1150 	atomic_inc(&lpfc_nvmep->xmt_fcp_release);
1151 	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
1152 
1153 	if (aborting)
1154 		return;
1155 
1156 	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1157 }
1158 
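/* Transport callback indicating a previously deferred command has now been
 * accepted.  The original receive buffer is freed here since a replacement
 * buffer was already reposted to the RQ.
 */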
1159 static void
1160 lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
1161 		     struct nvmefc_tgt_fcp_req *rsp)
1162 {
1163 	struct lpfc_nvmet_tgtport *tgtp;
1164 	struct lpfc_nvmet_rcv_ctx *ctxp =
1165 		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
1166 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
1167 	struct lpfc_hba *phba = ctxp->phba;
1168 	unsigned long iflag;
1169 
1171 	lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
1172 			 ctxp->oxid, ctxp->size, raw_smp_processor_id());
1173 
1174 	if (!nvmebuf) {
1175 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1176 				"6425 Defer rcv: no buffer oxid x%x: "
1177 				"flg %x ste %x\n",
1178 				ctxp->oxid, ctxp->flag, ctxp->state);
1179 		return;
1180 	}
1181 
1182 	tgtp = phba->targetport->private;
1183 	if (tgtp)
1184 		atomic_inc(&tgtp->rcv_fcp_cmd_defer);
1185 
1186 	/* Free the nvmebuf since a new buffer already replaced it */
1187 	nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1188 	spin_lock_irqsave(&ctxp->ctxlock, iflag);
1189 	ctxp->rqb_buffer = NULL;
1190 	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1191 }
1192 
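/* Transport callback for an NVME subsystem change event: notify the fabric
 * by issuing an RSCN from the physical port.
 */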
1193 static void
1194 lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
1195 {
1196 	struct lpfc_nvmet_tgtport *tgtp;
1197 	struct lpfc_hba *phba;
1198 	uint32_t rc;
1199 
1200 	tgtp = tgtport->private;
1201 	phba = tgtp->phba;
1202 
1203 	rc = lpfc_issue_els_rscn(phba->pport, 0);
1204 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1205 			"6420 NVMET subsystem change: Notification %s\n",
1206 			(rc) ? "Failed" : "Sent");
1207 }
1208 
1209 static struct nvmet_fc_target_template lpfc_tgttemplate = {
1210 	.targetport_delete = lpfc_nvmet_targetport_delete,
1211 	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
1212 	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
1213 	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
1214 	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,
1215 	.defer_rcv	= lpfc_nvmet_defer_rcv,
1216 	.discovery_event = lpfc_nvmet_discovery_event,
1217 
1218 	.max_hw_queues  = 1,
1219 	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1220 	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
1221 	.dma_boundary = 0xFFFFFFFF,
1222 
1223 	/* optional features */
1224 	.target_features = 0,
1225 	/* sizes of additional private data for data structures */
1226 	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
1227 };
1228 
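/* Free every context buffer on one per-CPU/MRQ list: return each SGL to the
 * nvmet SGL pool, release the associated iocbq and free the context memory.
 */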
1229 static void
1230 __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
1231 		struct lpfc_nvmet_ctx_info *infop)
1232 {
1233 	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
1234 	unsigned long flags;
1235 
1236 	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
1237 	list_for_each_entry_safe(ctx_buf, next_ctx_buf,
1238 				&infop->nvmet_ctx_list, list) {
1239 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1240 		list_del_init(&ctx_buf->list);
1241 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1242 
1243 		__lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
1244 		ctx_buf->sglq->state = SGL_FREED;
1245 		ctx_buf->sglq->ndlp = NULL;
1246 
1247 		spin_lock(&phba->sli4_hba.sgl_list_lock);
1248 		list_add_tail(&ctx_buf->sglq->list,
1249 				&phba->sli4_hba.lpfc_nvmet_sgl_list);
1250 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
1251 
1252 		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1253 		kfree(ctx_buf->context);
1254 	}
1255 	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
1256 }
1257 
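/* Tear down every per-CPU, per-MRQ context list and free the context info
 * array allocated by lpfc_nvmet_setup_io_context().
 */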
1258 static void
1259 lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
1260 {
1261 	struct lpfc_nvmet_ctx_info *infop;
1262 	int i, j;
1263 
1264 	/* The first context list, MRQ 0 CPU 0 */
1265 	infop = phba->sli4_hba.nvmet_ctx_info;
1266 	if (!infop)
1267 		return;
1268 
1269 	/* Cycle through the entire CPU context list for every MRQ */
1270 	for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
1271 		for_each_present_cpu(j) {
1272 			infop = lpfc_get_ctx_list(phba, j, i);
1273 			__lpfc_nvmet_clean_io_for_cpu(phba, infop);
1274 		}
1275 	}
1276 	kfree(phba->sli4_hba.nvmet_ctx_info);
1277 	phba->sli4_hba.nvmet_ctx_info = NULL;
1278 }
1279 
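/**
 * lpfc_nvmet_setup_io_context - Allocate per-XRI NVMET receive resources
 * @phba: pointer to lpfc hba data structure.
 *
 * Allocates the per-CPU, per-MRQ context lists and, for every NVMET XRI,
 * a context buffer, receive context, iocbq and SGL, spreading the contexts
 * evenly across the MRQ lists.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 **/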
1280 static int
1281 lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
1282 {
1283 	struct lpfc_nvmet_ctxbuf *ctx_buf;
1284 	struct lpfc_iocbq *nvmewqe;
1285 	union lpfc_wqe128 *wqe;
1286 	struct lpfc_nvmet_ctx_info *last_infop;
1287 	struct lpfc_nvmet_ctx_info *infop;
1288 	int i, j, idx, cpu;
1289 
1290 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
1291 			"6403 Allocate NVMET resources for %d XRIs\n",
1292 			phba->sli4_hba.nvmet_xri_cnt);
1293 
1294 	phba->sli4_hba.nvmet_ctx_info = kcalloc(
1295 		phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
1296 		sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
1297 	if (!phba->sli4_hba.nvmet_ctx_info) {
1298 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1299 				"6419 Failed allocate memory for "
1300 				"nvmet context lists\n");
1301 		return -ENOMEM;
1302 	}
1303 
1304 	/*
1305 	 * Assuming X CPUs in the system, and Y MRQs, allocate some
1306 	 * lpfc_nvmet_ctx_info structures as follows:
1307 	 *
1308 	 * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
1309 	 * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
1310 	 * ...
1311 	 * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
1312 	 *
1313 	 * Each line represents a MRQ "silo" containing an entry for
1314 	 * every CPU.
1315 	 *
1316 	 * MRQ X is initially assumed to be associated with CPU X, thus
1317 	 * contexts are initially distributed across all MRQs using
1318 	 * the MRQ index (N) as follows cpuN/mrqN. When contexts are
1319 	 * freed, they are freed to the MRQ silo based on the CPU number
1320 	 * of the IO completion. Thus a context that was allocated for MRQ A
1321 	 * whose IO completed on CPU B will be freed to cpuB/mrqA.
1322 	 */
1323 	for_each_possible_cpu(i) {
1324 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1325 			infop = lpfc_get_ctx_list(phba, i, j);
1326 			INIT_LIST_HEAD(&infop->nvmet_ctx_list);
1327 			spin_lock_init(&infop->nvmet_ctx_list_lock);
1328 			infop->nvmet_ctx_list_cnt = 0;
1329 		}
1330 	}
1331 
1332 	/*
1333 	 * Setup the next CPU context info ptr for each MRQ.
1334 	 * MRQ 0 will cycle thru CPUs 0 - X separately from
1335 	 * MRQ 1 cycling thru CPUs 0 - X, and so on.
1336 	 */
1337 	for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1338 		last_infop = lpfc_get_ctx_list(phba,
1339 					       cpumask_first(cpu_present_mask),
1340 					       j);
1341 		for (i = phba->sli4_hba.num_possible_cpu - 1;  i >= 0; i--) {
1342 			infop = lpfc_get_ctx_list(phba, i, j);
1343 			infop->nvmet_ctx_next_cpu = last_infop;
1344 			last_infop = infop;
1345 		}
1346 	}
1347 
1348 	/* For all nvmet xris, allocate resources needed to process a
1349 	 * received command on a per xri basis.
1350 	 */
1351 	idx = 0;
1352 	cpu = cpumask_first(cpu_present_mask);
1353 	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
1354 		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
1355 		if (!ctx_buf) {
1356 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1357 					"6404 Ran out of memory for NVMET\n");
1358 			return -ENOMEM;
1359 		}
1360 
1361 		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
1362 					   GFP_KERNEL);
1363 		if (!ctx_buf->context) {
1364 			kfree(ctx_buf);
1365 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1366 					"6405 Ran out of NVMET "
1367 					"context memory\n");
1368 			return -ENOMEM;
1369 		}
1370 		ctx_buf->context->ctxbuf = ctx_buf;
1371 		ctx_buf->context->state = LPFC_NVMET_STE_FREE;
1372 
1373 		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
1374 		if (!ctx_buf->iocbq) {
1375 			kfree(ctx_buf->context);
1376 			kfree(ctx_buf);
1377 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1378 					"6406 Ran out of NVMET iocb/WQEs\n");
1379 			return -ENOMEM;
1380 		}
1381 		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
1382 		nvmewqe = ctx_buf->iocbq;
1383 		wqe = &nvmewqe->wqe;
1384 
1385 		/* Initialize WQE */
1386 		memset(wqe, 0, sizeof(union lpfc_wqe));
1387 
1388 		ctx_buf->iocbq->context1 = NULL;
1389 		spin_lock(&phba->sli4_hba.sgl_list_lock);
1390 		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
1391 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
1392 		if (!ctx_buf->sglq) {
1393 			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
1394 			kfree(ctx_buf->context);
1395 			kfree(ctx_buf);
1396 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1397 					"6407 Ran out of NVMET XRIs\n");
1398 			return -ENOMEM;
1399 		}
1400 		INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
1401 
1402 		/*
1403 		 * Add ctx to MRQidx context list. Our initial assumption
1404 		 * is MRQidx will be associated with CPUidx. This association
1405 		 * can change on the fly.
1406 		 */
1407 		infop = lpfc_get_ctx_list(phba, cpu, idx);
1408 		spin_lock(&infop->nvmet_ctx_list_lock);
1409 		list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
1410 		infop->nvmet_ctx_list_cnt++;
1411 		spin_unlock(&infop->nvmet_ctx_list_lock);
1412 
1413 		/* Spread ctx structures evenly across all MRQs */
1414 		idx++;
1415 		if (idx >= phba->cfg_nvmet_mrq) {
1416 			idx = 0;
1417 			cpu = cpumask_first(cpu_present_mask);
1418 			continue;
1419 		}
1420 		cpu = cpumask_next(cpu, cpu_present_mask);
1421 		if (cpu == nr_cpu_ids)
1422 			cpu = cpumask_first(cpu_present_mask);
1423 
1424 	}
1425 
1426 	for_each_present_cpu(i) {
1427 		for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
1428 			infop = lpfc_get_ctx_list(phba, i, j);
1429 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
1430 					"6408 TOTAL NVMET ctx for CPU %d "
1431 					"MRQ %d: cnt %d nextcpu x%px\n",
1432 					i, j, infop->nvmet_ctx_list_cnt,
1433 					infop->nvmet_ctx_next_cpu);
1434 		}
1435 	}
1436 	return 0;
1437 }
1438 
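/**
 * lpfc_nvmet_create_targetport - Register the HBA as an NVME target port
 * @phba: pointer to lpfc hba data structure.
 *
 * Sets up the NVMET IO contexts and registers the physical port with the
 * nvmet-fc transport.  On success the targetport statistics counters are
 * zeroed; on failure the IO contexts are cleaned up and nvmet support is
 * disabled.
 *
 * Returns 0 on success or a negative error code.
 **/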
1439 int
1440 lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
1441 {
1442 	struct lpfc_vport  *vport = phba->pport;
1443 	struct lpfc_nvmet_tgtport *tgtp;
1444 	struct nvmet_fc_port_info pinfo;
1445 	int error;
1446 
1447 	if (phba->targetport)
1448 		return 0;
1449 
1450 	error = lpfc_nvmet_setup_io_context(phba);
1451 	if (error)
1452 		return error;
1453 
1454 	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
1455 	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
1456 	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
1457 	pinfo.port_id = vport->fc_myDID;
1458 
1459 	/* We need to tell the transport layer + 1 because it takes page
1460 	 * alignment into account. When space for the SGL is allocated we
1461 	 * allocate + 3: one for cmd, one for rsp and one for this alignment.
1462 	 */
1463 	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
1464 	lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
1465 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
1466 
1467 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1468 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
1469 					     &phba->pcidev->dev,
1470 					     &phba->targetport);
1471 #else
1472 	error = -ENOENT;
1473 #endif
1474 	if (error) {
1475 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
1476 				"6025 Cannot register NVME targetport x%x: "
1477 				"portnm %llx nodenm %llx segs %d qs %d\n",
1478 				error,
1479 				pinfo.port_name, pinfo.node_name,
1480 				lpfc_tgttemplate.max_sgl_segments,
1481 				lpfc_tgttemplate.max_hw_queues);
1482 		phba->targetport = NULL;
1483 		phba->nvmet_support = 0;
1484 
1485 		lpfc_nvmet_cleanup_io_context(phba);
1486 
1487 	} else {
1488 		tgtp = (struct lpfc_nvmet_tgtport *)
1489 			phba->targetport->private;
1490 		tgtp->phba = phba;
1491 
1492 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
1493 				"6026 Registered NVME "
1494 				"targetport: x%px, private x%px "
1495 				"portnm %llx nodenm %llx segs %d qs %d\n",
1496 				phba->targetport, tgtp,
1497 				pinfo.port_name, pinfo.node_name,
1498 				lpfc_tgttemplate.max_sgl_segments,
1499 				lpfc_tgttemplate.max_hw_queues);
1500 
1501 		atomic_set(&tgtp->rcv_ls_req_in, 0);
1502 		atomic_set(&tgtp->rcv_ls_req_out, 0);
1503 		atomic_set(&tgtp->rcv_ls_req_drop, 0);
1504 		atomic_set(&tgtp->xmt_ls_abort, 0);
1505 		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
1506 		atomic_set(&tgtp->xmt_ls_rsp, 0);
1507 		atomic_set(&tgtp->xmt_ls_drop, 0);
1508 		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
1509 		atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
1510 		atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
1511 		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
1512 		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
1513 		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
1514 		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
1515 		atomic_set(&tgtp->xmt_fcp_drop, 0);
1516 		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
1517 		atomic_set(&tgtp->xmt_fcp_read, 0);
1518 		atomic_set(&tgtp->xmt_fcp_write, 0);
1519 		atomic_set(&tgtp->xmt_fcp_rsp, 0);
1520 		atomic_set(&tgtp->xmt_fcp_release, 0);
1521 		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
1522 		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
1523 		atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
1524 		atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
1525 		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
1526 		atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
1527 		atomic_set(&tgtp->xmt_fcp_abort, 0);
1528 		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
1529 		atomic_set(&tgtp->xmt_abort_unsol, 0);
1530 		atomic_set(&tgtp->xmt_abort_sol, 0);
1531 		atomic_set(&tgtp->xmt_abort_rsp, 0);
1532 		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
1533 		atomic_set(&tgtp->defer_ctx, 0);
1534 		atomic_set(&tgtp->defer_fod, 0);
1535 		atomic_set(&tgtp->defer_wqfull, 0);
1536 	}
1537 	return error;
1538 }
1539 
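/* Refresh the registered targetport's port_id with the vport's current DID. */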
1540 int
1541 lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
1542 {
1543 	struct lpfc_vport  *vport = phba->pport;
1544 
1545 	if (!phba->targetport)
1546 		return 0;
1547 
1548 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
1549 			 "6007 Update NVMET port x%px did x%x\n",
1550 			 phba->targetport, vport->fc_myDID);
1551 
1552 	phba->targetport->port_id = vport->fc_myDID;
1553 	return 0;
1554 }
1555 
1556 /**
1557  * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
1558  * @phba: pointer to lpfc hba data structure.
1559  * @axri: pointer to the nvmet xri abort wcqe structure.
1560  *
1561  * This routine is invoked by the worker thread to process a SLI4 fast-path
1562  * NVMET aborted xri.
1563  **/
1564 void
1565 lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
1566 			    struct sli4_wcqe_xri_aborted *axri)
1567 {
1568 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1569 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
1570 	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
1571 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1572 	struct lpfc_nvmet_tgtport *tgtp;
1573 	struct nvmefc_tgt_fcp_req *req = NULL;
1574 	struct lpfc_nodelist *ndlp;
1575 	unsigned long iflag = 0;
1576 	int rrq_empty = 0;
1577 	bool released = false;
1578 
1579 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1580 			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
1581 
1582 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1583 		return;
1584 
1585 	if (phba->targetport) {
1586 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1587 		atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
1588 	}
1589 
1590 	spin_lock_irqsave(&phba->hbalock, iflag);
1591 	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1592 	list_for_each_entry_safe(ctxp, next_ctxp,
1593 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1594 				 list) {
1595 		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
1596 			continue;
1597 
1598 		spin_lock(&ctxp->ctxlock);
1599 		/* Check if we already received a free context call
1600 		 * and we have completed processing an abort situation.
1601 		 */
1602 		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
1603 		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
1604 			list_del_init(&ctxp->list);
1605 			released = true;
1606 		}
1607 		ctxp->flag &= ~LPFC_NVMET_XBUSY;
1608 		spin_unlock(&ctxp->ctxlock);
1609 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1610 
1611 		rrq_empty = list_empty(&phba->active_rrq_list);
1612 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1613 		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1614 		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1615 		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
1616 		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
1617 			lpfc_set_rrq_active(phba, ndlp,
1618 				ctxp->ctxbuf->sglq->sli4_lxritag,
1619 				rxid, 1);
1620 			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
1621 		}
1622 
1623 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1624 				"6318 XB aborted oxid x%x flg x%x (%x)\n",
1625 				ctxp->oxid, ctxp->flag, released);
1626 		if (released)
1627 			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1628 
1629 		if (rrq_empty)
1630 			lpfc_worker_wake_up(phba);
1631 		return;
1632 	}
1633 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1634 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1635 
1636 	ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
1637 	if (ctxp) {
1638 		/*
1639 		 *  Abort already done by FW, so BA_ACC sent.
1640 		 *  However, the transport may be unaware.
1641 		 */
1642 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1643 				"6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
1644 				"flag x%x oxid x%x rxid x%x\n",
1645 				xri, ctxp->state, ctxp->flag, ctxp->oxid,
1646 				rxid);
1647 
1648 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1649 		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1650 		ctxp->state = LPFC_NVMET_STE_ABORT;
1651 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1652 
1653 		lpfc_nvmeio_data(phba,
1654 				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1655 				 xri, raw_smp_processor_id(), 0);
1656 
1657 		req = &ctxp->ctx.fcp_req;
1658 		if (req)
1659 			nvmet_fc_rcv_fcp_abort(phba->targetport, req);
1660 	}
1661 #endif
1662 }
1663 
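/**
 * lpfc_nvmet_rcv_unsol_abort - Handle an unsolicited ABTS for an NVMET exchange
 * @vport: pointer to the lpfc vport the frame was received on.
 * @fc_hdr: pointer to the FC frame header of the ABTS.
 *
 * Locates the exchange by OX_ID and S_ID on the ABTS list, the context wait
 * list or the active list, notifies the transport of the abort, and responds
 * with BA_ACC when the exchange is found.
 **/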
1664 int
1665 lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
1666 			   struct fc_frame_header *fc_hdr)
1667 {
1668 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1669 	struct lpfc_hba *phba = vport->phba;
1670 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1671 	struct nvmefc_tgt_fcp_req *rsp;
1672 	uint32_t sid;
1673 	uint16_t oxid, xri;
1674 	unsigned long iflag = 0;
1675 
1676 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1677 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1678 
1679 	spin_lock_irqsave(&phba->hbalock, iflag);
1680 	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1681 	list_for_each_entry_safe(ctxp, next_ctxp,
1682 				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1683 				 list) {
1684 		if (ctxp->oxid != oxid || ctxp->sid != sid)
1685 			continue;
1686 
1687 		xri = ctxp->ctxbuf->sglq->sli4_xritag;
1688 
1689 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1690 		spin_unlock_irqrestore(&phba->hbalock, iflag);
1691 
1692 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1693 		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
1694 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1695 
1696 		lpfc_nvmeio_data(phba,
1697 			"NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1698 			xri, raw_smp_processor_id(), 0);
1699 
1700 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1701 				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
1702 
1703 		rsp = &ctxp->ctx.fcp_req;
1704 		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
1705 
1706 		/* Respond with BA_ACC accordingly */
1707 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1708 		return 0;
1709 	}
1710 	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1711 	spin_unlock_irqrestore(&phba->hbalock, iflag);
1712 
1713 	/* check the wait list */
1714 	if (phba->sli4_hba.nvmet_io_wait_cnt) {
1715 		struct rqb_dmabuf *nvmebuf;
1716 		struct fc_frame_header *fc_hdr_tmp;
1717 		u32 sid_tmp;
1718 		u16 oxid_tmp;
1719 		bool found = false;
1720 
1721 		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
1722 
1723 		/* match by oxid and s_id */
1724 		list_for_each_entry(nvmebuf,
1725 				    &phba->sli4_hba.lpfc_nvmet_io_wait_list,
1726 				    hbuf.list) {
1727 			fc_hdr_tmp = (struct fc_frame_header *)
1728 					(nvmebuf->hbuf.virt);
1729 			oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
1730 			sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
1731 			if (oxid_tmp != oxid || sid_tmp != sid)
1732 				continue;
1733 
1734 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1735 					"6321 NVMET Rcv ABTS oxid x%x from x%x "
1736 					"is waiting for a ctxp\n",
1737 					oxid, sid);
1738 
1739 			list_del_init(&nvmebuf->hbuf.list);
1740 			phba->sli4_hba.nvmet_io_wait_cnt--;
1741 			found = true;
1742 			break;
1743 		}
1744 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
1745 				       iflag);
1746 
1747 		/* free buffer since already posted a new DMA buffer to RQ */
1748 		if (found) {
1749 			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
1750 			/* Respond with BA_ACC accordingly */
1751 			lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1752 			return 0;
1753 		}
1754 	}
1755 
1756 	/* check active list */
1757 	ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
1758 	if (ctxp) {
1759 		xri = ctxp->ctxbuf->sglq->sli4_xritag;
1760 
1761 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
1762 		ctxp->flag |= (LPFC_NVMET_ABTS_RCV | LPFC_NVMET_ABORT_OP);
1763 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1764 
1765 		lpfc_nvmeio_data(phba,
1766 				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
1767 				 xri, raw_smp_processor_id(), 0);
1768 
1769 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1770 				"6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
1771 				"flag x%x state x%x\n",
1772 				ctxp->oxid, xri, ctxp->flag, ctxp->state);
1773 
1774 		if (ctxp->flag & LPFC_NVMET_TNOTIFY) {
1775 			/* Notify the transport */
1776 			nvmet_fc_rcv_fcp_abort(phba->targetport,
1777 					       &ctxp->ctx.fcp_req);
1778 		} else {
1779 			cancel_work_sync(&ctxp->ctxbuf->defer_work);
1780 			spin_lock_irqsave(&ctxp->ctxlock, iflag);
1781 			lpfc_nvmet_defer_release(phba, ctxp);
1782 			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
1783 		}
1784 		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
1785 					       ctxp->oxid);
1786 
1787 		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
1788 		return 0;
1789 	}
1790 
1791 	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
1792 			 oxid, raw_smp_processor_id(), 1);
1793 
1794 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1795 			"6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
1796 
1797 	/* Respond with BA_RJT accordingly */
1798 	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
1799 #endif
1800 	return 0;
1801 }
1802 
1803 static void
1804 lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
1805 			struct lpfc_nvmet_rcv_ctx *ctxp)
1806 {
1807 	struct lpfc_sli_ring *pring;
1808 	struct lpfc_iocbq *nvmewqeq;
1809 	struct lpfc_iocbq *next_nvmewqeq;
1810 	unsigned long iflags;
1811 	struct lpfc_wcqe_complete wcqe;
1812 	struct lpfc_wcqe_complete *wcqep;
1813 
1814 	pring = wq->pring;
1815 	wcqep = &wcqe;
1816 
1817 	/* Fake an ABORT error code back to cmpl routine */
1818 	memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
1819 	bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
1820 	wcqep->parameter = IOERR_ABORT_REQUESTED;
1821 
1822 	spin_lock_irqsave(&pring->ring_lock, iflags);
1823 	list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
1824 				 &wq->wqfull_list, list) {
1825 		if (ctxp) {
1826 			/* Checking for a specific IO to flush */
1827 			if (nvmewqeq->context2 == ctxp) {
1828 				list_del(&nvmewqeq->list);
1829 				spin_unlock_irqrestore(&pring->ring_lock,
1830 						       iflags);
1831 				lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
1832 							  wcqep);
1833 				return;
1834 			}
1835 			continue;
1836 		} else {
1837 			/* Flush all IOs */
1838 			list_del(&nvmewqeq->list);
1839 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
1840 			lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
1841 			spin_lock_irqsave(&pring->ring_lock, iflags);
1842 		}
1843 	}
1844 	if (!ctxp)
1845 		wq->q_flag &= ~HBA_NVMET_WQFULL;
1846 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
1847 }
1848 
1849 void
1850 lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
1851 			  struct lpfc_queue *wq)
1852 {
1853 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1854 	struct lpfc_sli_ring *pring;
1855 	struct lpfc_iocbq *nvmewqeq;
1856 	struct lpfc_nvmet_rcv_ctx *ctxp;
1857 	unsigned long iflags;
1858 	int rc;
1859 
1860 	/*
1861 	 * Some WQE slots are available, so try to re-issue anything
1862 	 * on the WQ wqfull_list.
1863 	 */
1864 	pring = wq->pring;
1865 	spin_lock_irqsave(&pring->ring_lock, iflags);
1866 	while (!list_empty(&wq->wqfull_list)) {
1867 		list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
1868 				 list);
1869 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
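		/*
		 * Drop the ring lock across the WQE submission; it is
		 * re-acquired before the wqfull_list is examined again.
		 */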
1870 		ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
1871 		rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
1872 		spin_lock_irqsave(&pring->ring_lock, iflags);
1873 		if (rc == -EBUSY) {
1874 			/* WQ was full again, so put it back on the list */
1875 			list_add(&nvmewqeq->list, &wq->wqfull_list);
1876 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
1877 			return;
1878 		}
1879 		if (rc == WQE_SUCCESS) {
1880 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1881 			if (ctxp->ts_cmd_nvme) {
1882 				if (ctxp->ctx.fcp_req.op == NVMET_FCOP_RSP)
1883 					ctxp->ts_status_wqput = ktime_get_ns();
1884 				else
1885 					ctxp->ts_data_wqput = ktime_get_ns();
1886 			}
1887 #endif
1888 		} else {
1889 			WARN_ON(rc);
1890 		}
1891 	}
1892 	wq->q_flag &= ~HBA_NVMET_WQFULL;
1893 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
1894 
1895 #endif
1896 }
1897 
1898 void
1899 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
1900 {
1901 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1902 	struct lpfc_nvmet_tgtport *tgtp;
1903 	struct lpfc_queue *wq;
1904 	uint32_t qidx;
1905 	DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
1906 
1907 	if (phba->nvmet_support == 0)
1908 		return;
1909 	if (phba->targetport) {
1910 		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1911 		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
1912 			wq = phba->sli4_hba.hdwq[qidx].io_wq;
1913 			lpfc_nvmet_wqfull_flush(phba, wq, NULL);
1914 		}
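		/*
		 * Unregister from the NVME target transport and wait up to
		 * LPFC_NVMET_WAIT_TMO ms for the unregister to complete
		 * before cleaning up the IO context pool.
		 */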
1915 		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
1916 		nvmet_fc_unregister_targetport(phba->targetport);
1917 		if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
1918 					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
1919 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1920 					"6179 Unreg targetport x%px timeout "
1921 					"reached.\n", phba->targetport);
1922 		lpfc_nvmet_cleanup_io_context(phba);
1923 	}
1924 	phba->targetport = NULL;
1925 #endif
1926 }
1927 
1928 /**
1929  * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
1930  * @phba: pointer to lpfc hba data structure.
1931  * @pring: pointer to a SLI ring.
1932  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
1933  *
1934  * This routine processes an unsolicited NVME LS request received in the
1935  * given buffer. It allocates a receive context (lpfc_nvmet_rcv_ctx) for
1936  * the exchange and passes the LS payload to the NVME target transport
1937  * via nvmet_fc_rcv_ls_req(). If the request cannot be delivered to the
1938  * transport, the receive buffer is freed and an ABTS is issued for the
1939  * exchange.
1940  **/
1941 static void
1942 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1943 			   struct hbq_dmabuf *nvmebuf)
1944 {
1945 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
1946 	struct lpfc_nvmet_tgtport *tgtp;
1947 	struct fc_frame_header *fc_hdr;
1948 	struct lpfc_nvmet_rcv_ctx *ctxp;
1949 	uint32_t *payload;
1950 	uint32_t size, oxid, sid, rc;
1951 
1952 
1953 	if (!nvmebuf || !phba->targetport) {
1954 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1955 				"6154 LS Drop IO\n");
1956 		oxid = 0;
1957 		size = 0;
1958 		sid = 0;
1959 		ctxp = NULL;
1960 		goto dropit;
1961 	}
1962 
1963 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
1964 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
1965 
1966 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1967 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
1968 	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
1969 	sid = sli4_sid_from_fc_hdr(fc_hdr);
1970 
1971 	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
1972 	if (ctxp == NULL) {
1973 		atomic_inc(&tgtp->rcv_ls_req_drop);
1974 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1975 				"6155 LS Drop IO x%x: Alloc\n",
1976 				oxid);
1977 dropit:
1978 		lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
1979 				 "xri x%x sz %d from %06x\n",
1980 				 oxid, size, sid);
1981 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
1982 		return;
1983 	}
1984 	ctxp->phba = phba;
1985 	ctxp->size = size;
1986 	ctxp->oxid = oxid;
1987 	ctxp->sid = sid;
1988 	ctxp->wqeq = NULL;
1989 	ctxp->state = LPFC_NVMET_STE_LS_RCV;
1990 	ctxp->entry_cnt = 1;
1991 	ctxp->rqb_buffer = (void *)nvmebuf;
1992 	ctxp->hdwq = &phba->sli4_hba.hdwq[0];
1993 
1994 	lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
1995 			 oxid, size, sid);
1996 	/*
1997 	 * The calling sequence should be:
1998 	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
1999 	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
2000 	 */
2001 	atomic_inc(&tgtp->rcv_ls_req_in);
2002 	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
2003 				 payload, size);
2004 
2005 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2006 			"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
2007 			"%08x %08x %08x\n", size, rc,
2008 			*payload, *(payload+1), *(payload+2),
2009 			*(payload+3), *(payload+4), *(payload+5));
2010 
2011 	if (rc == 0) {
2012 		atomic_inc(&tgtp->rcv_ls_req_out);
2013 		return;
2014 	}
2015 
2016 	lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
2017 			 oxid, size, sid);
2018 
2019 	atomic_inc(&tgtp->rcv_ls_req_drop);
2020 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2021 			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
2022 			ctxp->oxid, rc);
2023 
2024 	/* We assume a received cmd ALWAYS fits into 1 buffer */
2025 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2026 
2027 	atomic_inc(&tgtp->xmt_ls_abort);
2028 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
2029 #endif
2030 }
2031 
2032 static void
2033 lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
2034 {
2035 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2036 	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
2037 	struct lpfc_hba *phba = ctxp->phba;
2038 	struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
2039 	struct lpfc_nvmet_tgtport *tgtp;
2040 	uint32_t *payload, qno;
2041 	uint32_t rc;
2042 	unsigned long iflags;
2043 
2044 	if (!nvmebuf) {
2045 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2046 			"6159 process_rcv_fcp_req, nvmebuf is NULL, "
2047 			"oxid: x%x flg: x%x state: x%x\n",
2048 			ctxp->oxid, ctxp->flag, ctxp->state);
2049 		spin_lock_irqsave(&ctxp->ctxlock, iflags);
2050 		lpfc_nvmet_defer_release(phba, ctxp);
2051 		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2052 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
2053 						 ctxp->oxid);
2054 		return;
2055 	}
2056 
2057 	if (ctxp->flag & LPFC_NVMET_ABTS_RCV) {
2058 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2059 				"6324 IO oxid x%x aborted\n",
2060 				ctxp->oxid);
2061 		return;
2062 	}
2063 
2064 	payload = (uint32_t *)(nvmebuf->dbuf.virt);
2065 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
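	/*
	 * Set TNOTIFY before calling into the transport; an ABTS received
	 * later for this exchange will then be forwarded to the transport
	 * via nvmet_fc_rcv_fcp_abort().
	 */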
2066 	ctxp->flag |= LPFC_NVMET_TNOTIFY;
2067 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2068 	if (ctxp->ts_isr_cmd)
2069 		ctxp->ts_cmd_nvme = ktime_get_ns();
2070 #endif
2071 	/*
2072 	 * The calling sequence should be:
2073 	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
2074 	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
2075 	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
2076 	 * the NVME command / FC header has been stored.
2077 	 * A buffer has already been reposted for this IO, so just free
2078 	 * the nvmebuf.
2079 	 */
2080 	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
2081 				  payload, ctxp->size);
2082 	/* Process FCP command */
2083 	if (rc == 0) {
2084 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
2085 		spin_lock_irqsave(&ctxp->ctxlock, iflags);
2086 		if ((ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) ||
2087 		    (nvmebuf != ctxp->rqb_buffer)) {
2088 			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2089 			return;
2090 		}
2091 		ctxp->rqb_buffer = NULL;
2092 		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2093 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
2094 		return;
2095 	}
2096 
2097 	/* Processing of FCP command is deferred */
2098 	if (rc == -EOVERFLOW) {
2099 		lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
2100 				 "from %06x\n",
2101 				 ctxp->oxid, ctxp->size, ctxp->sid);
2102 		atomic_inc(&tgtp->rcv_fcp_cmd_out);
2103 		atomic_inc(&tgtp->defer_fod);
2104 		spin_lock_irqsave(&ctxp->ctxlock, iflags);
2105 		if (ctxp->flag & LPFC_NVMET_CTX_REUSE_WQ) {
2106 			spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2107 			return;
2108 		}
2109 		spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2110 		/*
2111 		 * Post a replacement DMA buffer to RQ and defer
2112 		 * freeing rcv buffer till .defer_rcv callback
2113 		 */
2114 		qno = nvmebuf->idx;
2115 		lpfc_post_rq_buffer(
2116 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2117 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2118 		return;
2119 	}
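	/*
	 * Any other error: drop the IO, mark the context for deferred
	 * release, and abort the exchange.
	 */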
2120 	ctxp->flag &= ~LPFC_NVMET_TNOTIFY;
2121 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2122 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2123 			"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
2124 			ctxp->oxid, rc,
2125 			atomic_read(&tgtp->rcv_fcp_cmd_in),
2126 			atomic_read(&tgtp->rcv_fcp_cmd_out),
2127 			atomic_read(&tgtp->xmt_fcp_release));
2128 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
2129 			 ctxp->oxid, ctxp->size, ctxp->sid);
2130 	spin_lock_irqsave(&ctxp->ctxlock, iflags);
2131 	lpfc_nvmet_defer_release(phba, ctxp);
2132 	spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
2133 	lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
2134 #endif
2135 }
2136 
2137 static void
2138 lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
2139 {
2140 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2141 	struct lpfc_nvmet_ctxbuf *ctx_buf =
2142 		container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
2143 
2144 	lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2145 #endif
2146 }
2147 
2148 static struct lpfc_nvmet_ctxbuf *
2149 lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
2150 			     struct lpfc_nvmet_ctx_info *current_infop)
2151 {
2152 #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
2153 	struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
2154 	struct lpfc_nvmet_ctx_info *get_infop;
2155 	int i;
2156 
2157 	/*
2158 	 * The current_infop for the MRQ on which an NVME command IU was
2159 	 * received is empty. Our goal is to replenish this MRQ's context
2160 	 * list from another CPU's list.
2161 	 *
2162 	 * First we need to pick a context list to start looking on.
2163 	 * nvmet_ctx_start_cpu had available contexts the last time we
2164 	 * needed to replenish this CPU; nvmet_ctx_next_cpu is simply the
2165 	 * next sequential CPU for this MRQ.
2166 	 */
2167 	if (current_infop->nvmet_ctx_start_cpu)
2168 		get_infop = current_infop->nvmet_ctx_start_cpu;
2169 	else
2170 		get_infop = current_infop->nvmet_ctx_next_cpu;
2171 
2172 	for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
2173 		if (get_infop == current_infop) {
2174 			get_infop = get_infop->nvmet_ctx_next_cpu;
2175 			continue;
2176 		}
2177 		spin_lock(&get_infop->nvmet_ctx_list_lock);
2178 
2179 		/* Just take the entire context list, if there are any */
2180 		if (get_infop->nvmet_ctx_list_cnt) {
2181 			list_splice_init(&get_infop->nvmet_ctx_list,
2182 				    &current_infop->nvmet_ctx_list);
2183 			current_infop->nvmet_ctx_list_cnt =
2184 				get_infop->nvmet_ctx_list_cnt - 1;
2185 			get_infop->nvmet_ctx_list_cnt = 0;
2186 			spin_unlock(&get_infop->nvmet_ctx_list_lock);
2187 
2188 			current_infop->nvmet_ctx_start_cpu = get_infop;
2189 			list_remove_head(&current_infop->nvmet_ctx_list,
2190 					 ctx_buf, struct lpfc_nvmet_ctxbuf,
2191 					 list);
2192 			return ctx_buf;
2193 		}
2194 
2195 		/* Otherwise, move on to the next CPU for this MRQ */
2196 		spin_unlock(&get_infop->nvmet_ctx_list_lock);
2197 		get_infop = get_infop->nvmet_ctx_next_cpu;
2198 	}
2199 
2200 #endif
2201 	/* Nothing found, all contexts for the MRQ are in-flight */
2202 	return NULL;
2203 }
2204 
2205 /**
2206  * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
2207  * @phba: pointer to lpfc hba data structure.
2208  * @idx: relative index of MRQ vector
2209  * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
2210  * @isr_timestamp: in jiffies.
2211  * @cqflag: cq processing information regarding workload.
2212  *
2213  * This routine processes an unsolicited NVME FCP command IU received on
2214  * an MRQ. It obtains a free receive context for the exchange (replenishing
2215  * the per-CPU context list from another CPU if necessary), initializes the
2216  * context, and delivers the command to the NVME target transport either
2217  * inline or, when the CQ is under load, from the deferred work queue. If
2218  * no context is available, the command is queued on the IO wait list.
2219  **/
2220 static void
2221 lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
2222 			    uint32_t idx,
2223 			    struct rqb_dmabuf *nvmebuf,
2224 			    uint64_t isr_timestamp,
2225 			    uint8_t cqflag)
2226 {
2227 	struct lpfc_nvmet_rcv_ctx *ctxp;
2228 	struct lpfc_nvmet_tgtport *tgtp;
2229 	struct fc_frame_header *fc_hdr;
2230 	struct lpfc_nvmet_ctxbuf *ctx_buf;
2231 	struct lpfc_nvmet_ctx_info *current_infop;
2232 	uint32_t size, oxid, sid, qno;
2233 	unsigned long iflag;
2234 	int current_cpu;
2235 
2236 	if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
2237 		return;
2238 
2239 	ctx_buf = NULL;
2240 	if (!nvmebuf || !phba->targetport) {
2241 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2242 				"6157 NVMET FCP Drop IO\n");
2243 		if (nvmebuf)
2244 			lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2245 		return;
2246 	}
2247 
2248 	/*
2249 	 * Get a pointer to the context list for this MRQ based on
2250 	 * the CPU this MRQ IRQ is associated with. If the CPU association
2251 	 * changes from our initial assumption, the context list could
2252 	 * be empty, thus it would need to be replenished with the
2253 	 * context list from another CPU for this MRQ.
2254 	 */
2255 	current_cpu = raw_smp_processor_id();
2256 	current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
2257 	spin_lock_irqsave(&current_infop->nvmet_ctx_list_lock, iflag);
2258 	if (current_infop->nvmet_ctx_list_cnt) {
2259 		list_remove_head(&current_infop->nvmet_ctx_list,
2260 				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
2261 		current_infop->nvmet_ctx_list_cnt--;
2262 	} else {
2263 		ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
2264 	}
2265 	spin_unlock_irqrestore(&current_infop->nvmet_ctx_list_lock, iflag);
2266 
2267 	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
2268 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
2269 	size = nvmebuf->bytes_recv;
2270 
2271 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
2272 	if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
2273 		this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
2274 		if (idx != current_cpu)
2275 			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2276 					"6703 CPU Check rcv: "
2277 					"cpu %d expect %d\n",
2278 					current_cpu, idx);
2279 	}
2280 #endif
2281 
2282 	lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d CPU %02x\n",
2283 			 oxid, size, raw_smp_processor_id());
2284 
2285 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2286 
2287 	if (!ctx_buf) {
2288 		/* Queue this NVME IO to process later */
2289 		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
2290 		list_add_tail(&nvmebuf->hbuf.list,
2291 			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
2292 		phba->sli4_hba.nvmet_io_wait_cnt++;
2293 		phba->sli4_hba.nvmet_io_wait_total++;
2294 		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
2295 				       iflag);
2296 
2297 		/* Post a brand new DMA buffer to RQ */
2298 		qno = nvmebuf->idx;
2299 		lpfc_post_rq_buffer(
2300 			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
2301 			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
2302 
2303 		atomic_inc(&tgtp->defer_ctx);
2304 		return;
2305 	}
2306 
2307 	sid = sli4_sid_from_fc_hdr(fc_hdr);
2308 
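	/*
	 * Bind the command to the receive context: add the context to the
	 * active list and initialize its per-IO fields before it is handed
	 * to the transport.
	 */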
2309 	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
2310 	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
2311 	list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
2312 	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
2313 	if (ctxp->state != LPFC_NVMET_STE_FREE) {
2314 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2315 				"6414 NVMET Context corrupt %d %d oxid x%x\n",
2316 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
2317 	}
2318 	ctxp->wqeq = NULL;
2319 	ctxp->offset = 0;
2320 	ctxp->phba = phba;
2321 	ctxp->size = size;
2322 	ctxp->oxid = oxid;
2323 	ctxp->sid = sid;
2324 	ctxp->idx = idx;
2325 	ctxp->state = LPFC_NVMET_STE_RCV;
2326 	ctxp->entry_cnt = 1;
2327 	ctxp->flag = 0;
2328 	ctxp->ctxbuf = ctx_buf;
2329 	ctxp->rqb_buffer = (void *)nvmebuf;
2330 	ctxp->hdwq = NULL;
2331 	spin_lock_init(&ctxp->ctxlock);
2332 
2333 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
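	/* Capture the ISR timestamp and reset the per-IO latency stamps */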
2334 	if (isr_timestamp)
2335 		ctxp->ts_isr_cmd = isr_timestamp;
2336 	ctxp->ts_cmd_nvme = 0;
2337 	ctxp->ts_nvme_data = 0;
2338 	ctxp->ts_data_wqput = 0;
2339 	ctxp->ts_isr_data = 0;
2340 	ctxp->ts_data_nvme = 0;
2341 	ctxp->ts_nvme_status = 0;
2342 	ctxp->ts_status_wqput = 0;
2343 	ctxp->ts_isr_status = 0;
2344 	ctxp->ts_status_nvme = 0;
2345 #endif
2346 
2347 	atomic_inc(&tgtp->rcv_fcp_cmd_in);
2348 	/* check for cq processing load */
2349 	if (!cqflag) {
2350 		lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
2351 		return;
2352 	}
2353 
2354 	if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
2355 		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
2356 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
2357 				"6325 Unable to queue work for oxid x%x. "
2358 				"FCP Drop IO [x%x x%x x%x]\n",
2359 				ctxp->oxid,
2360 				atomic_read(&tgtp->rcv_fcp_cmd_in),
2361 				atomic_read(&tgtp->rcv_fcp_cmd_out),
2362 				atomic_read(&tgtp->xmt_fcp_release));
2363 
2364 		spin_lock_irqsave(&ctxp->ctxlock, iflag);
2365 		lpfc_nvmet_defer_release(phba, ctxp);
2366 		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
2367 		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
2368 	}
2369 }
2370 
2371 /**
2372  * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
2373  * @phba: pointer to lpfc hba data structure.
2374  * @pring: pointer to a SLI ring.
2375  * @piocb: pointer to the driver iocb carrying the received LS buffer.
2376  *
2377  * This routine is used to process an unsolicited event received from a SLI
2378  * (Service Level Interface) ring. The actual processing of the data buffer
2379  * associated with the unsolicited event is done by invoking the routine
2380  * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
2381  * SLI RQ on which the unsolicited event was received.
2382  **/
2383 void
2384 lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2385 			  struct lpfc_iocbq *piocb)
2386 {
2387 	struct lpfc_dmabuf *d_buf;
2388 	struct hbq_dmabuf *nvmebuf;
2389 
2390 	d_buf = piocb->context2;
2391 	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2392 
2393 	if (!nvmebuf) {
2394 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2395 				"3015 LS Drop IO\n");
2396 		return;
2397 	}
2398 	if (phba->nvmet_support == 0) {
2399 		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
2400 		return;
2401 	}
2402 	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
2403 }
2404 
2405 /**
2406  * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
2407  * @phba: pointer to lpfc hba data structure.
2408  * @idx: relative index of MRQ vector
2409  * @nvmebuf: pointer to received nvme data structure.
2410  * @isr_timestamp: in jiffies.
2411  * @cqflag: cq processing information regarding workload.
2412  *
2413  * This routine is used to process an unsolicited event received from a SLI
2414  * (Service Level Interface) ring. The actual processing of the data buffer
2415  * associated with the unsolicited event is done by invoking the routine
2416  * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
2417  * SLI RQ on which the unsolicited event was received.
2418  **/
2419 void
2420 lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
2421 			   uint32_t idx,
2422 			   struct rqb_dmabuf *nvmebuf,
2423 			   uint64_t isr_timestamp,
2424 			   uint8_t cqflag)
2425 {
2426 	if (!nvmebuf) {
2427 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2428 				"3167 NVMET FCP Drop IO\n");
2429 		return;
2430 	}
2431 	if (phba->nvmet_support == 0) {
2432 		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
2433 		return;
2434 	}
2435 	lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
2436 }
2437 
2438 /**
2439  * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
2440  * @phba: pointer to a host N_Port data structure.
2441  * @ctxp: Context info for NVME LS Request
2442  * @rspbuf: DMA address of the NVME LS response payload.
2443  * @rspsize: size of the NVME LS response payload.
2444  *
2445  * This routine allocates an lpfc WQE data structure (iocbq) from the
2446  * driver's free list and prepares an XMIT_SEQUENCE64 WQE to transmit
2447  * the NVME LS response described by @rspbuf and @rspsize for the
2448  * exchange tracked by @ctxp. It first verifies that the link is up
2449  * and that a usable ndlp exists for the source N_Port of the LS
2450  * request; if either check fails, no WQE is prepared. It then fills
2451  * in the Buffer Descriptor Entry (BDE) for the response payload and
2452  * the common WQE words (RPI, XRI, request tag and the received OX_ID
2453  * of the exchange). The reference count on the ndlp is incremented by
2454  * 1 and the ndlp reference is stored in context1 of the WQE data
2455  * structure so that the command's completion handler can access the
2456  * ndlp later. The prepared WQE is also linked to the receive context
2457  * via its context2 field.
2458  *
2459  * Return code
2460  *   Pointer to the newly allocated/prepared nvme wqe data structure
2461  *   NULL - when nvme wqe data structure allocation/preparation failed
2462  **/
2463 static struct lpfc_iocbq *
2464 lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
2465 		       struct lpfc_nvmet_rcv_ctx *ctxp,
2466 		       dma_addr_t rspbuf, uint16_t rspsize)
2467 {
2468 	struct lpfc_nodelist *ndlp;
2469 	struct lpfc_iocbq *nvmewqe;
2470 	union lpfc_wqe128 *wqe;
2471 
2472 	if (!lpfc_is_link_up(phba)) {
2473 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2474 				"6104 NVMET prep LS wqe: link err: "
2475 				"NPORT x%x oxid:x%x ste %d\n",
2476 				ctxp->sid, ctxp->oxid, ctxp->state);
2477 		return NULL;
2478 	}
2479 
2480 	/* Allocate buffer for  command wqe */
2481 	nvmewqe = lpfc_sli_get_iocbq(phba);
2482 	if (nvmewqe == NULL) {
2483 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2484 				"6105 NVMET prep LS wqe: No WQE: "
2485 				"NPORT x%x oxid x%x ste %d\n",
2486 				ctxp->sid, ctxp->oxid, ctxp->state);
2487 		return NULL;
2488 	}
2489 
2490 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2491 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2492 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2493 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2494 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
2495 				"6106 NVMET prep LS wqe: No ndlp: "
2496 				"NPORT x%x oxid x%x ste %d\n",
2497 				ctxp->sid, ctxp->oxid, ctxp->state);
2498 		goto nvme_wqe_free_wqeq_exit;
2499 	}
2500 	ctxp->wqeq = nvmewqe;
2501 
2502 	/* prevent preparing wqe with NULL ndlp reference */
2503 	nvmewqe->context1 = lpfc_nlp_get(ndlp);
2504 	if (nvmewqe->context1 == NULL)
2505 		goto nvme_wqe_free_wqeq_exit;
2506 	nvmewqe->context2 = ctxp;
2507 
2508 	wqe = &nvmewqe->wqe;
2509 	memset(wqe, 0, sizeof(union lpfc_wqe));
2510 
2511 	/* Words 0 - 2 */
2512 	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2513 	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
2514 	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
2515 	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
2516 
2517 	/* Word 3 */
2518 
2519 	/* Word 4 */
2520 
2521 	/* Word 5 */
2522 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
2523 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
2524 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
2525 	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
2526 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
2527 
2528 	/* Word 6 */
2529 	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
2530 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2531 	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
2532 
2533 	/* Word 7 */
2534 	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
2535 	       CMD_XMIT_SEQUENCE64_WQE);
2536 	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
2537 	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
2538 	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
2539 
2540 	/* Word 8 */
2541 	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
2542 
2543 	/* Word 9 */
2544 	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
2545 	/* Needs to be set by caller */
2546 	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
2547 
2548 	/* Word 10 */
2549 	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
2550 	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
2551 	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
2552 	       LPFC_WQE_LENLOC_WORD12);
2553 	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
2554 
2555 	/* Word 11 */
2556 	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
2557 	       LPFC_WQE_CQ_ID_DEFAULT);
2558 	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
2559 	       OTHER_COMMAND);
2560 
2561 	/* Word 12 */
2562 	wqe->xmit_sequence.xmit_len = rspsize;
2563 
2564 	nvmewqe->retry = 1;
2565 	nvmewqe->vport = phba->pport;
2566 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2567 	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
2568 
2569 	/* Xmit NVMET response to remote NPORT <did> */
2570 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
2571 			"6039 Xmit NVMET LS response to remote "
2572 			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
2573 			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
2574 			rspsize);
2575 	return nvmewqe;
2576 
2577 nvme_wqe_free_wqeq_exit:
2578 	nvmewqe->context2 = NULL;
2579 	nvmewqe->context3 = NULL;
2580 	lpfc_sli_release_iocbq(phba, nvmewqe);
2581 	return NULL;
2582 }
2583 
2584 
2585 static struct lpfc_iocbq *
2586 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
2587 			struct lpfc_nvmet_rcv_ctx *ctxp)
2588 {
2589 	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
2590 	struct lpfc_nvmet_tgtport *tgtp;
2591 	struct sli4_sge *sgl;
2592 	struct lpfc_nodelist *ndlp;
2593 	struct lpfc_iocbq *nvmewqe;
2594 	struct scatterlist *sgel;
2595 	union lpfc_wqe128 *wqe;
2596 	struct ulp_bde64 *bde;
2597 	dma_addr_t physaddr;
2598 	int i, cnt, nsegs;
2599 	int do_pbde;
2600 	int xc = 1;
2601 
2602 	if (!lpfc_is_link_up(phba)) {
2603 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2604 				"6107 NVMET prep FCP wqe: link err:"
2605 				"NPORT x%x oxid x%x ste %d\n",
2606 				ctxp->sid, ctxp->oxid, ctxp->state);
2607 		return NULL;
2608 	}
2609 
2610 	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
2611 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2612 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
2613 	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
2614 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2615 				"6108 NVMET prep FCP wqe: no ndlp: "
2616 				"NPORT x%x oxid x%x ste %d\n",
2617 				ctxp->sid, ctxp->oxid, ctxp->state);
2618 		return NULL;
2619 	}
2620 
2621 	if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
2622 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2623 				"6109 NVMET prep FCP wqe: seg cnt err: "
2624 				"NPORT x%x oxid x%x ste %d cnt %d\n",
2625 				ctxp->sid, ctxp->oxid, ctxp->state,
2626 				rsp->sg_cnt);
2627 		return NULL;
2628 	}
2629 	nsegs = rsp->sg_cnt;
2630 
2631 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2632 	nvmewqe = ctxp->wqeq;
2633 	if (nvmewqe == NULL) {
2634 		/* Allocate buffer for  command wqe */
2635 		nvmewqe = ctxp->ctxbuf->iocbq;
2636 		if (nvmewqe == NULL) {
2637 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2638 					"6110 NVMET prep FCP wqe: No "
2639 					"WQE: NPORT x%x oxid x%x ste %d\n",
2640 					ctxp->sid, ctxp->oxid, ctxp->state);
2641 			return NULL;
2642 		}
2643 		ctxp->wqeq = nvmewqe;
2644 		xc = 0; /* create new XRI */
2645 		nvmewqe->sli4_lxritag = NO_XRI;
2646 		nvmewqe->sli4_xritag = NO_XRI;
2647 	}
2648 
2649 	/* Sanity check */
2650 	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
2651 	    (ctxp->entry_cnt == 1)) ||
2652 	    (ctxp->state == LPFC_NVMET_STE_DATA)) {
2653 		wqe = &nvmewqe->wqe;
2654 	} else {
2655 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
2656 				"6111 Wrong state NVMET FCP: %d  cnt %d\n",
2657 				ctxp->state, ctxp->entry_cnt);
2658 		return NULL;
2659 	}
2660 
2661 	sgl  = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
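	/*
	 * Build the WQE and SGL according to the transport operation:
	 * TSEND for read data (optionally carrying the response), TRECEIVE
	 * for write data, and TRSP for a standalone response.
	 */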
2662 	switch (rsp->op) {
2663 	case NVMET_FCOP_READDATA:
2664 	case NVMET_FCOP_READDATA_RSP:
2665 		/* From the tsend template, initialize words 7 - 11 */
2666 		memcpy(&wqe->words[7],
2667 		       &lpfc_tsend_cmd_template.words[7],
2668 		       sizeof(uint32_t) * 5);
2669 
2670 		/* Words 0 - 2 : The first sg segment */
2671 		sgel = &rsp->sg[0];
2672 		physaddr = sg_dma_address(sgel);
2673 		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2674 		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
2675 		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
2676 		wqe->fcp_tsend.bde.addrHigh =
2677 			cpu_to_le32(putPaddrHigh(physaddr));
2678 
2679 		/* Word 3 */
2680 		wqe->fcp_tsend.payload_offset_len = 0;
2681 
2682 		/* Word 4 */
2683 		wqe->fcp_tsend.relative_offset = ctxp->offset;
2684 
2685 		/* Word 5 */
2686 		wqe->fcp_tsend.reserved = 0;
2687 
2688 		/* Word 6 */
2689 		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
2690 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2691 		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
2692 		       nvmewqe->sli4_xritag);
2693 
2694 		/* Word 7 - set ar later */
2695 
2696 		/* Word 8 */
2697 		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
2698 
2699 		/* Word 9 */
2700 		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
2701 		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
2702 
2703 		/* Word 10 - set wqes later, in template xc=1 */
2704 		if (!xc)
2705 			bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
2706 
2707 		/* Word 11 - set sup, irsp, irsplen later */
2708 		do_pbde = 0;
2709 
2710 		/* Word 12 */
2711 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2712 
2713 		/* Setup 2 SKIP SGEs */
2714 		sgl->addr_hi = 0;
2715 		sgl->addr_lo = 0;
2716 		sgl->word2 = 0;
2717 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2718 		sgl->word2 = cpu_to_le32(sgl->word2);
2719 		sgl->sge_len = 0;
2720 		sgl++;
2721 		sgl->addr_hi = 0;
2722 		sgl->addr_lo = 0;
2723 		sgl->word2 = 0;
2724 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2725 		sgl->word2 = cpu_to_le32(sgl->word2);
2726 		sgl->sge_len = 0;
2727 		sgl++;
2728 		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
2729 			atomic_inc(&tgtp->xmt_fcp_read_rsp);
2730 
2731 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2732 
2733 			if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
2734 				if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
2735 					bf_set(wqe_sup,
2736 					       &wqe->fcp_tsend.wqe_com, 1);
2737 			} else {
2738 				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
2739 				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
2740 				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
2741 				       ((rsp->rsplen >> 2) - 1));
2742 				memcpy(&wqe->words[16], rsp->rspaddr,
2743 				       rsp->rsplen);
2744 			}
2745 		} else {
2746 			atomic_inc(&tgtp->xmt_fcp_read);
2747 
2748 			/* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
2749 			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
2750 		}
2751 		break;
2752 
2753 	case NVMET_FCOP_WRITEDATA:
2754 		/* From the treceive template, initialize words 3 - 11 */
2755 		memcpy(&wqe->words[3],
2756 		       &lpfc_treceive_cmd_template.words[3],
2757 		       sizeof(uint32_t) * 9);
2758 
2759 		/* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
2760 		wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
2761 		wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
2762 		wqe->fcp_treceive.bde.addrLow = 0;
2763 		wqe->fcp_treceive.bde.addrHigh = 0;
2764 
2765 		/* Word 4 */
2766 		wqe->fcp_treceive.relative_offset = ctxp->offset;
2767 
2768 		/* Word 6 */
2769 		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
2770 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2771 		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
2772 		       nvmewqe->sli4_xritag);
2773 
2774 		/* Word 7 */
2775 
2776 		/* Word 8 */
2777 		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
2778 
2779 		/* Word 9 */
2780 		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
2781 		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
2782 
2783 		/* Word 10 - in template xc=1 */
2784 		if (!xc)
2785 			bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
2786 
2787 		/* Word 11 - set pbde later */
2788 		if (phba->cfg_enable_pbde) {
2789 			do_pbde = 1;
2790 		} else {
2791 			bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
2792 			do_pbde = 0;
2793 		}
2794 
2795 		/* Word 12 */
2796 		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
2797 
2798 		/* Setup 2 SKIP SGEs */
2799 		sgl->addr_hi = 0;
2800 		sgl->addr_lo = 0;
2801 		sgl->word2 = 0;
2802 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2803 		sgl->word2 = cpu_to_le32(sgl->word2);
2804 		sgl->sge_len = 0;
2805 		sgl++;
2806 		sgl->addr_hi = 0;
2807 		sgl->addr_lo = 0;
2808 		sgl->word2 = 0;
2809 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2810 		sgl->word2 = cpu_to_le32(sgl->word2);
2811 		sgl->sge_len = 0;
2812 		sgl++;
2813 		atomic_inc(&tgtp->xmt_fcp_write);
2814 		break;
2815 
2816 	case NVMET_FCOP_RSP:
2817 		/* From the trsp template, initialize words 4 - 11 */
2818 		memcpy(&wqe->words[4],
2819 		       &lpfc_trsp_cmd_template.words[4],
2820 		       sizeof(uint32_t) * 8);
2821 
2822 		/* Words 0 - 2 */
2823 		physaddr = rsp->rspdma;
2824 		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2825 		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
2826 		wqe->fcp_trsp.bde.addrLow =
2827 			cpu_to_le32(putPaddrLow(physaddr));
2828 		wqe->fcp_trsp.bde.addrHigh =
2829 			cpu_to_le32(putPaddrHigh(physaddr));
2830 
2831 		/* Word 3 */
2832 		wqe->fcp_trsp.response_len = rsp->rsplen;
2833 
2834 		/* Word 6 */
2835 		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
2836 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2837 		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
2838 		       nvmewqe->sli4_xritag);
2839 
2840 		/* Word 7 */
2841 
2842 		/* Word 8 */
2843 		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
2844 
2845 		/* Word 9 */
2846 		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
2847 		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
2848 
2849 		/* Word 10 */
2850 		if (xc)
2851 			bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
2852 
2853 		/* Word 11 */
2854 		/* In template wqes=0 irsp=0 irsplen=0 - good response */
2855 		if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
2856 			/* Bad response - embed it */
2857 			bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
2858 			bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
2859 			bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
2860 			       ((rsp->rsplen >> 2) - 1));
2861 			memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
2862 		}
2863 		do_pbde = 0;
2864 
2865 		/* Word 12 */
2866 		wqe->fcp_trsp.rsvd_12_15[0] = 0;
2867 
2868 		/* Use rspbuf, NOT sg list */
2869 		nsegs = 0;
2870 		sgl->word2 = 0;
2871 		atomic_inc(&tgtp->xmt_fcp_rsp);
2872 		break;
2873 
2874 	default:
2875 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
2876 				"6064 Unknown Rsp Op %d\n",
2877 				rsp->op);
2878 		return NULL;
2879 	}
2880 
2881 	nvmewqe->retry = 1;
2882 	nvmewqe->vport = phba->pport;
2883 	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
2884 	nvmewqe->context1 = ndlp;
2885 
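	/*
	 * Build a data SGE for each scatterlist segment; for the first
	 * segment, words 13-15 are also populated as a PBDE when PBDE
	 * support is enabled.
	 */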
2886 	for_each_sg(rsp->sg, sgel, nsegs, i) {
2887 		physaddr = sg_dma_address(sgel);
2888 		cnt = sg_dma_len(sgel);
2889 		sgl->addr_hi = putPaddrHigh(physaddr);
2890 		sgl->addr_lo = putPaddrLow(physaddr);
2891 		sgl->word2 = 0;
2892 		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
2893 		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
2894 		if ((i+1) == rsp->sg_cnt)
2895 			bf_set(lpfc_sli4_sge_last, sgl, 1);
2896 		sgl->word2 = cpu_to_le32(sgl->word2);
2897 		sgl->sge_len = cpu_to_le32(cnt);
2898 		if (i == 0) {
2899 			bde = (struct ulp_bde64 *)&wqe->words[13];
2900 			if (do_pbde) {
2901 				/* Words 13-15  (PBDE) */
2902 				bde->addrLow = sgl->addr_lo;
2903 				bde->addrHigh = sgl->addr_hi;
2904 				bde->tus.f.bdeSize =
2905 					le32_to_cpu(sgl->sge_len);
2906 				bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
2907 				bde->tus.w = cpu_to_le32(bde->tus.w);
2908 			} else {
2909 				memset(bde, 0, sizeof(struct ulp_bde64));
2910 			}
2911 		}
2912 		sgl++;
2913 		ctxp->offset += cnt;
2914 	}
2915 	ctxp->state = LPFC_NVMET_STE_DATA;
2916 	ctxp->entry_cnt++;
2917 	return nvmewqe;
2918 }
2919 
2920 /**
2921  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
2922  * @phba: Pointer to HBA context object.
2923  * @cmdwqe: Pointer to driver command WQE object.
2924  * @wcqe: Pointer to driver response CQE object.
2925  *
2926  * The function is called from the SLI ring event handler with no
2927  * lock held. It is the completion handler for an NVME ABTS issued for
2928  * an FCP command and frees the memory resources used for the command.
2929  **/
2930 static void
2931 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
2932 			     struct lpfc_wcqe_complete *wcqe)
2933 {
2934 	struct lpfc_nvmet_rcv_ctx *ctxp;
2935 	struct lpfc_nvmet_tgtport *tgtp;
2936 	uint32_t result;
2937 	unsigned long flags;
2938 	bool released = false;
2939 
2940 	ctxp = cmdwqe->context2;
2941 	result = wcqe->parameter;
2942 
2943 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
2944 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
2945 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
2946 
2947 	spin_lock_irqsave(&ctxp->ctxlock, flags);
2948 	ctxp->state = LPFC_NVMET_STE_DONE;
2949 
2950 	/* Check if we already received a free context call
2951 	 * and we have completed processing an abort situation.
2952 	 */
2953 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
2954 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
2955 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2956 		list_del_init(&ctxp->list);
2957 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
2958 		released = true;
2959 	}
2960 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
2961 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
2962 	atomic_inc(&tgtp->xmt_abort_rsp);
2963 
2964 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
2965 			"6165 ABORT cmpl: oxid x%x flg x%x (%d) "
2966 			"WCQE: %08x %08x %08x %08x\n",
2967 			ctxp->oxid, ctxp->flag, released,
2968 			wcqe->word0, wcqe->total_data_placed,
2969 			result, wcqe->word3);
2970 
2971 	cmdwqe->context2 = NULL;
2972 	cmdwqe->context3 = NULL;
2973 	/*
2974 	 * if transport has released ctx, then can reuse it. Otherwise,
2975 	 * will be recycled by transport release call.
2976 	 */
2977 	if (released)
2978 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
2979 
2980 	/* This is the iocbq for the abort, not the command */
2981 	lpfc_sli_release_iocbq(phba, cmdwqe);
2982 
2983 	/* Since iaab/iaar are NOT set, there is no work left.
2984 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
2985 	 * should have been called already.
2986 	 */
2987 }
2988 
2989 /**
2990  * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
2991  * @phba: Pointer to HBA context object.
2992  * @cmdwqe: Pointer to driver command WQE object.
2993  * @wcqe: Pointer to driver response CQE object.
2994  *
2995  * The function is called from the SLI ring event handler with no
2996  * lock held. It is the completion handler for an NVME ABTS issued for
2997  * an FCP command and frees the memory resources used for the command.
2998  **/
2999 static void
3000 lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3001 			       struct lpfc_wcqe_complete *wcqe)
3002 {
3003 	struct lpfc_nvmet_rcv_ctx *ctxp;
3004 	struct lpfc_nvmet_tgtport *tgtp;
3005 	unsigned long flags;
3006 	uint32_t result;
3007 	bool released = false;
3008 
3009 	ctxp = cmdwqe->context2;
3010 	result = wcqe->parameter;
3011 
3012 	if (!ctxp) {
3013 		/* if context is clear, the related IO already completed */
3014 		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3015 				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
3016 				wcqe->word0, wcqe->total_data_placed,
3017 				result, wcqe->word3);
3018 		return;
3019 	}
3020 
3021 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3022 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3023 	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
3024 		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
3025 
3026 	/* Sanity check */
3027 	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
3028 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3029 				"6112 ABTS Wrong state:%d oxid x%x\n",
3030 				ctxp->state, ctxp->oxid);
3031 	}
3032 
3033 	/* Check if we already received a free context call
3034 	 * and we have completed processing an abort situation.
3035 	 */
3036 	ctxp->state = LPFC_NVMET_STE_DONE;
3037 	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
3038 	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
3039 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3040 		list_del_init(&ctxp->list);
3041 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3042 		released = true;
3043 	}
3044 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3045 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3046 	atomic_inc(&tgtp->xmt_abort_rsp);
3047 
3048 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3049 			"6316 ABTS cmpl oxid x%x flg x%x (%x) "
3050 			"WCQE: %08x %08x %08x %08x\n",
3051 			ctxp->oxid, ctxp->flag, released,
3052 			wcqe->word0, wcqe->total_data_placed,
3053 			result, wcqe->word3);
3054 
3055 	cmdwqe->context2 = NULL;
3056 	cmdwqe->context3 = NULL;
3057 	/*
3058 	 * if transport has released ctx, then can reuse it. Otherwise,
3059 	 * will be recycled by transport release call.
3060 	 */
3061 	if (released)
3062 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3063 
3064 	/* Since iaab/iaar are NOT set, there is no work left.
3065 	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
3066 	 * should have been called already.
3067 	 */
3068 }
3069 
3070 /**
3071  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
3072  * @phba: Pointer to HBA context object.
3073  * @cmdwqe: Pointer to driver command WQE object.
3074  * @wcqe: Pointer to driver response CQE object.
3075  *
3076  * The function is called from the SLI ring event handler with no
3077  * lock held. It is the completion handler for an NVME ABTS issued for
3078  * an LS command and frees the memory resources used for the command.
3079  **/
3080 static void
3081 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
3082 			    struct lpfc_wcqe_complete *wcqe)
3083 {
3084 	struct lpfc_nvmet_rcv_ctx *ctxp;
3085 	struct lpfc_nvmet_tgtport *tgtp;
3086 	uint32_t result;
3087 
3088 	ctxp = cmdwqe->context2;
3089 	result = wcqe->parameter;
3090 
3091 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3092 	atomic_inc(&tgtp->xmt_ls_abort_cmpl);
3093 
3094 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3095 			"6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
3096 			ctxp, wcqe->word0, wcqe->total_data_placed,
3097 			result, wcqe->word3);
3098 
3099 	if (!ctxp) {
3100 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3101 				"6415 NVMET LS Abort No ctx: WCQE: "
3102 				 "%08x %08x %08x %08x\n",
3103 				wcqe->word0, wcqe->total_data_placed,
3104 				result, wcqe->word3);
3105 
3106 		lpfc_sli_release_iocbq(phba, cmdwqe);
3107 		return;
3108 	}
3109 
3110 	if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
3111 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3112 				"6416 NVMET LS abort cmpl state mismatch: "
3113 				"oxid x%x: %d %d\n",
3114 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3115 	}
3116 
3117 	cmdwqe->context2 = NULL;
3118 	cmdwqe->context3 = NULL;
3119 	lpfc_sli_release_iocbq(phba, cmdwqe);
3120 	kfree(ctxp);
3121 }
3122 
3123 static int
3124 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
3125 			     struct lpfc_nvmet_rcv_ctx *ctxp,
3126 			     uint32_t sid, uint16_t xri)
3127 {
3128 	struct lpfc_nvmet_tgtport *tgtp;
3129 	struct lpfc_iocbq *abts_wqeq;
3130 	union lpfc_wqe128 *wqe_abts;
3131 	struct lpfc_nodelist *ndlp;
3132 
3133 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3134 			"6067 ABTS: sid %x xri x%x/x%x\n",
3135 			sid, xri, ctxp->wqeq->sli4_xritag);
3136 
3137 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3138 
3139 	ndlp = lpfc_findnode_did(phba->pport, sid);
3140 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3141 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3142 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3143 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3144 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3145 				"6134 Drop ABTS - wrong NDLP state x%x.\n",
3146 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3147 
3148 		/* No failure to an ABTS request. */
3149 		return 0;
3150 	}
3151 
3152 	abts_wqeq = ctxp->wqeq;
3153 	wqe_abts = &abts_wqeq->wqe;
3154 
3155 	/*
3156 	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
3157 	 * that were initialized in lpfc_sli4_nvmet_alloc.
3158 	 */
3159 	memset(wqe_abts, 0, sizeof(union lpfc_wqe));
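	/*
	 * The ABTS is carried in an XMIT_SEQUENCE64 WQE with R_CTL set to
	 * BA_ABTS and TYPE set to BLS, addressed using the RPI of the
	 * originating N_Port.
	 */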
3160 
3161 	/* Word 5 */
3162 	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
3163 	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
3164 	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
3165 	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
3166 	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
3167 
3168 	/* Word 6 */
3169 	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
3170 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
3171 	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
3172 	       abts_wqeq->sli4_xritag);
3173 
3174 	/* Word 7 */
3175 	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
3176 	       CMD_XMIT_SEQUENCE64_WQE);
3177 	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
3178 	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
3179 	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
3180 
3181 	/* Word 8 */
3182 	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
3183 
3184 	/* Word 9 */
3185 	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
3186 	/* Needs to be set by caller */
3187 	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
3188 
3189 	/* Word 10 */
3190 	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
3191 	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
3192 	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
3193 	       LPFC_WQE_LENLOC_WORD12);
3194 	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
3195 	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
3196 
3197 	/* Word 11 */
3198 	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
3199 	       LPFC_WQE_CQ_ID_DEFAULT);
3200 	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
3201 	       OTHER_COMMAND);
3202 
3203 	abts_wqeq->vport = phba->pport;
3204 	abts_wqeq->context1 = ndlp;
3205 	abts_wqeq->context2 = ctxp;
3206 	abts_wqeq->context3 = NULL;
3207 	abts_wqeq->rsvd2 = 0;
3208 	/* hba_wqidx should already be setup from command we are aborting */
3209 	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
3210 	abts_wqeq->iocb.ulpLe = 1;
3211 
3212 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3213 			"6069 Issue ABTS to xri x%x reqtag x%x\n",
3214 			xri, abts_wqeq->iotag);
3215 	return 1;
3216 }
3217 
3218 static int
3219 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
3220 			       struct lpfc_nvmet_rcv_ctx *ctxp,
3221 			       uint32_t sid, uint16_t xri)
3222 {
3223 	struct lpfc_nvmet_tgtport *tgtp;
3224 	struct lpfc_iocbq *abts_wqeq;
3225 	struct lpfc_nodelist *ndlp;
3226 	unsigned long flags;
3227 	u8 opt;
3228 	int rc;
3229 
3230 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3231 	if (!ctxp->wqeq) {
3232 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
3233 		ctxp->wqeq->hba_wqidx = 0;
3234 	}
3235 
3236 	ndlp = lpfc_findnode_did(phba->pport, sid);
3237 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
3238 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3239 	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3240 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3241 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3242 				"6160 Drop ABORT - wrong NDLP state x%x.\n",
3243 				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
3244 
3245 		/* No failure to an ABTS request. */
3246 		spin_lock_irqsave(&ctxp->ctxlock, flags);
3247 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3248 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3249 		return 0;
3250 	}
3251 
3252 	/* Issue ABTS for this WQE based on iotag */
3253 	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
3254 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3255 	if (!ctxp->abort_wqeq) {
3256 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3257 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3258 				"6161 ABORT failed: No wqeqs: "
3259 				"xri: x%x\n", ctxp->oxid);
3260 		/* No failure to an ABTS request. */
3261 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3262 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3263 		return 0;
3264 	}
3265 	abts_wqeq = ctxp->abort_wqeq;
3266 	ctxp->state = LPFC_NVMET_STE_ABORT;
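	/*
	 * If an ABTS was already received from the initiator for this
	 * exchange, request the INHIBIT_ABORT option when the abort WQE
	 * is built below.
	 */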
3267 	opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0;
3268 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3269 
3270 	/* Log the ABORT request before it is submitted to the adapter. */
3271 	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
3272 			"6162 ABORT Request to rport DID x%06x "
3273 			"for xri x%x x%x\n",
3274 			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
3275 
3276 	/* If the HBA is being reset, this flag is set.  It is cleared
3277 	 * when the reset completes and the rings are reestablished.
3278 	 */
3279 	spin_lock_irqsave(&phba->hbalock, flags);
3280 	/* Driver-queued commands are in the process of being flushed */
3281 	if (phba->hba_flag & HBA_IOQ_FLUSH) {
3282 		spin_unlock_irqrestore(&phba->hbalock, flags);
3283 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3284 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3285 				"6163 Driver in reset cleanup - flushing "
3286 				"NVME Req now. hba_flag x%x oxid x%x\n",
3287 				phba->hba_flag, ctxp->oxid);
3288 		lpfc_sli_release_iocbq(phba, abts_wqeq);
3289 		spin_lock_irqsave(&ctxp->ctxlock, flags);
3290 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3291 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3292 		return 0;
3293 	}
3294 
3295 	/* An abort for this exchange is already in progress */
3296 	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
3297 		spin_unlock_irqrestore(&phba->hbalock, flags);
3298 		atomic_inc(&tgtp->xmt_abort_rsp_error);
3299 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
3300 				"6164 Outstanding NVME I/O Abort Request "
3301 				"still pending on oxid x%x\n",
3302 				ctxp->oxid);
3303 		lpfc_sli_release_iocbq(phba, abts_wqeq);
3304 		spin_lock_irqsave(&ctxp->ctxlock, flags);
3305 		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3306 		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3307 		return 0;
3308 	}
3309 
3310 	/* Ready - mark the outstanding I/O as aborted by the driver. */
3311 	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
3312 
3313 	lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
3314 
3315 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
3316 	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
3317 	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
3318 	abts_wqeq->iocb_cmpl = NULL;
3319 	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
3320 	abts_wqeq->context2 = ctxp;
3321 	abts_wqeq->vport = phba->pport;
3322 	if (!ctxp->hdwq)
3323 		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3324 
3325 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3326 	spin_unlock_irqrestore(&phba->hbalock, flags);
3327 	if (rc == WQE_SUCCESS) {
3328 		atomic_inc(&tgtp->xmt_abort_sol);
3329 		return 0;
3330 	}
3331 
3332 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3333 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3334 	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
3335 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3336 	lpfc_sli_release_iocbq(phba, abts_wqeq);
3337 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3338 			"6166 Failed ABORT issue_wqe with status x%x "
3339 			"for oxid x%x.\n",
3340 			rc, ctxp->oxid);
3341 	return 1;
3342 }
3343 
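/**
 * lpfc_nvmet_unsol_fcp_issue_abort - Issue an ABTS for an unsolicited FCP I/O
 * @phba: pointer to the HBA context.
 * @ctxp: NVMET receive context for the exchange being aborted.
 * @sid: DID of the remote port the command came from.
 * @xri: exchange identifier of the I/O to abort.
 *
 * Builds an ABTS XMIT_SEQUENCE WQE on the context's iocbq and submits it on
 * the context's hardware queue.  Returns 0 on success; on failure the abort
 * flags are cleared, the context buffer is reposted if this path owned its
 * release, and 1 is returned.
 **/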
3344 static int
3345 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
3346 				 struct lpfc_nvmet_rcv_ctx *ctxp,
3347 				 uint32_t sid, uint16_t xri)
3348 {
3349 	struct lpfc_nvmet_tgtport *tgtp;
3350 	struct lpfc_iocbq *abts_wqeq;
3351 	unsigned long flags;
3352 	bool released = false;
3353 	int rc;
3354 
3355 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3356 	if (!ctxp->wqeq) {
3357 		ctxp->wqeq = ctxp->ctxbuf->iocbq;
3358 		ctxp->wqeq->hba_wqidx = 0;
3359 	}
3360 
3361 	if (ctxp->state == LPFC_NVMET_STE_FREE) {
3362 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3363 				"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
3364 				ctxp->state, ctxp->entry_cnt, ctxp->oxid);
3365 		rc = WQE_BUSY;
3366 		goto aerr;
3367 	}
3368 	ctxp->state = LPFC_NVMET_STE_ABORT;
3369 	ctxp->entry_cnt++;
3370 	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
3371 	if (rc == 0)
3372 		goto aerr;
3373 
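	/* The ABTS WQE was built successfully; point it at the unsolicited
	 * abort completion handler and submit it on the context's hardware
	 * queue.
	 */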
3374 	spin_lock_irqsave(&phba->hbalock, flags);
3375 	abts_wqeq = ctxp->wqeq;
3376 	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
3377 	abts_wqeq->iocb_cmpl = NULL;
3378 	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
3379 	if (!ctxp->hdwq)
3380 		ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
3381 
3382 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3383 	spin_unlock_irqrestore(&phba->hbalock, flags);
3384 	if (rc == WQE_SUCCESS)
3385 		return 0;
3387 
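/* Abort could not be built or issued: drop any pending context release,
 * clear the abort flags, and repost the context buffer if this path owned
 * its release.
 */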
3388 aerr:
3389 	spin_lock_irqsave(&ctxp->ctxlock, flags);
3390 	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
3391 		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3392 		list_del_init(&ctxp->list);
3393 		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
3394 		released = true;
3395 	}
3396 	ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
3397 	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
3398 
3399 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3400 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3401 			"6135 Failed to Issue ABTS for oxid x%x. Status x%x "
3402 			"(%x)\n",
3403 			ctxp->oxid, rc, released);
3404 	if (released)
3405 		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
3406 	return 1;
3407 }
3408 
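/**
 * lpfc_nvmet_unsol_ls_issue_abort - Issue an ABTS for an unsolicited LS exchange
 * @phba: pointer to the HBA context.
 * @ctxp: NVMET receive context for the LS exchange being aborted.
 * @sid: DID of the remote port the LS request came from.
 * @xri: exchange identifier of the LS exchange to abort.
 *
 * Validates the LS context state, builds the ABTS WQE, and submits it.  The
 * context is freed on every failure path.  Always returns 0; failures are
 * only logged.
 **/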
3409 static int
3410 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
3411 				struct lpfc_nvmet_rcv_ctx *ctxp,
3412 				uint32_t sid, uint16_t xri)
3413 {
3414 	struct lpfc_nvmet_tgtport *tgtp;
3415 	struct lpfc_iocbq *abts_wqeq;
3416 	unsigned long flags;
3417 	int rc;
3418 
3419 	if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
3420 	    (ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
3421 		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3422 		ctxp->entry_cnt++;
3423 	} else {
3424 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
3425 				"6418 NVMET LS abort state mismatch "
3426 				"IO x%x: %d %d\n",
3427 				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
3428 		ctxp->state = LPFC_NVMET_STE_LS_ABORT;
3429 	}
3430 
3431 	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
3432 	if (!ctxp->wqeq) {
3433 		/* Issue ABTS for this WQE based on iotag */
3434 		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
3435 		if (!ctxp->wqeq) {
3436 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3437 					"6068 Abort failed: No wqeqs: "
3438 					"xri: x%x\n", xri);
3439 			/* Do not report failure for an ABTS request. */
3440 			kfree(ctxp);
3441 			return 0;
3442 		}
3443 	}
3444 	abts_wqeq = ctxp->wqeq;
3445 
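	/* Build the ABTS WQE on the acquired iocbq; a zero return means the
	 * build was dropped and no abort can be sent.
	 */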
3446 	if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
3447 		rc = WQE_BUSY;
3448 		goto out;
3449 	}
3450 
3451 	spin_lock_irqsave(&phba->hbalock, flags);
3452 	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
3453 	abts_wqeq->iocb_cmpl = NULL;
3454 	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
3455 	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
3456 	spin_unlock_irqrestore(&phba->hbalock, flags);
3457 	if (rc == WQE_SUCCESS) {
3458 		atomic_inc(&tgtp->xmt_abort_unsol);
3459 		return 0;
3460 	}
3461 out:
3462 	atomic_inc(&tgtp->xmt_abort_rsp_error);
3463 	abts_wqeq->context2 = NULL;
3464 	abts_wqeq->context3 = NULL;
3465 	lpfc_sli_release_iocbq(phba, abts_wqeq);
3466 	kfree(ctxp);
3467 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
3468 			"6056 Failed to Issue ABTS. Status x%x\n", rc);
3469 	return 0;
3470 }
3471