// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

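/*
 * qla_nvme_register_remote - register an fcport with the FC-NVMe transport
 * @vha: SCSI host the remote port was discovered on
 * @fcport: remote port to register
 *
 * Registers @fcport as an NVMe remote port if the host has NVMe enabled
 * and the PRLI service parameters advertise a target or discovery role.
 * Returns 0 on success (or when registration is skipped) and the
 * transport's error code otherwise.
 */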
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
		(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		       "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		       "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

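/*
 * qla_nvme_alloc_queue - allocate a hardware queue for NVMe traffic
 * @lport: FC-NVMe local port the queue belongs to
 * @qidx: transport queue index (admin queue and first I/O queue map to 0)
 * @qsize: requested queue depth (only logged here)
 * @handle: out parameter receiving the qpair used as the hw_queue_handle
 *
 * Hands back the existing qpair when one is already mapped at @qidx,
 * otherwise creates a new one; the base qpair is used when the HBA
 * exposes no extra queue pairs.
 */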
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map the admin queue and the first I/O queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx =%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use the base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			       "Returning existing qpair of %p for idx=%x\n",
			       *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			       "Failed to allocate qpair\n");
			return -EINVAL;
		}
	}
	*handle = qpair;

	return 0;
}

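/*
 * Final kref release for an FCP command SRB: detach the SRB from its
 * nvme_private under cmd_lock, fill in the transport's completion
 * status, invoke the request's ->done() callback, and return the SRB
 * to its qpair.
 */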
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;
	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

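/*
 * Worker that aborts an outstanding NVMe command on behalf of the
 * transport.  If the firmware is running and the port is still present,
 * it issues the abort via isp_ops->abort_command().  The kref taken
 * when the work was scheduled is dropped here, unless the driver is
 * configured to wait for ABTS completion (ql2xabts_wait_nvme), in
 * which case qla24xx_abort_sp_done() drops it.
 */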
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started || fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

	/*
	 * Return without decreasing the kref so that I/O requests
	 * wait until the ABTS completes.  The kref is decreased
	 * in qla24xx_abort_sp_done().
	 */
	if (ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
		return;
out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

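/*
 * qla_nvme_ls_req - transport entry point for sending an NVMe LS request
 *
 * Allocates an SRB, maps the request buffer for DMA, and hands the
 * IOCB to the firmware via qla2x00_start_sp().  Completion is reported
 * back to the transport from qla_nvme_sp_ls_done() through ls_work.
 */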
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb   *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int     rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t           *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
	    fd->rqstlen, DMA_TO_DEVICE);
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

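/*
 * Transport entry point for aborting an FCP command.  Mirrors
 * qla_nvme_ls_abort(): take a kref on the SRB under cmd_lock (bailing
 * out if the command has already completed) and defer the actual abort
 * to qla_nvme_abort_work().
 */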
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

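/*
 * qla2x00_start_nvme_mq - build and ring a Command Type NVME IOCB
 *
 * Runs under the qpair lock.  Reserves a handle and ring space,
 * constructs the command packet (data direction, first-burst and EDIF
 * flags, N_Port ID, command/response IU descriptors), chains extra
 * data segments through Continuation Type 1 IOCBs when more than one
 * DSD is needed, and finally updates the request-queue in pointer to
 * hand the IOCB to the firmware.
 */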
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long   flags;
	uint32_t        *clr_ptr;
	uint32_t        handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t        cnt, i;
	uint16_t        req_cnt;
	uint16_t        tot_dsds;
	uint16_t	avail_dsds;
	struct dsd64	*cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	int             rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer - how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
			NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			    sp->fcport->nvme_first_burst_size) ||
				(sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}

	if (sp->fcport->edif.enable && fd->io_dir != 0)
		cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);

	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Don't count NVMe async commands due to their long timeout */
	if (!nvme->u.nvme.aen_op)
		sp->qpair->cmd_cnt++;

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

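/*
 * qla_nvme_post_cmd - post an FCP command to the hardware queue
 *
 * Transport fcp_io() entry point.  Validates that the remote port is
 * still registered and usable, allocates an SRB from the qpair, wires
 * up the kref release path, and queues the command through
 * qla2x00_start_nvme_mq().  Returns -EBUSY to stall the I/O queue when
 * the device is resetting or resources are exhausted.
 */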
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (unlikely(!qpair || !fcport || fcport->deleted))
		return -EBUSY;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	vha = fcport->vha;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still
	 * sending IO's, return busy back to stall the IO Q.  This happens
	 * when the link goes away and fw hasn't notified us yet, but IO's
	 * are being returned.  If the dev comes back quickly we won't
	 * exhaust the IO retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	init_waitqueue_head(&sp->nvme_ls_waitq);
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	sp->cmd_sp = sp;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		wake_up(&sp->nvme_ls_waitq);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue	= NULL,
	.ls_req		= qla_nvme_ls_req,
	.ls_abort	= qla_nvme_ls_abort,
	.fcp_io		= qla_nvme_post_cmd,
	.fcp_abort	= qla_nvme_fcp_abort,
	.max_hw_queues  = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

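/*
 * Unregister @fcport from the FC-NVMe transport and wait for the
 * remote port deletion to finish.  When the driver is being removed,
 * dev_loss_tmo is forced to 0 so the transport tears the port down
 * immediately.
 */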
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, fcport->vha, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

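/*
 * qla_nvme_register_hba - register the HBA as an FC-NVMe local port
 *
 * Caps max_hw_queues at the number of available qpairs, fills in the
 * local port's WWNN/WWPN and Port ID, and registers with the transport.
 * On success the new local port's private pointer is set to @vha.
 */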
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}

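/*
 * Tune the Abort IOCB when waiting for ABTS completion is enabled:
 * request a driver-specified ABTS retry count and a response timeout
 * of twice R_A_TOV.
 */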
void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	struct qla_hw_data *ha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	ha = orig_sp->fcport->vha->hw;

	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
	/* Use Driver Specified Retry Count */
	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
	abt->drv.abts_rty_cnt = cpu_to_le16(2);
	/* Use the specified response timeout */
	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
	/* Set it to 2 * R_A_TOV, in seconds */
	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
}

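/*
 * Log the completion status of an Abort IOCB.  Only meaningful when
 * the driver waits for ABTS completion (ql2xabts_wait_nvme).
 */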
void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
{
	u16	comp_status;
	struct scsi_qla_host *vha;

	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;

	vha = orig_sp->fcport->vha;

	comp_status = le16_to_cpu(abt->comp_status);
	switch (comp_status) {
	case CS_RESET:		/* reset event aborted */
	case CS_ABORTED:	/* IOCB was cleaned */
	case CS_TIMEOUT:
	/* N_Port handle is not currently logged in */
	case CS_PORT_UNAVAILABLE:
	/* N_Port handle was logged out while waiting for ABTS to complete */
	case CS_PORT_LOGGED_OUT:
	/* Firmware found that the port name changed */
	case CS_PORT_CONFIG_CHG:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09d,
		       "Abort I/O IOCB completed with error, comp_status=%x\n",
		       comp_status);
		break;

	/* BA_RJT was received for the ABTS */
	case CS_REJECT_RECEIVED:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		       "BA_RJT was received for the ABTS, rjt_vendorUnique = %u",
		       abt->fw.ba_rjt_vendorUnique);
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
		       "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
		       abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
		break;

	case CS_COMPLETE:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09f,
		       "IOCB request completed successfully, comp_status=%x\n",
		       comp_status);
		break;

	case CS_IOCB_ERROR:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a0,
		       "IOCB request failed, comp_status=%x\n", comp_status);
		break;

	default:
		ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a1,
		       "Invalid Abort IO IOCB Completion Status %x\n",
		       comp_status);
		break;
	}
}

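/*
 * Drop the command kref on behalf of the ABTS-wait path; a no-op
 * unless ql2xabts_wait_nvme is enabled for the SRB.
 */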
inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
{
	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
		return;
	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
}
847